Dataset schema (one record per source file; ⌀ marks nullable columns):
hexsha: string, length 40 | size: int64, 3 to 1.03M | ext: string, 10 classes | lang: string, 1 class
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string, length 3 to 972
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string, length 6 to 130
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list, 1 to 10 entries
max_stars_count: int64, 1 to 191k ⌀ | max_issues_count: int64, 1 to 116k ⌀ | max_forks_count: int64, 1 to 105k ⌀
max_stars_repo_stars_event_min/max_datetime, max_issues_repo_issues_event_min/max_datetime, max_forks_repo_forks_event_min/max_datetime: string, length 24 ⌀
content: string, length 3 to 1.03M | avg_line_length: float64, 1.13 to 941k | max_line_length: int64, 2 to 941k | alphanum_fraction: float64, 0 to 1
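As a rough illustration of how such records could be consumed, the following minimal sketch assumes the dump is available as JSON Lines with the field names listed above; the file name "records.jsonl" is a placeholder, not part of the dataset itself.

import json

def iter_records(path="records.jsonl"):
    """Yield one dict per source-file record (schema fields as listed above)."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

if __name__ == "__main__":
    for rec in iter_records():
        # print a short summary: commit, path, licenses, and line-length stats
        licenses = rec.get("max_stars_repo_licenses") or []
        print(rec["hexsha"][:8], rec["max_stars_repo_path"], licenses,
              rec["avg_line_length"], rec["max_line_length"])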
hexsha: 4431c36c4fd3e408f1bf638719ad1ac60b363c44 | size: 45,011 | ext: py | lang: Python
repo_path: main.py | repo_name: Shabaquib/Library-Scraper-Using-BeautifulSoup | repo_head_hexsha: fa454521530471b48c346531c10041d45787a6a7 | licenses: ["MIT"] (same for the stars/issues/forks column groups)
max_stars/max_issues/max_forks counts and event datetimes: null
content:
# Z library
import wx
from wx.lib import scrolledpanel
import webbrowser
import io
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import pyscreenshot
import win32gui
class PanelBase(wx.Panel):
'''Base Panel class for all panels'''
def __init__(self, parent, label, on_click):
super(PanelBase, self).__init__(parent, id=-1)
self.parent_frame = parent
self.SetSize(704, 421)
class MainPanel(PanelBase):
def __init__(self, parent, on_click):
super(MainPanel, self).__init__(parent, 'MainPanel', on_click)
self.width, self.height = 704, 421
self.on_click = on_click
self.Main_UI()
def Main_UI(self):
''' Create the UI on start '''
self.font1 = wx.Font(15, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Times New Roman')
self.main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_panel_sizer)
self.search_sub_sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
self.search_sub_sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
self.recom_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.logo_image = wx.Image(r'.\\Resources\\icons\\lib-logo.png')
self.logo_image = self.logo_image.Rescale(width=220, height=80, quality=wx.IMAGE_QUALITY_NORMAL)
self.search_panel = wx.Panel(self, id=-1, size=(self.width, int(3 * self.height / 10)), pos=(0, 0),
style=wx.SIMPLE_BORDER)
self.search_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.search_panel.SetSizer(self.search_sizer)
self.search_sizer.Add(self.search_sub_sizer_1, 5, wx.EXPAND, 15)
self.search_sizer.Add(self.search_sub_sizer_2, 5, wx.EXPAND, 15)
self.recom_panel = scrolledpanel.ScrolledPanel(self, id=-1, size=(self.width, int(7 * self.height / 10)),
pos=(0, int(3 * self.height / 10)),
style=wx.TAB_TRAVERSAL)
print(self.search_panel.GetSize())
self.recom_panel.SetupScrolling()
self.recom_panel.SetSizer(self.recom_sizer)
# initialize scraping and save the page for later use
self.url = "https://1lib.in/"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(self.url, headers=hdr)
self.page = urlopen(req)
self.page_read = self.page.read()
self.page.close()
self.page_soup = BeautifulSoup(self.page_read, features='html.parser')
self.logo = wx.BitmapButton(self.search_panel, id=-1, bitmap=wx.Bitmap(self.logo_image), size=(230, 100),
pos=(50, 0))
self.logo.SetToolTip(wx.ToolTip("Z-Library"))
self.search_sub_sizer_1.Add(self.logo, 0, wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 25)
self.search_box = wx.TextCtrl(self.search_panel, id=-1, value="", size=(300, 25),
style=wx.TE_PROCESS_ENTER)
self.search_box.SetFont(self.font1)
self.search_box.Bind(wx.EVT_TEXT_ENTER, self.on_click)
self.search_sub_sizer_2.Add(self.search_box, 0, wx.ALIGN_CENTER_VERTICAL)
self.main_panel_sizer.Add(self.search_panel, 0, wx.ALL)
self.main_panel_sizer.Add(self.recom_panel, 0, wx.ALL)
self.recom_UI()
def recom_UI(self):
'''creates the GUI holder for the recommended books in advance'''
self.recom_list = [None] * 20
self.def_image = wx.Image(r".\\Resources\\icons\\book-icon.png")
for x in range(20):
self.recom_list[x] = wx.BitmapButton(self.recom_panel, id=-1, bitmap=wx.Bitmap(self.def_image),
size=(161, 250), style=wx.BU_NOTEXT)
self.recom_sizer.Add(self.recom_list[x], 0, wx.ALL, 5)
self.recom_panel.Refresh()
self.recom_panel.Show()
self.scrape_recom()
def scrape_recom(self):
'''Called just after the holders are created; scrapes the data and inserts it into the book holders'''
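# The front page appears to present each recommended book as a 'brick checkBookDownloaded'
# <div> whose <a> carries the book's href and title and whose nested <img> holds the cover
# thumbnail; the selectors below rely on that markup and will break if the site changes it.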
self.containers = self.page_soup.find_all('div', class_='brick checkBookDownloaded')
print(len(self.containers))
x = 0
for container in self.containers[:len(self.recom_list)]:  # avoid IndexError if the page lists more than 20 books
print(container.a['href'])
print(container.a['title'])
width, height = self.recom_list[x].GetSize()  # GetSizeTuple() no longer exists in wxPython Phoenix
self.recom_list[x].SetToolTip(wx.ToolTip(container.a['title']))
self.recom_list[x].SetLabel("https://1lib.in" + str(container.a['href']))
self.recom_list[x].Bind(wx.EVT_BUTTON, self.on_click)
self.img_stream = io.BytesIO(urlopen(Request(container.a.img['src'])).read())
self.image_buff = wx.Image(self.img_stream)
self.image_buff = self.image_buff.Rescale(width, height, wx.IMAGE_QUALITY_NORMAL)
self.recom_list[x].SetBitmap(wx.Bitmap(self.image_buff))
x += 1
class LogoPanel(PanelBase):
def __init__(self, parent, on_click):
super(LogoPanel, self).__init__(parent, 'LogoPanel', on_click)
class DetailPanel1(PanelBase):
'''
Panel class if a book is selected from the main page recommendations
'''
def __init__(self, parent, on_click):
super(DetailPanel1, self).__init__(parent, 'DetailPanel1', on_click)
self.width, self.height = 704, 421
self.SetSize(self.width, self.height)
self.on_click = on_click
self.main_scrape_UI_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(self.main_scrape_UI_sizer)
self.def_image = wx.Image(r".\\Resources\\icons\\book-icon.png")
self.detail_panel = wx.Panel(self, id=-1, size=(int(self.width / 2) + 90, self.height), style=wx.BORDER_NONE)
self.detail_sizer = wx.BoxSizer(wx.VERTICAL)
self.detail_panel.SetSizer(self.detail_sizer)
self.main_scrape_UI_sizer.Add(self.detail_panel)
self.index_panel = scrolledpanel.ScrolledPanel(self, id=-1, size=(int(self.width / 2) - 90, self.height),
style=wx.TE_BESTWRAP)
self.main_scrape_UI_sizer.Add(self.index_panel)
self.detail_sub_panel_11 = wx.Panel(self.detail_panel, id=-1,
size=(int(self.width / 2) + 90, int(self.height / 2) - 30),
style=wx.NO_BORDER)
self.detail_sub_panel_11_sizer = wx.BoxSizer(wx.HORIZONTAL)
print(self.detail_sub_panel_11.GetClientSize())
self.detail_sub_panel_11.SetSizer(self.detail_sub_panel_11_sizer)
self.detail_sub_panel_12 = wx.Panel(self.detail_panel, id=-1,
size=(int(self.width / 2) + 90, int(self.height / 2) + 30),
style=wx.NO_BORDER)
self.detail_sub_panel_12_sizer = wx.BoxSizer(wx.VERTICAL)
self.detail_sub_panel_12.SetSizer(self.detail_sub_panel_12_sizer)
self.detail_sizer.Add(self.detail_sub_panel_11)
self.detail_sizer.Add(self.detail_sub_panel_12)
# sub sizer 1
self.book_bitmap = wx.BitmapButton(self.detail_sub_panel_11, id=-1, size=(120, int((2 * self.height) / 5)),
bitmap=wx.Bitmap(self.def_image), style=wx.BU_BOTTOM)
self.detail_sub_panel_11_sizer.Add(self.book_bitmap, 0, wx.LEFT | wx.TOP | wx.BOTTOM, 5)
self.book_detail_panel = wx.Panel(self.detail_sub_panel_11, id=-1, size=(310, int((2 * self.height) / 5)),
style=wx.NO_BORDER)
self.detail_sub_panel_11_sizer.Add(self.book_detail_panel, 0, wx.ALL, 5)
self.book_detail_sizer = wx.BoxSizer(wx.VERTICAL)
self.book_detail_panel.SetSizer(self.book_detail_sizer)
self.font2 = wx.Font(14, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Arial')
self.font3 = wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Roboto')
self.font4 = wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Roboto')
self.book_detail_sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
self.book_detail_sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
# objects created to hold the book details scraped from the site
self.book_name = wx.StaticText(self.book_detail_panel, id=-1, label="Book")
self.book_name.SetFont(self.font2)
self.book_isbn = wx.StaticText(self.book_detail_panel, id=-1, label="ISBN: ")
self.book_isbn_10 = wx.StaticText(self.book_detail_panel, id=-1, label="ISBN 10: ")
self.book_isbn_13 = wx.StaticText(self.book_detail_panel, id=-1, label="ISBN 13: ")
self.book_author = wx.StaticText(self.book_detail_panel, id=-1, label="Author: ")
self.pages = wx.StaticText(self.book_detail_panel, id=-1, label=" No. of pages: ")
self.book_category = wx.StaticText(self.book_detail_panel, id=-1, label="Publisher: ")
self.file_type = wx.StaticText(self.book_detail_panel, id=-1, label="File: ")
self.pub_year = wx.StaticText(self.book_detail_panel, id=-1, label=" Year: ")
self.book_detail_sizer.Add(self.book_name, 0, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer.Add(self.book_isbn, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer.Add(self.book_isbn_10, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer.Add(self.book_isbn_13, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer_1.Add(self.book_author, 0)
self.book_detail_sizer_1.Add(self.pages, 0, flag=wx.RIGHT, border=10)
self.book_detail_sizer.Add(self.book_detail_sizer_1, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer.Add(self.book_category, flag=wx.TOP | wx.LEFT, border=5)
self.book_detail_sizer_2.Add(self.file_type)
self.book_detail_sizer_2.Add(self.pub_year, flag=wx.RIGHT, border=10)
self.book_detail_sizer.Add(self.book_detail_sizer_2, flag=wx.TOP | wx.LEFT, border=5)
self.x = " " * 15
self.descrip_label = wx.StaticText(self.detail_sub_panel_12, id=-1, label=self.x + "DESCRIPTION")
self.descrip_label.SetFont(self.font4)
self.book_description = wx.TextCtrl(self.detail_sub_panel_12, id=-1, value="",
size=(int(self.width / 2) + 90, int(self.height / 2) + 10),
style=wx.TE_MULTILINE)
self.book_description.SetFont(self.font3)
self.detail_sub_panel_12_sizer.Add(self.descrip_label)
self.detail_sub_panel_12_sizer.Add(self.book_description)
self.main_list_sizer = wx.GridSizer(cols=2, vgap=5, hgap=5)
self.index_panel.SetSizer(self.main_list_sizer)
def scrape_detail(self, url):
'''
Function to be called when the user selects a book from the main page,
and the whole GUI for the detail page is ready to hold it.
'''
self.url_passed = url
# initialize scraping
hdr = {'User-Agent': 'Mozilla/5.0'}
request = Request(self.url_passed, headers=hdr)
self.page_hex = urlopen(request)
self.page_code = self.page_hex.read()
self.page_hex.close()
self.page_analytic = BeautifulSoup(self.page_code, features='html.parser')
self.page_var = self.page_analytic.find('div', {"itemtype": "http://schema.org/Book"})
self.b_name = self.page_var.h1.get_text().strip()
self.b_details = self.page_var.find('div', {'class': "bookDetailsBox"})
# details
try:
self.b_auth = self.page_analytic.find('div', {'class': "col-sm-9"}).i.get_text()
print(self.b_auth)
except AttributeError:
self.b_auth = "--"
try:
self.b_isbn10 = self.b_details.find('div', {'class': "bookProperty property_isbn 10"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_isbn10)
except AttributeError:
self.b_isbn10 = "--"
try:
self.b_isbn = self.b_details.find('div', {'class': "bookProperty property_isbn"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_isbn)
except AttributeError:
self.b_isbn = "--"
try:
self.b_isbn13 = self.b_details.find('div', {'class': "bookProperty property_isbn 13"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_isbn13)
except AttributeError:
self.b_isbn13 = "--"
self.b_year = self.b_details.find('div', {'class': "bookProperty property_year"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_year)
try:
self.b_pages = self.b_details.find('div', {'class': "bookProperty property_pages"}).find('div', {
'class': "property_value"}).span.get_text()
print(self.b_pages)
except AttributeError:
self.b_pages = "--"
self.b_type = self.b_details.find('div', {'class': "bookProperty property__file"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_type)
try:
self.b_pub = self.b_details.find('div', {'class': "bookProperty property_publisher"}).find('div', {
'class': "property_value"}).get_text()
print(self.b_pub)
except AttributeError:
self.b_pub = "--"
self.content_group = self.page_analytic.find('div', {'id': "bookDescriptionBox"})
self.content_truth = False
self.b_descrip = " "
try:
self.content = self.content_group.find_all('span')
except AttributeError:
print("First Test failed")
self.content = []
self.content_truth = False
if not self.content_truth:
try:
print("Second Try")
self.content = self.content_group.find_all('p')
if self.content:
self.content_truth = True
for text in self.content:
self.b_descrip = self.b_descrip + str(text.get_text()) + "\n"
except AttributeError:
self.b_descrip = "--No Description Found--"
print("Test 2 failed!!")
else:
for text in self.content:
self.b_descrip = self.b_descrip + str(text.get_text()) + "\n"
self.content_truth = True
print("Content test successful")
if not self.content_truth:
try:
print("Third test")
self.content = self.content_group.get_text()
self.b_descrip = " "
if self.content:
self.b_descrip = self.b_descrip + self.content.string
except AttributeError:
self.b_descrip = "--No Description Found--"
self.b_map = io.BytesIO(
urlopen(Request(self.page_analytic.find('div', {'class': "col-sm-3"}).a.img['src'])).read())
self.b_buffer = wx.Image(self.b_map).Rescale(width=120, height=int((2 * self.height) / 5),
quality=wx.IMAGE_QUALITY_NORMAL)
self.book_bitmap.SetBitmap(wx.Bitmap(self.b_buffer))
self.book_name.SetLabel(self.b_name)
self.book_name.SetToolTip(wx.ToolTip(self.b_name))
self.book_isbn.SetLabel("ISBN: "+self.b_isbn)
self.book_isbn_10.SetLabel("ISBN 10: "+self.b_isbn10)
self.book_isbn_13.SetLabel("ISBN 13: "+self.b_isbn13)
self.book_author.SetLabel("Author: "+self.b_auth)
self.pages.SetLabel(" No. of pages: " + self.b_pages)
self.book_category.SetLabel("Publisher: " + self.b_pub)
self.file_type.SetLabel("File: " + self.b_type)
self.pub_year.SetLabel(" Year: " + self.b_year)
self.book_description.SetValue(self.b_descrip)
self.detail_panel.Layout()
self.onmainpanelclick()
def onmainpanelclick(self):
'''
This function can be called over and over again as the recommendations are accessed
:return:
'''
# check whether the scrolled panel sizer still holds children from a previous run and remove them
self.children = self.main_list_sizer.GetChildren()
i = len(self.children)-1
if len(self.children) != 0:
while i >= 0:
win = self.main_list_sizer.GetItem(0).GetWindow()
win.Destroy()
i -= 1
self.main_list_sizer.Layout()
else:
print("Empty")
self.containers = self.page_analytic.find_all('div', {'class': "brick checkBookDownloaded"})
print(self.containers)
self.button_spec_1 = [None] * len(self.containers)
print("Dynamic length: "+str(len(self.containers)))
i = 0
for container in self.containers:
print(i)
self.url_param = container.a['href']
try:
self.buffer = wx.Image(io.BytesIO(urlopen(Request(container.a.img['src'])).read()))
self.buffer = self.buffer.Rescale(width=120, height=int((2 * self.height) / 5),
quality=wx.IMAGE_QUALITY_HIGH)
except TypeError:
self.buffer = self.def_image
self.button_spec_1[i] = wx.BitmapButton(self.index_panel, id=-1, size=(120, int((2 * self.height) / 5)),
bitmap=wx.Bitmap(self.buffer), style=wx.BU_NOTEXT)
self.button_spec_1[i].SetLabel("https://1lib.in"+ self.url_param)
self.button_spec_1[i].Bind(wx.EVT_BUTTON, self.on_click)
self.main_list_sizer.Add(self.button_spec_1[i])
self.button_spec_1[i].SetToolTip(wx.ToolTip(container.a['title']))
i += 1
self.main_list_sizer.Layout()
self.index_panel.Bind(wx.EVT_MOTION, self.OnMouse)
self.index_panel.SetupScrolling()
self.main_list_sizer.Layout()
self.main_scrape_UI_sizer.Layout()
def OnMouse(self, event):
self.index_panel.SetFocus()
class DetailPanel2(PanelBase):
'''
Panel that shows the search results.
'''
def __init__(self, parent, on_click):
super(DetailPanel2, self).__init__(parent, 'DetailPanel2', on_click)
self.parent_frame = parent
self.width, self.height = 704, 421
self.on_click = on_click
self.font1 = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Calibri')
self.font2 = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Lucida Console')
self.go_img = wx.Image(r".\\Resources\\icons\\go_to.png")
self.go_img = self.go_img.Rescale(width=28, height=28, quality=wx.IMAGE_QUALITY_HIGH)
self.next_page = wx.Image(r".\\Resources\\icons\\next.png")
self.next_page = self.next_page.Rescale(width=28, height=28, quality=wx.IMAGE_QUALITY_HIGH)
self.prev_page = wx.Image(r".\\Resources\\icons\\prev.png")
self.prev_page = self.prev_page.Rescale(width=28, height=28, quality=wx.IMAGE_QUALITY_HIGH)
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_sizer)
self.page_index = 1
self.list_counter = 0
self.MainUI()
def MainUI(self):
# top panel
self.main_top_panel = wx.Panel(self, id=-1, size=(self.width, int(self.height / 10)), pos=(0, 0),
style=wx.SIMPLE_BORDER)
self.main_top_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.main_top_panel.SetSizer(self.main_top_panel_sizer)
self.search_box = wx.TextCtrl(self.main_top_panel, id=-1, value="", size=(400, 20),
style=wx.TE_LEFT | wx.TE_PROCESS_ENTER)
self.search_box.SetFont(self.font1)
self.search_box.Bind(wx.EVT_TEXT_ENTER, self.on_click)
self.search_button = wx.BitmapButton(self.main_top_panel, id=-1, bitmap=wx.Bitmap(self.go_img),
size=(20, 20), style=wx.BU_LEFT)
self.next_arrow = wx.BitmapButton(self.main_top_panel, id=-1, bitmap=wx.Bitmap(self.next_page),
size=(30, 30), style=wx.BU_NOTEXT)
self.next_arrow.Bind(wx.EVT_BUTTON, self.on_list1)
self.prev_arrow = wx.BitmapButton(self.main_top_panel, id=-1, bitmap=wx.Bitmap(self.prev_page),
size=(30, 30), style=wx.BU_NOTEXT)
self.prev_arrow.Bind(wx.EVT_BUTTON, self.on_list2)
self.main_top_panel_sizer.Add(self.search_box, 0, wx.LEFT | wx.TOP, 10)
self.main_top_panel_sizer.Add(self.search_button, 0, wx.LEFT | wx.TOP, 10)
self.main_top_panel_sizer.Add(self.prev_arrow, 0, wx.LEFT, 10)
self.main_top_panel_sizer.Add(self.next_arrow, 0, wx.LEFT, 10)
self.main_sizer.Add(self.main_top_panel)
# bottom panel
self.buffer_panel = scrolledpanel.ScrolledPanel(self, id=-1,
size=(self.width, int(9 * self.height / 10)),
pos=(0, int(9 * self.height / 10)), style=wx.TAB_TRAVERSAL)
self.buffer_panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.buffer_panel.SetSizer(self.buffer_panel_sizer)
self.main_sizer.Add(self.buffer_panel)
def on_list1(self, event):
'''
Edit the URL when the user requests the next page of search results.
'''
self.page_index += 1
self.list_counter += 1
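# The slice below drops only the last character of the current box value, which assumes any
# existing "?page=N" suffix has a single-digit page number before building the next-page URL.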
if "?page=" in self.search_box.GetValue():
self.arg = "https://1lib.in/s/"+self.search_box.GetValue()[:-1]+str(self.page_index)
else:
self.arg = "https://1lib.in/s/"+self.search_box.GetValue()+ "?page="+str(self.page_index)
self.children = self.buffer_panel_sizer.GetChildren()
i = len(self.children) - 1
if len(self.children) != 0:
while i >= 0:
win = self.buffer_panel_sizer.GetItem(0).GetWindow()
print(win)
win.Destroy()
i -= 1
self.buffer_panel_sizer.Layout()
else:
print("Empty")
self.buffer_panel_sizer.Layout()
self.parent_frame.Fit()
self.list_scrape(self.arg)
def on_list2(self, event):
'''
Edit the URL when the user requests the previous page of search results.
'''
self.page_index -= 1
self.list_counter -= 1
if "?page=" in self.search_box.GetValue():
self.arg = "https://1lib.in/s/" + self.search_box.GetValue()[:-1] + str(self.page_index)
else:
self.arg = "https://1lib.in/s/" + self.search_box.GetValue() + "?page=" + str(self.page_index)
self.children = self.buffer_panel_sizer.GetChildren()
i = len(self.children) - 1
if len(self.children) != 0:
while i >= 0:
win = self.buffer_panel_sizer.GetItem(0).GetWindow()
print(win)
win.Destroy()
i -= 1
else:
print("Empty")
self.buffer_panel_sizer.Layout()
self.parent_frame.Fit()
self.list_scrape(self.arg)
def list_scrape(self, url):
'''
function to scrape the search results
'''
self.url_pick = url
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(self.url_pick, headers=hdr)
self.page = urlopen(req)
self.page_pick = self.page.read()
self.page.close()
self.page_soup = BeautifulSoup(self.page_pick, features='html.parser')
self.counter = 0
print("Counter set to: " + str(self.counter))
self.search_value = self.url_pick.split('/')
self.search_box.SetValue(self.search_value[len(self.search_value) - 1])
self.page_content = self.page_soup.find_all('td', {'style': 'vertical-align: top;'})
self.scrape_creation()
def scrape_creation(self):
'''
function that inserts the result in the container UI
'''
for i in range(len(self.page_content)):
self.content_list = self.page_content[self.counter].find_all('a')
self.content_list2 = self.page_content[self.counter].find_all('div', {'class': "property_value"})
self.buffer_low_panel = wx.Panel(self.buffer_panel, id=-1, size=(self.width - 30, 50),
style=wx.SIMPLE_BORDER)
self.buffer_top_sizer = wx.BoxSizer(wx.VERTICAL)
self.buffer_low_panel.SetSizer(self.buffer_top_sizer)
self.sub_1_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sub_2_sizer = wx.GridSizer(cols=4, vgap=3, hgap=3)
self.buffer_top_sizer.Add(self.sub_1_sizer)
self.buffer_top_sizer.Add(self.sub_2_sizer)
self.buffer = self.content_list[0].get_text()
print(self.buffer)
self.buff_label = wx.StaticText(self.buffer_low_panel, id=-1, label=self.buffer[:80])
self.buff_label.SetFont(self.font2)
self.sub_1_sizer.Add(self.buff_label, 0, wx.LEFT | wx.TOP, 5)
self.buffer = self.content_list[1].get_text()[:25]
print(self.buffer)
self.buff_auth = wx.StaticText(self.buffer_low_panel, id=-1, label="By: " + str(self.buffer))
self.sub_2_sizer.Add(self.buff_auth, 0, wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 20)
self.buffer = self.content_list2[0].get_text()
print(self.buffer)
self.buff_type = wx.StaticText(self.buffer_low_panel, id=-1, label="File type: " + str(self.buffer))
self.sub_2_sizer.Add(self.buff_type, 0, wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 20)
self.go_to = wx.BitmapButton(self.buffer_low_panel, id=-1, bitmap=wx.Bitmap(self.go_img), size=(30, 30),
style=wx.BU_NOTEXT)
self.go_to.SetLabel("https://1lib.in" + self.content_list[0]['href'])
self.go_to.Bind(wx.EVT_BUTTON, self.on_click)
self.sub_2_sizer.Add(self.go_to, 0, wx.LEFT, 20)
self.buffer_panel_sizer.Add(self.buffer_low_panel, 0, flag=wx.TOP | wx.LEFT, border=5)
self.counter += 1
self.buffer_panel_sizer.Layout()
self.buffer_panel.SetupScrolling()
self.buffer_panel.Show()
class MePanel(PanelBase):
'''
Class for displaying details of the project
'''
def __init__(self, parent, on_click):
super(MePanel, self).__init__(parent, "About the Dev", on_click)
self.on_click = on_click
self.parent_frame = parent
self.width, self.height = 704, 421
self.MainUI()
def MainUI(self):
self.main_panel = scrolledpanel.ScrolledPanel(self, id=-1, size=(self.width, self.height),
style=wx.TAB_TRAVERSAL)
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_sizer)
self.main_sizer.Add(self.main_panel)
self.panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.main_panel.SetSizer(self.panel_sizer)
self.logo = wx.Image(r".\\Resources\\icons\\lib-logo.png")
self.logo = self.logo.Rescale(width=301, height=79, quality=wx.IMAGE_QUALITY_NORMAL)
self.logo_bitmap = wx.Bitmap(self.logo)
self.logo_stat = wx.StaticBitmap(self.main_panel, id=-1, bitmap=self.logo_bitmap, size=(301, 79))
self.panel_sizer.Add(self.logo_stat, 0, wx.LEFT|wx.TOP, 25)
self.font1 = wx.Font(15, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Calibri')
self.font2 = wx.Font(11, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Calibri')
self.dots = "------------------------------------------------------------"
self.line = wx.StaticText(self.main_panel, id=-1, label=self.dots)
self.line.SetFont(self.font1)
self.line.SetLabel(self.dots)
self.panel_sizer.Add(self.line, 0, wx.LEFT|wx.TOP, 25)
self.text = "This application is the devloper\'s first ever interaction with wxpython\n"+ \
"(A python GUI FrameWork), and web scraping library BeautifulSoup 4.\n"+ \
"\n"+ \
"About Z library:\n"+ \
"-------------------------------------\n"+ \
"The footer in the project's pages contains the phrase \"Free ebooks since 2009.\"\n"+ \
"Z-Library is one of the more well known shadow libraries, along with Sci-Hub and\n"+ \
"Library Genesis, with publishers and government organizations usually putting the\n"+ \
"three in the same category when pursuing anti-piracy cases. UK organization The Publishers\n"+ \
"Association has attempted to enact ISP level blocks on Z-Library. In late 2015,\n"+ \
"Publisher Elsevier filed a successful court request that ordered the registrar of\n"+ \
"bookfi.org to seize the site's domain. Bookfi.org, booksc.org and b-ok.org were\n"+ \
"included in the 2017 US government report for Notorious Markets.\n"+ \
"\n"
self.text1 = "About the Dev:\n"+ \
"--------------------------------------\n"+ \
"Ahsan Aquib Raushan is 20 years old undergraduate currently in India.\n"+ \
"Completing his Diploma in Computer Science from a renowned university Jamia Millia Islamia\n"+ \
"(Batch of 2020), he usually is spending his time during lockdown"+ \
" sharpening his skills in IT.\n"+ \
"You've obviously found him on Github, for connecting with him on other platforms, Look here:\n"
self.text_obj1 = wx.StaticText(self.main_panel, id=-1, label="")
self.text_obj1.SetFont(self.font2)
self.text_obj1.SetLabel(self.text)
self.text_obj2 = wx.StaticText(self.main_panel, id=-1, label="")
self.text_obj2.SetFont(self.font2)
self.text_obj2.SetLabel(self.text1)
self.panel_sizer.Add(self.text_obj1, 0, wx.LEFT|wx.TOP, 25)
self.panel_sizer.Add(self.text_obj2, 0, wx.LEFT|wx.TOP, 25 )
self.dev_panel = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(self.dev_panel)
self.insta = wx.Image(r".\\Resources\\icons\\insta.png")
self.insta = self.insta.Rescale(width=30, height=30, quality=wx.IMAGE_QUALITY_NORMAL)
self.instagram = wx.BitmapButton(self.main_panel, id=-1, bitmap=wx.Bitmap(self.insta), size=(40, 40),
style=wx.BU_NOTEXT)
self.instagram.Bind(wx.EVT_BUTTON, self.on_insta)
self.link = wx.Image(r".\\Resources\\icons\\link.png")
self.link = self.link.Rescale(width=24, height=24, quality=wx.IMAGE_QUALITY_NORMAL)
self.linkedin = wx.BitmapButton(self.main_panel, id=-1, bitmap=wx.Bitmap(self.link), size=(40, 40),
style=wx.BU_NOTEXT)
self.linkedin.Bind(wx.EVT_BUTTON, self.on_linkedin)
self.dev_panel.Add(self.instagram, 0, wx.LEFT|wx.TOP, 25)
self.dev_panel.Add(self.linkedin, 0, wx.LEFT|wx.TOP, 25)
self.main_panel.SetupScrolling()
self.panel_sizer.Layout()
def on_insta(self, event):
webbrowser.open(url="https://www.instagram.com/graciously_olive/", new=2, autoraise=True)
def on_linkedin(self, event):
webbrowser.open(url="https://www.linkedin.com/in/ahsan-aquib-raushan-b5b59118a/", new=2, autoraise=True)
class PanelSwitcher(wx.BoxSizer):
'''
Class extended from BoxSizer to hold all panels
'''
def __init__(self, parent, panels):
wx.BoxSizer.__init__(self)
parent.SetSizer(self)
self.parent = parent
self.panels = panels
for panel in self.panels:
self.Add(panel, 1, wx.EXPAND)
self.panels[0].Show()
def Show(self, panel):
for p in self.panels:
if p == panel:
p.Show()
else:
p.Hide()
self.parent.Layout()
class ShareFrame(wx.Frame):
'''
Frame displayed as a card to enable sharing
'''
def __init__(self, title, parent=None):
super(ShareFrame, self).__init__(parent=parent, title=title, style=wx.CAPTION|wx.CLOSE_BOX)
self.SetBackgroundColour('#DDB892')
self.SetId(wx.ID_ANY)
self.SetSize(400, 320)
# Resources
self.default_image = wx.Image(r".\\Resources\\mominamustehsan.png")
self.default_image = self.default_image.Rescale(width=111, height=151, quality=wx.IMAGE_QUALITY_HIGH)
self.font1 = wx.Font(11, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Calibri')
self.font2 = wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Roboto')
self.font2.MakeBold()
self.MainUI()
def MainUI(self):
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_sizer)
self.top_panel = wx.Panel(self, id=-1, size=(400, 53), pos=(0, 0), style=wx.NO_BORDER)
self.top_panel.SetBackgroundColour('#DDB892')
self.top_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.top_panel.SetSizer(self.top_sizer)
self.middle_panel = wx.Panel(self, id=-1, size=(400, 170), pos=(0, 53), style=wx.NO_BORDER)
self.middle_panel.SetBackgroundColour('#DDB892')
self.middle_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.middle_panel.SetSizer(self.middle_sizer)
self.lower_panel = wx.Panel(self, id=-1, size=(400, 56), pos=(0, 223), style=wx.NO_BORDER)
self.lower_panel.SetBackgroundColour('#DDB892')
self.lower_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.lower_panel.SetSizer(self.lower_sizer)
self.main_sizer.Add(self.top_panel)
self.main_sizer.Add(self.middle_panel)
self.main_sizer.Add(self.lower_panel)
# top panel element
self.book_name = wx.StaticText(self.top_panel, id=-1, label="--Book Name--", style=wx.ST_ELLIPSIZE_END)
self.book_name.SetFont(self.font2)
self.top_sizer.Add(self.book_name, 0, flag=wx.TOP|wx.LEFT, border=20)
# middle panel elements
self.book_bitmap = wx.BitmapButton(self.middle_panel, id=-1, size=(111, 151),
bitmap=wx.Bitmap(self.default_image),
style=wx.BU_NOTEXT)
self.middle_sizer.Add(self.book_bitmap, 0, flag=wx.LEFT | wx.TOP, border=10)
self.detail_sizer = wx.BoxSizer(wx.VERTICAL)
self.middle_sizer.Add(self.detail_sizer, 0, wx.LEFT, 15)
# detail sizer elements
self.author = wx.StaticText(self.middle_panel, id=-1, label="Author: --")
self.author.SetFont(self.font1)
self.isbn = wx.StaticText(self.middle_panel, id=-1, label="ISBN: --")
self.isbn.SetFont(self.font1)
self.isbn_10 = wx.StaticText(self.middle_panel, id=-1, label="ISBN 10: --")
self.isbn_10.SetFont(self.font1)
self.isbn_13 = wx.StaticText(self.middle_panel, id=-1, label="ISBN 13: --")
self.isbn_13.SetFont(self.font1)
self.year = wx.StaticText(self.middle_panel, id=-1, label="Year: --")
self.year.SetFont(self.font1)
self.file = wx.StaticText(self.middle_panel, id=-1, label="File Type: --")
self.file.SetFont(self.font1)
self.detail_sizer.Add(self.author, 0, flag=wx.LEFT|wx.TOP, border=5)
self.detail_sizer.Add(self.isbn, 0, flag=wx.LEFT|wx.TOP, border=5)
self.detail_sizer.Add(self.isbn_10, 0, flag=wx.LEFT|wx.TOP, border=5)
self.detail_sizer.Add(self.isbn_13, 0, flag=wx.LEFT|wx.TOP, border=5)
self.detail_sizer.Add(self.year, 0, flag=wx.LEFT|wx.TOP, border=5)
self.detail_sizer.Add(self.file, 0, flag=wx.LEFT|wx.TOP, border=5)
# bottom sizer elements
self.share_icon = wx.Image(r".\\Resources\\icons\\share.png")
self.share_icon.Rescale(width=29, height=29, quality=wx.IMAGE_QUALITY_NORMAL)
self.button1 = wx.BitmapButton(self.lower_panel, id=-1, size=(35, 35), bitmap=wx.Bitmap(self.share_icon),
style=wx.BU_NOTEXT)
self.button1.SetToolTip(wx.ToolTip("Share as Screenshot"))
self.button1.Bind(wx.EVT_BUTTON, self.on_share)
self.download_icon = wx.Image(r".\\Resources\\icons\\download.png")
self.download_icon.Rescale(width=29, height=29, quality=wx.IMAGE_QUALITY_NORMAL)
self.button2 = wx.BitmapButton(self.lower_panel, id=-1, size=(35, 35), bitmap=wx.Bitmap(self.download_icon),
style=wx.BU_NOTEXT)
self.button2.SetToolTip(wx.ToolTip("Download from site"))
self.button2.Bind(wx.EVT_BUTTON, self.on_down)
self.lower_sizer.Add(self.button1, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 24)
self.lower_sizer.Add(self.button2, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 24)
def on_share(self, event):
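# Windows-only sharing path: the frame is located by its exact title via win32gui, its
# on-screen rectangle is captured with pyscreenshot, and the small pixel offsets trim the
# window border out of the saved screenshot.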
hwnd = win32gui.FindWindow(None, "Book Details--")
print(win32gui.GetWindowRect(hwnd))
left, top, right, bottom = win32gui.GetWindowRect(hwnd)
self.image = pyscreenshot.grab(bbox=(left+7, top, right-7, bottom-7))
self.image.save("screenshot.png")
def on_down(self, event):
webbrowser.open(url=self.url_passed, new=2, autoraise=True)
def scrape_detail(self, arg):
# init contents
self.url_passed = arg
# initialize scraping
hdr = {'User-Agent': 'Mozilla/5.0'}
request = Request(self.url_passed, headers=hdr)
self.page_hex = urlopen(request)
self.page_code = self.page_hex.read()
self.page_hex.close()
self.page_analytic = BeautifulSoup(self.page_code, features='html.parser')
self.page_var = self.page_analytic.find('div', {"itemtype": "http://schema.org/Book"})
self.b_details = self.page_var.find('div', {'class': "bookDetailsBox"})
try:
self.b_name = self.page_var.h1.get_text().strip()
self.book_name.SetLabel(self.b_name[:34]+ "...")
self.book_name.SetToolTip(wx.ToolTip(self.b_name))
except AttributeError:
pass
# details
try:
self.b_auth = self.page_analytic.find('div', {'class': "col-sm-9"}).i.get_text()
self.author.SetLabel("Author: "+self.b_auth)
except AttributeError:
pass
try:
self.b_isbn10 = self.b_details.find('div', {'class': "bookProperty property_isbn 10"}).find('div', {
'class': "property_value"}).get_text()
self.isbn_10.SetLabel("ISBN 10: "+self.b_isbn10)
except AttributeError:
pass
try:
self.b_isbn = self.b_details.find('div', {'class': "bookProperty property_isbn"}).find('div', {
'class': "property_value"}).get_text()
self.isbn.SetLabel("ISBN: "+self.b_isbn)
except AttributeError:
pass
try:
self.b_isbn13 = self.b_details.find('div', {'class': "bookProperty property_isbn 13"}).find('div', {
'class': "property_value"}).get_text()
self.isbn_13.SetLabel("ISBN 13:"+self.b_isbn13)
except AttributeError:
pass
try:
self.b_year = self.b_details.find('div', {'class': "bookProperty property_year"}).find('div', {
'class': "property_value"}).get_text()
self.year.SetLabel("Year: " + self.b_year)
except AttributeError:
pass
self.b_type = self.b_details.find('div', {'class': "bookProperty property__file"}).find('div', {
'class': "property_value"}).get_text()
self.file.SetLabel("File Type: "+self.b_type)
self.b_map = io.BytesIO(
urlopen(Request(self.page_analytic.find('div', {'class': "col-sm-3"}).a.img['src'])).read())
self.b_buffer = wx.Image(self.b_map).Rescale(width=111, height=151, quality=wx.IMAGE_QUALITY_NORMAL)
self.book_bitmap.SetBitmap(wx.Bitmap(self.b_buffer))
self.top_panel.Update()
self.middle_panel.Update()
self.lower_panel.Update()
class WindowFrame(wx.Frame):
'''
Main frame for all panels
'''
def __init__(self, parent=None):
super(WindowFrame, self).__init__(parent, title='Z-library', style=wx.DEFAULT_FRAME_STYLE)
self.SetSize((720, 480))
self.Centre(direction=wx.BOTH)
self.SetIcon(wx.Icon(r'.\\Resources\\Icons\\Logo.png'))
self.main_panel = MainPanel(self, self.on_main_panel_click)
self.d1_panel = DetailPanel1(self, self.on_d1_panel_click)
self.d2_panel = DetailPanel2(self, self.on_d2_panel_click)
self.me_panel = MePanel(self, self.on_me_click)
self.panel_switch = PanelSwitcher(self, [self.main_panel,
self.d1_panel,self.d2_panel, self.me_panel])
self.Menu_ui()
def Menu_ui(self):
self.menubar = wx.MenuBar(style=wx.MB_DOCKABLE)
self.setting = wx.Menu()
self.setting.AppendSeparator()
self.about_me = wx.MenuItem(self.setting, id=-1, text="About", kind=wx.ITEM_NORMAL)
self.panel_changer = wx.MenuItem(self.setting, id=-1, text="Back", kind=wx.ITEM_NORMAL)
self.quit = wx.MenuItem(self.setting, id=-1, text="Quit", kind=wx.ITEM_NORMAL)
self.setting.Append(self.about_me)
self.setting.Append(self.panel_changer)
self.setting.Append(self.quit)
self.menubar.Append(self.setting, "Settings")
self.SetMenuBar(self.menubar)
self.Bind(wx.EVT_MENU, self.on_me_click, self.about_me)
self.Bind(wx.EVT_MENU, self.on_switch, self.panel_changer)
self.temp = None
def on_main_panel_click(self, event):
arg = event.GetEventObject().GetLabel()
if "mostpopular" in arg:
self.panel_switch.Hide(self.main_panel)
self.temp = self.main_panel
self.panel_switch.Show(self.d1_panel)
self.d1_panel.scrape_detail(arg)
else:
print("Here")
arg = event.GetEventObject().GetValue()
arg = "https://1lib.in/s/" + arg
print("Url_accessing: " + arg)
self.panel_switch.Hide(self.d1_panel)
self.temp = self.main_panel
self.d2_panel.list_scrape(arg)
self.panel_switch.Show(self.d2_panel)
def on_d1_panel_click(self, event):
if "mostpopular" in event.GetEventObject().GetLabel():
self.panel_switch.Hide(self.d1_panel)
self.temp = self.d1_panel
self.d1_panel.scrape_detail(event.GetEventObject().GetLabel())
self.panel_switch.Show(self.d1_panel)
def on_d2_panel_click(self, event):
self.args = event.GetEventObject().GetLabel()
self.obj = ShareFrame("Book Details--", self)
self.obj.scrape_detail(self.args)
self.obj.Show()
def on_me_click(self, event):
self.panel_switch.Hide(self.main_panel)
self.temp = self.main_panel
self.panel_switch.Show(self.me_panel)
def on_switch(self, event):
print("I am here")
self.panel_switch.Show(self.temp)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
app_obj = wx.App(redirect=False)
app_obj.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
frame_obj = WindowFrame()
frame_obj.Show()
app_obj.MainLoop()
avg_line_length: 45.465657 | max_line_length: 120 | alphanum_fraction: 0.595121
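The scraping logic in the record above is interleaved with the wxPython widgets. As a minimal sketch of the same request-and-parse pattern on its own (same URL, headers, and CSS class as in the file, though the site's markup and availability are not guaranteed), something like the following could be used:

# Minimal standalone sketch of the pattern used by MainPanel.scrape_recom above.
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

def fetch_recommended(url="https://1lib.in/"):
    """Return (title, book URL, cover image URL) for each recommended book card."""
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})  # plain UA header, as in the file
    with urlopen(req) as resp:
        soup = BeautifulSoup(resp.read(), features='html.parser')
    books = []
    for brick in soup.find_all('div', class_='brick checkBookDownloaded'):
        link = brick.a
        books.append((link['title'], "https://1lib.in" + link['href'], link.img['src']))
    return books

if __name__ == '__main__':
    for title, book_url, cover_url in fetch_recommended():
        print(title, '->', book_url)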
hexsha: a15ac9fb6eec6ab47400f9ac96accd44eef14b72 | size: 1,232 | ext: py | lang: Python
repo_path: lib/surface/compute/ssl_certificates/__init__.py | repo_name: bshaffer/google-cloud-sdk | repo_head_hexsha: f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars/max_issues/max_forks counts and event datetimes: null
content:
# -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating SSL certificates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class SslCertificates(base.Group):
"""List, create, and delete Google Compute Engine SSL certificates.
List, create and delete Google Compute Engine SSL certificates that can be
used to configure a target HTTPS proxy. For more information, see:
[](https://cloud.google.com/load-balancing/docs/ssl-certificates)
"""
SslCertificates.category = base.COMPUTE_LOAD_BALANCING_CATEGORY
avg_line_length: 35.2 | max_line_length: 76 | alphanum_fraction: 0.774351
hexsha: 9db6e20a263980c2394a103dd73f2952c68ab86d | size: 1,269 | ext: py | lang: Python
repo_path: kafka/producer.py | repo_name: MRazaKazmi/Startup-Where | repo_head_hexsha: 5e012be43bc2beee1e387af81bc45f1e237d6216 | licenses: ["CECILL-B"] (same for the stars/issues/forks column groups)
max_stars/max_issues/max_forks counts and event datetimes: null
content:
import time
import requests
from kafka import KafkaProducer
from json import dumps
import json
meetup_dot_com_rsvp_stream_api_url = "http://stream.meetup.com/2/rsvps"
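# NOTE: Meetup has retired this public RSVP stream endpoint, so the URL above may no longer
# respond; any replacement streaming source that emits JSON lines would flow through the
# same producer loop below.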
kafka_topic_name = "meetuprsvptopic"
kafka_bootstrap_servers = 'localhost:9092'
if __name__ == "__main__":
print("Kafka Producer Application Started ... ")
kafka_producer_obj = KafkaProducer(bootstrap_servers=kafka_bootstrap_servers, value_serializer=lambda x: dumps(x).encode('utf-8'))
while True:
try:
stream_api_response = requests.get(meetup_dot_com_rsvp_stream_api_url, stream=True)
if stream_api_response.status_code == 200:
for api_response_message in stream_api_response.iter_lines():
print("Message received: ")
print(api_response_message)
api_response_message = json.loads(api_response_message)
print("Message to be sent: ")
print(api_response_message)
kafka_producer_obj.send(kafka_topic_name, api_response_message)
time.sleep(1)
except Exception as ex:
print('Connection to the meetup stream API could not be established:', ex)
print("Printing after while loop complete.")
avg_line_length: 40.935484 | max_line_length: 134 | alphanum_fraction: 0.676123
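For completeness, the consuming side of the same topic can be sketched with kafka-python's KafkaConsumer; the topic name and bootstrap server mirror the producer in the record above, while the group id is only an illustrative assumption:

# Minimal sketch of a consumer for the topic written by the producer above (kafka-python).
from json import loads
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "meetuprsvptopic",                      # same topic the producer writes to
    bootstrap_servers='localhost:9092',     # same broker as the producer
    auto_offset_reset='earliest',           # start from the beginning if no committed offset
    value_deserializer=lambda m: loads(m.decode('utf-8')),
    group_id='meetup-rsvp-consumer',        # illustrative group id, not from the original
)

for message in consumer:
    print("RSVP received:", message.value)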
hexsha: 7c03b7fe57db756e38060a2cc74eebd2da21a47c | size: 42,178 | ext: py | lang: Python
repo_path: mne/viz/ica.py | repo_name: bloyl/mne-python | repo_head_hexsha: bf945f19d221a4955b59758fdb9777d35315c042 | licenses: ["BSD-3-Clause"] (same for the stars/issues/forks column groups)
max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2016-10-27T20:07:23.000Z | max_stars_repo_stars_event_max_datetime: 2016-10-27T20:07:23.000Z
max_issues/max_forks counts and event datetimes: null
content:
"""Functions to plot ICA specific data (besides topographies)."""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Teon Brooks <teon.brooks@gmail.com>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: Simplified BSD
from functools import partial
import warnings
import numpy as np
from .utils import (tight_layout, _make_event_color_dict,
plt_show, _convert_psds, _compute_scalings)
from .topomap import _plot_ica_topomap
from .epochs import plot_epochs_image
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from ..utils import _validate_type, fill_doc
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types, _picks_to_idx
from ..time_frequency.psd import psd_multitaper
from ..utils import _reject_data_segments, verbose
@fill_doc
def plot_ica_sources(ica, inst, picks=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False, show_scrollbars=True,
time_format='float'):
"""Plot estimated latent sources given the unmixing matrix.
Typical usecases:
1. plot evolution of latent sources over time based on (Raw input)
2. plot latent source around event related time windows (Epochs input)
3. plot time-locking in ICA space (Evoked input)
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
%(picks_base)s all sources in the order as fitted.
start, stop : float | int | None
If ``inst`` is a `~mne.io.Raw` or an `~mne.Evoked` object, the first and
last time point (in seconds) of the data to plot. If ``inst`` is a
`~mne.io.Raw` object, ``start=None`` and ``stop=None`` will be
translated into ``start=0.`` and ``stop=3.``, respectively. For
`~mne.Evoked`, ``None`` refers to the beginning and end of the evoked
signal. If ``inst`` is an `~mne.Epochs` object, specifies the index of
the first and last epoch to show.
title : str | None
The window title. If None a default is provided.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for interactive selection of components in raw and epoch
plotter. For evoked, this parameter has no effect. Defaults to False.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
%(show_scrollbars)s
%(time_format)s
Returns
-------
fig : instance of Figure
The figure.
Notes
-----
For raw and epoch instances, it is possible to select components for
exclusion by clicking on the line. The selected components are added to
``ica.exclude`` on close.
.. versionadded:: 0.10.0
"""
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
exclude = ica.exclude
picks = _picks_to_idx(ica.n_components_, picks, 'all')
if isinstance(inst, (BaseRaw, BaseEpochs)):
fig = _plot_sources(ica, inst, picks, exclude, start=start, stop=stop,
show=show, title=title, block=block,
show_first_samp=show_first_samp,
show_scrollbars=show_scrollbars,
time_format=time_format)
elif isinstance(inst, Evoked):
if start is not None or stop is not None:
inst = inst.copy().crop(start, stop)
sources = ica.get_sources(inst)
fig = _plot_ica_sources_evoked(
evoked=sources, picks=picks, exclude=exclude, title=title,
labels=getattr(ica, 'labels_', None), show=show, ica=ica)
else:
raise ValueError('Data input must be of Raw or Epochs type')
return fig
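# A hedged usage sketch for plot_ica_sources above (not part of the original module): given a
# fitted ICA and a Raw recording, the first few latent sources for the 0-10 s window could be
# browsed interactively with something like
#     fig = plot_ica_sources(ica, raw, picks=[0, 1, 2, 3, 4], start=0., stop=10., block=True)
# and any components clicked in the browser are added to ica.exclude when the window closes.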
def _create_properties_layout(figsize=None, fig=None):
"""Create main figure and axes layout used by plot_ica_properties."""
import matplotlib.pyplot as plt
if fig is not None and figsize is not None:
raise ValueError('Cannot specify both fig and figsize.')
if figsize is None:
figsize = [7., 6.]
if fig is None:
fig = plt.figure(figsize=figsize, facecolor=[0.95] * 3)
axes_params = (('topomap', [0.08, 0.5, 0.3, 0.45]),
('image', [0.5, 0.6, 0.45, 0.35]),
('erp', [0.5, 0.5, 0.45, 0.1]),
('spectrum', [0.08, 0.1, 0.32, 0.3]),
('variance', [0.5, 0.1, 0.45, 0.25]))
axes = [fig.add_axes(loc, label=name) for name, loc in axes_params]
return fig, axes
def _plot_ica_properties(pick, ica, inst, psds_mean, freqs, n_trials,
epoch_var, plot_lowpass_edge, epochs_src,
set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes,
kind, dropped_indices):
"""Plot ICA properties (helper)."""
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from scipy.stats import gaussian_kde
topo_ax, image_ax, erp_ax, spec_ax, var_ax = axes
# plotting
# --------
# component topomap
_plot_ica_topomap(ica, pick, show=False, axes=topo_ax, **topomap_args)
# image and erp
# we create a new epoch with dropped rows
epoch_data = epochs_src.get_data()
epoch_data = np.insert(arr=epoch_data,
obj=(dropped_indices -
np.arange(len(dropped_indices))).astype(int),
values=0.0,
axis=0)
from ..epochs import EpochsArray
epochs_src = EpochsArray(epoch_data, epochs_src.info, tmin=epochs_src.tmin,
verbose=0)
plot_epochs_image(epochs_src, picks=pick, axes=[image_ax, erp_ax],
combine=None, colorbar=False, show=False,
**image_args)
# spectrum
spec_ax.plot(freqs, psds_mean, color='k')
if plot_std:
spec_ax.fill_between(freqs, psds_mean - spectrum_std[0],
psds_mean + spectrum_std[1],
color='k', alpha=.2)
if plot_lowpass_edge:
spec_ax.axvline(inst.info['lowpass'], lw=2, linestyle='--',
color='k', alpha=0.2)
# epoch variance
var_ax_divider = make_axes_locatable(var_ax)
hist_ax = var_ax_divider.append_axes("right", size="33%", pad="2.5%")
var_ax.scatter(range(len(epoch_var)), epoch_var, alpha=0.5,
facecolor=[0, 0, 0], lw=0)
# rejected epochs in red
var_ax.scatter(dropped_indices, epoch_var[dropped_indices],
alpha=1., facecolor=[1, 0, 0], lw=0)
# compute percentage of dropped epochs
var_percent = float(len(dropped_indices)) / float(len(epoch_var)) * 100.
# histogram
_, counts, _ = hist_ax.hist(epoch_var, orientation="horizontal",
color="k", alpha=.5)
# kde
ymin, ymax = hist_ax.get_ylim()
try:
kde = gaussian_kde(epoch_var)
except np.linalg.LinAlgError:
pass # singular: happens when there is nothing plotted
else:
x = np.linspace(ymin, ymax, 50)
kde_ = kde(x)
kde_ /= kde_.max() or 1.
kde_ *= hist_ax.get_xlim()[-1] * .9
hist_ax.plot(kde_, x, color="k")
hist_ax.set_ylim(ymin, ymax)
# aesthetics
# ----------
topo_ax.set_title(ica._ica_names[pick])
set_title_and_labels(image_ax, kind + ' image and ERP/ERF', [], kind)
# erp
set_title_and_labels(erp_ax, [], 'Time (s)', 'AU')
erp_ax.spines["right"].set_color('k')
erp_ax.set_xlim(epochs_src.times[[0, -1]])
# remove half of yticks if more than 5
yt = erp_ax.get_yticks()
if len(yt) > 5:
erp_ax.yaxis.set_ticks(yt[::2])
# remove xticks - erp plot shows xticks for both image and erp plot
image_ax.xaxis.set_ticks([])
yt = image_ax.get_yticks()
image_ax.yaxis.set_ticks(yt[1:])
image_ax.set_ylim([-0.5, n_trials + 0.5])
# spectrum
set_title_and_labels(spec_ax, 'Spectrum', 'Frequency (Hz)', psd_ylabel)
spec_ax.yaxis.labelpad = 0
spec_ax.set_xlim(freqs[[0, -1]])
ylim = spec_ax.get_ylim()
air = np.diff(ylim)[0] * 0.1
spec_ax.set_ylim(ylim[0] - air, ylim[1] + air)
image_ax.axhline(0, color='k', linewidth=.5)
# epoch variance
var_ax_title = 'Dropped segments: %.2f %%' % var_percent
set_title_and_labels(var_ax, var_ax_title, kind, 'Variance (AU)')
hist_ax.set_ylabel("")
hist_ax.set_yticks([])
set_title_and_labels(hist_ax, None, None, None)
return fig
def _get_psd_label_and_std(this_psd, dB, ica, num_std):
"""Handle setting up PSD for one component, for plot_ica_properties."""
psd_ylabel = _convert_psds(this_psd, dB, estimate='auto', scaling=1.,
unit='AU', first_dim='epoch')
psds_mean = this_psd.mean(axis=0)
diffs = this_psd - psds_mean
# the distribution of power for each frequency bin is highly
# skewed so we calculate std for values below and above average
# separately - this is used for fill_between shade
with warnings.catch_warnings(): # mean of empty slice
warnings.simplefilter('ignore')
spectrum_std = [
[np.sqrt((d[d < 0] ** 2).mean(axis=0)) for d in diffs.T],
[np.sqrt((d[d > 0] ** 2).mean(axis=0)) for d in diffs.T]]
spectrum_std = np.array(spectrum_std) * num_std
return psd_ylabel, psds_mean, spectrum_std
@verbose
def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True, reject='auto',
reject_by_annotation=True, *, verbose=None):
"""Display component properties.
Properties include the topography, epochs image, ERP/ERF, power
spectrum, and epoch variance.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of Epochs or Raw
The data to use in plotting properties.
%(picks_base)s the first five sources.
If more than one components were chosen in the picks,
each one will be plotted in a separate figure.
axes : list of Axes | None
List of five matplotlib axes to use in plotting: [topomap_axis,
image_axis, erp_axis, spectrum_axis, variance_axis]. If None a new
figure with relevant axes is created. Defaults to None.
dB : bool
Whether to plot spectrum in dB. Defaults to True.
plot_std : bool | float
Whether to plot standard deviation/confidence intervals in ERP/ERF and
spectrum plots.
Defaults to True, which plots one standard deviation above/below for
the spectrum. If set to float allows to control how many standard
deviations are plotted for the spectrum. For example 2.5 will plot 2.5
standard deviation above/below.
For the ERP/ERF, by default, plot the 95 percent parametric confidence
interval is calculated. To change this, use ``ci`` in ``ts_args`` in
``image_args`` (see below).
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
figsize : array-like, shape (2,) | None
Allows to control size of the figure. If None, the figure size
defaults to [7., 6.].
show : bool
Show figure if True.
reject : 'auto' | dict | None
Allows to specify rejection parameters used to drop epochs
(or segments if continuous signal is passed as inst).
If None, no rejection is applied. The default is 'auto',
which applies the rejection parameters used when fitting
the ICA object.
%(reject_by_annotation_raw)s
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : list
List of matplotlib figures.
Notes
-----
.. versionadded:: 0.13
"""
return _fast_plot_ica_properties(ica, inst, picks=picks, axes=axes, dB=dB,
plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
figsize=figsize, show=show,
reject=reject,
reject_by_annotation=reject_by_annotation,
verbose=verbose, precomputed_data=None)
def _fast_plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None,
image_args=None, psd_args=None, figsize=None,
show=True, reject='auto', precomputed_data=None,
reject_by_annotation=True, *, verbose=None):
"""Display component properties."""
from ..preprocessing import ICA
# input checks and defaults
# -------------------------
_validate_type(ica, ICA, "ica", "ICA")
_validate_type(plot_std, (bool, 'numeric'), 'plot_std')
if isinstance(plot_std, bool):
num_std = 1. if plot_std else 0.
else:
plot_std = True
num_std = float(plot_std)
# if no picks given - plot the first 5 components
limit = min(5, ica.n_components_) if picks is None else len(ica.ch_names)
picks = _picks_to_idx(ica.info, picks, 'all')[:limit]
if axes is None:
fig, axes = _create_properties_layout(figsize=figsize)
else:
if len(picks) > 1:
raise ValueError('Only a single pick can be drawn '
'to a set of axes.')
from .utils import _validate_if_list_of_axes
_validate_if_list_of_axes(axes, obligatory_len=5)
fig = axes[0].get_figure()
psd_args = dict() if psd_args is None else psd_args
topomap_args = dict() if topomap_args is None else topomap_args
image_args = dict() if image_args is None else image_args
image_args["ts_args"] = dict(truncate_xaxis=False, show_sensors=False)
if plot_std:
from ..stats.parametric import _parametric_ci
image_args["ts_args"]["ci"] = _parametric_ci
elif "ts_args" not in image_args or "ci" not in image_args["ts_args"]:
image_args["ts_args"]["ci"] = False
for item_name, item in (("psd_args", psd_args),
("topomap_args", topomap_args),
("image_args", image_args)):
_validate_type(item, dict, item_name, "dictionary")
if dB is not None:
_validate_type(dB, bool, "dB", "bool")
# calculations
# ------------
if isinstance(precomputed_data, tuple):
kind, dropped_indices, epochs_src, data = precomputed_data
else:
kind, dropped_indices, epochs_src, data = _prepare_data_ica_properties(
inst, ica, reject_by_annotation, reject)
ica_data = np.swapaxes(data[:, picks, :], 0, 1)
dropped_src = ica_data
# spectrum
Nyquist = inst.info['sfreq'] / 2.
lp = inst.info['lowpass']
if 'fmax' not in psd_args:
psd_args['fmax'] = min(lp * 1.25, Nyquist)
plot_lowpass_edge = lp < Nyquist and (psd_args['fmax'] > lp)
psds, freqs = psd_multitaper(epochs_src, picks=picks, **psd_args)
def set_title_and_labels(ax, title, xlab, ylab):
if title:
ax.set_title(title)
if xlab:
ax.set_xlabel(xlab)
if ylab:
ax.set_ylabel(ylab)
ax.axis('auto')
ax.tick_params('both', labelsize=8)
ax.axis('tight')
# plot
# ----
all_fig = list()
for idx, pick in enumerate(picks):
# calculate component-specific spectrum stuff
psd_ylabel, psds_mean, spectrum_std = _get_psd_label_and_std(
psds[:, idx, :].copy(), dB, ica, num_std)
# if more than one component, spawn additional figures and axes
if idx > 0:
fig, axes = _create_properties_layout(figsize=figsize)
# we reconstruct an epoch_variance with 0 where indices were dropped
epoch_var = np.var(ica_data[idx], axis=1)
drop_var = np.var(dropped_src[idx], axis=1)
drop_indices_corrected = \
(dropped_indices -
np.arange(len(dropped_indices))).astype(int)
epoch_var = np.insert(arr=epoch_var,
obj=drop_indices_corrected,
values=drop_var[dropped_indices],
axis=0)
# the actual plot
fig = _plot_ica_properties(
pick, ica, inst, psds_mean, freqs, ica_data.shape[1],
epoch_var, plot_lowpass_edge,
epochs_src, set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes, kind,
dropped_indices)
all_fig.append(fig)
plt_show(show)
return all_fig
def _prepare_data_ica_properties(inst, ica, reject_by_annotation=True,
reject='auto'):
"""Prepare Epochs sources to plot ICA properties.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of Epochs or Raw
The data to use in plotting properties.
reject_by_annotation : bool, optional
[description], by default True
reject : str, optional
[description], by default 'auto'
Returns
-------
kind : str
"Segment" for BaseRaw and "Epochs" for BaseEpochs
dropped_indices : list
Dropped epochs indexes.
epochs_src : instance of Epochs
Segmented data of ICA sources.
data : array of shape (n_epochs, n_ica_sources, n_times)
A view on epochs ICA sources data.
"""
from ..io.base import BaseRaw
from ..io import RawArray
from ..epochs import BaseEpochs
_validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs")
if isinstance(inst, BaseRaw):
# when auto, delegate reject to the ica
from ..epochs import make_fixed_length_epochs
if reject == 'auto':
reject = getattr(ica, 'reject_', None)
if reject is None:
drop_inds = None
dropped_indices = []
# break up continuous signal into segments
epochs_src = make_fixed_length_epochs(
ica.get_sources(inst),
duration=2,
preload=True,
reject_by_annotation=reject_by_annotation,
proj=False,
verbose=False)
else:
data = inst.get_data()
data, drop_inds = _reject_data_segments(data, ica.reject_,
flat=None, decim=None,
info=inst.info,
tstep=2.0)
inst_rejected = RawArray(data, inst.info)
# break up continuous signal into segments
epochs_src = make_fixed_length_epochs(
ica.get_sources(inst_rejected),
duration=2,
preload=True,
reject_by_annotation=reject_by_annotation,
proj=False,
verbose=False)
            # get the indices of the dropped epochs
dropped_indices = [(d[0] // len(epochs_src.times)) + 1
for d in drop_inds]
kind = "Segment"
else:
drop_inds = None
epochs_src = ica.get_sources(inst)
dropped_indices = []
kind = "Epochs"
return kind, dropped_indices, epochs_src, epochs_src.get_data()
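# Illustrative usage sketch (not part of the original module): how this helper
# might be driven from a fitted ICA; `epochs` and `ica` are placeholder names.
# >>> kind, dropped, epochs_src, data = _prepare_data_ica_properties(
# ...     epochs, ica, reject_by_annotation=True, reject='auto')
# >>> data.shape  # (n_epochs, n_ica_sources, n_times)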
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica,
labels=None):
"""Plot average over epochs in ICA space.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked to be used.
%(picks_base)s all sources in the order as fitted.
exclude : array-like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
title : str
The figure title.
show : bool
Show figure if True.
labels : None | dict
The ICA labels attribute.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
if title is None:
title = 'Reconstructed latent sources, time-locked'
fig, axes = plt.subplots(1)
ax = axes
axes = [axes]
times = evoked.times * 1e3
# plot unclassified sources and label excluded ones
lines = list()
texts = list()
picks = np.sort(picks)
idxs = [picks]
if labels is not None:
labels_used = [k for k in labels if '/' not in k]
exclude_labels = list()
for ii in picks:
if ii in exclude:
line_label = ica._ica_names[ii]
if labels is not None:
annot = list()
for this_label in labels_used:
indices = labels[this_label]
if ii in indices:
annot.append(this_label)
if annot:
line_label += (' – ' + ', '.join(annot)) # Unicode en-dash
exclude_labels.append(line_label)
else:
exclude_labels.append(None)
label_props = [('k', '-') if lb is None else ('r', '-') for lb in
exclude_labels]
styles = ['-', '--', ':', '-.']
if labels is not None:
# differentiate categories by linestyle and components by color
col_lbs = [it for it in exclude_labels if it is not None]
cmap = plt.get_cmap('tab10', len(col_lbs))
unique_labels = set()
for label in exclude_labels:
if label is None:
continue
elif ' – ' in label:
unique_labels.add(label.split(' – ')[1])
else:
unique_labels.add('')
# Determine up to 4 different styles for n categories
cat_styles = dict(zip(unique_labels,
map(lambda ux: styles[int(ux % len(styles))],
range(len(unique_labels)))))
for label_idx, label in enumerate(exclude_labels):
if label is not None:
color = cmap(col_lbs.index(label))
if ' – ' in label:
label_name = label.split(' – ')[1]
else:
label_name = ''
style = cat_styles[label_name]
label_props[label_idx] = (color, style)
for exc_label, ii in zip(exclude_labels, picks):
color, style = label_props[ii]
# ensure traces of excluded components are plotted on top
zorder = 2 if exc_label is None else 10
lines.extend(ax.plot(times, evoked.data[ii].T, picker=True,
zorder=zorder, color=color, linestyle=style,
label=exc_label))
lines[-1].set_pickradius(3.)
ax.set(title=title, xlim=times[[0, -1]], xlabel='Time (ms)', ylabel='(NA)')
if len(exclude) > 0:
plt.legend(loc='best')
tight_layout(fig=fig)
texts.append(ax.text(0, 0, '', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
    # give the data the structure of a list of lists: one group of lines per
    # subplot
lines = [lines]
ch_names = evoked.ch_names
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
ch_names=ch_names, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
plt_show(show)
return fig
def plot_ica_scores(ica, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None,
n_cols=None, show=True):
"""Plot scores related to detected components.
    Use this function to assess how well your scores describe outlier
    sources and how well you detected them.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
scores : array-like of float, shape (n_ica_components,) | list of array
Scores based on arbitrary metric to characterize ICA components.
exclude : array-like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
labels : str | list | 'ecg' | 'eog' | None
The labels to consider for the axes tests. Defaults to None.
If list, should match the outer shape of ``scores``.
If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
Note that '/' is used internally for sublabels specifying ECG and
EOG channels.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int | None
The figure size. If None it gets set automatically.
n_cols : int | None
Scores are plotted in a grid. This parameter controls how
many to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure object.
"""
import matplotlib.pyplot as plt
my_range = np.arange(ica.n_components_)
if exclude is None:
exclude = ica.exclude
exclude = np.unique(exclude)
if not isinstance(scores[0], (list, np.ndarray)):
scores = [scores]
n_scores = len(scores)
if n_cols is None:
# prefer more rows.
n_rows = int(np.ceil(np.sqrt(n_scores)))
n_cols = (n_scores - 1) // n_rows + 1
else:
n_cols = min(n_scores, n_cols)
n_rows = (n_scores - 1) // n_cols + 1
if figsize is None:
figsize = (6.4 * n_cols, 2.7 * n_rows)
fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize,
sharex=True, sharey=True)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
fig.suptitle(title)
if labels == 'ecg':
labels = [label for label in ica.labels_ if label.startswith('ecg/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
if len(labels) == 0:
labels = [label for label in ica.labels_ if
label.startswith('ecg')]
elif labels == 'eog':
labels = [label for label in ica.labels_ if label.startswith('eog/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
if len(labels) == 0:
labels = [label for label in ica.labels_ if
label.startswith('eog')]
elif isinstance(labels, str):
labels = [labels]
elif labels is None:
labels = (None,) * n_scores
if len(labels) != n_scores:
raise ValueError('Need as many labels (%i) as scores (%i)'
% (len(labels), n_scores))
for label, this_scores, ax in zip(labels, scores, axes):
if len(my_range) != len(this_scores):
raise ValueError('The length of `scores` must equal the '
'number of ICA components.')
ax.bar(my_range, this_scores, color='gray', edgecolor='k')
for excl in exclude:
ax.bar(my_range[excl], this_scores[excl], color='r', edgecolor='k')
if axhline is not None:
if np.isscalar(axhline):
axhline = [axhline]
for axl in axhline:
ax.axhline(axl, color='r', linestyle='--')
ax.set_ylabel('score')
if label is not None:
if 'eog/' in label:
split = label.split('/')
label = ', '.join([split[0], split[2]])
elif '/' in label:
label = ', '.join(label.split('/'))
ax.set_title('(%s)' % label)
ax.set_xlabel('ICA components')
ax.set_xlim(-0.6, len(this_scores) - 0.4)
tight_layout(fig=fig)
adjust_top = 0.8 if len(fig.axes) == 1 else 0.9
fig.subplots_adjust(top=adjust_top)
fig.canvas.draw()
plt_show(show)
return fig
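# Illustrative usage sketch (assumes `ica` was fitted on `raw` and an EOG
# channel is present; variable names are placeholders, not part of this module):
# >>> eog_inds, eog_scores = ica.find_bads_eog(raw)
# >>> fig = plot_ica_scores(ica, eog_scores, exclude=eog_inds,
# ...                       labels='eog', axhline=0.5)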
@fill_doc
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True, n_pca_components=None):
"""Overlay of raw and cleaned signals given the unmixing matrix.
This method helps visualizing signal quality and artifact rejection.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
inst : instance of mne.io.Raw or mne.Evoked
The signal to plot. If `~mne.io.Raw`, the raw data is displayed before
and after cleaning. In a second panel, the cross-channel average will
be displayed. Since dipolar sources will be canceled out, this
representation is sensitive to artifacts. If `~mne.Evoked`, butterfly
traces for signals before and after cleaning will be superimposed.
exclude : array-like of int | None (default)
The components marked for exclusion. If ``None`` (default), ICA.exclude
will be used.
%(picks_base)s all channels that were included during fitting.
start, stop : float | None
The first and last time point (in seconds) of the data to plot. If
``inst`` is a `~mne.io.Raw` object, ``start=None`` and ``stop=None``
will be translated into ``start=0.`` and ``stop=3.``, respectively. For
`~mne.Evoked`, ``None`` refers to the beginning and end of the evoked
signal.
%(title_None)s
%(show)s
%(n_pca_components_apply)s
.. versionadded:: 0.22
Returns
-------
fig : instance of Figure
The figure.
"""
# avoid circular imports
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..preprocessing.ica import _check_start_stop
if ica.current_fit == 'unfitted':
raise RuntimeError('You need to fit the ICA first')
_validate_type(inst, (BaseRaw, Evoked), "inst", "Raw or Evoked")
if title is None:
title = 'Signals before (red) and after (black) cleaning'
picks = ica.ch_names if picks is None else picks
picks = _picks_to_idx(inst.info, picks, exclude=())
ch_types_used = inst.get_channel_types(picks=picks, unique=True)
if exclude is None:
exclude = ica.exclude
if not isinstance(exclude, (np.ndarray, list)):
raise TypeError('exclude must be of type list. Got %s'
% type(exclude))
if isinstance(inst, BaseRaw):
if start is None:
start = 0.0
if stop is None:
stop = 3.0
start_compare, stop_compare = _check_start_stop(inst, start, stop)
data, times = inst[picks, start_compare:stop_compare]
raw_cln = ica.apply(inst.copy(), exclude=exclude,
start=start, stop=stop,
n_pca_components=n_pca_components)
data_cln, _ = raw_cln[picks, start_compare:stop_compare]
fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
times=times, title=title,
ch_types_used=ch_types_used, show=show)
else:
assert isinstance(inst, Evoked)
inst = inst.copy().crop(start, stop)
if picks is not None:
with inst.info._unlock():
inst.info['comps'] = [] # can be safely disabled
inst.pick_channels([inst.ch_names[p] for p in picks])
evoked_cln = ica.apply(inst.copy(), exclude=exclude,
n_pca_components=n_pca_components)
fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
title=title, show=show)
return fig
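# Illustrative usage sketch (assumes `ica` is fitted and `raw` is the data it
# was fitted on; the excluded component indices are placeholders):
# >>> fig = plot_ica_overlay(ica, raw, exclude=[0, 1], start=0., stop=3.)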
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
"""
import matplotlib.pyplot as plt
# Restore sensor space data and keep all PCA components
    # let's now compare the data before and after cleaning.
# first the raw data
assert data.shape == data_cln.shape
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
plt.suptitle(title)
ax1.plot(times, data.T, color='r')
ax1.plot(times, data_cln.T, color='k')
ax1.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Raw data')
_ch_types = {'mag': 'Magnetometers',
'grad': 'Gradiometers',
'eeg': 'EEG'}
ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
ax2.set_title('Average across channels ({})'.format(ch_types))
ax2.plot(times, data.mean(0), color='r')
ax2.plot(times, data_cln.mean(0), color='k')
ax2.set(xlabel='Time (s)', xlim=times[[0, -1]])
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked before IC rejection.
evoked_cln : instance of mne.Evoked
The Evoked after IC rejection.
title : str | None
The title of the figure.
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of Figure
"""
import matplotlib.pyplot as plt
ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
n_rows = len(ch_types_used)
ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
c in evoked_cln]
if len(ch_types_used) != len(ch_types_used_cln):
raise ValueError('Raw and clean evokeds must match. '
'Found different channels.')
fig, axes = plt.subplots(n_rows, 1)
if title is None:
title = 'Average signal before (red) and after (black) ICA'
fig.suptitle(title)
axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
evoked.plot(axes=axes, show=False, time_unit='s')
for ax in fig.axes:
for line in ax.get_lines():
line.set_color('r')
fig.canvas.draw()
evoked_cln.plot(axes=axes, show=False, time_unit='s')
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_sources(ica, inst, picks, exclude, start, stop, show, title, block,
show_scrollbars, show_first_samp, time_format):
"""Plot the ICA components as a RawArray or EpochsArray."""
from ._figure import _get_browser
from .. import EpochsArray, BaseEpochs
from ..io import RawArray, BaseRaw
# handle defaults / check arg validity
is_raw = isinstance(inst, BaseRaw)
is_epo = isinstance(inst, BaseEpochs)
sfreq = inst.info['sfreq']
color = _handle_default('color', (0., 0., 0.))
units = _handle_default('units', None)
scalings = (_compute_scalings(None, inst) if is_raw else
_handle_default('scalings_plot_raw'))
scalings['misc'] = 5.
scalings['whitened'] = 1.
unit_scalings = _handle_default('scalings', None)
# data
if is_raw:
data = ica._transform_raw(inst, 0, len(inst.times))[picks]
else:
data = ica._transform_epochs(inst, concatenate=True)[picks]
# events
if is_epo:
event_id_rev = {v: k for k, v in inst.event_id.items()}
event_nums = inst.events[:, 2]
event_color_dict = _make_event_color_dict(None, inst.events,
inst.event_id)
# channel properties / trace order / picks
ch_names = list(ica._ica_names) # copy
ch_types = ['misc' for _ in picks]
# add EOG/ECG channels if present
eog_chs = pick_types(inst.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(inst.info, meg=False, ecg=True, ref_meg=False)
for eog_idx in eog_chs:
ch_names.append(inst.ch_names[eog_idx])
ch_types.append('eog')
for ecg_idx in ecg_chs:
ch_names.append(inst.ch_names[ecg_idx])
ch_types.append('ecg')
extra_picks = np.concatenate((eog_chs, ecg_chs)).astype(int)
if len(extra_picks):
if is_raw:
eog_ecg_data, _ = inst[extra_picks, :]
else:
eog_ecg_data = np.concatenate(inst.get_data(extra_picks), axis=1)
data = np.append(data, eog_ecg_data, axis=0)
picks = np.concatenate(
(picks, ica.n_components_ + np.arange(len(extra_picks))))
ch_order = np.arange(len(picks))
n_channels = min([20, len(picks)])
ch_names_picked = [ch_names[x] for x in picks]
# create info
info = create_info(ch_names_picked, sfreq, ch_types=ch_types)
with info._unlock():
info['meas_date'] = inst.info['meas_date']
info['bads'] = [ch_names[x] for x in exclude if x in picks]
if is_raw:
inst_array = RawArray(data, info, inst.first_samp)
inst_array.set_annotations(inst.annotations)
else:
data = data.reshape(-1, len(inst), len(inst.times)).swapaxes(0, 1)
inst_array = EpochsArray(data, info)
# handle time dimension
start = 0 if start is None else start
_last = inst.times[-1] if is_raw else len(inst.events)
stop = min(start + 20, _last) if stop is None else stop
first_time = inst._first_time if show_first_samp else 0
if is_raw:
duration = stop - start
start += first_time
else:
n_epochs = stop - start
total_epochs = len(inst)
epoch_n_times = len(inst.times)
n_epochs = min(n_epochs, total_epochs)
n_times = total_epochs * epoch_n_times
duration = n_epochs * epoch_n_times / sfreq
event_times = (np.arange(total_epochs) * epoch_n_times
+ inst.time_as_index(0)) / sfreq
# NB: this includes start and end of data:
boundary_times = np.arange(total_epochs + 1) * epoch_n_times / sfreq
if duration <= 0:
raise RuntimeError('Stop must be larger than start.')
# misc
bad_color = (0.8, 0.8, 0.8)
title = 'ICA components' if title is None else title
params = dict(inst=inst_array,
ica=ica,
ica_inst=inst,
info=info,
# channels and channel order
ch_names=np.array(ch_names_picked),
ch_types=np.array(ch_types),
ch_order=ch_order,
picks=picks,
n_channels=n_channels,
picks_data=list(),
# time
t_start=start if is_raw else boundary_times[start],
duration=duration,
n_times=inst.n_times if is_raw else n_times,
first_time=first_time,
time_format=time_format,
decim=1,
# events
event_times=None if is_raw else event_times,
# preprocessing
projs=list(),
projs_on=np.array([], dtype=bool),
apply_proj=False,
remove_dc=True, # for EOG/ECG
filter_coefs=None,
filter_bounds=None,
noise_cov=None,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=bad_color,
ch_color_dict=color,
# display
butterfly=False,
clipping=None,
scrollbars_visible=show_scrollbars,
scalebars_visible=False,
window_title=title)
if is_epo:
params.update(n_epochs=n_epochs,
boundary_times=boundary_times,
event_id_rev=event_id_rev,
event_color_dict=event_color_dict,
event_nums=event_nums,
epoch_color_bad=(1, 0, 0),
epoch_colors=None,
xlabel='Epoch number')
fig = _get_browser(**params)
fig._update_picks()
# update data, and plot
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
# plot annotations (if any)
if is_raw:
fig._setup_annotation_colors()
fig._update_annotation_segments()
fig._draw_annotations()
plt_show(show, block=block)
return fig
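# Note (illustrative, not part of the original module): `_plot_sources` is the
# backend typically reached through the public API, e.g.
# >>> ica.plot_sources(raw, picks=range(5), start=0., stop=10.)
# where `ica` and `raw` are placeholders for a fitted ICA and its Raw data.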
| 37.793907
| 79
| 0.594362
|
072313288a0147d787e6fe2050ed7b0f6ef19dc9
| 1,461
|
py
|
Python
|
Task2G.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
Task2G.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
Task2G.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
from floodsystem.stationdata import build_station_list
from floodsystem.stationdata import update_water_levels
from floodsystem.flood import stations_level_over_threshold
from floodsystem.flood import stations_highest_rel_level
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.analysis import polyfit
from floodsystem.plot import plot_water_level_with_fit
import matplotlib.pyplot as plt
import datetime
import numpy as np
def run():
"""Requirements for Task 2G"""
# Build list of stations
stations = build_station_list()
# Ensure stations have up-to-date data
update_water_levels(stations)
# Create list of stations and their relative water levels
levelstations = stations_highest_rel_level(stations, len(stations))
# Create empty arrays for different risk categories
low_risk, moderate_risk, high_risk, severe_risk = [], [], [], []
# Iterate over stations
for i in levelstations:
# Separating into categories by relative water level
        if i[1] < -1:
            low_risk.append(i[0].name)
        elif i[1] < 0:
            moderate_risk.append(i[0].name)
        elif i[1] < 1:
            high_risk.append(i[0].name)
        else:
            severe_risk.append(i[0].name)
print(severe_risk)
if __name__ == "__main__":
print("*** Task 2G: CUED Part IA Flood Warning System ***")
run()
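# Illustrative example of the categorisation above (values are hypothetical):
# a station with relative level -1.5 lands in low_risk, -0.2 in moderate_risk,
# 0.4 in high_risk and 1.3 in severe_risk.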
| 31.76087
| 73
| 0.723477
|
0b12c4c0045f7bb5ca3800e10d5c2ee9b9dc1ddf
| 23,680
|
py
|
Python
|
scripts/scenes.py
|
Mu-L/yocto-gl
|
c5650f26166807e7046caf5072ce803d9220ed16
|
[
"MIT"
] | null | null | null |
scripts/scenes.py
|
Mu-L/yocto-gl
|
c5650f26166807e7046caf5072ce803d9220ed16
|
[
"MIT"
] | null | null | null |
scripts/scenes.py
|
Mu-L/yocto-gl
|
c5650f26166807e7046caf5072ce803d9220ed16
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3 -B
import click, glob, os, sys, math, json, csv
@click.group()
def cli():
pass
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='path')
@click.option('--envname', '-e', default='')
def view(directory='mcguire', scene='*', format='json', mode='path', envname=''):
modes = {
'path': '--resolution 1280 --bounces 8 --clamp 10',
'embree': '--resolution 1280 --bounces 8 --clamp 10 --embreebvh',
'eyelight': '--resolution 1280 -t eyelight --bounces 8 --clamp 10',
'eyelight-quick': '--resolution 1280 --samples 16 --sampler eyelight --bounces 8 --clamp 10'
}
options = modes[mode]
envoptions = f'--envname {envname}' if envname else ''
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
extraoptions = ''
if os.path.exists(f'{dirname}/yscene_render.txt'):
with open(f'{dirname}/yscene_render.txt') as f:
extraoptions = f.read().strip()
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
if format == 'pbrt':
with open(filename) as f:
if 'WorldBegin' not in f.read(): continue
cmd = f'../yocto-gl/bin/yscene view {options} {extraoptions} {envoptions} --scene {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
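# Illustrative invocations (directory, format and scene names are hypothetical):
#   ./scenes.py view -d mcguire -f json -s bedroom -m eyelight-quick
#   ./scenes.py view -d pbrt3 -f pbrt -m embree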
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='path')
def render(directory='mcguire', scene='*', format='json', mode='path'):
modes = {
'path': '--samples 64 --resolution 1280 --bounces 8 --clamp 10',
'path-face': '--samples 256 --resolution 1280 --bounces 8 --clamp 10',
'embree': '--samples 256 --resolution 1280 --bounces 8 --clamp 10 --embreebvh',
'eyelight': '--samples 16 --resolution 1280 --bounces 8 --clamp 10 --sampler eyelight',
'embree-face': '--samples 1024 --resolution 1280 --bounces 8 --clamp 10 --embreebvh',
'final': '--samples 4096 --resolution 1280 --bounces 8 --clamp 10 --embreebvh',
}
options = modes[mode]
outformat = 'png' if 'eyelight' in mode else 'hdr'
outprefix = 'eyelight' if 'eyelight' in mode else 'images'
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
extracams = []
if 'sanmiguel' in dirname: extracams = ['camera2', 'camera3']
if 'island' in dirname: extracams = ["beachCam", "birdseyeCam", "dunesACam", "grassCam", "palmsCam", "rootsCam", "shotCam"]
if 'landscape' in dirname: extracams = ['camera2', 'camera3', 'camera4']
extraoptions = ''
if os.path.exists(f'{dirname}/yscene_render.txt'):
with open(f'{dirname}/yscene_render.txt') as f:
extraoptions = f.read().strip()
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
if format == 'pbrt':
with open(filename) as f:
if 'WorldBegin' not in f.read(): continue
basename = os.path.basename(filename).replace(f'.{format}', '')
os.system(f'mkdir -p {directory}/{outprefix}-{format}')
imagename = f'{directory}/{outprefix}-{format}/{basename}.{outformat}'
cmd = f'../yocto-gl/bin/yscene render --output {imagename} {options} {extraoptions} {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
for idx, cam in enumerate(extracams, 1):
imagename = f'{directory}/{outprefix}-{format}/{basename}-c{idx}.{outformat}'
cmd = f'../yocto-gl/bin/yscene render --output {imagename} --camera {cam} {options} {extraoptions} --scene {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
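# Illustrative invocation (scene name is hypothetical). HDR renders are written
# to <directory>/images-<format>/, eyelight previews to <directory>/eyelight-<format>/:
#   ./scenes.py render -d mcguire -f json -s bedroom -m path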
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='default')
def info(directory='mcguire', scene='*', format='json', mode='default'):
modes = {
'default': '',
'validate': '--validate'
}
options = modes[mode]
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
extraoptions = ''
if os.path.exists(f'{dirname}/yscene_render.txt'):
with open(f'{dirname}/yscene_render.txt') as f:
extraoptions = f.read().strip()
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
if format == 'pbrt':
with open(filename) as f:
if 'WorldBegin' not in f.read(): continue
cmd = f'../yocto-gl/bin/yscene info {options} {extraoptions} --scene {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='default')
def validate(directory='mcguire', scene='*', format='json', mode='default'):
modes = {
'default': ''
}
options = modes[mode]
schema = '../yocto-gl/scripts/scene.schema.json'
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
extraoptions = ''
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
cmd = f'../yocto-gl/scripts/validate-scene.py {schema} {filename} {options}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='linear')
def tonemap(directory='mcguire', scene='*', format='json', mode='filmic'):
modes = {
'linear': '',
'filmic': '--filmic --exposure 0.5'
}
options = modes[mode]
outformat = 'png'
outprefix = 'images'
from PIL import Image, ImageFont, ImageDraw
fontname1 = '~/Library/Fonts/FiraSansCondensed-Regular.ttf'
fontname2 = '~/Library/Fonts/FiraSansCondensed-Regular.ttf'
font1 = ImageFont.truetype(fontname1, 30)
font2 = ImageFont.truetype(fontname2, 18)
for filename in sorted(
glob.glob(f'{directory}/{outprefix}-{format}/{scene}.hdr') +
glob.glob(f'{directory}/{outprefix}-{format}/{scene}.exr')):
imagename = filename.replace(f'.exr', f'.{outformat}').replace(
f'.hdr', f'.{outformat}')
cmd = f'../yocto-gl/bin/yimage convert --output {imagename} {options} --image {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
img = Image.open(imagename)
w, h = img.size
draw = ImageDraw.Draw(img)
tw, _ = draw.textsize("Yocto/GL", font=font1)
draw.rectangle([w - 8, h - 32 - 8, w - 8 - 8 - tw, h - 8], (0, 0, 0))
draw.text((w - 8 - 4, h - 26 - 8 - 4), "Yocto/GL", (255, 255, 255), font=font1, anchor='rt')
if directory in ['bitterli', 'disney', 'mcguire', 'pbrt3', 'yocto', 'heads', 'blender', 'fabio']:
authorfilename = filename.replace('images-json/', f'{format}/').replace(
'-fr.', '.').replace('-hr.', '.').replace('-c1.', '.').replace(
'-c2.', '.').replace('-c3.', '.').replace('-c4.', '.').replace(
'-c5.', '.').replace('-c6.', '.').replace('-c7.', '.').replace(
'.hdr', '') + '/AUTHOR.txt'
print(authorfilename)
with open(authorfilename) as f:
text = f.read().strip()
tw, _ = draw.textsize(text, font=font2)
draw.rectangle([8, h - 26 - 8, 8 + 8 + tw, h - 8], (0, 0, 0))
draw.text((8 + 4, h - 20 - 8 - 4), text, (255, 255, 255), font=font2)
img.save(imagename)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--mode', '-m', default='jpg')
def gallery(directory='mcguire', scene='*', format='json', mode='filmic'):
modes = {
'jpg': ''
}
options = modes[mode]
inprefix = 'images'
outformat = 'jpg'
outprefix = 'gallery'
os.system(f'mkdir -p {directory}/{outprefix}-{format}')
from PIL import Image
for filename in sorted(glob.glob(f'{directory}/{inprefix}-{format}/{scene}.png')):
imagename = filename.replace(f'{inprefix}-', f'{outprefix}-').replace('.png',f'.{outformat}')
print(filename, file=sys.stderr)
img = Image.open(filename)
rgb_img = img.convert('RGB')
rgb_img.save(imagename)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='obj')
@click.option('--clean/--no-clean', '-C', default=False)
def sync_images(directory='mcguire',
scene='*',
format='obj',
clean=True):
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
if format == 'pbrt':
with open(filename) as f:
if 'WorldBegin' not in f.read(): continue
basename = os.path.basename(filename).replace(f'.{format}', '')
os.system(f'mkdir -p {directory}/images-{format}')
            imagename = f'{directory}/images-{format}/{basename}.*'
if clean:
cmd = f'rm {dirname}/*.png'
print(cmd, file=sys.stderr)
os.system(cmd)
cmd = f'rm {dirname}/*.hdr'
print(cmd, file=sys.stderr)
os.system(cmd)
cmd = f'cp {imagename} {dirname}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='obj')
@click.option('--outformat', '-F', default='json')
@click.option('--mode', '-m', default='default')
@click.option('--clean/--no-clean', '-C', default=False)
def convert(directory='mcguire',
scene='*',
format='obj',
outformat="json",
mode='path',
clean=True):
modes = {
'default': '',
}
options = modes[mode]
for dirname in sorted(glob.glob(f'{directory}/source/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
copyright_options = ''
if os.path.exists(f'{dirname}/AUTHOR.txt'):
with open(f'{dirname}/AUTHOR.txt') as f:
copyright = f.read().strip().replace('"', '')
copyright_options += f'--copyright "{copyright}"'
outdirname = dirname.replace(f'/source/', f'/{outformat}/')
if clean: os.system(f'rm -rf {outdirname}')
os.system(f'mkdir -p {outdirname}')
for auxname in ['AUTHOR.txt', 'LICENSE.txt', 'LINKS.txt', 'README.txt', 'yscene_render.txt']:
if os.path.exists(f'{dirname}/{auxname}'):
os.system(f'cp {dirname}/{auxname} {outdirname}/')
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
if format == 'pbrt':
with open(filename) as f:
if 'WorldBegin' not in f.read(): continue
outname = filename.replace(f'/source/', f'/{outformat}/').replace(
f'.{format}', f'.{outformat}')
cmd = f'../yocto-gl/bin/yscene convert --output {outname} {options} {filename} {copyright_options}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='obj')
@click.option('--outformat', '-F', default='json')
@click.option('--mode', '-m', default='default')
def copyright(directory='mcguire',
scene='*',
format='obj',
outformat="json",
mode='default'):
modes = {
'default': '',
}
options = modes[mode]
for dirname in sorted(glob.glob(f'{directory}/source/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
outdirname = dirname.replace(f'/source/', f'/{outformat}/')
os.system(f'mkdir -p {outdirname}')
if os.path.exists(f'{dirname}/AUTHOR.txt'):
os.system(f'cp {dirname}/AUTHOR.txt {outdirname}/')
if os.path.exists(f'{dirname}/LICENSE.txt'):
os.system(f'cp {dirname}/LICENSE.txt {outdirname}/')
if os.path.exists(f'{dirname}/LINKS.txt'):
os.system(f'cp {dirname}/LINKS.txt {outdirname}/')
@cli.command()
@click.option('--directory', '-d', default='yuksel')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='hair')
@click.option('--outformat', '-F', default='ply')
@click.option('--mode', '-m', default='default')
@click.option('--clean-models/--no-clean-models', '-C', default=False)
def convert_hair(directory='yuksel',
scene='*',
format='hair',
outformat="ply",
mode='path',
clean_models=True):
modes = {'default': ''}
options = modes[mode]
for dirname in sorted(glob.glob(f'{directory}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
if 'ecosys' in dirname and outformat == 'obj': continue
if 'landscape' in dirname and outformat == 'obj': continue
if 'fractal' in dirname and outformat == 'obj': continue
if 'pavilion' in dirname and outformat == 'obj': continue
for filename in sorted(glob.glob(f'{dirname}/{format}/*.{format}')):
outname = filename.replace(f'/{format}/', f'/json/').replace(
f'.{format}', f'.{outformat}')
filedir = os.path.dirname(filename)
cmd = f'../yocto-gl/bin/ymshproc -o {outname} {options} {filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='obj')
@click.option('--mode', '-m', default='default')
def backup(directory='mcguire', scene='*', format='obj', mode='default'):
modes = {
'default': '-r -X -q',
}
options = modes[mode]
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
outdir = f'{directory}/backup-{format}'
basedir = f'{directory}/{format}'
os.system(f'mkdir -p {outdir}')
dirname = dirname.replace(basedir + '/', '')
outname = dirname + '.zip'
os.system(f'rm {outdir}/{outname}')
cmd = f'cd {basedir}; zip {options} {outname} {dirname}; mv {outname} ../../{outdir}/'
print(cmd)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='procedurals')
@click.option('--mode', '-m', default='skies')
@click.option('--clean/--no-clean', '-C', default=False)
def make_procedurals(directory='procedurals', mode='skies', clean=False):
if mode == 'skies':
dirname = f'{directory}/hdr/textures'
os.system(f'mkdir -p {dirname}')
angles = [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 85, 90]
for name in ['sky', 'sun']:
for angle in angles:
jsonname = f'{dirname}/_proc.json'
outname = f'{dirname}/{name}-{angle:02}.hdr'
js = {
'type': 'sky',
'width': 2048,
'height': 1024,
'sun_angle': math.radians(angle),
'has_sun': 'sun' in name,
'turbidity': 3
}
with open(jsonname, 'w') as f:
json.dump(js, f, indent=2)
cmd = f'../yocto-gl/bin/yimproc -o {outname} {jsonname}'
print(cmd, file=sys.stderr)
os.system(cmd)
os.system(f'rm {jsonname}')
else:
print('unknown mode')
@cli.command()
def sync():
os.system(
"rsync -avcm --delete --include '*/' --include '*.zip' --include '*.tgz' --include '*.pdf' --exclude='*' ./ ../yocto-scenes"
)
# os.system('rsync -avc --delete ./ ../yocto-scenes')
@cli.command()
@click.option('--directory', '-d', default='pbrt-v3-scenes')
@click.option('--scene', '-s', default='*')
def pbrtparse(directory='pbrt-v3-scenes', scene='*'):
broken_scenes = [
'bunny-fur/f3-15.pbrt',
"dambreak/dambreak0.pbrt",
"hair/curly-hair.pbrt",
"contemporary-bathroom/contemporary-bathroom.pbrt",
"head/head.pbrt",
"ecosys/ecosys.pbrt",
"sanmiguel/sanmiguel.pbrt",
"sssdragon/dragon_50.pbrt",
"white-room/whiteroom-daytime.pbrt",
]
scenes = [
'barcelona-pavilion/pavilion-day.pbrt',
'barcelona-pavilion/pavilion-night.pbrt',
'bathroom/bathroom.pbrt',
'bmw-m6/bmw-m6.pbrt',
'breakfast/breakfast.pbrt',
'buddha-fractal/buddha-fractal.pbrt',
'bunny-fur/f3-15.pbrt',
'caustic-glass/glass.pbrt',
"chopper-titan/chopper-titan.pbrt",
"cloud/cloud.pbrt",
"coffee-splash/splash.pbrt",
"contemporary-bathroom/contemporary-bathroom.pbrt",
"crown/crown.pbrt",
"dambreak/dambreak0.pbrt",
"dragon/f8-4a.pbrt",
"ecosys/ecosys.pbrt",
"ganesha/ganesha.pbrt",
"hair/curly-hair.pbrt",
"hair/sphere-hairblock.pbr",
"head/head.pbrt",
"killeroos/killeroo-simple.pbrt",
"landscape/view-0.pbrt",
"lte-orb/lte-orb-silver.pbrt",
"measure-one/frame85.pbrt",
"pbrt-book/book.pbrt",
"sanmiguel/sanmiguel.pbrt",
"simple/dof-dragons.pbrt",
"smoke-plume/plume-184.pbrt",
"sportscar/sportscar.pbrt",
"sssdragon/dragon_50.pbrt",
"structuresynth/microcity.pbrt",
"transparent-machines/frame542.pbrt",
"tt/tt.pbrt",
"veach-bidir/bidir.pbrt",
"veach-mis/mis.pbrt",
"villa/villa-daylight.pbrt",
"volume-caustic/caustic.pbrt",
"vw-van/vw-van.pbrt",
"white-room/whiteroom-daytime.pbrt",
"yeahright/yeahright.pbrt",
]
# for filename in scenes:
# if scene != '*' and not filename.startswith(f'{scene}/'): continue
# cmd = f'../yocto-gl/bin/yitrace {filename}'
# print(cmd, file=sys.stderr)
# os.system(cmd)
for filename in scenes:
if scene != '*' and not filename.startswith(f'{scene}/'): continue
cmd = f'../yocto-gl/bin/yitrace {directory}/{filename}'
print(cmd, file=sys.stderr)
os.system(cmd)
@cli.command()
@click.option('--directory', '-d', default='mcguire')
@click.option('--scene', '-s', default='*')
@click.option('--format', '-f', default='json')
@click.option('--outformat', '-F', default='csv')
@click.option('--mode', '-m', default='default')
def stats(directory='mcguire',
scene='*',
format='json',
outformat="csv",
mode='default'):
stats = []
keys = [
'name', 'cameras', 'environments', 'shapes', 'subdivs', 'textures',
'stextures'
]
for dirname in sorted(glob.glob(f'{directory}/{format}/{scene}')):
if not os.path.isdir(dirname): continue
if '/_' in dirname: continue
for filename in sorted(glob.glob(f'{dirname}/*.{format}')):
with open(filename) as f:
scene = json.load(f)
stat = {}
stat['name'] = filename.partition('/')[2].partition('.')[0]
stat['cameras'] = len(
scene['cameras']) if 'cameras' in scene else 0
stat['environments'] = len(
scene['environments']) if 'environments' in scene else 0
stat['shapes'] = len(scene['shapes']) if 'shapes' in scene else 0
stat['subdivs'] = len(
scene['subdivs']) if 'subdivs' in scene else 0
textures = {}
for shape in scene['shapes']:
for key, value in shape.items():
if '_tex' not in key: continue
if value not in textures: textures[value] = 0
textures[value] += 1
stat['textures'] = len(textures)
stat['stextures'] = sum(count for _, count in textures.items())
stats += [stat]
os.system(f'mkdir -p {directory}/_stats-{format}')
with open(f'{directory}/_stats-{format}/stats.{outformat}',
'w',
newline='') as f:
writer = csv.writer(f)
writer.writerow(keys)
for stat in stats:
writer.writerow([stat[key] for key in keys])
@cli.command()
@click.option('--directory', '-d', default='mcguire')
def fix_objx(directory='mcguire'):
for filename in glob.glob(directory + "/source/*/*.objx"):
newname = filename.replace('.objx', '.obx')
obx = ''
with open(filename) as f:
for line in f:
if line.startswith('c'):
tokens = line.split()
for i in range(len(tokens)):
if i in [0, 1, 2]: continue
tokens[i] = float(tokens[i])
obx += 'newCam {}\n'.format(tokens[1])
obx += ' Ca {}\n'.format(round(tokens[3] / tokens[4], 3))
obx += ' Cl {}\n'.format(round(tokens[5] * 0.036 / tokens[3],3))
obx += ' Ct {} {} {} {} {} {} 0 1 0\n'.format(round(tokens[17], 2), round(tokens[18], 2), round(tokens[19], 2), round(tokens[17] - tokens[14] * tokens[6], 2), round(tokens[18] - tokens[15] * tokens[6], 2), round(tokens[19] - tokens[16] * tokens[6], 2))
obx += '\n';
if line.startswith('e'):
tokens = line.split()
for i in range(len(tokens)):
if i in [0, 1, 5]: continue
tokens[i] = float(tokens[i])
obx += 'newEnv {}\n'.format(tokens[1])
obx += ' Ee {} {} {}\n'.format(round(tokens[2], 1), round(tokens[3], 1), round(tokens[4], 1))
if tokens[5] != '""': obx += ' map_Ee {}\n'.format(tokens[5])
obx += ' Et {} {} {} {} {} {} 0 1 0\n'.format(round(tokens[15], 2), round(tokens[16], 2), round(tokens[17], 2), round(tokens[15] + tokens[12], 2), round(tokens[16] + tokens[13], 2), round(tokens[17] + tokens[14], 2))
obx += '\n';
with open(newname, 'wt') as f:
f.write(obx)
cli()
| 43.132969
| 273
| 0.547128
|
490882b05fcf6417078848aa9d3cb0d22b1c4262
| 5,720
|
py
|
Python
|
tests/test_manage_py_scan.py
|
JulianFeinauer/pytest-django
|
f27c6aa1d44beb46fefa4718fefcf541478643d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_manage_py_scan.py
|
JulianFeinauer/pytest-django
|
f27c6aa1d44beb46fefa4718fefcf541478643d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_manage_py_scan.py
|
JulianFeinauer/pytest-django
|
f27c6aa1d44beb46fefa4718fefcf541478643d7
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found(django_testdir) -> None:
# XXX: Important: Do not chdir() to django_project_root since runpytest_subprocess
# will call "python /path/to/pytest.py", which will implicitly add cwd to
# the path. By instead calling "python /path/to/pytest.py
# django_project_root", we avoid implicitly adding the project to sys.path
# This matches the behaviour when pytest is called directly as an
# executable (cwd is not added to the Python path)
django_testdir.create_test_module(
"""
def test_foobar():
assert 1 + 1 == 2
"""
)
result = django_testdir.runpytest_subprocess("django_project_root")
assert result.ret == 0
outcomes = result.parseoutcomes()
assert outcomes["passed"] == 1
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found_with_k(django_testdir, monkeypatch) -> None:
"""Test that cwd is checked as fallback with non-args via '-k foo'."""
testfile = django_testdir.create_test_module(
"""
def test_foobar():
assert True
""",
"sub/test_in_sub.py",
)
monkeypatch.chdir(testfile.dirname)
result = django_testdir.runpytest_subprocess("-k", "test_foobar")
assert result.ret == 0
outcomes = result.parseoutcomes()
assert outcomes["passed"] == 1
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found_with_k_and_cwd(django_testdir, monkeypatch) -> None:
"""Cover cwd not used as fallback if present already in args."""
testfile = django_testdir.create_test_module(
"""
def test_foobar():
assert True
""",
"sub/test_in_sub.py",
)
monkeypatch.chdir(testfile.dirname)
result = django_testdir.runpytest_subprocess(testfile.dirname, "-k", "test_foobar")
assert result.ret == 0
outcomes = result.parseoutcomes()
assert outcomes["passed"] == 1
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found_absolute(django_testdir, monkeypatch) -> None:
"""This only tests that "." is added as an absolute path (#637)."""
django_testdir.create_test_module(
"""
def test_dot_not_in_syspath():
import sys
assert '.' not in sys.path[:5]
"""
)
monkeypatch.chdir("django_project_root")
# NOTE: the "." here is important to test for an absolute path being used.
result = django_testdir.runpytest_subprocess("-s", ".")
assert result.ret == 0
outcomes = result.parseoutcomes()
assert outcomes["passed"] == 1
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found_invalid_settings(django_testdir, monkeypatch) -> None:
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "DOES_NOT_EXIST")
result = django_testdir.runpytest_subprocess("django_project_root")
assert result.ret != 0
result.stderr.fnmatch_lines(["*ImportError:*DOES_NOT_EXIST*"])
result.stderr.fnmatch_lines(["*pytest-django found a Django project*"])
def test_django_project_scan_disabled_invalid_settings(django_testdir, monkeypatch) -> None:
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "DOES_NOT_EXIST")
django_testdir.makeini(
"""
[pytest]
django_find_project = false
"""
)
result = django_testdir.runpytest_subprocess("django_project_root")
assert result.ret != 0
result.stderr.fnmatch_lines(["*ImportError*DOES_NOT_EXIST*"])
result.stderr.fnmatch_lines(
["*pytest-django did not search for " "Django projects*"]
)
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_django_project_found_invalid_settings_version(django_testdir, monkeypatch) -> None:
"""Invalid DSM should not cause an error with --help or --version."""
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "DOES_NOT_EXIST")
result = django_testdir.runpytest_subprocess("django_project_root", "--version", "--version")
assert result.ret == 0
if hasattr(pytest, "version_tuple") and pytest.version_tuple >= (7, 0):
version_out = result.stdout
else:
version_out = result.stderr
version_out.fnmatch_lines(["*This is pytest version*"])
result = django_testdir.runpytest_subprocess("django_project_root", "--help")
assert result.ret == 0
result.stdout.fnmatch_lines(["*usage:*"])
@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True)
def test_runs_without_error_on_long_args(django_testdir) -> None:
django_testdir.create_test_module(
"""
def test_this_is_a_long_message_which_caused_a_bug_when_scanning_for_manage_py_12346712341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234112341234112451234123412341234123412341234123412341234123412341234123412341234123412341234123412341234():
assert 1 + 1 == 2
""" # noqa: E501
)
result = django_testdir.runpytest_subprocess(
"-k",
"this_is_a_long_message_which_caused_a_bug_when_scanning_for_manage_py_12346712341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234112341234112451234123412341234123412341234123412341234123412341234123412341234123412341234123412341234", # noqa: E501
"django_project_root",
)
assert result.ret == 0
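# Illustrative directory layout assumed by the tests above (a sketch only; the
# exact file names are created by the django_testdir fixture and may differ):
#   <testdir>/
#     django_project_root/      # contains manage.py and the settings module
#     tpkg/test_the_test.py     # default target of create_test_module()
#     sub/test_in_sub.py        # used by the '-k' fallback tests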
| 38.648649
| 355
| 0.740734
|
673f278418177ff9d8eaef3e77169115851cd127
| 14,600
|
py
|
Python
|
src/sentry/models/debugfile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/debugfile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/debugfile.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import re
import os
import six
import uuid
import time
import errno
import shutil
import hashlib
import logging
import tempfile
from django.db import models
from symbolic import Archive, SymbolicError, ObjectErrorUnsupportedObject, normalize_debug_id
from sentry import options
from sentry.constants import KNOWN_DIF_FORMATS
from sentry.db.models import FlexibleForeignKey, Model, sane_repr, BaseManager, JSONField
from sentry.models.file import File
from sentry.reprocessing import resolve_processing_issue, bump_reprocessing_revision
from sentry.utils.zip import safe_extract_zip
logger = logging.getLogger(__name__)
ONE_DAY = 60 * 60 * 24
ONE_DAY_AND_A_HALF = int(ONE_DAY * 1.5)
# How long a conversion failure is cached, keyed by checksum. Ten minutes is
# currently assumed to be a reasonable value here.
CONVERSION_ERROR_TTL = 60 * 10
DIF_MIMETYPES = dict((v, k) for k, v in KNOWN_DIF_FORMATS.items())
_proguard_file_re = re.compile(r"/proguard/(?:mapping-)?(.*?)\.txt$")
class BadDif(Exception):
pass
class ProjectDebugFileManager(BaseManager):
def find_missing(self, checksums, project):
if not checksums:
return []
checksums = [x.lower() for x in checksums]
missing = set(checksums)
found = ProjectDebugFile.objects.filter(
file__checksum__in=checksums, project=project
).values("file__checksum")
for values in found:
            missing.discard(values["file__checksum"])
return sorted(missing)
def find_by_checksums(self, checksums, project):
if not checksums:
return []
checksums = [x.lower() for x in checksums]
return ProjectDebugFile.objects.filter(file__checksum__in=checksums, project=project)
def find_by_debug_ids(self, project, debug_ids, features=None):
"""Finds debug information files matching the given debug identifiers.
If a set of features is specified, only files that satisfy all features
will be returned. This does not apply to legacy debug files that were
not tagged with features.
Returns a dict of debug files keyed by their debug identifier.
"""
features = frozenset(features) if features is not None else frozenset()
difs = (
ProjectDebugFile.objects.filter(project=project, debug_id__in=debug_ids)
.select_related("file")
.order_by("-id")
)
difs_by_id = {}
for dif in difs:
difs_by_id.setdefault(dif.debug_id, []).append(dif)
rv = {}
for debug_id, group in six.iteritems(difs_by_id):
with_features = [dif for dif in group if "features" in (dif.data or ())]
# In case we've never computed features for any of these files, we
# just take the first one and assume that it matches.
if not with_features:
rv[debug_id] = group[0]
continue
# There's at least one file with computed features. Older files are
# considered redundant and will be deleted. We search for the first
# file matching the given feature set. This might not resolve if no
# DIF matches the given feature set.
for dif in with_features:
if dif.features >= features:
rv[debug_id] = dif
break
return rv
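    # Illustrative usage sketch (the debug id and feature name are placeholders):
    # >>> difs = ProjectDebugFile.objects.find_by_debug_ids(
    # ...     project, ["67e9247c-814e-392b-a027-dbde6748fcbf"], features={"debug"})
    # >>> dif = difs.get("67e9247c-814e-392b-a027-dbde6748fcbf")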
class ProjectDebugFile(Model):
__core__ = False
file = FlexibleForeignKey("sentry.File")
object_name = models.TextField()
cpu_name = models.CharField(max_length=40)
project = FlexibleForeignKey("sentry.Project", null=True)
debug_id = models.CharField(max_length=64, db_column="uuid")
code_id = models.CharField(max_length=64, null=True)
data = JSONField(null=True)
objects = ProjectDebugFileManager()
class Meta:
index_together = (("project", "debug_id"), ("project", "code_id"))
db_table = "sentry_projectdsymfile"
app_label = "sentry"
__repr__ = sane_repr("object_name", "cpu_name", "debug_id")
@property
def file_format(self):
ct = self.file.headers.get("Content-Type", "unknown").lower()
return KNOWN_DIF_FORMATS.get(ct, "unknown")
@property
def file_type(self):
if self.data:
return self.data.get("type")
@property
def file_extension(self):
if self.file_format == "breakpad":
return ".sym"
if self.file_format == "macho":
return "" if self.file_type == "exe" else ".dSYM"
if self.file_format == "proguard":
return ".txt"
if self.file_format == "elf":
return "" if self.file_type == "exe" else ".debug"
if self.file_format == "pe":
return ".exe" if self.file_type == "exe" else ".dll"
if self.file_format == "pdb":
return ".pdb"
return ""
@property
def features(self):
return frozenset((self.data or {}).get("features", []))
def delete(self, *args, **kwargs):
super(ProjectDebugFile, self).delete(*args, **kwargs)
self.file.delete()
def clean_redundant_difs(project, debug_id):
"""Deletes redundant debug files from the database and file storage. A debug
file is considered redundant if there is a newer file with the same debug
identifier and the same or a superset of its features.
"""
difs = (
ProjectDebugFile.objects.filter(project=project, debug_id=debug_id)
.select_related("file")
.order_by("-id")
)
all_features = set()
for i, dif in enumerate(difs):
# We always keep the latest file. If it has no features, likely the
# previous files did not have features either and will be removed, or we
# keep both. Subsequent uploads will remove this file later.
if i > 0 and dif.features <= all_features:
dif.delete()
else:
all_features.update(dif.features)
def create_dif_from_id(project, meta, fileobj=None, file=None):
"""This creates a mach dsym file or proguard mapping from the given
debug id and open file object to a debug file. This will not verify the
debug id (intentionally so). Use `detect_dif_from_path` to do that.
"""
if meta.file_format == "proguard":
object_name = "proguard-mapping"
elif meta.file_format in ("macho", "elf", "pdb", "pe"):
object_name = meta.name
elif meta.file_format == "breakpad":
object_name = meta.name[:-4] if meta.name.endswith(".sym") else meta.name
else:
raise TypeError("unknown dif type %r" % (meta.file_format,))
if file is not None:
checksum = file.checksum
elif fileobj is not None:
h = hashlib.sha1()
while True:
chunk = fileobj.read(16384)
if not chunk:
break
h.update(chunk)
checksum = h.hexdigest()
fileobj.seek(0, 0)
else:
raise RuntimeError("missing file object")
dif = (
ProjectDebugFile.objects.select_related("file")
.filter(
project=project, debug_id=meta.debug_id, file__checksum=checksum, data__isnull=False
)
.order_by("-id")
.first()
)
if dif is not None:
return dif, False
if file is None:
file = File.objects.create(
name=meta.debug_id,
type="project.dif",
headers={"Content-Type": DIF_MIMETYPES[meta.file_format]},
)
file.putfile(fileobj)
else:
file.type = "project.dif"
file.headers["Content-Type"] = DIF_MIMETYPES[meta.file_format]
file.save()
dif = ProjectDebugFile.objects.create(
file=file,
debug_id=meta.debug_id,
code_id=meta.code_id,
cpu_name=meta.arch,
object_name=object_name,
project=project,
data=meta.data,
)
# The DIF we've just created might actually be removed here again. But since
# this can happen at any time in near or distant future, we don't care and
# assume a successful upload. The DIF will be reported to the uploader and
# reprocessing can start.
clean_redundant_difs(project, meta.debug_id)
resolve_processing_issue(project=project, scope="native", object="dsym:%s" % meta.debug_id)
return dif, True
def _analyze_progard_filename(filename):
match = _proguard_file_re.search(filename)
if match is None:
return None
ident = match.group(1)
try:
return six.text_type(uuid.UUID(ident))
except Exception:
pass
class DifMeta(object):
def __init__(self, file_format, arch, debug_id, path, code_id=None, name=None, data=None):
self.file_format = file_format
self.arch = arch
self.debug_id = debug_id
self.code_id = code_id
self.path = path
self.data = data
if name is not None:
self.name = os.path.basename(name)
elif path is not None:
self.name = os.path.basename(path)
@classmethod
def from_object(cls, obj, path, name=None, debug_id=None):
if debug_id is not None:
try:
debug_id = normalize_debug_id(debug_id)
except SymbolicError:
debug_id = None
        # Only allow overrides of the debug_id's age if the rest of the debug id
        # matches what we determine from the object file. We generally
        # trust the server more than the client.
obj_id = obj.debug_id
if obj_id and debug_id and obj_id[:36] == debug_id[:36]:
obj_id = debug_id
return cls(
file_format=obj.file_format,
arch=obj.arch,
debug_id=obj_id,
code_id=obj.code_id,
path=path,
# TODO: Extract the object name from the object
name=name,
data={"type": obj.kind, "features": list(obj.features)},
)
@property
def basename(self):
return os.path.basename(self.path)
def detect_dif_from_path(path, name=None, debug_id=None):
"""This detects which kind of dif(Debug Information File) the path
provided is. It returns an array since an Archive can contain more than
one Object.
"""
# proguard files (proguard/UUID.txt) or
# (proguard/mapping-UUID.txt).
proguard_id = _analyze_progard_filename(path)
if proguard_id is not None:
data = {"features": ["mapping"]}
return [
DifMeta(
file_format="proguard",
arch="any",
debug_id=proguard_id,
code_id=None,
path=path,
name=name,
data=data,
)
]
# native debug information files (MachO, ELF or Breakpad)
try:
archive = Archive.open(path)
except ObjectErrorUnsupportedObject as e:
raise BadDif("Unsupported debug information file: %s" % e)
except SymbolicError as e:
logger.warning("dsymfile.bad-fat-object", exc_info=True)
raise BadDif("Invalid debug information file: %s" % e)
else:
objs = []
for obj in archive.iter_objects():
objs.append(DifMeta.from_object(obj, path, name=name, debug_id=debug_id))
return objs
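# Illustrative usage sketch (the path is hypothetical):
# >>> metas = detect_dif_from_path("/tmp/upload/MyApp.dSYM/Contents/Resources/DWARF/MyApp")
# >>> [(m.file_format, m.debug_id, m.arch) for m in metas]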
def create_debug_file_from_dif(to_create, project):
"""Create a ProjectDebugFile from a dif (Debug Information File) and
return an array of created objects.
"""
rv = []
for meta in to_create:
with open(meta.path, "rb") as f:
dif, created = create_dif_from_id(project, meta, fileobj=f)
if created:
rv.append(dif)
return rv
def create_files_from_dif_zip(fileobj, project):
"""Creates all missing debug files from the given zip file. This
returns a list of all files created.
"""
scratchpad = tempfile.mkdtemp()
try:
safe_extract_zip(fileobj, scratchpad, strip_toplevel=False)
to_create = []
for dirpath, dirnames, filenames in os.walk(scratchpad):
for fn in filenames:
fn = os.path.join(dirpath, fn)
try:
difs = detect_dif_from_path(fn)
except BadDif:
difs = None
if difs is None:
difs = []
to_create = to_create + difs
rv = create_debug_file_from_dif(to_create, project)
        # Uploading new dSYMs changes the reprocessing revision
bump_reprocessing_revision(project)
return rv
finally:
shutil.rmtree(scratchpad)
class DIFCache(object):
@property
def cache_path(self):
return options.get("dsym.cache-path")
def get_project_path(self, project):
return os.path.join(self.cache_path, six.text_type(project.id))
def fetch_difs(self, project, debug_ids, features=None):
"""Given some ids returns an id to path mapping for where the
debug symbol files are on the FS.
"""
debug_ids = [six.text_type(debug_id).lower() for debug_id in debug_ids]
difs = ProjectDebugFile.objects.find_by_debug_ids(project, debug_ids, features)
rv = {}
for debug_id, dif in six.iteritems(difs):
dif_path = os.path.join(self.get_project_path(project), debug_id)
try:
os.stat(dif_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
dif.file.save_to(dif_path)
rv[debug_id] = dif_path
return rv
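    # Illustrative usage sketch via the module-level cache instance created at
    # the bottom of this file (debug id and feature name are placeholders):
    # >>> paths = ProjectDebugFile.difcache.fetch_difs(
    # ...     project, ["67e9247c-814e-392b-a027-dbde6748fcbf"], features={"symtab"})
    # >>> paths  # {debug_id: "<dsym.cache-path>/<project_id>/<debug_id>"}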
def clear_old_entries(self):
try:
cache_folders = os.listdir(self.cache_path)
except OSError:
return
cutoff = int(time.time()) - ONE_DAY_AND_A_HALF
for cache_folder in cache_folders:
cache_folder = os.path.join(self.cache_path, cache_folder)
try:
items = os.listdir(cache_folder)
except OSError:
continue
for cached_file in items:
cached_file = os.path.join(cache_folder, cached_file)
try:
mtime = os.path.getmtime(cached_file)
except OSError:
continue
if mtime < cutoff:
try:
os.remove(cached_file)
except OSError:
pass
ProjectDebugFile.difcache = DIFCache()
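# Illustrative usage sketch (not part of the original module). The debug id
# and feature name below are made-up examples; fetch_difs returns a
# {debug_id: local_path} mapping for symbols it could materialise on disk.
def _example_fetch_symbols(project):
    debug_ids = ["67e9247c-814e-392b-a027-dbde6748fcbf"]
    return ProjectDebugFile.difcache.fetch_difs(project, debug_ids, features=["debug"])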
| 32.087912
| 96
| 0.614589
|
0d1fc25a37d0f67bf67092ba7f8e4a07e534824e
| 377
|
py
|
Python
|
ancor/arch_factory.py
|
guybuk/ANCOR
|
229f8a52b81fa94cd584cbc6fc715697f98e43e2
|
[
"Apache-2.0"
] | 34
|
2021-04-26T17:35:19.000Z
|
2022-03-30T02:41:15.000Z
|
ancor/arch_factory.py
|
sxr9607/ANCOR
|
229f8a52b81fa94cd584cbc6fc715697f98e43e2
|
[
"Apache-2.0"
] | 5
|
2021-08-07T22:13:04.000Z
|
2022-02-11T10:03:15.000Z
|
ancor/arch_factory.py
|
sxr9607/ANCOR
|
229f8a52b81fa94cd584cbc6fc715697f98e43e2
|
[
"Apache-2.0"
] | 6
|
2021-04-06T07:51:17.000Z
|
2021-11-20T07:01:41.000Z
|
from models import model_pool
class ArchFactory(object):
def create_arch(self, arch_name):
encoder_q, encoder_k = model_pool[arch_name](), model_pool[arch_name]()
k2q_mapping = {k_name: q_name for q_name, k_name in
zip(encoder_q.state_dict().keys(), encoder_k.state_dict().keys())}
return encoder_q, encoder_k, k2q_mapping
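# Illustrative usage sketch (not part of the original module). "resnet12" is a
# hypothetical key in model_pool; any registered architecture name would do.
def _example_create_arch():
    factory = ArchFactory()
    encoder_q, encoder_k, k2q_mapping = factory.create_arch("resnet12")
    # k2q_mapping maps each key-encoder parameter name to the matching
    # query-encoder parameter name, e.g. for MoCo-style momentum updates.
    return encoder_q, encoder_k, k2q_mapping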
| 34.272727
| 89
| 0.676393
|
d0cb0fa44bd2c18c96268816cf62c6533664dfcb
| 2,039
|
py
|
Python
|
actleto/annotator/visualizers/image.py
|
IINemo/jupyter_al_annotator
|
8e4256653c220fffdd15b1fdb30be0253c3f08f7
|
[
"MIT"
] | 22
|
2017-09-25T16:15:54.000Z
|
2022-01-07T22:27:30.000Z
|
actleto/annotator/visualizers/image.py
|
IINemo/jupyter_al_annotator
|
8e4256653c220fffdd15b1fdb30be0253c3f08f7
|
[
"MIT"
] | null | null | null |
actleto/annotator/visualizers/image.py
|
IINemo/jupyter_al_annotator
|
8e4256653c220fffdd15b1fdb30be0253c3f08f7
|
[
"MIT"
] | 9
|
2017-09-14T13:59:17.000Z
|
2022-02-22T16:15:17.000Z
|
import numpy as np
from ipywidgets import Image as WidgImage
from PIL import Image
import io
class ImageVisualizer(object):
"""Visualizer for images.
This visualizer can display images (e.g., MNIST), which are stored as rows
in a dataframe.
"""
def __init__(self, columns_range, img_shape, img_mode, preview_shape):
"""ImageVisualizer constructor.
Args:
            columns_range (tuple): (start, end) - range of columns in the pandas.DataFrame that contain the image data.
            img_shape (tuple): original image shape, width x height.
            img_mode (str): PIL image mode: "L" - black&white (MNIST); "RGB"; "CMYK"; "1".
preview_shape (tuple): output image size.
"""
super().__init__()
self._columns_range = columns_range
self._img_shape = img_shape
self._img_mode = img_mode
self._preview_shape = preview_shape
def __call__(self, dataframe, index):
"""Invokes the visualizer.
Args:
dataframe (pandas.DataFrame): the dataframe that contains the data for visualization.
index (int): the positional (iloc) index of the row to visualize.
Returns:
tuple: The list of widgets that visualize the row with number index.
"""
        # .as_matrix() and np.product were removed in newer pandas/numpy versions
        img_array = dataframe.iloc[index][self._columns_range[0] : self._columns_range[1]].to_numpy()
        if img_array.shape[0] > np.prod(self._img_shape):
cur_img_shape = self._img_shape + (-1,)
else:
cur_img_shape = self._img_shape
img = Image.fromarray(img_array.reshape(cur_img_shape), self._img_mode)
buffer = io.BytesIO()
        img.convert('RGB').save(buffer, format='PNG')
        return (WidgImage(value=buffer.getvalue(),
                          format='PNG',
                          width=self._preview_shape[0],
                          height=self._preview_shape[1]),)
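# Illustrative usage sketch (not part of the original module), assuming a
# dataframe whose first 784 columns hold flattened 28x28 grayscale pixels
# (e.g. MNIST). The column range and shapes below are example values.
def _example_visualize_row(dataframe, row_index=0):
    visualizer = ImageVisualizer(columns_range=(0, 784),
                                 img_shape=(28, 28),
                                 img_mode='L',
                                 preview_shape=(140, 140))
    return visualizer(dataframe, row_index)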
| 35.77193
| 108
| 0.589505
|
254461d7eb43d99aaabb1b2199c3728476045573
| 1,677
|
py
|
Python
|
nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py
|
mfalkiewicz/nipype
|
775e21b78fb1ffa2ff9cb12e6f052868bd44d052
|
[
"Apache-2.0"
] | 1
|
2015-01-19T13:12:27.000Z
|
2015-01-19T13:12:27.000Z
|
nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py
|
bpinsard/nipype
|
373bdddba9f675ef153951afa368729e2d8950d2
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py
|
bpinsard/nipype
|
373bdddba9f675ef153951afa368729e2d8950d2
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsuite import Skullfinder
def test_Skullfinder_inputs():
input_map = dict(args=dict(argstr='%s',
),
bgLabelValue=dict(argstr='--bglabel %d',
),
brainLabelValue=dict(argstr='--brainlabel %d',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
inputMRIFile=dict(argstr='-i %s',
mandatory=True,
),
inputMaskFile=dict(argstr='-m %s',
mandatory=True,
),
lowerThreshold=dict(argstr='-l %d',
),
outputLabelFile=dict(argstr='-o %s',
genfile=True,
),
performFinalOpening=dict(argstr='--finalOpening',
),
scalpLabelValue=dict(argstr='--scalplabel %d',
),
skullLabelValue=dict(argstr='--skulllabel %d',
),
spaceLabelValue=dict(argstr='--spacelabel %d',
),
surfaceFilePrefix=dict(argstr='-s %s',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
upperThreshold=dict(argstr='-u %d',
),
verbosity=dict(argstr='-v %d',
),
)
inputs = Skullfinder.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Skullfinder_outputs():
output_map = dict(outputLabelFile=dict(),
)
outputs = Skullfinder.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 26.203125
| 67
| 0.632081
|
6257dec38f889ea353ad670fc14a3418441ed5f4
| 37,314
|
py
|
Python
|
dreamcoder/program.py
|
Hitoshi-Nakanishi/ec2
|
b65905cdfb1a2451ad553fd15b937a7fc48fa9cf
|
[
"Unlicense"
] | null | null | null |
dreamcoder/program.py
|
Hitoshi-Nakanishi/ec2
|
b65905cdfb1a2451ad553fd15b937a7fc48fa9cf
|
[
"Unlicense"
] | null | null | null |
dreamcoder/program.py
|
Hitoshi-Nakanishi/ec2
|
b65905cdfb1a2451ad553fd15b937a7fc48fa9cf
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from dreamcoder.type import *
from dreamcoder.utilities import *
from time import time
import math
class InferenceFailure(Exception):
pass
class ShiftFailure(Exception):
pass
class RunFailure(Exception):
pass
class Program(object):
def __repr__(self):
return str(self)
def __ne__(self, o):
return not (self == o)
def __str__(self):
return self.show(False)
def canHaveType(self, t):
try:
context, actualType = self.inferType(Context.EMPTY, [], {})
context, t = t.instantiate(context)
context.unify(t, actualType)
return True
except UnificationFailure as e:
return False
def betaNormalForm(self):
n = self
while True:
np = n.betaReduce()
if np is None:
return n
n = np
def infer(self):
try:
return self.inferType(Context.EMPTY, [], {})[1].canonical()
except UnificationFailure as e:
raise InferenceFailure(self, e)
def uncurry(self):
t = self.infer()
a = len(t.functionArguments())
e = self
existingAbstractions = 0
while e.isAbstraction:
e = e.body
existingAbstractions += 1
newAbstractions = a - existingAbstractions
assert newAbstractions >= 0
# e is the body stripped of abstractions. we are going to pile
# some more lambdas at the front, so free variables in e
# (which were bound to the stripped abstractions) need to be
# shifted by the number of abstractions that we will be adding
e = e.shift(newAbstractions)
for n in reversed(range(newAbstractions)):
e = Application(e, Index(n))
for _ in range(a):
e = Abstraction(e)
assert self.infer() == e.infer(), "FATAL: uncurry has a bug. %s : %s, but uncurried to %s : %s" % (
self,
self.infer(),
e,
e.infer(),
)
return e
def wellTyped(self):
try:
self.infer()
return True
except InferenceFailure:
return False
def runWithArguments(self, xs):
f = self.evaluate([])
for x in xs:
f = f(x)
return f
def applicationParses(self):
yield self, []
def applicationParse(self):
return self, []
@property
def closed(self):
for surroundingAbstractions, child in self.walk():
if isinstance(child, FragmentVariable):
return False
if isinstance(child, Index) and child.free(surroundingAbstractions):
return False
return True
@property
def numberOfFreeVariables(expression):
n = 0
for surroundingAbstractions, child in expression.walk():
# Free variable
if isinstance(child, Index) and child.free(surroundingAbstractions):
n = max(n, child.i - surroundingAbstractions + 1)
return n
def freeVariables(self):
for surroundingAbstractions, child in self.walk():
if child.isIndex and child.i >= surroundingAbstractions:
yield child.i - surroundingAbstractions
@property
def isIndex(self):
return False
@property
def isUnion(self):
return False
@property
def isApplication(self):
return False
@property
def isAbstraction(self):
return False
@property
def isPrimitive(self):
return False
@property
def isInvented(self):
return False
@property
def isHole(self):
return False
@staticmethod
def parse(s):
s = parseSExpression(s)
def p(e):
if isinstance(e, list):
if e[0] == "#":
assert len(e) == 2
return Invented(p(e[1]))
if e[0] == "lambda":
assert len(e) == 2
return Abstraction(p(e[1]))
f = p(e[0])
for x in e[1:]:
f = Application(f, p(x))
return f
assert isinstance(e, str)
if e[0] == "$":
return Index(int(e[1:]))
if e in Primitive.GLOBALS:
return Primitive.GLOBALS[e]
if e == "??" or e == "?":
return FragmentVariable.single
if e == "<HOLE>":
return Hole.single
raise ParseFailure((s, e))
return p(s)
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
for p in [Application, Abstraction, Index, Invented, FragmentVariable, Hole, Primitive]:
try:
return p._parse(s, n)
except ParseFailure:
continue
raise ParseFailure(s)
# parser helpers
@staticmethod
def parseConstant(s, n, *constants):
for constant in constants:
try:
for i, c in enumerate(constant):
if i + n >= len(s) or s[i + n] != c:
raise ParseFailure(s)
return n + len(constant)
except ParseFailure:
continue
raise ParseFailure(s)
@staticmethod
def parseHumanReadable(s):
s = parseSExpression(s)
def p(s, environment):
if isinstance(s, list) and s[0] in ["lambda", "\\"]:
assert isinstance(s[1], list) and len(s) == 3
newEnvironment = list(reversed(s[1])) + environment
e = p(s[2], newEnvironment)
for _ in s[1]:
e = Abstraction(e)
return e
if isinstance(s, list):
a = p(s[0], environment)
for x in s[1:]:
a = Application(a, p(x, environment))
return a
for j, v in enumerate(environment):
if s == v:
return Index(j)
if s in Primitive.GLOBALS:
return Primitive.GLOBALS[s]
assert False, f"could not parse {s}"
return p(s, [])
class Application(Program):
"""Function application"""
def __init__(self, f, x):
self.f = f
self.x = x
self.hashCode = None
self.isConditional = (
(not isinstance(f, int)) and f.isApplication and f.f.isApplication and f.f.f.isPrimitive and f.f.f.name == "if"
)
if self.isConditional:
self.falseBranch = x
self.trueBranch = f.x
self.branch = f.f.x
else:
self.falseBranch = None
self.trueBranch = None
self.branch = None
def betaReduce(self):
# See if either the function or the argument can be reduced
f = self.f.betaReduce()
if f is not None:
return Application(f, self.x)
x = self.x.betaReduce()
if x is not None:
return Application(self.f, x)
# Neither of them could be reduced. Is this not a redex?
if not self.f.isAbstraction:
return None
# Perform substitution
b = self.f.body
v = self.x
return b.substitute(Index(0), v.shift(1)).shift(-1)
def isBetaLong(self):
return (not self.f.isAbstraction) and self.f.isBetaLong() and self.x.isBetaLong()
def freeVariables(self):
return self.f.freeVariables() | self.x.freeVariables()
def clone(self):
return Application(self.f.clone(), self.x.clone())
def annotateTypes(self, context, environment):
self.f.annotateTypes(context, environment)
self.x.annotateTypes(context, environment)
r = context.makeVariable()
context.unify(arrow(self.x.annotatedType, r), self.f.annotatedType)
self.annotatedType = r.applyMutable(context)
@property
def isApplication(self):
return True
def __eq__(self, other):
return isinstance(other, Application) and self.f == other.f and self.x == other.x
def __hash__(self):
if self.hashCode is None:
self.hashCode = hash((hash(self.f), hash(self.x)))
return self.hashCode
"""Because Python3 randomizes the hash function, we need to never pickle the hash"""
def __getstate__(self):
return self.f, self.x, self.isConditional, self.falseBranch, self.trueBranch, self.branch
def __setstate__(self, state):
try:
self.f, self.x, self.isConditional, self.falseBranch, self.trueBranch, self.branch = state
except ValueError:
# backward compatibility
assert "x" in state
assert "f" in state
f = state["f"]
x = state["x"]
self.f = f
self.x = x
self.isConditional = (
(not isinstance(f, int)) and f.isApplication and f.f.isApplication and f.f.f.isPrimitive and f.f.f.name == "if"
)
if self.isConditional:
self.falseBranch = x
self.trueBranch = f.x
self.branch = f.f.x
else:
self.falseBranch = None
self.trueBranch = None
self.branch = None
self.hashCode = None
def visit(self, visitor, *arguments, **keywords):
return visitor.application(self, *arguments, **keywords)
def show(self, isFunction):
if isFunction:
return "%s %s" % (self.f.show(True), self.x.show(False))
else:
return "(%s %s)" % (self.f.show(True), self.x.show(False))
def evaluate(self, environment):
if self.isConditional:
if self.branch.evaluate(environment):
return self.trueBranch.evaluate(environment)
else:
return self.falseBranch.evaluate(environment)
else:
return self.f.evaluate(environment)(self.x.evaluate(environment))
def inferType(self, context, environment, freeVariables):
(context, ft) = self.f.inferType(context, environment, freeVariables)
(context, xt) = self.x.inferType(context, environment, freeVariables)
(context, returnType) = context.makeVariable()
context = context.unify(ft, arrow(xt, returnType))
return (context, returnType.apply(context))
def applicationParses(self):
yield self, []
for f, xs in self.f.applicationParses():
yield f, xs + [self.x]
def applicationParse(self):
f, xs = self.f.applicationParse()
return f, xs + [self.x]
def shift(self, offset, depth=0):
return Application(self.f.shift(offset, depth), self.x.shift(offset, depth))
def substitute(self, old, new):
if self == old:
return new
return Application(self.f.substitute(old, new), self.x.substitute(old, new))
def walkUncurried(self, d=0):
yield d, self
f, xs = self.applicationParse()
yield from f.walkUncurried(d)
for x in xs:
yield from x.walkUncurried(d)
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
yield from self.f.walk(surroundingAbstractions)
yield from self.x.walk(surroundingAbstractions)
def size(self):
return self.f.size() + self.x.size()
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
if n == len(s) or s[n] != "(":
raise ParseFailure(s)
n += 1
xs = []
while True:
x, n = Program._parse(s, n)
xs.append(x)
while n < len(s) and s[n].isspace():
n += 1
if n == len(s):
raise ParseFailure(s)
if s[n] == ")":
n += 1
break
e = xs[0]
for x in xs[1:]:
e = Application(e, x)
return e, n
class Index(Program):
"""
deBruijn index: https://en.wikipedia.org/wiki/De_Bruijn_index
These indices encode variables.
"""
def __init__(self, i):
self.i = i
def show(self, isFunction):
return "$%d" % self.i
def __eq__(self, o):
return isinstance(o, Index) and o.i == self.i
def __hash__(self):
return self.i
def visit(self, visitor, *arguments, **keywords):
return visitor.index(self, *arguments, **keywords)
def evaluate(self, environment):
return environment[self.i]
def inferType(self, context, environment, freeVariables):
if self.bound(len(environment)):
return (context, environment[self.i].apply(context))
else:
i = self.i - len(environment)
if i in freeVariables:
return (context, freeVariables[i].apply(context))
context, variable = context.makeVariable()
freeVariables[i] = variable
return (context, variable)
def clone(self):
return Index(self.i)
def annotateTypes(self, context, environment):
self.annotatedType = environment[self.i].applyMutable(context)
def shift(self, offset, depth=0):
# bound variable
if self.bound(depth):
return self
else: # free variable
i = self.i + offset
if i < 0:
raise ShiftFailure()
return Index(i)
def betaReduce(self):
return None
def isBetaLong(self):
return True
def freeVariables(self):
return {self.i}
def substitute(self, old, new):
if old == self:
return new
else:
return self
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
def walkUncurried(self, d=0):
yield d, self
def size(self):
return 1
def free(self, surroundingAbstractions):
"""Is this index a free variable, given that it has surroundingAbstractions lambda's around it?"""
return self.i >= surroundingAbstractions
def bound(self, surroundingAbstractions):
"""Is this index a bound variable, given that it has surroundingAbstractions lambda's around it?"""
return self.i < surroundingAbstractions
@property
def isIndex(self):
return True
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
if n == len(s) or s[n] != "$":
raise ParseFailure(s)
n += 1
j = ""
while n < len(s) and s[n].isdigit():
j += s[n]
n += 1
if j == "":
raise ParseFailure(s)
return Index(int(j)), n
class Abstraction(Program):
"""Lambda abstraction. Creates a new function."""
def __init__(self, body):
self.body = body
self.hashCode = None
@property
def isAbstraction(self):
return True
def __eq__(self, o):
return isinstance(o, Abstraction) and o.body == self.body
def __hash__(self):
if self.hashCode is None:
self.hashCode = hash((hash(self.body),))
return self.hashCode
"""Because Python3 randomizes the hash function, we need to never pickle the hash"""
def __getstate__(self):
return self.body
def __setstate__(self, state):
self.body = state
self.hashCode = None
def isBetaLong(self):
return self.body.isBetaLong()
def freeVariables(self):
return {f - 1 for f in self.body.freeVariables() if f > 0}
def visit(self, visitor, *arguments, **keywords):
return visitor.abstraction(self, *arguments, **keywords)
def clone(self):
return Abstraction(self.body.clone())
def annotateTypes(self, context, environment):
v = context.makeVariable()
self.body.annotateTypes(context, [v] + environment)
self.annotatedType = arrow(v.applyMutable(context), self.body.annotatedType)
def show(self, isFunction):
return "(lambda %s)" % (self.body.show(False))
def evaluate(self, environment):
return lambda x: self.body.evaluate([x] + environment)
def betaReduce(self):
b = self.body.betaReduce()
if b is None:
return None
return Abstraction(b)
def inferType(self, context, environment, freeVariables):
(context, argumentType) = context.makeVariable()
(context, returnType) = self.body.inferType(context, [argumentType] + environment, freeVariables)
return (context, arrow(argumentType, returnType).apply(context))
def shift(self, offset, depth=0):
return Abstraction(self.body.shift(offset, depth + 1))
def substitute(self, old, new):
if self == old:
return new
old = old.shift(1)
new = new.shift(1)
return Abstraction(self.body.substitute(old, new))
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
yield from self.body.walk(surroundingAbstractions + 1)
def walkUncurried(self, d=0):
yield d, self
yield from self.body.walkUncurried(d + 1)
def size(self):
return self.body.size()
@staticmethod
def _parse(s, n):
n = Program.parseConstant(s, n, "(\\", "(lambda", "(\u03bb")
while n < len(s) and s[n].isspace():
n += 1
b, n = Program._parse(s, n)
while n < len(s) and s[n].isspace():
n += 1
n = Program.parseConstant(s, n, ")")
return Abstraction(b), n
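# Illustrative sketch (not part of the original module): building terms with
# de Bruijn indices and normalising them. Only lambdas and indices are used,
# so nothing needs to be registered in Primitive.GLOBALS.
def _example_beta_reduction():
    identity = Abstraction(Index(0))                   # (lambda $0)
    k_combinator = Abstraction(Abstraction(Index(1)))  # (lambda (lambda $1))
    assert identity.show(False) == "(lambda $0)"
    assert Program.parse("(lambda $0)") == identity
    # Applying the identity reduces back to the argument.
    redex = Application(identity, k_combinator)
    assert redex.betaNormalForm() == k_combinator
    return redex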
class Primitive(Program):
GLOBALS = {}
def __init__(self, name, ty, value):
self.tp = ty
self.name = name
self.value = value
if name not in Primitive.GLOBALS:
Primitive.GLOBALS[name] = self
@property
def isPrimitive(self):
return True
def __eq__(self, o):
return isinstance(o, Primitive) and o.name == self.name
def __hash__(self):
return hash(self.name)
def visit(self, visitor, *arguments, **keywords):
return visitor.primitive(self, *arguments, **keywords)
def show(self, isFunction):
return self.name
def clone(self):
return Primitive(self.name, self.tp, self.value)
def annotateTypes(self, context, environment):
self.annotatedType = self.tp.instantiateMutable(context)
def evaluate(self, environment):
return self.value
def betaReduce(self):
return None
def isBetaLong(self):
return True
def freeVariables(self):
return set()
def inferType(self, context, environment, freeVariables):
return self.tp.instantiate(context)
def shift(self, offset, depth=0):
return self
def substitute(self, old, new):
if self == old:
return new
else:
return self
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
def walkUncurried(self, d=0):
yield d, self
def size(self):
return 1
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
name = []
while n < len(s) and not s[n].isspace() and s[n] not in "()":
name.append(s[n])
n += 1
name = "".join(name)
if name in Primitive.GLOBALS:
return Primitive.GLOBALS[name], n
raise ParseFailure(s)
# TODO(@mtensor): needs to be fixed to handle both pickling lambda functions and unpickling in general.
# def __getstate__(self):
# return self.name
# def __setstate__(self, state):
# #for backwards compatibility:
# if type(state) == dict:
# self.__dict__ = state
# else:
# p = Primitive.GLOBALS[state]
# self.__init__(p.name, p.tp, p.value)
class Invented(Program):
"""New invented primitives"""
def __init__(self, body):
self.body = body
self.tp = self.body.infer()
self.hashCode = None
@property
def isInvented(self):
return True
def show(self, isFunction):
return "#%s" % (self.body.show(False))
def visit(self, visitor, *arguments, **keywords):
return visitor.invented(self, *arguments, **keywords)
def __eq__(self, o):
return isinstance(o, Invented) and o.body == self.body
def __hash__(self):
if self.hashCode is None:
self.hashCode = hash((0, hash(self.body)))
return self.hashCode
"""Because Python3 randomizes the hash function, we need to never pickle the hash"""
def __getstate__(self):
return self.body, self.tp
def __setstate__(self, state):
self.body, self.tp = state
self.hashCode = None
def clone(self):
return Invented(self.body)
def annotateTypes(self, context, environment):
self.annotatedType = self.tp.instantiateMutable(context)
def evaluate(self, e):
return self.body.evaluate([])
def betaReduce(self):
return self.body
def isBetaLong(self):
return True
def freeVariables(self):
return set()
def inferType(self, context, environment, freeVariables):
return self.tp.instantiate(context)
def shift(self, offset, depth=0):
return self
def substitute(self, old, new):
if self == old:
return new
else:
return self
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
def walkUncurried(self, d=0):
yield d, self
def size(self):
return 1
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
if n < len(s) and s[n] == "#":
n += 1
b, n = Program._parse(s, n)
return Invented(b), n
raise ParseFailure(s)
class FragmentVariable(Program):
def __init__(self):
pass
def show(self, isFunction):
return "??"
def __eq__(self, o):
return isinstance(o, FragmentVariable)
def __hash__(self):
return 42
def visit(self, visitor, *arguments, **keywords):
return visitor.fragmentVariable(self, *arguments, **keywords)
def evaluate(self, e):
raise Exception("Attempt to evaluate fragment variable")
def betaReduce(self):
raise Exception("Attempt to beta reduce fragment variable")
def inferType(self, context, environment, freeVariables):
return context.makeVariable()
def shift(self, offset, depth=0):
raise Exception("Attempt to shift fragment variable")
def substitute(self, old, new):
if self == old:
return new
else:
return self
def match(self, context, expression, holes, variableBindings, environment=[]):
surroundingAbstractions = len(environment)
try:
context, variable = context.makeVariable()
holes.append((variable, expression.shift(-surroundingAbstractions)))
return context, variable
except ShiftFailure:
raise MatchFailure()
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
def walkUncurried(self, d=0):
yield d, self
def size(self):
return 1
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
n = Program.parseConstant(s, n, "??", "?")
return FragmentVariable.single, n
FragmentVariable.single = FragmentVariable()
class Hole(Program):
def __init__(self):
pass
def show(self, isFunction):
return "<HOLE>"
@property
def isHole(self):
return True
def __eq__(self, o):
return isinstance(o, Hole)
def __hash__(self):
return 42
def evaluate(self, e):
raise Exception("Attempt to evaluate hole")
def betaReduce(self):
raise Exception("Attempt to beta reduce hole")
def inferType(self, context, environment, freeVariables):
return context.makeVariable()
def shift(self, offset, depth=0):
raise Exception("Attempt to shift fragment variable")
def walk(self, surroundingAbstractions=0):
yield surroundingAbstractions, self
def walkUncurried(self, d=0):
yield d, self
def size(self):
return 1
@staticmethod
def _parse(s, n):
while n < len(s) and s[n].isspace():
n += 1
n = Program.parseConstant(s, n, "<HOLE>")
return Hole.single, n
Hole.single = Hole()
class ShareVisitor(object):
def __init__(self):
self.primitiveTable = {}
self.inventedTable = {}
self.indexTable = {}
self.applicationTable = {}
self.abstractionTable = {}
def invented(self, e):
body = e.body.visit(self)
i = id(body)
if i in self.inventedTable:
return self.inventedTable[i]
new = Invented(body)
self.inventedTable[i] = new
return new
def primitive(self, e):
if e.name in self.primitiveTable:
return self.primitiveTable[e.name]
self.primitiveTable[e.name] = e
return e
def index(self, e):
if e.i in self.indexTable:
return self.indexTable[e.i]
self.indexTable[e.i] = e
return e
def application(self, e):
f = e.f.visit(self)
x = e.x.visit(self)
fi = id(f)
xi = id(x)
i = (fi, xi)
if i in self.applicationTable:
return self.applicationTable[i]
new = Application(f, x)
self.applicationTable[i] = new
return new
def abstraction(self, e):
body = e.body.visit(self)
i = id(body)
if i in self.abstractionTable:
return self.abstractionTable[i]
new = Abstraction(body)
self.abstractionTable[i] = new
return new
def execute(self, e):
return e.visit(self)
class Mutator:
"""Perform local mutations to an expr, yielding the expr and the
description length distance from the original program"""
def __init__(self, grammar, fn):
"""Fn yields (expression, loglikelihood) from a type and loss.
Therefore, loss+loglikelihood is the distance from the original program."""
self.fn = fn
self.grammar = grammar
self.history = []
def enclose(self, expr):
for h in self.history[::-1]:
expr = h(expr)
return expr
def invented(self, e, tp, env, is_lhs=False):
deleted_ll = self.logLikelihood(tp, e, env)
        for expr, replaced_ll in self.fn(tp, deleted_ll, is_left_application=is_lhs):
yield self.enclose(expr), deleted_ll + replaced_ll
def primitive(self, e, tp, env, is_lhs=False):
deleted_ll = self.logLikelihood(tp, e, env)
for expr, replaced_ll in self.fn(tp, deleted_ll, is_left_application=is_lhs):
yield self.enclose(expr), deleted_ll + replaced_ll
def index(self, e, tp, env, is_lhs=False):
# yield from ()
deleted_ll = self.logLikelihood(tp, e, env) # self.grammar.logVariable
for expr, replaced_ll in self.fn(tp, deleted_ll, is_left_application=is_lhs):
yield self.enclose(expr), deleted_ll + replaced_ll
def application(self, e, tp, env, is_lhs=False):
self.history.append(lambda expr: Application(expr, e.x))
f_tp = arrow(e.x.infer(), tp)
yield from e.f.visit(self, f_tp, env, is_lhs=True)
self.history[-1] = lambda expr: Application(e.f, expr)
x_tp = inferArg(tp, e.f.infer())
yield from e.x.visit(self, x_tp, env)
self.history.pop()
deleted_ll = self.logLikelihood(tp, e, env)
for expr, replaced_ll in self.fn(tp, deleted_ll, is_left_application=is_lhs):
yield self.enclose(expr), deleted_ll + replaced_ll
def abstraction(self, e, tp, env, is_lhs=False):
self.history.append(lambda expr: Abstraction(expr))
yield from e.body.visit(self, tp.arguments[1], [tp.arguments[0]] + env)
self.history.pop()
deleted_ll = self.logLikelihood(tp, e, env)
for expr, replaced_ll in self.fn(tp, deleted_ll, is_left_application=is_lhs):
yield self.enclose(expr), deleted_ll + replaced_ll
def execute(self, e, tp):
yield from e.visit(self, tp, [])
def logLikelihood(self, tp, e, env):
summary = None
try:
_, summary = self.grammar.likelihoodSummary(Context.EMPTY, env, tp, e, silent=True)
except AssertionError as err:
# print(f"closedLikelihoodSummary failed on tp={tp}, e={e}, error={err}")
pass
if summary is not None:
return summary.logLikelihood(self.grammar)
else:
tmpE, depth = e, 0
while isinstance(tmpE, Abstraction):
depth += 1
tmpE = tmpE.body
to_introduce = len(tp.functionArguments()) - depth
if to_introduce == 0:
# print(f"HIT NEGATIVEINFINITY, tp={tp}, e={e}")
return NEGATIVEINFINITY
for i in reversed(range(to_introduce)):
e = Application(e, Index(i))
for _ in range(to_introduce):
e = Abstraction(e)
return self.logLikelihood(tp, e, env)
class RegisterPrimitives(object):
def invented(self, e):
e.body.visit(self)
def primitive(self, e):
if e.name not in Primitive.GLOBALS:
Primitive(e.name, e.tp, e.value)
def index(self, e):
pass
def application(self, e):
e.f.visit(self)
e.x.visit(self)
def abstraction(self, e):
e.body.visit(self)
@staticmethod
def register(e):
e.visit(RegisterPrimitives())
class PrettyVisitor(object):
def __init__(self, Lisp=False):
self.Lisp = Lisp
self.numberOfVariables = 0
self.freeVariables = {}
self.variableNames = ["x", "y", "z", "u", "v", "w"]
self.variableNames += [chr(ord("a") + j) for j in range(20)]
self.toplevel = True
def makeVariable(self):
v = self.variableNames[self.numberOfVariables]
self.numberOfVariables += 1
return v
def invented(self, e, environment, isFunction, isAbstraction):
s = e.body.visit(self, [], isFunction, isAbstraction)
return s
def primitive(self, e, environment, isVariable, isAbstraction):
return e.name
def index(self, e, environment, isVariable, isAbstraction):
if e.i < len(environment):
return environment[e.i]
else:
i = e.i - len(environment)
if i in self.freeVariables:
return self.freeVariables[i]
else:
v = self.makeVariable()
self.freeVariables[i] = v
return v
def application(self, e, environment, isFunction, isAbstraction):
self.toplevel = False
s = "%s %s" % (e.f.visit(self, environment, True, False), e.x.visit(self, environment, False, False))
if isFunction:
return s
else:
return "(" + s + ")"
def abstraction(self, e, environment, isFunction, isAbstraction):
toplevel = self.toplevel
self.toplevel = False
if not self.Lisp:
# Invent a new variable
v = self.makeVariable()
body = e.body.visit(self, [v] + environment, False, True)
if not e.body.isAbstraction:
body = "." + body
body = v + body
if not isAbstraction:
body = "λ" + body
if not toplevel:
body = "(%s)" % body
return body
else:
child = e
newVariables = []
while child.isAbstraction:
newVariables = [self.makeVariable()] + newVariables
child = child.body
body = child.visit(self, newVariables + environment, False, True)
body = "(λ (%s) %s)" % (" ".join(reversed(newVariables)), body)
return body
def prettyProgram(e, Lisp=False):
return e.visit(PrettyVisitor(Lisp=Lisp), [], False, False)
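# Illustrative sketch (not part of the original module): rendering a parsed
# term with named variables instead of de Bruijn indices, in both the default
# and the Lisp-style layout.
def _example_pretty():
    e = Program.parse("(lambda (lambda $1))")
    return prettyProgram(e), prettyProgram(e, Lisp=True)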
class EtaExpandFailure(Exception):
pass
class EtaLongVisitor(object):
"""Converts an expression into eta-longform"""
def __init__(self, request=None):
self.request = request
self.context = None
def makeLong(self, e, request):
if request.isArrow():
# eta expansion
return Abstraction(Application(e.shift(1), Index(0)))
return None
def abstraction(self, e, request, environment):
if not request.isArrow():
raise EtaExpandFailure()
return Abstraction(e.body.visit(self, request.arguments[1], [request.arguments[0]] + environment))
def _application(self, e, request, environment):
l = self.makeLong(e, request)
if l is not None:
return l.visit(self, request, environment)
f, xs = e.applicationParse()
if f.isIndex:
ft = environment[f.i].applyMutable(self.context)
elif f.isInvented or f.isPrimitive:
ft = f.tp.instantiateMutable(self.context)
else:
assert False, "Not in beta long form: %s" % e
self.context.unify(request, ft.returns())
ft = ft.applyMutable(self.context)
xt = ft.functionArguments()
if len(xs) != len(xt):
raise EtaExpandFailure()
returnValue = f
for x, t in zip(xs, xt):
t = t.applyMutable(self.context)
returnValue = Application(returnValue, x.visit(self, t, environment))
return returnValue
    # This procedure works by recapitulating the generative process:
    # applications, indices, and primitives are all handled identically.
def application(self, e, request, environment):
return self._application(e, request, environment)
def index(self, e, request, environment):
return self._application(e, request, environment)
def primitive(self, e, request, environment):
return self._application(e, request, environment)
def invented(self, e, request, environment):
return self._application(e, request, environment)
def execute(self, e):
assert len(e.freeVariables()) == 0
if self.request is None:
eprint("WARNING: request not specified for etaexpansion")
self.request = e.infer()
self.context = MutableContext()
el = e.visit(self, self.request, [])
self.context = None
# assert el.infer().canonical() == e.infer().canonical(), \
# f"Types are not preserved by ETA expansion: {e} : {e.infer().canonical()} vs {el} : {el.infer().canonical()}"
return el
class StripPrimitiveVisitor:
"""Replaces all primitives .value's w/ None. Does not destructively modify anything"""
def invented(self, e):
return Invented(e.body.visit(self))
def primitive(self, e):
return Primitive(e.name, e.tp, None)
def application(self, e):
return Application(e.f.visit(self), e.x.visit(self))
def abstraction(self, e):
return Abstraction(e.body.visit(self))
def index(self, e):
return e
class ReplacePrimitiveValueVisitor:
"""Intended to be used after StripPrimitiveVisitor.
Replaces all primitive.value's with their corresponding entry in Primitive.GLOBALS"""
def invented(self, e):
return Invented(e.body.visit(self))
def primitive(self, e):
return Primitive(e.name, e.tp, Primitive.GLOBALS[e.name].value)
def application(self, e):
return Application(e.f.visit(self), e.x.visit(self))
def abstraction(self, e):
return Abstraction(e.body.visit(self))
def index(self, e):
return e
def strip_primitive_values(e):
return e.visit(StripPrimitiveVisitor())
def unstrip_primitive_values(e):
return e.visit(ReplacePrimitiveValueVisitor())
# from luke
class TokeniseVisitor(object):
def invented(self, e):
return [e.body]
def primitive(self, e):
return [e.name]
def index(self, e):
return ["$" + str(e.i)]
def application(self, e):
return ["("] + e.f.visit(self) + e.x.visit(self) + [")"]
def abstraction(self, e):
return ["(_lambda"] + e.body.visit(self) + [")_lambda"]
def tokeniseProgram(e):
return e.visit(TokeniseVisitor())
def untokeniseProgram(l):
lookup = {"(_lambda": "(lambda", ")_lambda": ")"}
s = " ".join(lookup.get(x, x) for x in l)
return Program.parse(s)
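# Illustrative sketch (not part of the original module): round-tripping a
# program through the token representation. The example term uses only
# lambdas and indices, so no primitives have to be registered.
def _example_tokenise_roundtrip():
    e = Program.parse("(lambda (lambda ($1 $0)))")
    tokens = tokeniseProgram(e)
    # e.g. ["(_lambda", "(_lambda", "(", "$1", "$0", ")", ")_lambda", ")_lambda"]
    assert untokeniseProgram(tokens) == e
    return tokens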
if __name__ == "__main__":
from dreamcoder.domains.arithmetic.arithmeticPrimitives import *
e = Program.parse("(#(lambda (?? (+ 1 $0))) (lambda (?? (+ 1 $0))) (lambda (?? (+ 1 $0))) - * (+ +))")
eprint(e)
| 28.948022
| 127
| 0.576298
|
d1d05ac0ceb6d4f5392bc3eff285d8d82b87c282
| 401
|
py
|
Python
|
justathaught/asgi.py
|
sandipsandal/Just-A-Thought
|
97f97404b303deb2ea7bcc86d89b2b21b3715fba
|
[
"Apache-2.0"
] | null | null | null |
justathaught/asgi.py
|
sandipsandal/Just-A-Thought
|
97f97404b303deb2ea7bcc86d89b2b21b3715fba
|
[
"Apache-2.0"
] | null | null | null |
justathaught/asgi.py
|
sandipsandal/Just-A-Thought
|
97f97404b303deb2ea7bcc86d89b2b21b3715fba
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for justathaught project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'justathaught.settings')
application = get_asgi_application()
| 23.588235
| 78
| 0.790524
|
b915fbbad49b902a60b0e12b2535b74abaa191ff
| 601
|
py
|
Python
|
main.py
|
hectorleiva/automated-video-downloader
|
66466c114678e73d93b8b2370286ed59d6e558a4
|
[
"MIT"
] | null | null | null |
main.py
|
hectorleiva/automated-video-downloader
|
66466c114678e73d93b8b2370286ed59d6e558a4
|
[
"MIT"
] | null | null | null |
main.py
|
hectorleiva/automated-video-downloader
|
66466c114678e73d93b8b2370286ed59d6e558a4
|
[
"MIT"
] | null | null | null |
import os
import sys
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(HERE, "./vendored"))
from youtube_dl import main as youtube_downloader
PLAYLIST_URL = os.getenv('PLAYLIST_URL')
if not PLAYLIST_URL:
print('Unable to continue, no PLAYLIST_URL environment variable was set')
sys.exit(1)
DOWNLOAD_DIR = 'downloaded-videos'
youtube_downloader(
[PLAYLIST_URL
, '--ignore-errors'
, '--download-archive'
, '{}/archive.txt'.format(DOWNLOAD_DIR)
, '-r'
, '2M'
, '-o'
, '{}/%(title)s-%(id)s.%(ext)s'.format(DOWNLOAD_DIR)]
)
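# Illustrative sketch (not part of the original script): the same downloader
# invoked for a single placeholder URL instead of the playlist above.
def _example_download_single(url="https://www.youtube.com/watch?v=VIDEO_ID"):
    youtube_downloader([url, '-o', '{}/%(title)s-%(id)s.%(ext)s'.format(DOWNLOAD_DIR)])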
| 23.115385
| 77
| 0.665557
|
7fc168b5a58b34558e478b50f12234b86f5996c9
| 1,225
|
py
|
Python
|
app/api/deps.py
|
VPPR/backend-2
|
454e037f67245cae102f17c902f0494013c94e82
|
[
"MIT"
] | 1
|
2021-05-16T15:05:50.000Z
|
2021-05-16T15:05:50.000Z
|
app/api/deps.py
|
VPPR/backend-2
|
454e037f67245cae102f17c902f0494013c94e82
|
[
"MIT"
] | 6
|
2021-02-22T06:30:49.000Z
|
2021-06-26T15:17:24.000Z
|
app/api/deps.py
|
VPPR/backend-2
|
454e037f67245cae102f17c902f0494013c94e82
|
[
"MIT"
] | null | null | null |
from fastapi import Depends, HTTPException, status
from jose import jwt
from mongoengine.errors import ValidationError
from app import crud
from app.core.config import settings
from app.core.security import oauth2_scheme
from app.schema.token import TokenPayload
from app.schema.user import User
def get_current_user(token: str = Depends(oauth2_scheme)) -> User:
try:
payload = jwt.decode(
token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM]
)
token_data = TokenPayload(**payload)
except (jwt.JWTError, ValidationError) as e:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Could not validate credentials",
)
user = crud.user.get(id=token_data.subject)
if not user:
raise HTTPException(status_code=404, detail="User not found")
if not user.is_active:
raise HTTPException(status_code=409, detail="User Inactive")
return user
def get_current_admin(user: User = Depends(get_current_user)) -> User:
if user.is_admin:
return user
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Unauthorized Resource. Access is forbidden",
)
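# Illustrative wiring sketch (not part of the original module): how these
# dependencies are typically attached to FastAPI routes. The router and the
# paths below are hypothetical examples, not endpoints defined in this project.
def _example_router():
    from fastapi import APIRouter
    router = APIRouter()
    @router.get("/users/me")
    def read_own_profile(user: User = Depends(get_current_user)):
        return user
    @router.get("/admin/ping")
    def admin_ping(admin: User = Depends(get_current_admin)):
        return {"detail": "ok"}
    return router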
| 32.236842
| 71
| 0.707755
|
10094e5c4abe42aad43afac78c041b9b4dede7b3
| 5,249
|
py
|
Python
|
nipype/workflows/dmri/camino/group_connectivity.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/workflows/dmri/camino/group_connectivity.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/workflows/dmri/camino/group_connectivity.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path as op # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
from .connectivity_mapping import create_connectivity_pipeline
def create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args_dict=0):
"""Creates a pipeline that performs basic Camino structural connectivity processing
on groups of subjects. Given a diffusion-weighted image, and text files containing
the associated b-values and b-vectors, the workflow will return each subjects' connectomes
in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org).
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> import nipype.workflows.dmri.camino.group_connectivity as groupwork
>>> subjects_dir = '.'
>>> data_dir = '.'
>>> output_dir = '.'
>>> fs.FSCommand.set_default_subjects_dir(subjects_dir)
>>> group_list = {}
>>> group_list['group1'] = ['subj1', 'subj2']
>>> group_list['group2'] = ['subj3', 'subj4']
>>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']])
>>> group_id = 'group1'
>>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args)
>>> l1pipeline.run() # doctest: +SKIP
Inputs::
group_list: Dictionary of subject lists, keyed by group name
group_id: String containing the group name
data_dir: Path to the data directory
subjects_dir: Path to the Freesurfer 'subjects' directory
output_dir: Path for the output files
template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource
e.g. info = dict(dwi=[['subject_id', 'dwi']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
"""
group_infosource = pe.Node(interface=util.IdentityInterface(fields=['group_id']), name="group_infosource")
group_infosource.inputs.group_id = group_id
subject_list = group_list[group_id]
subj_infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="subj_infosource")
subj_infosource.iterables = ('subject_id', subject_list)
if template_args_dict == 0:
info = dict(dwi=[['subject_id', 'dwi']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
else:
info = template_args_dict
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=info.keys()),
name = 'datasource')
datasource.inputs.template = "%s/%s"
datasource.inputs.base_directory = data_dir
datasource.inputs.field_template = dict(dwi='%s/%s.nii')
datasource.inputs.template_args = info
"""
Create a connectivity mapping workflow
"""
conmapper = create_connectivity_pipeline("nipype_conmap")
conmapper.inputs.inputnode.subjects_dir = subjects_dir
conmapper.base_dir = op.abspath('conmapper')
datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = output_dir
datasink.inputs.container = group_id
l1pipeline = pe.Workflow(name="l1pipeline_"+group_id)
l1pipeline.base_dir = output_dir
l1pipeline.base_output_dir = group_id
l1pipeline.connect([(subj_infosource, datasource,[('subject_id', 'subject_id')])])
l1pipeline.connect([(subj_infosource, conmapper,[('subject_id', 'inputnode.subject_id')])])
l1pipeline.connect([(datasource, conmapper, [("dwi", "inputnode.dwi"),
("bvals", "inputnode.bvals"),
("bvecs", "inputnode.bvecs"),
])])
l1pipeline.connect([(conmapper, datasink, [("outputnode.connectome", "@l1output.cff"),
("outputnode.fa", "@l1output.fa"),
("outputnode.tracts", "@l1output.tracts"),
("outputnode.trace", "@l1output.trace"),
("outputnode.cmatrix", "@l1output.cmatrix"),
("outputnode.rois", "@l1output.rois"),
("outputnode.struct", "@l1output.struct"),
("outputnode.networks", "@l1output.networks"),
("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"),
("outputnode.fiber_length_std", "@l1output.fiber_length_std"),
])])
l1pipeline.connect([(group_infosource, datasink,[('group_id','@group_id')])])
return l1pipeline
| 53.020202
| 138
| 0.587922
|
ff07da283ff40192fc22fbce8e0961519722c932
| 6,248
|
py
|
Python
|
train_ptb.py
|
Jinrohs/deep_maker
|
21e62a53bc25023e5e4a95f7b33fe4ee0e46a361
|
[
"MIT"
] | 1
|
2018-02-21T14:40:52.000Z
|
2018-02-21T14:40:52.000Z
|
train_ptb.py
|
Jinrohs/ChatelliteVoiceDeepLearning
|
21e62a53bc25023e5e4a95f7b33fe4ee0e46a361
|
[
"MIT"
] | null | null | null |
train_ptb.py
|
Jinrohs/ChatelliteVoiceDeepLearning
|
21e62a53bc25023e5e4a95f7b33fe4ee0e46a361
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Sample script of recurrent neural network language model.
This code is ported from following implementation written in Torch.
https://github.com/tomsercu/lstm
"""
from __future__ import print_function
import argparse
import math
import sys
import time
import numpy as np
import six
import six.moves.cPickle as pickle
import chainer
from chainer import cuda
import chainer.links as L
from chainer import optimizers
from chainer import serializers
import net
parser = argparse.ArgumentParser()
parser.add_argument('--initmodel', '-m', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', default='',
help='Resume the optimization from snapshot')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=39, type=int,
help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=650, type=int,
help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=128,
help='learning minibatch size')
parser.add_argument('--bproplen', '-l', type=int, default=35,
help='length of truncated BPTT')
parser.add_argument('--gradclip', '-c', type=int, default=5,
help='gradient norm threshold to clip')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
xp = cuda.cupy if args.gpu >= 0 else np
n_epoch = args.epoch # number of epochs
n_units = args.unit # number of units per layer
batchsize = args.batchsize # minibatch size
bprop_len = args.bproplen # length of truncated BPTT
grad_clip = args.gradclip # gradient norm threshold to clip
# Prepare dataset (preliminary download dataset by ./download.py)
vocab = {}
def load_data(filename):
global vocab
words = open(filename).read().replace('\n', '<eos>').strip().split()
dataset = np.ndarray((len(words),), dtype=np.int32)
for i, word in enumerate(words):
if word not in vocab:
vocab[word] = len(vocab)
dataset[i] = vocab[word]
return dataset
train_data = load_data('ptb.train.txt')
if args.test:
train_data = train_data[:100]
valid_data = load_data('ptb.valid.txt')
if args.test:
valid_data = valid_data[:100]
test_data = load_data('ptb.test.txt')
if args.test:
test_data = test_data[:100]
print('#vocab =', len(vocab))
with open('vocab.bin', 'wb') as f:
pickle.dump(vocab, f)
# Prepare RNNLM model, defined in net.py
lm = net.RNNLM(len(vocab), n_units)
model = L.Classifier(lm)
model.compute_accuracy = False # we only want the perplexity
for param in model.params():
data = param.data
data[:] = np.random.uniform(-0.1, 0.1, data.shape)
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
# Setup optimizer
#optimizer = optimizers.SGD(lr=1.)
optimizer = optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(grad_clip))
# Init/Resume
if args.initmodel:
print('Load model from', args.initmodel)
serializers.load_npz(args.initmodel, model)
if args.resume:
print('Load optimizer state from', args.resume)
serializers.load_npz(args.resume, optimizer)
def evaluate(dataset):
# Evaluation routine
evaluator = model.copy() # to use different state
evaluator.predictor.reset_state() # initialize state
evaluator.predictor.train = False # dropout does nothing
sum_log_perp = 0
for i in six.moves.range(dataset.size - 1):
x = chainer.Variable(xp.asarray(dataset[i:i + 1]), volatile='on')
t = chainer.Variable(xp.asarray(dataset[i + 1:i + 2]), volatile='on')
loss = evaluator(x, t)
sum_log_perp += loss.data
return math.exp(float(sum_log_perp) / (dataset.size - 1))
# Learning loop
whole_len = train_data.shape[0]
jump = whole_len // batchsize
cur_log_perp = xp.zeros(())
epoch = 0
start_at = time.time()
cur_at = start_at
accum_loss = 0
batch_idxs = list(range(batchsize))
print('going to train {} iterations'.format(jump * n_epoch))
for i in six.moves.range(jump * n_epoch):
x = chainer.Variable(xp.asarray(
[train_data[(jump * j + i) % whole_len] for j in batch_idxs]))
t = chainer.Variable(xp.asarray(
[train_data[(jump * j + i + 1) % whole_len] for j in batch_idxs]))
loss_i = model(x, t)
accum_loss += loss_i
cur_log_perp += loss_i.data
if (i + 1) % bprop_len == 0: # Run truncated BPTT
model.zerograds()
accum_loss.backward()
accum_loss.unchain_backward() # truncate
accum_loss = 0
optimizer.update()
if (i + 1) % 10000 == 0:
now = time.time()
        throughput = 10000. / (now - cur_at)
        perp = math.exp(float(cur_log_perp) / 10000)
        print('iter {} training perplexity: {:.2f} ({:.2f} iters/sec)'.format(
            i + 1, perp, throughput))
cur_at = now
cur_log_perp.fill(0)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('rnnlm.model', model)
print('save the optimizer')
serializers.save_npz('rnnlm.state', optimizer)
if (i + 1) % jump == 0:
epoch += 1
print('evaluate')
now = time.time()
perp = evaluate(valid_data)
print('epoch {} validation perplexity: {:.2f}'.format(epoch, perp))
cur_at += time.time() - now # skip time of evaluation
#if epoch >= 6:
# optimizer.lr /= 1.2
# print('learning rate =', optimizer.lr)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('rnnlm.model', model)
print('save the optimizer')
serializers.save_npz('rnnlm.state', optimizer)
sys.stdout.flush()
## Evaluate on test dataset
#print('test')
#test_perp = evaluate(test_data)
#print('test perplexity:', test_perp)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('rnnlm.model', model)
print('save the optimizer')
serializers.save_npz('rnnlm.state', optimizer)
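# Illustrative sketch (not part of the original script): reloading the saved
# artifacts afterwards. File names match the ones written above; n_units must
# match the value used during training.
def _example_reload_model():
    with open('vocab.bin', 'rb') as f:
        saved_vocab = pickle.load(f)
    reloaded = L.Classifier(net.RNNLM(len(saved_vocab), n_units))
    serializers.load_npz('rnnlm.model', reloaded)
    return reloaded, saved_vocab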
| 31.555556
| 78
| 0.65525
|
106a9e63aee7d5606cafb1627d4153a26109e280
| 1,650
|
py
|
Python
|
load_options_data.py
|
thomasvandewiel-sai/option-strategies
|
1d4e8dbb18b68136a5150382811929cb793aec91
|
[
"Apache-2.0"
] | null | null | null |
load_options_data.py
|
thomasvandewiel-sai/option-strategies
|
1d4e8dbb18b68136a5150382811929cb793aec91
|
[
"Apache-2.0"
] | null | null | null |
load_options_data.py
|
thomasvandewiel-sai/option-strategies
|
1d4e8dbb18b68136a5150382811929cb793aec91
|
[
"Apache-2.0"
] | null | null | null |
from yahoo_fin import stock_info, options
from datetime import date, datetime, timedelta
import numpy as np
DAYS_IN_FUTURE = 45
TICKER = "AAPL"
OPTION_STRATEGY = "iron_condor"
class AnalyzeOptions:
def __init__(self) -> None:
# Initialize dates
self.date_today = []
self.desired_option_date = []
self.nearest_date_to_target = []
self._compute_dates()
def _compute_dates(self) -> None:
# Get the current date
self.date_today = datetime.today().strftime("%Y-%m-%d")
        # Parse back into a datetime (truncated to midnight)
self.date_today = datetime.strptime(self.date_today, "%Y-%m-%d")
# Add desired number of days in the future
self.desired_option_date = self.date_today + timedelta(days=DAYS_IN_FUTURE)
        # Compute the expiration Friday nearest to the target date
self._get_nearest_date()
def _get_fridays(self):
        future_strikes = self.date_today  # start from today
        future_strikes += timedelta(days=4 - future_strikes.weekday())  # snap to this week's Friday (weekday 4)
cnt = 0
cnt_max = np.ceil(DAYS_IN_FUTURE / 7) + 2
while cnt <= cnt_max:
yield future_strikes
future_strikes += timedelta(days=7)
cnt += 1
return future_strikes
def _get_nearest_date(self) -> None:
future_strikes = self._get_fridays()
self.nearest_date_to_target = min(future_strikes, key=lambda x: abs(x - self.desired_option_date))
analyze_options = AnalyzeOptions()
print(analyze_options.nearest_date_to_target)
# todo:
# 1. Extract call and put options
# 2. Get IV and calculate 95% probability trade (start with iron condor)
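# Illustrative sketch for the TODO above (not part of the original script).
# yahoo_fin's options module provides get_calls/get_puts; the expiration-date
# string format accepted by yahoo_fin is an assumption here.
def _example_fetch_chain(ticker=TICKER):
    expiration = analyze_options.nearest_date_to_target.strftime("%m/%d/%Y")
    calls = options.get_calls(ticker, expiration)
    puts = options.get_puts(ticker, expiration)
    return calls, puts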
| 28.448276
| 106
| 0.664848
|
96cd53f9b1c52984a9fe413c2d60ad87e7afaa39
| 7,552
|
py
|
Python
|
sap/cli/gcts.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | null | null | null |
sap/cli/gcts.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | null | null | null |
sap/cli/gcts.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | 1
|
2022-01-10T03:58:03.000Z
|
2022-01-10T03:58:03.000Z
|
"""gCTS methods"""
import sap.cli.core
import sap.cli.helpers
import sap.rest.gcts
def print_gcts_message(console, log, prefix=' '):
"""Print out the message with its protocol if it exists."""
if isinstance(log, str):
message = log
else:
message = log.get('message', None)
if message:
console.printerr(prefix, message)
prefix = prefix + ' '
if not isinstance(log, dict):
return
try:
protocol = log['protocol']
except KeyError:
return
if isinstance(protocol, dict):
protocol = [protocol]
for protocol_item in protocol:
print_gcts_message(console, protocol_item, prefix=prefix)
def dump_gcts_messages(console, messages):
"""Dumps gCTS exception to console"""
output = False
errlog = messages.get('errorLog', None)
if errlog:
output = True
console.printerr('Error Log:')
for errmsg in errlog:
print_gcts_message(console, errmsg)
msglog = messages.get('log', None)
if msglog:
output = True
console.printerr('Log:')
for logmsg in msglog:
print_gcts_message(console, logmsg)
exception = messages.get('exception', None)
if exception:
output = True
console.printerr('Exception:\n ', messages['exception'])
if not output:
console.printerr(str(messages))
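# Illustrative sketch (not part of the original module): the nested payload
# shape these helpers expect. The messages below are made-up example data.
def _example_dump_messages():
    console = sap.cli.core.get_console()
    messages = {
        'errorLog': [{'message': 'Cannot clone repository',
                      'protocol': [{'message': 'authentication failed'}]}],
        'log': ['Repository created'],
        'exception': 'gCTS exception text',
    }
    dump_gcts_messages(console, messages)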
def print_gcts_commit(console, commit):
"""Prints out gCTS commit description"""
console.printout('commit', commit['id'])
console.printout('Author:', commit['author'], f'<{commit["authorMail"]}>')
console.printout('Date: ', commit['date'])
console.printout('\n ', commit['message'])
class CommandGroup(sap.cli.core.CommandGroup):
"""Adapter converting command line parameters to sap.rest.gcts
    method calls.
"""
def __init__(self):
super().__init__('gcts')
@CommandGroup.command()
# pylint: disable=unused-argument
def repolist(connection, args):
"""ls"""
console = sap.cli.core.get_console()
try:
response = sap.rest.gcts.simple_fetch_repos(connection)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(console, ex.messages)
return 1
for repo in response:
console.printout(repo.name, repo.branch, repo.head, repo.url)
return 0
@CommandGroup.argument('--heartbeat', type=int, nargs='?', default=0)
@CommandGroup.argument('--vsid', type=str, nargs='?', default='6IT')
@CommandGroup.argument('--starting-folder', type=str, nargs='?', default='src/')
@CommandGroup.argument('--no-fail-exists', default=False, action='store_true')
@CommandGroup.argument('--vcs-token', type=str, nargs='?')
@CommandGroup.argument('-t', '--type', choices=['GITHUB', 'GIT'], default='GITHUB')
@CommandGroup.argument('-r', '--role', choices=['SOURCE', 'TARGET'], default='SOURCE',
help='SOURCE=Development, TARGET=Provided')
@CommandGroup.argument('package', nargs='?')
@CommandGroup.argument('url')
@CommandGroup.command()
def clone(connection, args):
"""git clone <repository> [<package>]
"""
package = args.package
if not package:
package = sap.rest.gcts.package_name_from_url(args.url)
console = sap.cli.core.get_console()
try:
with sap.cli.helpers.ConsoleHeartBeat(console, args.heartbeat):
repo = sap.rest.gcts.simple_clone(connection, args.url, package,
start_dir=args.starting_folder,
vcs_token=args.vcs_token,
vsid=args.vsid,
error_exists=not args.no_fail_exists,
role=args.role,
typ=args.type)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(sap.cli.core.get_console(), ex.messages)
return 1
console.printout('Cloned repository:')
console.printout(' URL :', repo.url)
console.printout(' branch:', repo.branch)
console.printout(' HEAD :', repo.head)
return 0
@CommandGroup.argument('package')
@CommandGroup.argument('-l', '--list', default=False, action='store_true')
@CommandGroup.command()
def config(connection, args):
"""git config [-l] [<package>]
"""
console = sap.cli.core.get_console()
if args.list:
repo = sap.rest.gcts.Repository(connection, args.package)
try:
configuration = repo.configuration
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(sap.cli.core.get_console(), ex.messages)
return 1
for key, value in configuration.items():
console.printout(f'{key}={value}')
return 0
console.printerr('Invalid command line options\nRun: sapcli gcts config --help')
return 1
@CommandGroup.argument('package')
@CommandGroup.command()
def delete(connection, args):
"""rm
"""
try:
sap.rest.gcts.simple_delete(connection, args.package)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(sap.cli.core.get_console(), ex.messages)
return 1
sap.cli.core.printout(f'The repository "{args.package}" has been deleted')
return 0
@CommandGroup.argument('--heartbeat', type=int, nargs='?', default=0)
@CommandGroup.argument('branch')
@CommandGroup.argument('package')
@CommandGroup.command()
def checkout(connection, args):
"""git checkout <branch>
"""
repo = sap.rest.gcts.Repository(connection, args.package)
old_branch = repo.branch
console = sap.cli.core.get_console()
try:
with sap.cli.helpers.ConsoleHeartBeat(console, args.heartbeat):
response = sap.rest.gcts.simple_checkout(connection, args.branch, repo=repo)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(sap.cli.core.get_console(), ex.messages)
return 1
console.printout(f'The repository "{args.package}" has been set to the branch "{args.branch}"')
console.printout(f'({old_branch}:{response["fromCommit"]}) -> ({args.branch}:{response["toCommit"]})')
return 0
@CommandGroup.argument('package')
@CommandGroup.command('log')
def gcts_log(connection, args):
"""git log
"""
console = sap.cli.core.get_console()
try:
commits = sap.rest.gcts.simple_log(connection, name=args.package)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(console, ex.messages)
return 1
if not commits:
return 0
commit_iter = iter(commits)
commit = next(commit_iter)
print_gcts_commit(console, commit)
for commit in commit_iter:
console.printout('')
print_gcts_commit(console, commit)
return 0
@CommandGroup.argument('--heartbeat', type=int, nargs='?', default=0)
@CommandGroup.argument('package')
@CommandGroup.command()
def pull(connection, args):
"""git pull
"""
console = sap.cli.core.get_console()
try:
with sap.cli.helpers.ConsoleHeartBeat(console, args.heartbeat):
response = sap.rest.gcts.simple_pull(connection, name=args.package)
except sap.rest.gcts.GCTSRequestError as ex:
dump_gcts_messages(sap.cli.core.get_console(), ex.messages)
return 1
console.printout(f'The repository "{args.package}" has been pulled')
console.printout(f'{response["fromCommit"]} -> {response["toCommit"]}')
return 0
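# Hedged usage sketch (not part of sapcli): every handler above receives an
# authenticated REST `connection` plus an argparse-style namespace whose
# attributes mirror the @CommandGroup.argument declarations, and returns a
# shell exit code (0 on success, 1 on a gCTS error). The values below are
# illustrative only.
def _example_checkout(connection):
    from types import SimpleNamespace
    args = SimpleNamespace(package='MY_REPOSITORY', branch='main', heartbeat=0)
    return checkout(connection, args)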
| avg_line_length: 29.5 | max_line_length: 106 | alphanum_fraction: 0.637182 |
| hexsha: ed489ed858b00ac9d511932404f571fca37caf6b | size: 1,252 | ext: py | lang: Python |
| max_stars: homeassistant/helpers/__init__.py | sgrzys/AIS-home-assistant | 7bfc4d6d90de75eea06702c36474d91bf38df3bf | ["Apache-2.0"] | 7 | 2018-08-03T10:15:36.000Z | 2019-03-25T13:31:55.000Z |
| max_issues: homeassistant/helpers/__init__.py | sara0871/https-wakatime.com-android-studio | 5a15b2c036b332c17d5f6a06664378e9273d684f | ["Apache-2.0"] | 6 | 2021-02-08T20:25:50.000Z | 2022-03-11T23:27:53.000Z |
| max_forks: homeassistant/helpers/__init__.py | sara0871/https-wakatime.com-android-studio | 5a15b2c036b332c17d5f6a06664378e9273d684f | ["Apache-2.0"] | 3 | 2018-09-14T07:34:09.000Z | 2018-09-29T12:57:10.000Z |
"""Helper methods for components within Home Assistant."""
import re
from typing import Any, Iterable, Tuple, Sequence, Dict
from homeassistant.const import CONF_PLATFORM
# pylint: disable=invalid-name
ConfigType = Dict[str, Any]
def config_per_platform(config: ConfigType,
domain: str) -> Iterable[Tuple[Any, Any]]:
"""Break a component config into different platforms.
For example, will find 'switch', 'switch 2', 'switch 3', .. etc
Async friendly.
"""
for config_key in extract_domain_configs(config, domain):
platform_config = config[config_key]
if not platform_config:
continue
elif not isinstance(platform_config, list):
platform_config = [platform_config]
for item in platform_config:
try:
platform = item.get(CONF_PLATFORM)
except AttributeError:
platform = None
yield platform, item
def extract_domain_configs(config: ConfigType, domain: str) -> Sequence[str]:
"""Extract keys from config for given domain name.
Async friendly.
"""
pattern = re.compile(r'^{}(| .+)$'.format(domain))
return [key for key in config.keys() if pattern.match(key)]
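# Hedged example (not part of Home Assistant): with a config such as the one
# below, extract_domain_configs() returns the matching keys and
# config_per_platform() yields one (platform, item) pair per platform entry,
# flattening list-style keys like 'switch 2'.
def _example_config_per_platform():
    example = {
        'switch': {'platform': 'demo'},
        'switch 2': [{'platform': 'mqtt', 'name': 'a'},
                     {'platform': 'mqtt', 'name': 'b'}],
        'light': {'platform': 'hue'},
    }
    assert extract_domain_configs(example, 'switch') == ['switch', 'switch 2']
    assert [p for p, _ in config_per_platform(example, 'switch')] == \
        ['demo', 'mqtt', 'mqtt']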
| avg_line_length: 29.116279 | max_line_length: 77 | alphanum_fraction: 0.646166 |
| hexsha: c138ceaa64724bb478c8a985ea110830e78e9b5e | size: 1,869 | ext: py | lang: Python |
| max_stars: hassio/services/interface.py | xebbmw/hassio-supervisor | ce3f6705977e344b968f916a20e3358ce710845e | ["Apache-2.0"] | 1 | 2019-09-03T14:09:06.000Z | 2019-09-03T14:09:06.000Z |
| max_issues: hassio/services/interface.py | xebbmw/hassio-supervisor | ce3f6705977e344b968f916a20e3358ce710845e | ["Apache-2.0"] | null | null | null |
| max_forks: hassio/services/interface.py | xebbmw/hassio-supervisor | ce3f6705977e344b968f916a20e3358ce710845e | ["Apache-2.0"] | 1 | 2019-12-11T01:02:47.000Z | 2019-12-11T01:02:47.000Z |
"""Interface for single service."""
from typing import Any, Dict, List, Optional
import voluptuous as vol
from ..addons.addon import Addon
from ..const import PROVIDE_SERVICE
from ..coresys import CoreSys, CoreSysAttributes
class ServiceInterface(CoreSysAttributes):
"""Interface class for service integration."""
def __init__(self, coresys: CoreSys):
"""Initialize service interface."""
self.coresys: CoreSys = coresys
@property
def slug(self) -> str:
"""Return slug of this service."""
raise NotImplementedError()
@property
def _data(self) -> Dict[str, Any]:
"""Return data of this service."""
raise NotImplementedError()
@property
def schema(self) -> vol.Schema:
"""Return data schema of this service."""
raise NotImplementedError()
@property
def providers(self) -> List[str]:
"""Return name of service providers addon."""
addons = []
for addon in self.sys_addons.installed:
if addon.services_role.get(self.slug) == PROVIDE_SERVICE:
addons.append(addon.slug)
return addons
@property
def enabled(self) -> bool:
"""Return True if the service is in use."""
return bool(self._data)
def save(self) -> None:
"""Save changes."""
self.sys_services.data.save_data()
def get_service_data(self) -> Optional[Dict[str, Any]]:
"""Return the requested service data."""
if self.enabled:
return self._data
return None
def set_service_data(self, addon: Addon, data: Dict[str, Any]) -> None:
"""Write the data into service object."""
raise NotImplementedError()
def del_service_data(self, addon: Addon) -> None:
"""Remove the data from service object."""
raise NotImplementedError()
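# Hedged sketch (not part of Supervisor): a concrete service only needs to
# supply `slug`, `_data` and `schema`; the rest of ServiceInterface is
# inherited. The slug, storage lookup and schema below are illustrative
# assumptions, not the library's real services.
class ExampleService(ServiceInterface):
    """Minimal concrete service used for illustration only."""
    @property
    def slug(self) -> str:
        return "example"
    @property
    def _data(self) -> Dict[str, Any]:
        return self.sys_services.data.get("example", {})  # assumed storage key
    @property
    def schema(self) -> vol.Schema:
        return vol.Schema({vol.Required("host"): str})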
| avg_line_length: 29.203125 | max_line_length: 75 | alphanum_fraction: 0.628143 |
| hexsha: 04cddd2fcc4466731fcaab150e18b2f572a8bdde | size: 1,039 | ext: py | lang: Python |
| max_stars: main.py | TheMody/NiceInit | 3de2c75708ac7b305640266dc0511b81f3c2cad9 | ["MIT"] | null | null | null |
| max_issues: main.py | TheMody/NiceInit | 3de2c75708ac7b305640266dc0511b81f3c2cad9 | ["MIT"] | null | null | null |
| max_forks: main.py | TheMody/NiceInit | 3de2c75708ac7b305640266dc0511b81f3c2cad9 | ["MIT"] | null | null | null |
import argparse
import os
import datetime as dt
import configparser
from logger import Logger
import shutil
import sys
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config_file', help='path_to_config_file', type=str, default="config.json")
args = parser.parse_args()
config = configparser.ConfigParser()
config.sections()
config.read(args.config_file)
if config["DEFAULT"]["directory"] == "default":
config["DEFAULT"]["directory"] = "results/" + dt.datetime.now().strftime("%d.%m.%Y_%H.%M.%S")
os.makedirs(config["DEFAULT"]["directory"], exist_ok = True)
print(config["DEFAULT"]["directory"] )
for file in os.listdir(os.getcwd()):
if ".py" in file or ".json" in file:
shutil.copy2(file, config["DEFAULT"]["directory"] )
sys.stdout = Logger(open(config["DEFAULT"]["directory"] +"/SysOut.txt","w"))
if __name__ == '__main__':
main()
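# Hedged note (not part of the original script): despite the ".json" default
# name, the file is read with configparser, i.e. INI syntax, and this script
# only reads the [DEFAULT] section's "directory" key. The helper below writes a
# minimal accepted config file and is illustrative only.
def _write_example_config(path="config.json"):
    with open(path, "w") as f:
        f.write("[DEFAULT]\ndirectory = default\n")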
| avg_line_length: 29.685714 | max_line_length: 101 | alphanum_fraction: 0.644851 |
| hexsha: 28858ca166d7af857a33f6741535946bc73e5f95 | size: 36 | ext: py | lang: Python |
| max_stars: tests/__init__.py | rapid7/leexportpy | 441db1710399d6f8dc41aaf1b474b5f8ca9d7018 | ["MIT"] | 2 | 2016-07-05T09:18:20.000Z | 2016-07-12T19:48:37.000Z |
| max_issues: tests/__init__.py | logentries/leexportpy | 441db1710399d6f8dc41aaf1b474b5f8ca9d7018 | ["MIT"] | 7 | 2016-06-28T13:47:57.000Z | 2016-07-22T11:13:16.000Z |
| max_forks: tests/examples/__init__.py | rapid7/leexportpy | 441db1710399d6f8dc41aaf1b474b5f8ca9d7018 | ["MIT"] | 6 | 2016-09-08T15:06:18.000Z | 2021-01-26T10:12:32.000Z |
__author__ = 'Logentries by Rapid7'
| avg_line_length: 18 | max_line_length: 35 | alphanum_fraction: 0.777778 |
| hexsha: e1f56c657cccd2bce705a98d8330a25d72cd0d6b | size: 4,995 | ext: py | lang: Python |
| max_stars: data/dataconstructor.py | Skhaki18/APS360 | 4a386fc8ee14802bcb5ef25eefc234160673896a | ["MIT"] | 1 | 2022-02-09T20:35:05.000Z | 2022-02-09T20:35:05.000Z |
| max_issues: data/dataconstructor.py | Skhaki18/APS360 | 4a386fc8ee14802bcb5ef25eefc234160673896a | ["MIT"] | null | null | null |
| max_forks: data/dataconstructor.py | Skhaki18/APS360 | 4a386fc8ee14802bcb5ef25eefc234160673896a | ["MIT"] | null | null | null |
import fiftyone as fo
import fiftyone.zoo as foz
import torchvision.transforms as transforms
from PIL import Image
import os
import os.path
from pycocotools.coco import COCO
import torch.utils.data as data
import cv2
import torch
import matplotlib.pyplot as plt
class CocoDetection(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
def __init__(self, root, annFile, args, Live = False, transform=None, target_transform=None):
self.root = root
self.coco = COCO(annFile)
self.filterClasses = args.config["class_type"]
self.CatIds = self.coco.getCatIds(catNms=self.filterClasses)
if(args.config["class_ids"] == []):
temp = self.coco.getCatIds(catNms=args.config["scanner"])
args.config["class_ids"] = list(temp)
self.ids = []
for i in self.CatIds:
subset = self.coco.getImgIds(catIds=[i])
for i in range(len(subset)):
mid = subset[i]
self.ids.append(mid)
self.ids = list(set(self.ids))
print("CurrentSelfIDS", self.ids)
self.transform = transform
self.target_transform = target_transform
self.Live = Live
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
target = coco.loadAnns(ann_ids)
img_id = self.ids[index]
path = coco.loadImgs(img_id)[0]['file_name']
base = plt.imread(os.path.join(self.root, path))
img = Image.open(os.path.join(self.root, path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if(self.Live): #Handles Live Annotations!!!
return img, target , base
boxes = []
labels = []
# Define Indices: [0:car, 1: truck, 2: bus, 3: motorcycle, 4:person]
# annotation file
Set = [3]#, 8, 6, 4, 1]
area = 0
        # NOTE: this pre-pass originally iterated `labels`, which is still empty
        # here; iterate the raw annotations instead. The result is only a rough
        # estimate and is recomputed from `boxes` below.
        for i in target:
            if(i["category_id"] in Set):
                (startX, startY, endX, endY) = i["bbox"]
                area += endX*endY
for i in target:
id = i["category_id"]
if i["category_id"] in Set:
labels.append(Set.index(id))
(startX, startY, endX, endY) = i["bbox"]
boxes.append([round(startX), round(startY), round(startX+endX), round(startY+endY)])
# Standard One Epoch Preprocessing PyTorch Supported!
boxes = torch.as_tensor(boxes, dtype=torch.float32)
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((boxes.shape[0],), dtype=torch.int64)
labels = torch.as_tensor(labels, dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["area"] = area
target["iscrowd"] = iscrowd
image_id = torch.tensor([index])
target["image_id"] = image_id
return img, target
def __len__(self):
return len(self.ids)
def downloadData():
dataset = foz.load_zoo_dataset(
"coco-2017",
label_types=["detections"],
classes=["person", "car", "truck", "motorcycle", "bus"],
seed = 420,
shuffle = True,
include_id = True
)
dataset.persistent = True
print(dataset.info)
def constructData(BasePath, args, Live):
transform = transforms.Compose([transforms.Resize((480, 640)),transforms.RandomRotation(10), transforms.ToTensor()])
Train = CocoDetection(root = BasePath+"train/data",annFile = BasePath+"train/labels.json", args=args, transform=transform, Live=Live)
Validation = CocoDetection(root = BasePath+"validation/data",annFile = BasePath+"validation/labels.json", args=args, transform=transform, Live=Live)
# Test currently disabled
# Test = CocoDetection(root = BasePath+"test/data",annFile = BasePath+"test/labels.json", args=args, transform=transform)
# Change here for test/validation
return Train, Validation, 1
def samplingImages(dataset):
print("sampled!")
    # Sample visualization currently amended (disabled)
return
def Data_Driver(args, Live=False):
BasePath = args.config["datapath"]
Train, Validation, Test = constructData(BasePath, args, Live=Live)
print(len(Validation))
samplingImages(Validation)
return Train, Validation, Test
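# Hedged usage sketch (not part of the original project): Data_Driver() only
# needs an object whose .config dict provides "datapath", "class_type",
# "class_ids" and "scanner"; the paths, classes and collate function below are
# illustrative assumptions.
def _example_data_driver():
    from types import SimpleNamespace
    args = SimpleNamespace(config={
        "datapath": "coco-2017/",
        "class_type": ["car"],
        "class_ids": [],
        "scanner": ["car"],
    })
    train, validation, _ = Data_Driver(args)
    return data.DataLoader(train, batch_size=2,
                           collate_fn=lambda batch: tuple(zip(*batch)))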
| avg_line_length: 31.613924 | max_line_length: 152 | alphanum_fraction: 0.617217 |
| hexsha: 5bf6bb0057f19fbf40e689fb79c1d0bfee176925 | size: 109 | ext: py | lang: Python |
| max_stars: lab2/text_recognizer/models/__init__.py | AleksandrLiadov/fsdl-text-recognizer-2021-labs | 9495e1457fc82ab83ff7e4141939d603565eb89b | ["MIT"] | 402 | 2021-01-18T12:14:08.000Z | 2022-03-28T03:41:05.000Z |
| max_issues: lab2/text_recognizer/models/__init__.py | AleksandrLiadov/fsdl-text-recognizer-2021-labs | 9495e1457fc82ab83ff7e4141939d603565eb89b | ["MIT"] | 27 | 2021-01-21T01:54:30.000Z | 2022-03-29T21:39:41.000Z |
| max_forks: lab2/text_recognizer/models/__init__.py | AleksandrLiadov/fsdl-text-recognizer-2021-labs | 9495e1457fc82ab83ff7e4141939d603565eb89b | ["MIT"] | 271 | 2021-01-21T18:07:24.000Z | 2022-03-30T12:49:53.000Z |
from .mlp import MLP
# Hide lines below until Lab 2
from .cnn import CNN
# Hide lines above until Lab 2
| avg_line_length: 10.9 | max_line_length: 30 | alphanum_fraction: 0.715596 |
| hexsha: 3d06493111933d76673a5f62a23815918589a2d5 | size: 516 | ext: py | lang: Python |
| max_stars: src/sage/modular/modform/defaults.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 10 | 2018-06-01T21:54:53.000Z | 2022-03-14T20:11:34.000Z |
| max_issues: src/sage/modular/modform/defaults.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | ["BSL-1.0"] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z |
| max_forks: src/sage/modular/modform/defaults.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | ["BSL-1.0"] | 15 | 2020-07-23T10:46:25.000Z | 2022-01-25T15:37:24.000Z |
#########################################################################
# Copyright (C) 2004--2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#########################################################################
# The default precision for computation and display of q-expansions of
# modular forms.
from sage.rings.all import Integer
DEFAULT_PRECISION = Integer(6)
DEFAULT_VARIABLE = 'q'
| avg_line_length: 34.4 | max_line_length: 73 | alphanum_fraction: 0.505814 |
| hexsha: fb635ed11674e583064d6a3937ee133c9de6576d | size: 3,914 | ext: py | lang: Python |
| max_stars: pyecodevices_rt2/ecodevices_rt2.py | pcourbin/pyecodevices_rt2 | 5f9e49dddde01bef933a1825230bd90759d3f41c | ["MIT"] | null | null | null |
| max_issues: pyecodevices_rt2/ecodevices_rt2.py | pcourbin/pyecodevices_rt2 | 5f9e49dddde01bef933a1825230bd90759d3f41c | ["MIT"] | 2 | 2021-04-12T22:48:48.000Z | 2021-05-15T10:56:27.000Z |
| max_forks: pyecodevices_rt2/ecodevices_rt2.py | pcourbin/pyecodevices_rt2 | 5f9e49dddde01bef933a1825230bd90759d3f41c | ["MIT"] | null | null | null |
from datetime import datetime
from datetime import timedelta
import requests
from .const import INDEX_GET_LINK
from .const import PRODUCT_ENTRY
from .const import PRODUCT_VALUE
from .const import RESPONSE_ENTRY
from .const import RESPONSE_SUCCESS_VALUE
from .const import RT2_API_GET_LINK_CACHED
from .exceptions import EcoDevicesRT2ConnectError
from .exceptions import EcoDevicesRT2RequestError
class EcoDevicesRT2:
"""Class representing the Ecodevices RT2 and its API"""
def __init__(
self,
host: str,
port: int = 80,
apikey: str = "",
timeout: int = 10,
cached_ms: int = 0,
):
self._host = host
self._port = port
self._apikey = apikey
self._apiurl = "http://%s:%s/api/xdevices.json?key=%s" % (
str(host),
str(port),
str(apikey),
)
self._timeout = timeout
self._cached_ms = cached_ms
self._cached = RT2_API_GET_LINK_CACHED
@property
def host(self):
"""Return the hostname."""
return self._host
@property
def apikey(self):
"""Return the apikey."""
return self._apikey
@property
def apiurl(self):
"""Return the default apiurl."""
return self._apiurl
@property
def cached_ms(self):
"""Return the maximum cached value in milliseconds."""
return self._cached_ms
def _request(self, params):
r = requests.get(self._apiurl, params=params, timeout=self._timeout)
r.raise_for_status()
content = r.json()
product = content.get(PRODUCT_ENTRY, None)
if product == PRODUCT_VALUE:
return content
else:
raise EcoDevicesRT2ConnectError(
"Ecodevices RT2 API wrong 'product' name\nUrl: %s \nValues: %s"
% (r.request.url, content)
)
def ping(self) -> bool:
try:
return (
self.get(INDEX_GET_LINK, command_entry=RESPONSE_ENTRY)
== RESPONSE_SUCCESS_VALUE
)
except:
pass
return False
def get_all_cached(self):
for complete_command in self._cached:
self.get(complete_command, cached_ms=0)
def get(
self, command, command_value=None, command_entry=None, cached_ms: int = None
):
"""Get value from api : http://{host}:{port}/api/xdevices.json?key={apikey}&{command}={command_value},
then get value {command_entry} in JSON response."""
complete_command = command
if command_value is not None:
complete_command = command + "=" + command_value
if cached_ms is None:
cached_ms = self._cached_ms
response = None
now = datetime.now()
if (
complete_command in self._cached
and "last_call" in self._cached[complete_command]
):
last_call = self._cached[complete_command]["last_call"]
if (now - last_call) / timedelta(
milliseconds=1
) <= cached_ms and "response" in self._cached[complete_command]:
response = self._cached[complete_command]["response"]
if response is None:
response = self._request(complete_command)
if complete_command in self._cached:
self._cached[complete_command]["last_call"] = now
self._cached[complete_command]["response"] = response
if command_entry is not None:
if command_entry in response:
response = response.get(command_entry)
else:
raise EcoDevicesRT2RequestError(
"Ecodevices RT2 API error, key '%s' not in return from command: %s \nValues: %s"
% (command_entry, complete_command, response)
)
return response
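# Hedged usage sketch (not part of the library): the host, API key and the
# command/entry names are placeholders only. get() builds
# http://<host>:<port>/api/xdevices.json?key=<apikey>&<command>=<value> and can
# return a single entry of the JSON answer, re-using answers younger than
# `cached_ms` for commands listed in the cached table.
def _example_usage(host="192.168.1.2", apikey="MYAPIKEY"):
    ecodevices = EcoDevicesRT2(host=host, apikey=apikey, cached_ms=1000)
    if not ecodevices.ping():
        raise EcoDevicesRT2ConnectError("Ecodevices RT2 is not reachable")
    return ecodevices.get("Index", "All", "INDEX_TIC")  # placeholder names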
| avg_line_length: 31.564516 | max_line_length: 110 | alphanum_fraction: 0.594532 |
| hexsha: d3d7888caa0afbcaaaf6d2fb5162cd14288e9717 | size: 18,821 | ext: py | lang: Python |
| max_stars: google/appengine/tools/devappserver2/instance.py | yc550370460/google_app_engine | 876db682bd9d88e7b1f5403f05a0b26f2efb1c19 | ["Apache-2.0"] | null | null | null |
| max_issues: google/appengine/tools/devappserver2/instance.py | yc550370460/google_app_engine | 876db682bd9d88e7b1f5403f05a0b26f2efb1c19 | ["Apache-2.0"] | null | null | null |
| max_forks: google/appengine/tools/devappserver2/instance.py | yc550370460/google_app_engine | 876db682bd9d88e7b1f5403f05a0b26f2efb1c19 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import logging
import threading
import time
import google
from google.appengine.tools.devappserver2 import errors
NORMAL_REQUEST = 0
READY_REQUEST = 1 # A warmup request i.e. /_ah/warmup.
BACKGROUND_REQUEST = 2 # A request to create a background thread.
SHUTDOWN_REQUEST = 3 # A request to stop the module i.e. /_ah/stop.
# A request to send a command to the module for evaluation e.g. for use by
# interactive shells.
INTERACTIVE_REQUEST = 4
# Constants for use with FILE_CHANGE_INSTANCE_RESTART_POLICY. These constants
# determine whether an instance will be restarted if a file is changed in
# the application_root or any directory returned by
# InstanceFactory.get_restart_directories.
ALWAYS = 0 # Always restart instances.
AFTER_FIRST_REQUEST = 1 # Restart instances that have received >= 1 request.
NEVER = 2 # Never restart instances.
class CannotAcceptRequests(errors.Error):
"""An Instance cannot accept a request e.g. because it is quitting."""
class CannotQuitServingInstance(errors.Error):
"""An Instance cannot be quit e.g. because it is handling a request."""
class InvalidInstanceId(errors.Error):
"""The requested instance id is not serving."""
class RuntimeProxy(object):
"""Abstract base class for a subclass that manages a runtime process."""
def handle(self, environ, start_response, url_map, match, request_id,
request_type):
"""Serves this request by forwarding it to the runtime process.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler matching this request.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Yields:
A sequence of strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def start(self):
"""Starts the runtime process and waits until it is ready to serve."""
raise NotImplementedError()
def quit(self):
"""Terminates the runtime process."""
raise NotImplementedError()
class Instance(object):
"""Handle requests through a RuntimeProxy."""
def __init__(self,
request_data,
instance_id,
runtime_proxy,
max_concurrent_requests,
max_background_threads=0,
expect_ready_request=False):
"""Initializer for Instance.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
instance_id: A string or integer representing the unique (per module) id
of the instance.
runtime_proxy: A RuntimeProxy instance that will be used to handle
requests.
max_concurrent_requests: The maximum number of concurrent requests that
the instance can handle. If the instance does not support concurrent
requests then the value should be 1.
max_background_threads: The maximum number of background threads that
the instance can handle. If the instance does not support background
threads then the value should be 0.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
"""
self._request_data = request_data
self._instance_id = instance_id
self._max_concurrent_requests = max_concurrent_requests
self._max_background_threads = max_background_threads
self._runtime_proxy = runtime_proxy
self._condition = threading.Condition()
self._num_outstanding_requests = 0 # Protected by self._condition.
self._num_running_background_threads = 0 # Protected by self._condition.
self._total_requests = 0 # Protected by self._condition.
self._started = False # Protected by self._condition.
self._quitting = False # Protected by self._condition.
self._quit = False # Protected by self._condition.
self._last_request_end_time = time.time() # Protected by self._condition.
self._expecting_ready_request = expect_ready_request
self._expecting_shutdown_request = False
self._healthy = True
    # A deque containing (start_time, end_time) 2-tuples representing completed
# requests. This is used to compute latency and qps statistics.
self._request_history = collections.deque() # Protected by self._condition.
def __repr__(self):
statuses = []
if not self._started:
statuses.append('not started')
if self._quitting:
statuses.append('quitting')
if self._quit:
statuses.append('quit')
if self._expecting_ready_request:
statuses.append('handling ready request')
if statuses:
status = ' [%s]' % ' '.join(statuses)
else:
status = ''
return '<Instance %s: %d/%d, total: %d%s>' % (
self._instance_id,
self._num_outstanding_requests,
self._max_concurrent_requests,
self._total_requests,
status)
@property
def instance_id(self):
"""The unique string or integer id for the Instance."""
return self._instance_id
@property
def total_requests(self):
"""The total number requests that the Instance has handled."""
with self._condition:
return self._total_requests
@property
def remaining_request_capacity(self):
"""The number of extra requests that the Instance can currently handle."""
with self._condition:
return self._max_concurrent_requests - self._num_outstanding_requests
@property
def remaining_background_thread_capacity(self):
"""The number of extra background threads the Instance can handle."""
with self._condition:
return self._max_background_threads - self._num_running_background_threads
@property
def num_outstanding_requests(self):
"""The number of requests that the Instance is currently handling."""
with self._condition:
return self._num_outstanding_requests
@property
def idle_seconds(self):
"""The number of seconds that the Instance has been idle.
Will be 0.0 if the Instance has not started.
"""
with self._condition:
if self._num_outstanding_requests:
return 0.0
elif not self._started:
return 0.0
else:
return time.time() - self._last_request_end_time
@property
def handling_ready_request(self):
"""True if the Instance is handling or will be sent a ready request."""
return self._expecting_ready_request
def get_latency_60s(self):
"""Returns the average request latency over the last 60s in seconds."""
with self._condition:
self._trim_request_history_to_60s()
if not self._request_history:
return 0.0
else:
total_latency = sum(
end - start for (start, end) in self._request_history)
return total_latency / len(self._request_history)
def get_qps_60s(self):
"""Returns the average queries-per-second over the last 60 seconds."""
with self._condition:
self._trim_request_history_to_60s()
if not self._request_history:
return 0.0
else:
return len(self._request_history) / 60.0
@property
def has_quit(self):
with self._condition:
return self._quit or self._quitting or self._expecting_shutdown_request
@property
def can_accept_requests(self):
"""True if .handle() will accept requests.
Does not consider outstanding request volume.
"""
with self._condition:
return (not self._quit and
not self._quitting and
not self._expecting_ready_request and
not self._expecting_shutdown_request and
self._started and
self._healthy)
def _trim_request_history_to_60s(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - 60
with self._condition:
while self._request_history:
t, _ = self._request_history[0]
if t < window_start:
self._request_history.popleft()
else:
break
def start(self):
"""Start the instance and the RuntimeProxy.
Returns:
True if the Instance was started or False, if the Instance has already
been quit.
"""
with self._condition:
if self._quit:
return False
self._runtime_proxy.start()
with self._condition:
if self._quit:
self._runtime_proxy.quit()
return False
self._last_request_end_time = time.time()
self._started = True
logging.debug('Started instance: %s', self)
return True
def quit(self, allow_async=False, force=False, expect_shutdown=False):
"""Quits the instance and the RuntimeProxy.
Args:
allow_async: Whether to enqueue the quit after all requests have completed
if the instance cannot be quit immediately.
force: Whether to force the instance to quit even if the instance is
currently handling a request. This overrides allow_async if True.
expect_shutdown: Whether the instance will be sent a shutdown request.
Raises:
CannotQuitServingInstance: if the Instance is currently handling a
request and allow_async is False.
"""
with self._condition:
if self._quit:
return
if not self._started:
self._quit = True
return
if expect_shutdown:
self._expecting_shutdown_request = True
return
if (self._num_outstanding_requests or
self._num_running_background_threads or
self._expecting_shutdown_request):
if not force:
if allow_async or expect_shutdown:
self._quitting = True
return
raise CannotQuitServingInstance()
self._quit = True
self._runtime_proxy.quit()
self._condition.notify_all()
logging.debug('Quit instance: %s', self)
def reserve_background_thread(self):
"""Reserves a background thread slot.
Raises:
CannotAcceptRequests: if the Instance is already handling the maximum
permissible number of background threads or is not in a state where it
can handle background threads.
"""
with self._condition:
if self._quit:
raise CannotAcceptRequests('Instance has been quit')
if not self._started:
raise CannotAcceptRequests('Instance has not started')
if not self.remaining_background_thread_capacity:
raise CannotAcceptRequests(
'Instance has no additional background thread capacity')
self._num_running_background_threads += 1
def handle(self, environ, start_response, url_map, match, request_id,
request_type):
"""Handles an HTTP request by forwarding it to the RuntimeProxy.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler matching this request.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
request_type: The type of the request. See *_REQUEST module constants.
Returns:
An iterable over strings containing the body of the HTTP response.
Raises:
CannotAcceptRequests: if the Instance has quit or is already handling the
maximum permissible number of concurrent requests.
"""
start_time = time.time()
with self._condition:
if self._quit:
raise CannotAcceptRequests('Instance has been quit')
if not self._started:
raise CannotAcceptRequests('Instance has not started')
if request_type not in (BACKGROUND_REQUEST, SHUTDOWN_REQUEST):
if self._quitting:
raise CannotAcceptRequests('Instance is shutting down')
if self._expecting_ready_request and request_type != READY_REQUEST:
raise CannotAcceptRequests('Instance is waiting for ready request')
if not self.remaining_request_capacity:
raise CannotAcceptRequests('Instance has no additional capacity')
self._num_outstanding_requests += 1
self._request_data.set_request_instance(request_id, self)
self._total_requests += 1
try:
# Force the generator to complete so the code in the finally block runs
# at the right time.
return list(self._runtime_proxy.handle(environ,
start_response,
url_map,
match,
request_id,
request_type))
finally:
logging.debug('Request handled by %s in %0.4fs',
self, time.time() - start_time)
with self._condition:
if request_type == READY_REQUEST:
self._expecting_ready_request = False
if request_type == BACKGROUND_REQUEST:
self._num_running_background_threads -= 1
elif request_type != SHUTDOWN_REQUEST:
self._num_outstanding_requests -= 1
self._last_request_end_time = time.time()
self._trim_request_history_to_60s()
self._request_history.append((start_time, self._last_request_end_time))
if request_type == READY_REQUEST:
self._condition.notify(self._max_concurrent_requests)
elif request_type == SHUTDOWN_REQUEST:
self._expecting_shutdown_request = False
self.quit(allow_async=True)
elif request_type == NORMAL_REQUEST:
self._condition.notify()
if (not self._num_outstanding_requests and
not self._num_running_background_threads):
if self._quitting:
self.quit()
def wait(self, timeout_time):
"""Wait for this instance to have capacity to serve a request.
Args:
timeout_time: A float containing a time in seconds since the epoch to wait
until before timing out.
Returns:
True if the instance has request capacity or False if the timeout time was
reached or the instance has been quit.
"""
with self._condition:
while (time.time() < timeout_time and not
(self.remaining_request_capacity and self.can_accept_requests)
and not self.has_quit):
self._condition.wait(timeout_time - time.time())
return bool(self.remaining_request_capacity and self.can_accept_requests)
def set_health(self, health):
self._healthy = health
@property
def healthy(self):
return self._healthy
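# Hedged lifecycle sketch (not part of the SDK): the echo proxy and the
# request-info stub below are illustrative stand-ins, used only to show the
# start -> handle -> quit flow of Instance.
class _EchoRuntimeProxy(RuntimeProxy):
  """Trivial RuntimeProxy that answers every request with a fixed body."""
  def start(self):
    pass
  def quit(self):
    pass
  def handle(self, environ, start_response, url_map, match, request_id,
             request_type):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    yield 'hello'
class _RequestInfoStub(object):
  """Minimal stand-in for wsgi_request_info.WSGIRequestInfo."""
  def set_request_instance(self, request_id, instance):
    pass
def _example_instance_lifecycle():
  inst = Instance(_RequestInfoStub(), 'instance-0', _EchoRuntimeProxy(),
                  max_concurrent_requests=1)
  inst.start()
  body = inst.handle({}, lambda status, headers: None, None, None, 'request-1',
                     NORMAL_REQUEST)
  inst.quit(allow_async=True)
  return body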
class InstanceFactory(object):
"""An abstract factory that creates instances for an InstancePool.
Attributes:
max_concurrent_requests: The maximum number of concurrent requests that
Instances created by this factory can handle. If the Instances do not
support concurrent requests then the value should be 1.
    START_URL_MAP: An appinfo.URLMap that should be used as the default
      /_ah/start handler if no user-specified script handler matches.
    WARMUP_URL_MAP: An appinfo.URLMap that should be used as the default
      /_ah/warmup handler if no user-specified script handler matches.
"""
START_URL_MAP = None
WARMUP_URL_MAP = None
# If True then the runtime supports interactive command evaluation e.g. for
# use in interactive shells.
SUPPORTS_INTERACTIVE_REQUESTS = False
# Controls how instances are restarted when a file relevant to the application
  # is changed. Possible values: NEVER, AFTER_FIRST_REQUEST, ALWAYS.
FILE_CHANGE_INSTANCE_RESTART_POLICY = None
def __init__(self, request_data, max_concurrent_requests,
max_background_threads=0):
"""Initializer for InstanceFactory.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo instance that will be
populated with Instance data for use by the API stubs.
max_concurrent_requests: The maximum number of concurrent requests that
Instances created by this factory can handle. If the Instances do not
support concurrent requests then the value should be 1.
max_background_threads: The maximum number of background threads that
the instance can handle. If the instance does not support background
threads then the value should be 0.
"""
self.request_data = request_data
self.max_concurrent_requests = max_concurrent_requests
self.max_background_threads = max_background_threads
def get_restart_directories(self):
"""Returns a list of directories changes in which should trigger a restart.
Returns:
A list of directory paths. Changes (i.e. files added, deleted or modified)
in these directories will trigger the restart of all instances created
with this factory.
"""
return []
def files_changed(self):
"""Called when a file relevant to the factory *might* have changed."""
def configuration_changed(self, config_changes):
"""Called when the configuration of the module has changed.
Args:
      config_changes: A set containing the changes that occurred. See the
*_CHANGED constants in the application_configuration module.
"""
def new_instance(self, instance_id, expect_ready_request=False):
"""Create and return a new Instance.
Args:
instance_id: A string or integer representing the unique (per module) id
of the instance.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
Returns:
The newly created instance.Instance.
"""
raise NotImplementedError()
| avg_line_length: 36.759766 | max_line_length: 80 | alphanum_fraction: 0.693534 |
| hexsha: 59792bc47ca33ca2baec2d123501559d70c078b0 | size: 1,445 | ext: py | lang: Python |
| max_stars: setup.py | trondkr/xESMF | 27952e1ab2f1b7b23c443953b9d1e079376efb08 | ["MIT"] | null | null | null |
| max_issues: setup.py | trondkr/xESMF | 27952e1ab2f1b7b23c443953b9d1e079376efb08 | ["MIT"] | null | null | null |
| max_forks: setup.py | trondkr/xESMF | 27952e1ab2f1b7b23c443953b9d1e079376efb08 | ["MIT"] | 1 | 2021-02-24T05:22:23.000Z | 2021-02-24T05:22:23.000Z |
from setuptools import setup, find_packages
import os
VERSION = '0.3.1'
DISTNAME = 'xesmf'
DESCRIPTION = "Universal Regridder for Geospatial Data"
AUTHOR = 'Jiawei Zhuang'
AUTHOR_EMAIL = 'jiaweizhuang@g.harvard.edu'
URL = 'https://github.com/JiaweiZhuang/xESMF'
LICENSE = 'MIT'
PYTHON_REQUIRES = '>=3.5'
# https://github.com/rtfd/readthedocs.org/issues/5512#issuecomment-475024373
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
INSTALL_REQUIRES = []
else:
INSTALL_REQUIRES = ['esmpy', 'xarray', 'numpy', 'scipy']
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
]
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=VERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
python_requires=PYTHON_REQUIRES,
install_requires=INSTALL_REQUIRES,
url=URL,
packages=find_packages())
| avg_line_length: 27.788462 | max_line_length: 76 | alphanum_fraction: 0.671972 |
| hexsha: 4f655a0391166aea5b02910c861cad30b327317d | size: 1,957 | ext: py | lang: Python |
| max_stars: functions_drug_features.py | pamiganderson/insight | 8397288ac2aee4d98614a6a42179b8520cb1cbf4 | ["MIT"] | null | null | null |
| max_issues: functions_drug_features.py | pamiganderson/insight | 8397288ac2aee4d98614a6a42179b8520cb1cbf4 | ["MIT"] | null | null | null |
| max_forks: functions_drug_features.py | pamiganderson/insight | 8397288ac2aee4d98614a6a42179b8520cb1cbf4 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 15:54:44 2018
@author: pamelaanderson
"""
import numpy as np
import pandas as pd
def find_nti_drugs(df_merge_class):
""" Determine if drugs are narrow therapeutic index """
nti_list = ['carbamazepine',
'cyclosporine',
'digoxin',
'ethosuximide',
'levothyroxine sodium',
'lithium carbonate',
'phenytoin',
'procainamide',
'theophylline anhydrous',
'warfarin sodium',
'tacrolimus']
nti_risk = []
for i, val in enumerate(df_merge_class.index.values):
if val in nti_list:
nti_risk.append(1)
else:
nti_risk.append(0)
df_merge_class['nti_index'] = pd.Series(nti_risk, index=df_merge_class.index.values)
return df_merge_class
def find_num_act_ingredients(df_merge_class):
""" Find the number of active ingredients in drugs """
path = '/Users/pamelaanderson/Documents/Insight/spending/'
file_name = 'products.csv'
df = pd.read_csv(path+file_name)
num_act_ingredients = []
for i in df['ActiveIngredient']:
num_act_ingredients.append(len(i.split(';')))
df['num_act_ingredients'] = pd.Series(num_act_ingredients)
df_piv = pd.pivot_table(df, index='DrugName',
values = 'num_act_ingredients',
aggfunc = np.max)
df_piv = df_piv.reset_index()
df_piv['DrugName'] = df_piv['DrugName'].str.lower()
df_piv = df_piv.set_index('DrugName')
df_merge_ingre = df_merge_class.merge(df_piv, left_index=True,
right_index=True, how='left')
num_act_ingre = df_merge_ingre['num_act_ingredients']
num_act_ingre = num_act_ingre.fillna(1)
df_merge_ingre['num_act_ingredients'] = num_act_ingre
return df_merge_ingre
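# Hedged usage sketch (not part of the original analysis): find_nti_drugs()
# expects a DataFrame indexed by lower-case drug name; the toy frame below is
# illustrative only.
def _example_nti_flag():
    df = pd.DataFrame({'spending': [1.0, 2.0]},
                      index=['warfarin sodium', 'ibuprofen'])
    df = find_nti_drugs(df)       # adds the 0/1 'nti_index' column
    return list(df['nti_index'])  # -> [1, 0]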
| avg_line_length: 34.333333 | max_line_length: 88 | alphanum_fraction: 0.610118 |
| hexsha: a90a5d223c9838f1aa357392d4b244819e4234cf | size: 1,512 | ext: py | lang: Python |
| max_stars: rest-api/flask_app/config.py | sinedie/Flask-Svelte-Websockets-Nginx-Docker | 76daeec2c76f9f27ca526f53393ab4363020b92b | ["WTFPL"] | 4 | 2021-11-21T14:04:15.000Z | 2022-03-20T15:28:14.000Z |
| max_issues: rest-api/flask_app/config.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | ["WTFPL"] | null | null | null |
| max_forks: rest-api/flask_app/config.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | ["WTFPL"] | null | null | null |
import os
# Flask env
FLASK_ENV = os.environ.get("FLASK_ENV")
SECRET_KEY = os.environ.get("SECRET_KEY")
# JWT
JWT_SECRET_KEY = os.environ.get("JWT_SECRET_KEY")
JWT_REFRESH_TOKEN_EXPIRES = int(os.environ.get("JWT_REFRESH_TOKEN_EXPIRES"))
# Postgres
POSTGRES_USER = os.environ['POSTGRES_USER']
POSTGRES_PASSWORD = os.environ['POSTGRES_PASSWORD']
POSTGRES_HOST = os.environ['POSTGRES_HOST']
POSTGRES_PORT = os.environ['POSTGRES_PORT']
POSTGRES_DB = os.environ['POSTGRES_DB']
DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}'
# Redis
REDIS_PASSWORD = os.environ['REDIS_PASSWORD']
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = os.environ['REDIS_PORT']
REDIS_DB = os.environ['REDIS_DB']
# REDIS_URI = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
REDIS_URI = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
class Config:
FLASK_ENV = FLASK_ENV
DEBUG = False
TESTING = False
SECRET_KEY = SECRET_KEY
JWT_SECRET_KEY = JWT_SECRET_KEY
JWT_REFRESH_TOKEN_EXPIRES = JWT_REFRESH_TOKEN_EXPIRES
SQLALCHEMY_DATABASE_URI = DATABASE_CONNECTION_URI
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_URI = REDIS_URI
class DevConfig(Config):
DEBUG = True
class TestConfig(Config):
TESTING = True
class ProdConfig(Config):
pass
if FLASK_ENV == 'production':
config = ProdConfig()
elif FLASK_ENV == 'testing':
config = TestConfig()
else:
config = DevConfig()
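# Hedged usage sketch (not part of the original service): an application
# factory would typically consume the object selected above like this.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config)  # Dev/Test/Prod picked via FLASK_ENV
    return app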
| avg_line_length: 25.2 | max_line_length: 132 | alphanum_fraction: 0.749339 |
| hexsha: 04dd012077cc995f9628b51e95b7f7a1162fc8ff | size: 1,261 | ext: py | lang: Python |
| max_stars: dumbo/hyy-python-hadoop/examples/TestText.py | arbenson/mrtsqr | 006a2de645db9ad4735971cc0a77739249c1437d | ["BSD-2-Clause"] | 11 | 2015-01-09T18:36:29.000Z | 2022-02-09T09:02:36.000Z |
| max_issues: python-hadoop/examples/TestText.py | streamsets/PythonSequenceFile | 28350bc62442264bff79a0616474d8216718c694 | ["Apache-2.0"] | null | null | null |
| max_forks: python-hadoop/examples/TestText.py | streamsets/PythonSequenceFile | 28350bc62442264bff79a0616474d8216718c694 | ["Apache-2.0"] | 1 | 2019-09-14T08:33:38.000Z | 2019-09-14T08:33:38.000Z |
#!/usr/bin/env python
# ========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.io.SequenceFile import CompressionType
from hadoop.io import Text
from hadoop.io import SequenceFile
def writeData(writer):
key = Text()
value = Text()
key.set('Key')
value.set('Value')
writer.append(key, value)
if __name__ == '__main__':
writer = SequenceFile.createWriter('test.seq', Text, Text)
writeData(writer)
writer.close()
| avg_line_length: 35.027778 | max_line_length: 74 | alphanum_fraction: 0.701031 |
| hexsha: 394263fbb9e84d42c2876dcabae02be673043326 | size: 688 | ext: py | lang: Python |
| max_stars: AzusaBot/Azusa/__init__.py | ethpch/AzusaBot_old | 067e4985d4b8d60c98db758f3e103a4f409c9599 | ["MIT"] | 2 | 2021-09-23T14:43:00.000Z | 2021-09-26T12:01:07.000Z |
| max_issues: AzusaBot/Azusa/__init__.py | ethpch/AzusaBot_old | 067e4985d4b8d60c98db758f3e103a4f409c9599 | ["MIT"] | null | null | null |
| max_forks: AzusaBot/Azusa/__init__.py | ethpch/AzusaBot_old | 067e4985d4b8d60c98db758f3e103a4f409c9599 | ["MIT"] | null | null | null |
from os import path, sep
import logging
import nonebot
from Azusa.log import initLogConf
def init(config) -> nonebot.NoneBot:
nonebot.init(config)
initLogConf()
logging.getLogger('Azusa').info('<Init> Azusa initializes successfully.')
bot = nonebot.get_bot()
bot.config.resources = path.join(path.dirname(__file__), 'resources')
import Azusa.common
import Azusa.data
for mod in config.LOAD_MODULES:
if '.' in mod:
t = sep.join(mod.split('.'))
else:
t = mod
nonebot.load_plugins(
path.join(path.dirname(__file__), 'modules', t),
f'Azusa.modules.{mod}'
)
return bot
| avg_line_length: 26.461538 | max_line_length: 77 | alphanum_fraction: 0.617733 |
| hexsha: 463b18e154cab11eb3871912f71580e7f6055d2b | size: 5,246 | ext: py | lang: Python |
| max_stars: docs/source/conf.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | ["Apache-2.0"] | 96 | 2021-09-05T06:29:34.000Z | 2021-11-07T15:22:54.000Z |
| max_issues: docs/source/conf.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | ["Apache-2.0"] | 188 | 2021-09-06T15:59:58.000Z | 2021-11-17T09:34:16.000Z |
| max_forks: docs/source/conf.py | Pacman1984/etna | 9b3ccb980e576d56858f14aca2e06ce2957b0fa9 | ["Apache-2.0"] | 8 | 2021-09-06T09:18:35.000Z | 2021-11-11T21:18:39.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Some code in conf.py and `_templates` are used from `github.com/jdb78/pytorch-forecasting/tree/v0.9.2/docs/source` under MIT License
import os
from pathlib import Path
import shutil
import sys
import toml
from sphinx.application import Sphinx
from sphinx.ext.autosummary import Autosummary
SOURCE_PATH = Path(os.path.dirname(__file__)) # noqa # docs source
PROJECT_PATH = SOURCE_PATH.joinpath("../..") # noqa # project root
"""try:
import git
repo = git.Repo(PROJECT_PATH)
COMMIT_SHORT_SHA = str(repo.active_branch.commit)[:8]
CI_COMMIT_BRANCH = str(repo.active_branch)
except:
COMMIT_SHORT_SHA = os.environ["CI_COMMIT_SHORT_SHA"]
CI_COMMIT_BRANCH = os.environ["CI_COMMIT_BRANCH"]"""
sys.path.insert(0, str(PROJECT_PATH)) # noqa
import etna # isort:skip
# -- Project information -----------------------------------------------------
project = 'ETNA Time Series Library'
copyright = '2021, etna-tech@tinkoff.ru'
author = 'etna-tech@tinkoff.ru'
# The full version, including alpha/beta/rc tags
with open(PROJECT_PATH / "pyproject.toml", "r") as f:
pyproject_toml = toml.load(f)
"""if CI_COMMIT_BRANCH == "master":
release = f"ID {COMMIT_SHORT_SHA}"
else:
release = pyproject_toml["tool"]["poetry"]["version"]"""
release = pyproject_toml["tool"]["poetry"]["version"]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"nbsphinx",
"myst_parser",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx-mathjax-offline",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
autodoc_typehints = "both"
autodoc_typehints_description_target = "all"
add_module_names = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["**/.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# setup configuration
def skip(app, what, name, obj, skip, options):
"""
Document __init__ methods
"""
if name == "__init__":
return True
return skip
apidoc_output_folder = SOURCE_PATH.joinpath("api")
PACKAGES = [etna.__name__]
def get_by_name(string: str):
"""
Import by name and return imported module/function/class
Args:
string (str): module/function/class to import, e.g. 'pandas.read_csv' will return read_csv function as
defined by pandas
Returns:
imported object
"""
class_name = string.split(".")[-1]
module_name = ".".join(string.split(".")[:-1])
if module_name == "":
return getattr(sys.modules[__name__], class_name)
mod = __import__(module_name, fromlist=[class_name])
return getattr(mod, class_name)
class ModuleAutoSummary(Autosummary):
def get_items(self, names):
new_names = []
for name in names:
mod = sys.modules[name]
mod_items = getattr(mod, "__all__", mod.__dict__)
for t in mod_items:
if "." not in t and not t.startswith("_"):
obj = get_by_name(f"{name}.{t}")
if hasattr(obj, "__module__"):
mod_name = obj.__module__
t = f"{mod_name}.{t}"
if t.startswith("etna"):
new_names.append(t)
new_items = super().get_items(sorted(new_names, key=lambda x: x.split(".")[-1]))
return new_items
def setup(app: Sphinx):
app.connect("autodoc-skip-member", skip)
app.add_directive("moduleautosummary", ModuleAutoSummary)
app.add_js_file("https://buttons.github.io/buttons.js", **{"async": "async"})
autodoc_member_order = "groupwise"
autoclass_content = "both"
# autosummary
autosummary_generate = True
shutil.rmtree(SOURCE_PATH.joinpath("api"), ignore_errors=True)
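# Hedged illustration (not part of the original configuration): get_by_name()
# resolves a dotted path by importing the module part and returning the final
# attribute, e.g.
def _example_get_by_name():
    assert get_by_name("os.path.join") is os.path.join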
| avg_line_length: 31.987805 | max_line_length: 134 | alphanum_fraction: 0.65955 |
| hexsha: 4c20617fda19b2aad3f3ad1b7c56b96e56ad0a54 | size: 32,204 | ext: py | lang: Python |
| max_stars: theano/sandbox/rng_mrg.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | ["BSD-3-Clause"] | 1 | 2015-11-05T13:58:11.000Z | 2015-11-05T13:58:11.000Z |
| max_issues: theano/sandbox/rng_mrg.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | ["BSD-3-Clause"] | null | null | null |
| max_forks: theano/sandbox/rng_mrg.py | arnaudsj/Theano | 41103b5d158739e4147428ce776fb5716062d4a8 | ["BSD-3-Clause"] | null | null | null |
"""
Implementation of MRG31k3p random number generator for Theano
Generator code in SSJ package (L'Ecuyer & Simard)
http://www.iro.umontreal.ca/~simardr/ssj/indexe.html
"""
import sys, warnings
import numpy
from theano import Op, Apply, shared, config, Variable
from theano.tensor import (raw_random, TensorType, as_tensor_variable,
get_vector_length, cast, opt, scal)
from theano.tensor import zeros_like, sqrt, log, sin, cos, join, prod
from theano.compile import optdb
from theano.gof import local_optimizer
from theano.gof.python25 import all
import multinomial
from theano.sandbox.cuda import cuda_available, cuda_enabled
if cuda_available:
from theano.sandbox.cuda import CudaNdarrayType, float32_shared_constructor
def mulmod(a, b, c, m):
r = numpy.int32((numpy.int64(a)*b + c) % m)
if r >= 0:
return r
else:
return r+m
def matVecModM(A, s, m):
# return (A * s) % m
err_orig = numpy.seterr(over='ignore')
try:
x = numpy.zeros_like(s)
for i in xrange(len(x)):
for j in xrange(len(s)):
x[i] = mulmod(A[i][j], s[j], x[i], m)
return x
finally:
numpy.seterr(**err_orig)
def multMatVect(v, A, m1, B, m2):
#multiply the first half of v by A with a modulo of m1
#and the second half by B with a modulo of m2
r = numpy.zeros_like(v)
r[:3] = matVecModM(A, v[:3], m1)
r[3:] = matVecModM(B, v[3:], m2)
return r
#MRG31k3p
#generator constants :
M1 = numpy.int32(2147483647) #2^31 - 1
M2 = numpy.int32(2147462579) #2^31 - 21069
MASK12 = numpy.int32(511) #2^9 - 1
MASK13 = numpy.int32(16777215) #2^24 - 1
MASK2 = numpy.int32(65535) #2^16 - 1
MULT2 = numpy.int32(21069)
NORM = 4.656612873077392578125e-10; #1./2^31
A1p0 = numpy.asarray([[0, 4194304, 129], [1, 0, 0], [0, 1, 0]])
A2p0 = numpy.asarray([[32768, 0, 32769], [1, 0, 0], [0, 1, 0]])
A1p72 = numpy.asarray([[1516919229, 758510237, 499121365],
[1884998244, 1516919229, 335398200],
[601897748, 1884998244, 358115744]])
A2p72 = numpy.asarray([[1228857673, 1496414766, 954677935],
[1133297478, 1407477216, 1496414766],
[2002613992, 1639496704, 1407477216]])
A1p134 = numpy.asarray(
[[1702500920, 1849582496, 1656874625],
[828554832, 1702500920, 1512419905],
[1143731069, 828554832, 102237247]])
A2p134 = numpy.asarray(
[[796789021, 1464208080, 607337906],
[1241679051, 1431130166, 1464208080],
[1401213391, 1178684362, 1431130166]])
def ff_2p134(rstate):
return multMatVect(rstate, A1p134, M1, A2p134, M2)
def ff_2p72(rstate):
return multMatVect(rstate, A1p72, M1, A2p72, M2)
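# Hedged illustration (not part of the original module): an MRG31k3p stream
# state is six int32 words; ff_2p72 / ff_2p134 jump such a state ahead by 2**72
# and 2**134 draws respectively, which is how independent substreams are spaced.
# The seed below is illustrative only.
def _example_skip_ahead():
    state = numpy.asarray([12345] * 6, dtype='int32')
    return ff_2p72(ff_2p134(state))  # advanced by 2**134 + 2**72 draws in total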
def mrg_next_value(rstate, new_rstate):
err_orig = numpy.seterr(over='ignore')
try:
x11, x12, x13, x21, x22, x23 = rstate
assert type(x11) == numpy.int32
i0, i7, i9, i15, i16, i22, i24 = [numpy.int32(i)
for i in (0,7, 9, 15, 16, 22, 24)]
#first component
y1 = (((x12 & MASK12) << i22) + (x12 >> i9)
+ ((x13 & MASK13) << i7) + (x13 >> i24))
assert type(y1) == numpy.int32
if (y1 < 0 or y1 >= M1): #must also check overflow
y1 -= M1;
y1 += x13;
if (y1 < 0 or y1 >= M1):
y1 -= M1;
x13 = x12;
x12 = x11;
x11 = y1;
#second component
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
assert type(y1) == numpy.int32
if (y1 < 0 or y1 >= M2):
y1 -= M2;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
assert type(y2) == numpy.int32
if (y2 < 0 or y2 >= M2):
y2 -= M2;
y2 += x23;
if (y2 < 0 or y2 >= M2):
y2 -= M2;
y2 += y1;
if (y2 < 0 or y2 >= M2):
y2 -= M2;
x23 = x22;
x22 = x21;
x21 = y2;
# Must never return either 0 or M1+1
new_rstate[...] = [x11, x12, x13, x21, x22, x23]
assert new_rstate.dtype == numpy.int32
if (x11 <= x21):
return (x11 - x21 + M1) * NORM
else:
return (x11 - x21) * NORM
finally:
numpy.seterr(**err_orig)
class mrg_uniform_base(Op):
def __init__(self, output_type, inplace=False):
Op.__init__(self)
self.output_type = output_type
self.inplace=inplace
if inplace:
self.destroy_map = {0:[0]}
self.warned_numpy_version = False
def __eq__(self, other):
return type(self) == type(other) \
and self.output_type == other.output_type \
and self.inplace == other.inplace
def __hash__(self):
return hash(type(self)) ^ hash(self.output_type) ^ hash(self.inplace)
def __str__(self):
if self.inplace:
s = "inplace"
else: s = "no_inplace"
return self.__class__.__name__+"{%s,%s}"%(self.output_type,s)
def make_node(self, rstate, size):
# error checking slightly redundant here, since
# this op should not be called directly.
#
# call through MRG_RandomStreams instead.
return Apply(self,
[rstate, size],
[rstate.type(), self.output_type()])
def grad(self,inputs,ograd):
return [None for i in inputs]
class mrg_uniform(mrg_uniform_base):
#CPU VERSION
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(TensorType(dtype, (False,)*ndim))
return op(rstate, cast(v_size, 'int32'))
def perform(self, node, inp, out):
rstate, size = inp
o_rstate, o_sample = out
numpy_version=numpy.__version__.split('.')
if not self.warned_numpy_version and int(numpy_version[0])<=1 and int(numpy_version[1])<3:
            print "Warning: you must use numpy version 1.3.0 or higher with the python version of this op. Older numpy versions leak memory."
self.warned_numpy_version = True
n_elements = 1
rstate = numpy.asarray(rstate) # bring state from GPU if necessary
if not self.inplace:
rstate = rstate.copy()
for s in size:
n_elements *= s
n_streams,_ = rstate.shape
rval = numpy.zeros(n_elements, dtype=self.output_type.dtype)
for i in xrange(n_elements):
sample = mrg_next_value(rstate[i%n_streams], rstate[i%n_streams])
rval[i] = sample
o_rstate[0] = node.outputs[0].type.filter(rstate) # send to GPU if necessary
o_sample[0] = node.outputs[1].type.filter(rval.reshape(size))# send to GPU if necessary
def c_code(self, node, name, inp, out, sub):
rstate, size = inp
o_rstate, o_sample = out
if self.inplace:
o_rstate_requirement = 'NPY_C_CONTIGUOUS|NPY_ALIGNED'
else:
o_rstate_requirement = 'NPY_ENSURECOPY|NPY_C_CONTIGUOUS|NPY_ALIGNED'
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
NORM = '4.6566126e-10f' #numpy.float32(1.0/(2**31+65))
# this was determined by finding the biggest number such that
# numpy.float32(number * M1) < 1.0
else:
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
//////// <code generated by mrg_uniform>
npy_intp odims[%(ndim)s];
int n_elements = 1;
int n_streams = 0;
int must_alloc_sample = ((NULL == %(o_sample)s) || (%(o_sample)s->nd != %(ndim)s));
%(otype)s * sample_data;
npy_int32 * state_data;
const npy_int32 i0 = 0;
const npy_int32 i7 = 7;
const npy_int32 i9 = 9;
const npy_int32 i15 = 15;
const npy_int32 i16 = 16;
const npy_int32 i22 = 22;
const npy_int32 i24 = 24;
const npy_int32 M1 = 2147483647; //2^31 - 1
const npy_int32 M2 = 2147462579; //2^31 - 21069
const npy_int32 MASK12 = 511; //2^9 - 1
const npy_int32 MASK13 = 16777215; //2^24 - 1
const npy_int32 MASK2 = 65535; //2^16 - 1
const npy_int32 MULT2 = 21069;
if (%(size)s->nd != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (%(size)s->dimensions[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, int(%(size)s->dimensions[0]));
%(fail)s
}
if (%(size)s->descr->type_num != PyArray_INT32)
{
PyErr_SetString(PyExc_ValueError, "size must be int32");
%(fail)s
}
for (int i = 0; i < %(ndim)s; ++i)
{
odims[i] = ((npy_int32*)(%(size)s->data + %(size)s->strides[0] * i))[0];
n_elements *= odims[i];
must_alloc_sample = must_alloc_sample || (%(o_sample)s->dimensions[i] != odims[i]);
//fprintf(stderr, "size %%i %%i\\n", i, (int)odims[i]);
// TODO CHECK STRIDES OF o_sample?
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = (PyArrayObject*)PyArray_SimpleNew(%(ndim)s, odims, %(o_type_num)s);
if(!%(o_sample)s) {
PyErr_SetString(PyExc_MemoryError, "failed to alloc mrg_uniform output");
%(fail)s
}
}
Py_XDECREF(%(o_rstate)s);
%(o_rstate)s = (PyArrayObject*)PyArray_FromAny(py_%(rstate)s, NULL, 0, 0, %(o_rstate_requirement)s,NULL);
if (%(o_rstate)s->nd != 2)
{
PyErr_SetString(PyExc_ValueError, "rstate must be matrix");
%(fail)s
}
if (%(o_rstate)s->dimensions[1] != 6)
{
PyErr_Format(PyExc_ValueError, "rstate must have 6 columns");
%(fail)s
}
if (%(o_rstate)s->descr->type_num != PyArray_INT32)
{
PyErr_SetString(PyExc_ValueError, "rstate must be int32");
%(fail)s
}
n_streams = %(o_rstate)s->dimensions[0];
sample_data = (%(otype)s *) %(o_sample)s->data;
state_data = (npy_int32 *) %(o_rstate)s->data;
for (int i = 0; i < n_elements; ++i)
{
npy_int32 * state_data_i = state_data + (i%%n_streams)*6;
npy_int32 y1, y2, x11, x12, x13, x21, x22, x23;
x11 = state_data_i[0];
x12 = state_data_i[1];
x13 = state_data_i[2];
x21 = state_data_i[3];
x22 = state_data_i[4];
x23 = state_data_i[5];
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
if ((y1 < 0 || y1 >= M1)) //must also check overflow
y1 -= M1;
y1 += x13;
if ((y1 < 0 || y1 >= M1))
y1 -= M1;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
if (y1 < 0 || y1 >= M2)
y1 -= M2;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
if (y2 < 0 || y2 >= M2)
y2 -= M2;
y2 += x23;
if (y2 < 0 || y2 >= M2)
y2 -= M2;
y2 += y1;
if (y2 < 0 || y2 >= M2)
y2 -= M2;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
assert((x11 - x21 + M1) <= M1);
sample_data[i] = (x11 - x21 + M1) * %(NORM)s;
}
else
{
assert(x11 - x21 <= M1);
sample_data[i] = (x11 - x21) * %(NORM)s;
}
state_data_i[0]= x11;
state_data_i[1]= x12;
state_data_i[2]= x13;
state_data_i[3]= x21;
state_data_i[4]= x22;
state_data_i[5]= x23;
}
//////// </ code generated by mrg_uniform>
""" %locals()
def c_code_cache_version(self):
return (1,)
class GPU_mrg_uniform(mrg_uniform_base):
#GPU VERSION
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(CudaNdarrayType((False,)*ndim))
return op(rstate, cast(v_size, 'int32'))
def c_support_code_apply(self, node, nodename):
if self.output_type.dtype == 'float32':
otype = 'float'
NORM = '4.6566126e-10f' #numpy.float32(1.0/(2**31+65))
# this was determined by finding the biggest number such that
# numpy.float32(number * M1) < 1.0
else:
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
static int %(nodename)s_printed_warning = 0;
static __global__ void %(nodename)s_mrg_uniform(
%(otype)s*sample_data,
npy_int32*state_data,
const int Nsamples,
const int Nstreams_used)
{
const npy_int32 i0 = 0;
const npy_int32 i7 = 7;
const npy_int32 i9 = 9;
const npy_int32 i15 = 15;
const npy_int32 i16 = 16;
const npy_int32 i22 = 22;
const npy_int32 i24 = 24;
const npy_int32 M1 = 2147483647; //2^31 - 1
const npy_int32 M2 = 2147462579; //2^31 - 21069
const npy_int32 MASK12 = 511; //2^9 - 1
const npy_int32 MASK13 = 16777215; //2^24 - 1
const npy_int32 MASK2 = 65535; //2^16 - 1
const npy_int32 MULT2 = 21069;
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
npy_int32 y1, y2, x11, x12, x13, x21, x22, x23;
if (idx < Nstreams_used)
{
x11 = state_data[idx*6+0];
x12 = state_data[idx*6+1];
x13 = state_data[idx*6+2];
x21 = state_data[idx*6+3];
x22 = state_data[idx*6+4];
x23 = state_data[idx*6+5];
for (int i = idx; i < Nsamples; i += Nstreams_used)
{
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
y1 += x13;
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
y1 -= (y1 < 0 || y1 >= M2) ? M2 : 0;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += x23;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += y1;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
sample_data[i] = (x11 - x21 + M1) * %(NORM)s;
}
else
{
sample_data[i] = (x11 - x21) * %(NORM)s;
}
}
state_data[idx*6+0]= x11;
state_data[idx*6+1]= x12;
state_data[idx*6+2]= x13;
state_data[idx*6+3]= x21;
state_data[idx*6+4]= x22;
state_data[idx*6+5]= x23;
}
}
""" %locals()
def c_code(self, node, nodename, inp, out, sub):
rstate, size = inp
o_rstate, o_sample = out
inplace = int(self.inplace)
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
else:
otype = 'double'
SYNC="CNDA_THREAD_SYNC";
return """
//////// <code generated by mrg_uniform>
int odims[%(ndim)s];
int n_elements = 1;
int n_streams, n_streams_used_in_this_call;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| !CudaNdarray_Check(py_%(o_sample)s)
|| (%(o_sample)s->nd != %(ndim)s));
if (%(size)s->nd != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (%(size)s->dimensions[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, %(size)s->dimensions[0]);
%(fail)s
}
if (%(size)s->descr->type_num != PyArray_INT32)
{
PyErr_SetString(PyExc_ValueError, "size must be int32");
%(fail)s
}
for (int i = 0; i < %(ndim)s; ++i)
{
odims[i] = ((npy_int32*)(%(size)s->data + %(size)s->strides[0] * i))[0];
n_elements *= odims[i];
must_alloc_sample = (must_alloc_sample
|| CudaNdarray_HOST_DIMS(%(o_sample)s)[i] != odims[i]);
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = (CudaNdarray*)CudaNdarray_NewDims(%(ndim)s, odims);
if(!%(o_sample)s)
{
%(fail)s;
}
}
if (!CudaNdarray_Check(py_%(rstate)s))
{
PyErr_Format(PyExc_ValueError, "rstate must be cudandarray");
%(fail)s;
}
Py_XDECREF(%(o_rstate)s);
if (%(inplace)s)
{
Py_INCREF(%(rstate)s);
%(o_rstate)s = %(rstate)s;
}
else
{
%(o_rstate)s = (CudaNdarray*)CudaNdarray_Copy(%(rstate)s);
}
if (%(o_rstate)s->nd != 1)
{
PyErr_SetString(PyExc_ValueError, "rstate must be vector");
%(fail)s;
}
if (CudaNdarray_HOST_DIMS(%(o_rstate)s)[0] %% 6)
{
PyErr_Format(PyExc_ValueError, "rstate len must be multiple of 6");
%(fail)s;
}
n_streams = CudaNdarray_HOST_DIMS(%(o_rstate)s)[0]/6;
n_streams_used_in_this_call = std::min(n_streams, n_elements);
{
unsigned int threads_per_block = std::min((unsigned int)n_streams_used_in_this_call, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
unsigned int n_blocks = std::min(ceil_intdiv((unsigned int)n_streams_used_in_this_call, threads_per_block), (unsigned int)NUM_VECTOR_OP_BLOCKS);
if (threads_per_block * n_blocks < n_streams)
{
if (! %(nodename)s_printed_warning)
fprintf(stderr, "WARNING: unused streams above %%i (Tune GPU_mrg get_n_streams)\\n", threads_per_block * n_blocks );
%(nodename)s_printed_warning = 1;
}
%(nodename)s_mrg_uniform<<<n_blocks,threads_per_block>>>(
CudaNdarray_DEV_DATA(%(o_sample)s),
(npy_int32*)CudaNdarray_DEV_DATA(%(o_rstate)s),
n_elements, n_streams_used_in_this_call);
}
%(SYNC)s;
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s.\\n", "mrg_uniform", cudaGetErrorString(err));
%(fail)s;
}
}
//////// </ code generated by mrg_uniform>
""" %locals()
def c_code_cache_version(self):
return (5,)
def guess_n_streams(size, warn=True):
"""
Return a guess at a good number of streams.
:param warn: If True, warn when a guess cannot be made (in which case
we return 30 * 256).
"""
# TODO: a smart way of choosing the number of streams, see #612.
# Note that this code was moved out of `MRG_RandomStreams` so that it can
# be easily accessed from tests, where we want to disable the warning.
if (isinstance(size, (tuple, list)) and
all([isinstance(i, int) for i in size])):
# We can make a guess.
r = 1
for s in size:
r *= s
if r > 6:
r = r/6 # chosen as fastest for rbm_benchmark
return r
else:
if warn:
warnings.warn((
"MRG_RandomStreams Can't determine #streams from "
"size (%s), guessing 30*256") % str(size),
stacklevel=3)
return 30 * 256
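# Worked example of the heuristic above (illustrative only; the values assume
# plain Python int sizes, as required by the isinstance check):
#
#     guess_n_streams((500, 120))  # 500*120 = 60000 elements -> 60000/6 = 10000 streams
#     guess_n_streams((2, 2))      # 4 elements, not > 6       -> 4 streams
#     guess_n_streams(x.shape)     # symbolic shape -> warns and returns 30*256 = 7680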
class MRG_RandomStreams(object):
"""Module component with similar interface to numpy.random (numpy.random.RandomState)"""
state_updates = []
"""A list of pairs of the form (input_r, output_r), representing the
update rules of all the random states generated by this RandomStreams"""
def updates(self):
return list(self.state_updates)
def __init__(self, seed=12345, use_cuda=None):
"""
:type seed: int or list of 6 int.
:param seed: a default seed to initialize the random state.
If a single int is given, it will be replicated 6 times.
The first 3 values of the seed must all be less than M1 = 2147483647,
and not all 0; and the last 3 values must all be less than
M2 = 2147462579, and not all 0.
"""
super(MRG_RandomStreams, self).__init__()
if isinstance(seed, int):
if seed == 0:
raise ValueError('seed should not be 0', seed)
elif seed >= M2:
raise ValueError('seed should be less than %i' % M2, seed)
self.rstate = numpy.asarray([seed]*6, dtype='int32')
elif len(seed)==6:
if seed[0] == 0 and seed[1] == 0 and seed[2] == 0:
raise ValueError('The first 3 values of seed should not be all 0', seed)
if seed[3] == 0 and seed[4] == 0 and seed[5] == 0:
raise ValueError('The last 3 values of seed should not be all 0', seed)
if seed[0] >= M1 or seed[1] >= M1 or seed[2] >= M1:
raise ValueError('The first 3 values of seed should be less than %i' % M1, seed)
if seed[3] >= M2 or seed[4] >= M2 or seed[5] >= M2:
raise ValueError('The last 3 values of seed should be less than %i' % M2, seed)
self.rstate = numpy.asarray(seed, dtype='int32')
else:
raise TypeError("seed should be 1 integer or 6 integers")
if use_cuda is None:
self.use_cuda = cuda_enabled
else:
self.use_cuda = use_cuda
def inc_rstate(self):
"""Update self.rstate to be skipped 2^134 steps forward to the next stream start"""
self.rstate = ff_2p134(self.rstate)
assert self.rstate.dtype == numpy.int32
def get_substream_rstates(self, n_streams, inc_rstate=True):
"""Initialize a matrix in which each row is a MRG stream state,
and they are spaced by 2**72 samples.
"""
assert n_streams < 2**72
assert n_streams > 0
rval = numpy.zeros((n_streams,6), dtype='int32')
rval[0] = self.rstate
for i in xrange(1, n_streams):
rval[i] = ff_2p72(rval[i-1])
if inc_rstate:
self.inc_rstate()
return rval
def n_streams(self, size):
return guess_n_streams(size, warn=True)
def pretty_return(self, node_rstate, new_rstate, sample):
sample.rstate = node_rstate
sample.update = (node_rstate, new_rstate)
self.state_updates.append((node_rstate, new_rstate))
node_rstate.default_update = new_rstate
return sample
def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=None,
nstreams=None):
"""
Sample a tensor of the given size whose elements are drawn from a
uniform distribution between low and high.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing
information.
:param low: Lower bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``low`` will be cast into dtype.
:param high: Higher bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``high`` will be cast into dtype.
:param size: Can be a list of integer or Theano variable
(ex: the shape of other Theano Variable)
:param dtype: The output data type. If dtype is not specified, it will
be inferred from the dtype of low and high, but will be at least as
precise as floatX.
"""
low = as_tensor_variable(low)
high = as_tensor_variable(high)
if dtype is None:
dtype = scal.upcast(config.floatX, low.dtype, high.dtype)
low = cast(low, dtype=dtype)
high = cast(high, dtype=dtype)
if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i,int) or isinstance(i,Variable)
for i in size]), msg
else:
msg = "size must be a tuple of int or a Theano variable"
assert isinstance(size, Variable) and size.ndim==1, msg
if nstreams is None:
nstreams = self.n_streams(size)
if self.use_cuda and dtype=='float32':
rstates = self.get_substream_rstates(nstreams)
rstates = rstates.flatten()
# HACK - we use the fact that int32 and float32 have the same size
# to sneak ints into the CudaNdarray type.
# these *SHOULD NEVER BE USED AS FLOATS*
tmp_float_buf = numpy.frombuffer(rstates.data, dtype='float32')
assert tmp_float_buf.shape == rstates.shape
assert tmp_float_buf.data[:24] == rstates.data[:24]
# transfer to device
node_rstate = float32_shared_constructor(tmp_float_buf)
assert isinstance(node_rstate.type, CudaNdarrayType)
# we can't use the normal mrg_uniform constructor + later
# optimization
# because of the tmp_float_buf hack above. There is
# currently no Theano node that will do a frombuffer
# reinterpretation.
u = self.pretty_return(node_rstate,
*GPU_mrg_uniform.new(node_rstate, ndim, dtype, size))
else:
node_rstate = shared(self.get_substream_rstates(nstreams))
u = self.pretty_return(node_rstate,
*mrg_uniform.new(node_rstate, ndim, dtype, size))
r = u * (high-low) + low
if u.type.broadcastable != r.type.broadcastable:
raise NotImplementedError( 'Increase the size to match the broadcasting pattern of `low` and `high` arguments')
assert r.dtype == dtype
return r
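    # Usage sketch for uniform() (illustrative; assumes `import theano` and a
    # working Theano install in the calling code):
    #
    #     srng = MRG_RandomStreams(seed=1234)
    #     u = srng.uniform(size=(2, 3), low=-1.0, high=1.0)  # symbolic draw in [-1, 1)
    #     f = theano.function([], u)
    #     f()  # a fresh (2, 3) sample on every call; the shared rstate is updated via default_update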
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype='int64',
nstreams=None):
if n == 1:
if dtype == 'float32' and self.use_cuda:
x = self.uniform(size=size, dtype=dtype, nstreams=nstreams)
else:
x = self.uniform(size=size, nstreams=nstreams)
return cast(x < p, dtype)
else:
raise NotImplementedError("MRG_RandomStreams.binomial with n > 1")
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
nstreams=None):
"""
Sample `n` times (currently `n` must be 1) from a multinomial
distribution defined by the probabilities pvals.
Example: pvals = [[.98, .01, .01], [.01, .98, .01]] will probably result
in [[1,0,0],[0,1,0]].
.. note::
`size` and `ndim` are only there to keep the same signature as the other
methods (uniform, binomial, normal, etc.).
todo : adapt multinomial to take that into account
"""
if pvals is None:
raise TypeError("You have to specify pvals")
pvals = as_tensor_variable(pvals)
if n == 1 and pvals.ndim == 2:
ndim, size, bcast = raw_random._infer_ndim_bcast(
ndim, size, pvals[:,0])
assert ndim==1
bcast = bcast+(pvals.type.broadcastable[-1],)
unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
op = multinomial.MultinomialFromUniform(dtype)
return op(pvals, unis)
else:
raise NotImplementedError(("MRG_RandomStreams.multinomial only"
" implemented with n == 1 and pvals.ndim = 2"))
def normal(self, size=None, avg=0.0, std=1.0, ndim=None,
dtype=None, nstreams=None):
"""
:param size: Can be a list of integers or Theano variables (ex: the
shape of another Theano Variable)
:param dtype: The output data type. If dtype is not specified, it will
be inferred from the dtype of avg and std, but will be at least as
precise as floatX.
:param nstreams: Number of streams.
"""
# We need an even number of ]0,1[ samples. Then we split them
# in two halves. First half becomes our U1's for Box-Muller,
# second half our U2's. See Wikipedia page:
# http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
avg = as_tensor_variable(avg)
std = as_tensor_variable(std)
if dtype is None:
dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)
avg = cast(avg, dtype)
std = cast(std, dtype)
evened = False
constant = False
if isinstance(size, tuple) and all([isinstance(i,int) for i in size]):
constant = True
n_samples = numpy.prod(size)
if n_samples % 2 == 1:
n_samples += 1
evened = True
else:
#if even, don't change, if odd, +1
n_samples = prod(size)+(prod(size)%2)
flattened = self.uniform(size=(n_samples,), dtype=dtype,
nstreams=nstreams)
if constant:
U1 = flattened[:n_samples // 2]
U2 = flattened[n_samples // 2:]
else:
U1 = flattened[:prod(flattened.shape) // 2]
U2 = flattened[prod(flattened.shape) // 2:]
#normal_samples = zeros_like(flattened)
sqrt_ln_U1 = sqrt(-2.0 * log(U1))
# TypeError: 'TensorVariable' object does not support item assignment
# so this doesn't work...
#normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
#normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)
# so trying this instead
first_half = sqrt_ln_U1 * cos(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
second_half = sqrt_ln_U1 * sin(numpy.array(2.0 * numpy.pi, dtype=dtype)*U2)
normal_samples = join(0, first_half, second_half)
final_samples = None
if evened:
final_samples = normal_samples[:-1]
elif constant:
final_samples = normal_samples
else:
final_samples = normal_samples[:prod(size)]
if size:
final_samples = final_samples.reshape(size)
final_samples = avg + std * final_samples
assert final_samples.dtype == dtype
return final_samples
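    # Box-Muller reference for the method above (illustrative): given two
    # independent uniforms U1, U2 in ]0,1[, it forms
    #     z0 = sqrt(-2*ln(U1)) * cos(2*pi*U2)
    #     z1 = sqrt(-2*ln(U1)) * sin(2*pi*U2)
    # which are two independent standard normal samples, then rescales them
    # with `avg + std * z`.  The same idea in plain numpy:
    #
    #     u1, u2 = numpy.random.uniform(size=2)
    #     z0 = numpy.sqrt(-2.0 * numpy.log(u1)) * numpy.cos(2.0 * numpy.pi * u2)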
@local_optimizer([None])
def mrg_random_make_inplace(node):
op = node.op
if isinstance(op, mrg_uniform) and not op.inplace:
# op might be gpu version
new_op = op.__class__(op.output_type, inplace=True)
return new_op.make_node(*node.inputs).outputs
return False
optdb.register('random_make_inplace_mrg', opt.in2out(mrg_random_make_inplace, ignore_newtrees=True), 99, 'fast_run', 'inplace')
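# Usage sketch for the binomial() and normal() helpers above (illustrative;
# assumes a working Theano install and the historical module path for this file):
#
#     import theano
#     from theano.sandbox.rng_mrg import MRG_RandomStreams
#     srng = MRG_RandomStreams(seed=1234)
#     b = srng.binomial(size=(4,), p=0.25)          # 0/1 draws: cast(uniform < p, 'int64')
#     n = srng.normal(size=(4,), avg=0.0, std=2.0)  # Box-Muller on top of uniform()
#     f = theano.function([], [b, n])
#     f()  # fresh samples on every call; the shared rstate advances as a side effect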
| 36.022371
| 156
| 0.545025
|
22d1eb662ee1ea65f5c4d5921bb3c75aefa664d7
| 2,352
|
py
|
Python
|
AlgorithmQuestionAnswering/SparqlEndpoint.py
|
zointblackbriar/QuestionAnswering
|
319c3623ced22254d75c2918929a875090bd2bf5
|
[
"MIT"
] | 1
|
2019-03-04T19:44:10.000Z
|
2019-03-04T19:44:10.000Z
|
AlgorithmQuestionAnswering/SparqlEndpoint.py
|
zointblackbriar/QuestionAnswering
|
319c3623ced22254d75c2918929a875090bd2bf5
|
[
"MIT"
] | null | null | null |
AlgorithmQuestionAnswering/SparqlEndpoint.py
|
zointblackbriar/QuestionAnswering
|
319c3623ced22254d75c2918929a875090bd2bf5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from SPARQLWrapper import SPARQLWrapper, JSON
import rdflib
from rdflib.graph import Graph
# from rdflib.plugins import sparql
# from rdflib import URIRef, Literal
from rdflib.plugin import register, Serializer, Parser
register('ttl', Parser, 'rdflib.plugins.parsers.notation3', 'TurtleParser')
import logging
# import colored_logs
import time
from rdflib.plugins.sparql import prepareQuery
logging.info('Starting logger for ...') #or call logging.basicConfig
# Set log name
logger = logging.getLogger(__name__)
class SPARQLEndpoint():
def __init__(self, endpoint, setQuery, paramFormat, filename=None):
self._endpoint = endpoint
self._setQuery = setQuery
self._paramFormat = paramFormat
self._filename = filename
self._sparql = SPARQLWrapper(self._endpoint)
def SparqlInit(self):
logger.info("SparqlInit")
try:
results = self._sparql.query().convert()
print(results)
except Exception as ex:
logger.exception('SparqlInit exception: ')
def rdfParser(self, param_format):
logger.info("RDF Parser")
try:
graph = Graph()
#format is xml not rdf
print(self._filename)
graph.parse(self._filename, format=param_format)
print(len(graph))
import pprint
for stmt in graph:
pprint.pprint(stmt)
except Exception as ex:
logger.exception('RDF Parser error:')
def sparqlQueryForLocalSource(self):
qresult = []
try:
logger.info("sparqlQueryForLocalSource")
print(self._paramFormat)
print(self._filename)
print(self._setQuery)
graph = rdflib.Graph()
graph.load(self._filename, format=self._paramFormat)
preparedQuery = prepareQuery(self._setQuery)
qresult = graph.query(preparedQuery)
print(type(qresult))
for row in qresult:
print(str(row))
except Exception as ex:
logger.exception('Sparql for local source exception: ')
return qresult
def sparqlQueryRemote(self):
time.sleep(5)
results = self._sparql.query().convert()
time.sleep(5)
return results
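# Usage sketch (illustrative; the endpoint URL, query string and file name below
# are placeholders, and SPARQLWrapper normally needs setQuery()/setReturnFormat()
# to be called before query() will return anything useful):
#
#     query = "SELECT ?s WHERE { ?s ?p ?o } LIMIT 5"
#     endpoint = SPARQLEndpoint("https://dbpedia.org/sparql", query, "ttl",
#                               filename="data.ttl")
#     endpoint.rdfParser("ttl")                    # parse and pretty-print the local file
#     rows = endpoint.sparqlQueryForLocalSource()  # run the query against the local graph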
| 29.772152
| 75
| 0.629252
|
ebb827a6dad65325cbcf822908634a155168f308
| 48,702
|
py
|
Python
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
# Copyright (C) 1999--2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# keltus <keltus@users.sourceforge.net>
#
# $Id: irclib.py,v 1.47 2008/09/25 22:00:59 keltus Exp $
"""irclib -- Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP, and DCC chat,
but DCC file transfers are not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* There is no support for DCC file transfers.
* The author hasn't even read RFC 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import string
import sys
import time
import types
VERSION = 0, 4, 8
DEBUG = 0
# TODO
# ----
# (maybe) thread safety
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
class IRCError(Exception):
"""Represents an IRC exception."""
pass
class IRC:
"""Class that handles one or several IRC server connections.
When an IRC object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the IRC object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler, execute_at,
execute_delayed, process_once and process_forever.
Here is an example:
irc = irclib.IRC()
server = irc.server()
server.connect(\"irc.some.where\", 6667, \"my_nickname\")
server.privmsg(\"a_nickname\", \"Hi there!\")
irc.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message \"Hi there!\"
to the nickname a_nickname.
"""
def __init__(self, fn_to_add_socket=None,
fn_to_remove_socket=None,
fn_to_add_timeout=None):
"""Constructor for IRC objects.
Optional arguments are fn_to_add_socket, fn_to_remove_socket
and fn_to_add_timeout. The first two specify functions that
will be called with a socket object as argument when the IRC
object wants to be notified (or stop being notified) of data
coming on a new socket. When new data arrives, the method
process_data should be called. Similarly, fn_to_add_timeout
is called with a number of seconds (a floating point number)
as first argument when the IRC object wants to receive a
notification (by calling the process_timeout method). So, if
e.g. the argument is 42.17, the object wants the
process_timeout method to be called after 42 seconds and 170
milliseconds.
The three arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
if fn_to_add_socket and fn_to_remove_socket:
self.fn_to_add_socket = fn_to_add_socket
self.fn_to_remove_socket = fn_to_remove_socket
else:
self.fn_to_add_socket = None
self.fn_to_remove_socket = None
self.fn_to_add_timeout = fn_to_add_timeout
self.connections = []
self.handlers = {}
self.delayed_commands = [] # list of tuples in the format (time, function, arguments)
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
c = ServerConnection(self)
self.connections.append(c)
return c
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for IRC.__init__.
"""
for s in sockets:
for c in self.connections:
if s == c._get_socket():
c.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for IRC.__init__.
"""
t = time.time()
while self.delayed_commands:
if t >= self.delayed_commands[0][0]:
self.delayed_commands[0][1](*self.delayed_commands[0][2])
del self.delayed_commands[0]
else:
break
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
sockets = map(lambda x: x._get_socket(), self.connections)
sockets = filter(lambda x: x != None, sockets)
if sockets:
(i, o, e) = select.select(sockets, [], [], timeout)
self.process_data(i)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
while 1:
self.process_once(timeout)
def disconnect_all(self, message=""):
"""Disconnects all connections."""
for c in self.connections:
c.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of the
numeric_events dictionary in irclib.py for possible event
types.
handler -- Callback function.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
\"NO MORE\", no more handlers will be called.
"""
if not event in self.handlers:
self.handlers[event] = []
bisect.insort(self.handlers[event], ((priority, handler)))
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
if not event in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h[1]:
self.handlers[event].remove(h)
return 1
def execute_at(self, at, function, arguments=()):
"""Execute a function at a specified time.
Arguments:
at -- Execute at this time (standard \"time_t\" time).
function -- Function to call.
arguments -- Arguments to give the function.
"""
self.execute_delayed(at-time.time(), function, arguments)
def execute_delayed(self, delay, function, arguments=()):
"""Execute a function after a specified time.
Arguments:
delay -- How many seconds to wait.
function -- Function to call.
arguments -- Arguments to give the function.
"""
bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments))
if self.fn_to_add_timeout:
self.fn_to_add_timeout(delay)
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
c = DCCConnection(self, dcctype)
self.connections.append(c)
return c
def _handle_event(self, connection, event):
"""[Internal]"""
h = self.handlers
for handler in h.get("all_events", []) + h.get(event.eventtype(), []):
if handler[1](connection, event) == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
self.connections.remove(connection)
if self.fn_to_remove_socket:
self.fn_to_remove_socket(connection._get_socket())
_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection:
"""Base class for IRC connections.
Must be overridden.
"""
def __init__(self, irclibobj):
self.irclibobj = irclibobj
def _get_socket(self):
raise IRCError, "Not overridden"
##############################
### Convenience wrappers.
def execute_at(self, at, function, arguments=()):
self.irclibobj.execute_at(at, function, arguments)
def execute_delayed(self, delay, function, arguments=()):
self.irclibobj.execute_delayed(delay, function, arguments)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
# use \n as message separator! :P
_linesep_regexp = re.compile("\r?\n")
class ServerConnection(Connection):
"""This class represents an IRC server connection.
ServerConnection objects are instantiated by calling the server
method on an IRC object.
"""
def __init__(self, irclibobj):
Connection.__init__(self, irclibobj)
self.connected = 0 # Not connected yet.
self.socket = None
self.ssl = None
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name ("realname").
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
if self.connected:
self.disconnect("Changing servers")
self.previous_buffer = ""
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.localaddress = localaddress
self.localport = localport
self.localhost = socket.gethostname()
if ipv6:
self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.bind((self.localaddress, self.localport))
self.socket.connect((self.server, self.port))
if ssl:
self.ssl = socket.ssl(self.socket)
except socket.error, x:
self.socket.close()
self.socket = None
raise ServerConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
self.disconnect("Closing object")
self.irclibobj._remove_connection(self)
def _get_socket(self):
"""[Internal]"""
return self.socket
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
if self.real_server_name:
return self.real_server_name
else:
return ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method. """
return self.real_nickname
def process_data(self):
"""[Internal]"""
try:
if self.ssl:
new_data = self.ssl.read(2**14)
else:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
lines = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = lines.pop()
for line in lines:
if DEBUG:
print "FROM SERVER:", line
if not line:
continue
prefix = None
command = None
arguments = None
self._handle_event(Event("all_raw_messages",
self.get_server_name(),
None,
[line]))
m = _rfc_1459_command_regexp.match(line)
if m.group("prefix"):
prefix = m.group("prefix")
if not self.real_server_name:
self.real_server_name = prefix
if m.group("command"):
command = m.group("command").lower()
if m.group("argument"):
a = m.group("argument").split(" :", 1)
arguments = a[0].split()
if len(a) == 2:
arguments.append(a[1])
# Translate numerics into more readable strings.
if command in numeric_events:
command = numeric_events[command]
if command == "nick":
if nm_to_n(prefix) == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
if command in ["privmsg", "notice"]:
target, message = arguments[0], arguments[1]
messages = _ctcp_dequote(message)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if type(m) is types.TupleType:
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, m)
self._handle_event(Event(command, prefix, target, m))
if command == "ctcp" and m[0] == "ACTION":
self._handle_event(Event("action", prefix, target, m[1:]))
else:
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, [m])
self._handle_event(Event(command, prefix, target, [m]))
else:
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0]
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self._handle_event(Event(command, prefix, target, arguments))
def _handle_event(self, event):
"""[Internal]"""
self.irclibobj._handle_event(self, event)
if event.eventtype() in self.handlers:
for fn in self.handlers[event.eventtype()]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.irclibobj.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.irclibobj.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_raw(" ".join(["ADMIN", server]).strip())
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
self.quit(message)
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_raw("GLOBOPS :" + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_raw(" ".join(["INFO", server]).strip())
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_raw(" ".join(["INVITE", nick, channel]).strip())
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_raw("ISON " + " ".join(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
command = "LINKS"
if remote_server:
command = command + " " + remote_server
if server_mask:
command = command + " " + server_mask
self.send_raw(command)
def list(self, channels=None, server=""):
"""Send a LIST command."""
command = "LIST"
if channels:
command = command + " " + ",".join(channels)
if server:
command = command + " " + server
self.send_raw(command)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_raw("LUSERS" + (server and (" " + server)))
def mode(self, target, command):
"""Send a MODE command."""
self.send_raw("MODE %s %s" % (target, command))
def motd(self, server=""):
"""Send an MOTD command."""
self.send_raw("MOTD" + (server and (" " + server)))
def names(self, channels=None):
"""Send a NAMES command."""
self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))
def nick(self, newnick):
"""Send a NICK command."""
self.send_raw("NICK " + newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_raw("NOTICE %s :%s" % (target, text))
def oper(self, nick, password):
"""Send an OPER command."""
self.send_raw("OPER %s %s" % (nick, password))
def part(self, channels, message=""):
"""Send a PART command."""
if type(channels) == types.StringType:
self.send_raw("PART " + channels + (message and (" " + message)))
else:
self.send_raw("PART " + ",".join(channels) + (message and (" " + message)))
def pass_(self, password):
"""Send a PASS command."""
self.send_raw("PASS " + password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (target, text))
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_raw("QUIT" + (message and (" :" + message)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError, "Not connected."
try:
if self.ssl:
self.ssl.write(string + "\r\n")
else:
self.socket.send(string + "\r\n")
if DEBUG:
print "TO SERVER:", string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
def time(self, server=""):
"""Send a TIME command."""
self.send_raw("TIME" + (server and (" " + server)))
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
if new_topic is None:
self.send_raw("TOPIC " + channel)
else:
self.send_raw("TOPIC %s :%s" % (channel, new_topic))
def trace(self, target=""):
"""Send a TRACE command."""
self.send_raw("TRACE" + (target and (" " + target)))
def user(self, username, realname):
"""Send a USER command."""
self.send_raw("USER %s 0 * :%s" % (username, realname))
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_raw("USERHOST " + ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_raw("USERS" + (server and (" " + server)))
def version(self, server=""):
"""Send a VERSION command."""
self.send_raw("VERSION" + (server and (" " + server)))
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_raw("WALLOPS :" + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
def whois(self, targets):
"""Send a WHOIS command."""
self.send_raw("WHOIS " + ",".join(targets))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_raw("WHOWAS %s%s%s" % (nick,
max and (" " + max),
server and (" " + server)))
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""This class represents a DCC connection.
DCCConnection objects are instantiated by calling the dcc
method on an IRC object.
"""
def __init__(self, irclibobj, dcctype):
Connection.__init__(self, irclibobj)
self.connected = 0
self.passive = 0
self.dcctype = dcctype
self.peeraddress = None
self.peerport = None
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.socket = None
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 0
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error, x:
raise DCCConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
return self
def listen(self):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 1
try:
self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error, x:
raise DCCConnectionError, "Couldn't bind socket: %s" % x
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self.irclibobj._handle_event(
self,
Event("dcc_disconnect", self.peeraddress, "", [message]))
self.irclibobj._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = 1
if DEBUG:
print "DCC connection from %s:%d" % (
self.peeraddress, self.peerport)
self.irclibobj._handle_event(
self,
Event("dcc_connect", self.peeraddress, None, None))
return
try:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
# The specification says lines are terminated with LF, but
# it seems safer to handle CR LF terminations too.
chunks = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = chunks[-1]
if len(self.previous_buffer) > 2**14:
# Bad peer! Naughty peer!
self.disconnect()
return
chunks = chunks[:-1]
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
if DEBUG:
print "FROM PEER:", chunk
arguments = [chunk]
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self.irclibobj._handle_event(
self,
Event(command, prefix, target, arguments))
def _get_socket(self):
"""[Internal]"""
return self.socket
def privmsg(self, string):
"""Send data to DCC peer.
The string will be padded with appropriate LF if it's a DCC
CHAT session.
"""
try:
self.socket.send(string)
if self.dcctype == "chat":
self.socket.send("\n")
if DEBUG:
print "TO PEER: %s\n" % string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
(which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
Instance attributes that can be used by subclasses:
ircobj -- The IRC instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
def __init__(self):
self.ircobj = IRC()
self.connection = self.ircobj.server()
self.dcc_connections = []
self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, c, e):
"""[Internal]"""
m = "on_" + e.eventtype()
if hasattr(self, m):
getattr(self, m)(c, e)
def _dcc_disconnect(self, c, e):
self.dcc_connections.remove(c)
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name.
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
"""
self.connection.connect(server, port, nickname,
password, username, ircname,
localaddress, localport, ssl, ipv6)
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.connect(address, port)
return dcc
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.listen()
return dcc
def start(self):
"""Start the IRC client."""
self.ircobj.process_forever()
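# Minimal subclass sketch (illustrative; the server name and channel below are
# placeholders, not defaults of this library):
#
#     class EchoBot(SimpleIRCClient):
#         def on_welcome(self, connection, event):
#             connection.join("#example")
#         def on_pubmsg(self, connection, event):
#             connection.privmsg(event.target(), "you said: " + event.arguments()[0])
#
#     bot = EchoBot()
#     bot.connect("irc.example.net", 6667, "echobot")
#     bot.start()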
class Event:
"""Class representing an IRC event."""
def __init__(self, eventtype, source, target, arguments=None):
"""Constructor of Event objects.
Arguments:
eventtype -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event specific arguments.
"""
self._eventtype = eventtype
self._source = source
self._target = target
if arguments:
self._arguments = arguments
else:
self._arguments = []
def eventtype(self):
"""Get the event type."""
return self._eventtype
def source(self):
"""Get the event source."""
return self._source
def target(self):
"""Get the event target."""
return self._target
def arguments(self):
"""Get the event arguments."""
return self._arguments
_LOW_LEVEL_QUOTE = "\020"
_CTCP_LEVEL_QUOTE = "\134"
_CTCP_DELIMITER = "\001"
_low_level_mapping = {
"0": "\000",
"n": "\n",
"r": "\r",
_LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
}
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
"""Check if a nick matches a mask.
Returns true if the nick matches, otherwise false.
"""
nick = irc_lower(nick)
mask = irc_lower(mask)
mask = mask.replace("\\", "\\\\")
for ch in ".$|[](){}+":
mask = mask.replace(ch, "\\" + ch)
mask = mask.replace("?", ".")
mask = mask.replace("*", ".*")
r = re.compile(mask, re.IGNORECASE)
return r.match(nick)
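# Example (illustrative): with the substitutions above, the mask
# "foo*!*@example.com" becomes the regexp "foo.*!.*@example\.com", so
#
#     mask_matches("FooBar!user@example.com", "foo*!*@example.com")
#
# returns a truthy match object (both nick and mask are lowercased first).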
_special = "-[]\\`^{}"
nick_characters = string.ascii_letters + string.digits + _special
_ircstring_translation = string.maketrans(string.ascii_uppercase + "[]\\^",
string.ascii_lowercase + "{}|~")
def irc_lower(s):
"""Returns a lowercased string.
The definition of lowercased comes from the IRC specification (RFC
1459).
"""
return s.translate(_ircstring_translation)
def _ctcp_dequote(message):
"""[Internal] Dequote a message according to CTCP specifications.
The function returns a list where each element can be either a
string (normal message) or a tuple of one or two strings (tagged
messages). If a tuple has only one element (i.e. is a singleton),
that element is the tag; otherwise the tuple has two elements: the
tag and the data.
Arguments:
message -- The message to be decoded.
"""
def _low_level_replace(match_obj):
ch = match_obj.group(1)
# If low_level_mapping doesn't have the character as key, we
# should just return the character.
return _low_level_mapping.get(ch, ch)
if _LOW_LEVEL_QUOTE in message:
# Yup, there was a quote. Release the dequoter!
message = _low_level_regexp.sub(_low_level_replace, message)
if _CTCP_DELIMITER not in message:
return [message]
else:
# Split it into parts. (Does any IRC client actually *use*
# CTCP stacking like this?)
chunks = message.split(_CTCP_DELIMITER)
messages = []
i = 0
while i < len(chunks)-1:
# Add message if it's non-empty.
if len(chunks[i]) > 0:
messages.append(chunks[i])
if i < len(chunks)-2:
# Aye! CTCP tagged data ahead!
messages.append(tuple(chunks[i+1].split(" ", 1)))
i = i + 2
if len(chunks) % 2 == 0:
# Hey, a lonely _CTCP_DELIMITER at the end! This means
# that the last chunk, including the delimiter, is a
# normal message! (This is according to the CTCP
# specification.)
messages.append(_CTCP_DELIMITER + chunks[-1])
return messages
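# Example (illustrative): a CTCP ACTION wrapped in \001 delimiters comes back
# as a (tag, data) tuple, while untagged text stays a plain string:
#
#     _ctcp_dequote("\001ACTION waves\001")   # -> [("ACTION", "waves")]
#     _ctcp_dequote("hello")                  # -> ["hello"]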
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""Convert an IP number as an integer given in ASCII
representation (e.g. '3232235521') to an IP address string
(e.g. '192.168.0.1')."""
n = long(num)
p = map(str, map(int, [n >> 24 & 0xFF, n >> 16 & 0xFF,
n >> 8 & 0xFF, n & 0xFF]))
return ".".join(p)
def ip_quad_to_numstr(quad):
"""Convert an IP address string (e.g. '192.168.0.1') to an IP
number as an integer given in ASCII representation
(e.g. '3232235521')."""
p = map(long, quad.split("."))
s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
if s[-1] == "L":
s = s[:-1]
return s
def nm_to_n(s):
"""Get the nick part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[0]
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1]
def nm_to_h(s):
"""Get the host part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("@")[1]
def nm_to_u(s):
"""Get the user part of a nickmask.
(The source of an Event is a nickmask.)
"""
s = s.split("!")[1]
return s.split("@")[0]
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
always None.
Example:
>>> irclib.parse_nick_modes(\"+ab-c\")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".
Example:
>>> irclib.parse_channel_modes(\"+ab-c foo\")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""[Internal]"""
modes = []
arg_count = 0
# State variable.
sign = ""
a = mode_string.split()
if len(a) == 0:
return []
else:
mode_part, args = a[0], a[1:]
if mode_part[0] not in "+-":
return []
for ch in mode_part:
if ch in "+-":
sign = ch
elif ch == " ":
collecting_arguments = 1
elif ch in unary_modes:
if len(args) >= arg_count + 1:
modes.append([sign, ch, args[arg_count]])
arg_count = arg_count + 1
else:
modes.append([sign, ch, None])
else:
modes.append([sign, ch, None])
return modes
def _ping_ponger(connection, event):
"""[Internal]"""
connection.pong(event.target())
# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
numeric_events = {
"001": "welcome",
"002": "yourhost",
"003": "created",
"004": "myinfo",
"005": "featurelist", # XXX
"200": "tracelink",
"201": "traceconnecting",
"202": "tracehandshake",
"203": "traceunknown",
"204": "traceoperator",
"205": "traceuser",
"206": "traceserver",
"207": "traceservice",
"208": "tracenewtype",
"209": "traceclass",
"210": "tracereconnect",
"211": "statslinkinfo",
"212": "statscommands",
"213": "statscline",
"214": "statsnline",
"215": "statsiline",
"216": "statskline",
"217": "statsqline",
"218": "statsyline",
"219": "endofstats",
"221": "umodeis",
"231": "serviceinfo",
"232": "endofservices",
"233": "service",
"234": "servlist",
"235": "servlistend",
"241": "statslline",
"242": "statsuptime",
"243": "statsoline",
"244": "statshline",
"250": "luserconns",
"251": "luserclient",
"252": "luserop",
"253": "luserunknown",
"254": "luserchannels",
"255": "luserme",
"256": "adminme",
"257": "adminloc1",
"258": "adminloc2",
"259": "adminemail",
"261": "tracelog",
"262": "endoftrace",
"263": "tryagain",
"265": "n_local",
"266": "n_global",
"300": "none",
"301": "away",
"302": "userhost",
"303": "ison",
"305": "unaway",
"306": "nowaway",
"311": "whoisuser",
"312": "whoisserver",
"313": "whoisoperator",
"314": "whowasuser",
"315": "endofwho",
"316": "whoischanop",
"317": "whoisidle",
"318": "endofwhois",
"319": "whoischannels",
"321": "liststart",
"322": "list",
"323": "listend",
"324": "channelmodeis",
"329": "channelcreate",
"331": "notopic",
"332": "currenttopic",
"333": "topicinfo",
"341": "inviting",
"342": "summoning",
"346": "invitelist",
"347": "endofinvitelist",
"348": "exceptlist",
"349": "endofexceptlist",
"351": "version",
"352": "whoreply",
"353": "namreply",
"361": "killdone",
"362": "closing",
"363": "closeend",
"364": "links",
"365": "endoflinks",
"366": "endofnames",
"367": "banlist",
"368": "endofbanlist",
"369": "endofwhowas",
"371": "info",
"372": "motd",
"373": "infostart",
"374": "endofinfo",
"375": "motdstart",
"376": "endofmotd",
"377": "motd2", # 1997-10-16 -- tkil
"381": "youreoper",
"382": "rehashing",
"384": "myportis",
"391": "time",
"392": "usersstart",
"393": "users",
"394": "endofusers",
"395": "nousers",
"401": "nosuchnick",
"402": "nosuchserver",
"403": "nosuchchannel",
"404": "cannotsendtochan",
"405": "toomanychannels",
"406": "wasnosuchnick",
"407": "toomanytargets",
"409": "noorigin",
"411": "norecipient",
"412": "notexttosend",
"413": "notoplevel",
"414": "wildtoplevel",
"421": "unknowncommand",
"422": "nomotd",
"423": "noadmininfo",
"424": "fileerror",
"431": "nonicknamegiven",
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
"433": "nicknameinuse",
"436": "nickcollision",
"437": "unavailresource", # "Nick temporally unavailable"
"441": "usernotinchannel",
"442": "notonchannel",
"443": "useronchannel",
"444": "nologin",
"445": "summondisabled",
"446": "usersdisabled",
"451": "notregistered",
"461": "needmoreparams",
"462": "alreadyregistered",
"463": "nopermforhost",
"464": "passwdmismatch",
"465": "yourebannedcreep", # I love this one...
"466": "youwillbebanned",
"467": "keyset",
"471": "channelisfull",
"472": "unknownmode",
"473": "inviteonlychan",
"474": "bannedfromchan",
"475": "badchannelkey",
"476": "badchanmask",
"477": "nochanmodes", # "Channel doesn't support modes"
"478": "banlistfull",
"481": "noprivileges",
"482": "chanoprivsneeded",
"483": "cantkillserver",
"484": "restricted", # Connection is restricted
"485": "uniqopprivsneeded",
"491": "nooperhost",
"492": "noservicehost",
"501": "umodeunknownflag",
"502": "usersdontmatch",
}
generated_events = [
# Generated events
"dcc_connect",
"dcc_disconnect",
"dccmsg",
"disconnect",
"ctcp",
"ctcpreply",
]
protocol_events = [
# IRC protocol events
"error",
"join",
"kick",
"mode",
"part",
"ping",
"privmsg",
"privnotice",
"pubmsg",
"pubnotice",
"quit",
"invite",
"pong",
]
all_events = generated_events + protocol_events + list(numeric_events.values())
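# Illustrative lookups against the tables above (not part of the original module):
# >>> numeric_events["001"]
# 'welcome'
# >>> "welcome" in all_events
# True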
| 31.199231
| 105
| 0.574802
|
7d39581a70eaa6c4311e994b1539716957258987
| 2,388
|
py
|
Python
|
libcst/codegen/gen_type_mapping.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
libcst/codegen/gen_type_mapping.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
libcst/codegen/gen_type_mapping.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import List
from libcst.codegen.gather import imports, nodebases, nodeuses
generated_code: List[str] = []
generated_code.append("# Copyright (c) Facebook, Inc. and its affiliates.")
generated_code.append("#")
generated_code.append(
"# This source code is licensed under the MIT license found in the"
)
generated_code.append("# LICENSE file in the root directory of this source tree.")
generated_code.append("")
generated_code.append("# pyre-strict")
generated_code.append("")
generated_code.append("# This file was generated by libcst.codegen.gen_type_mapping")
generated_code.append("from typing import Dict as TypingDict, Type, Union")
generated_code.append("")
generated_code.append("from libcst._maybe_sentinel import MaybeSentinel")
generated_code.append("from libcst._removal_sentinel import RemovalSentinel")
generated_code.append("from libcst._nodes.base import CSTNode")
# Import the types we use. These have to be type guarded since it would
# cause an import cycle otherwise.
generated_code.append("")
generated_code.append("")
for module, objects in imports.items():
generated_code.append(f"from {module} import (")
generated_code.append(f" {', '.join(sorted(list(objects)))}")
generated_code.append(")")
# Generate the typed return-value mapping for each concrete node
generated_code.append("")
generated_code.append("")
generated_code.append(
"TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = {"
)
for node in sorted(nodebases.keys(), key=lambda node: node.__name__):
name = node.__name__
if name.startswith("Base"):
continue
valid_return_types: List[str] = [nodebases[node].__name__]
node_uses = nodeuses[node]
base_uses = nodeuses[nodebases[node]]
if node_uses.maybe or base_uses.maybe:
valid_return_types.append("MaybeSentinel")
if (
node_uses.optional
or node_uses.sequence
or base_uses.optional
or base_uses.sequence
):
valid_return_types.append("RemovalSentinel")
generated_code.append(f' {name}: Union[{", ".join(valid_return_types)}],')
generated_code.append("}")
if __name__ == "__main__":
# Output the code
print("\n".join(generated_code))
| 36.181818
| 85
| 0.7366
|
9043d4d356f149f3c143f3254f6afba5e8ffcaaf
| 676
|
py
|
Python
|
ThreeDEngine/util.py
|
MysteryCoder456/ThreeDEngine
|
c40bf5d751faa9016d6b3d5d11d76dbfd2e3fa75
|
[
"MIT"
] | 8
|
2020-04-29T03:46:21.000Z
|
2020-05-01T17:43:29.000Z
|
ThreeDEngine/util.py
|
MysteryCoder456/3DEngine
|
c40bf5d751faa9016d6b3d5d11d76dbfd2e3fa75
|
[
"MIT"
] | null | null | null |
ThreeDEngine/util.py
|
MysteryCoder456/3DEngine
|
c40bf5d751faa9016d6b3d5d11d76dbfd2e3fa75
|
[
"MIT"
] | null | null | null |
from math import sqrt
from glm import vec3
def average(numbers: list):
"""
Get the average of a list of numbers
Arguments:
numbers {list} -- Number list
"""
total_sum = 0
divisor = 0
for number in numbers:
total_sum += number
divisor += 1
return total_sum / divisor
def dist3d(pos1: vec3, pos2: vec3):
"""
Get distance between 2 positions in 3D Space
Arguments:
pos1 {vec3} -- First position
pos2 {vec3} -- Second position
Returns:
float -- The distance
"""
a = pos1.x - pos2.x
b = pos1.y - pos2.y
c = pos1.z - pos2.z
return sqrt(a**2 + b**2 + c**2)
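# Quick illustrative checks of the helpers above (values computed by hand):
# >>> average([1, 2, 3])
# 2.0
# >>> dist3d(vec3(0, 0, 0), vec3(1, 2, 2))
# 3.0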
| 17.789474
| 48
| 0.565089
|
aac367bc7daade2b1492016649d29daf6b054361
| 3,430
|
py
|
Python
|
tripleoclient/exceptions.py
|
openstack/python-tripleoclient
|
3712cede1f859f5eb7b1d86b1721272448c219b0
|
[
"Apache-2.0"
] | 39
|
2015-09-08T14:34:36.000Z
|
2022-02-20T21:00:44.000Z
|
tripleoclient/exceptions.py
|
openstack/python-tripleoclient
|
3712cede1f859f5eb7b1d86b1721272448c219b0
|
[
"Apache-2.0"
] | 1
|
2021-02-28T06:06:29.000Z
|
2021-02-28T06:06:29.000Z
|
tripleoclient/exceptions.py
|
openstack/python-tripleoclient
|
3712cede1f859f5eb7b1d86b1721272448c219b0
|
[
"Apache-2.0"
] | 33
|
2015-10-01T17:53:04.000Z
|
2022-03-10T11:50:38.000Z
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Exception definitions"""
class Base(Exception):
"""Base TripleO exception."""
class WorkflowServiceError(Base):
"""The service type is unknown"""
class WebSocketConnectionClosed(Base):
"""Websocket connection is closed before wait for messages"""
class NotFound(Base):
"""Resource not found"""
class LookupError(Base):
"""Lookup Error"""
class DeploymentError(Base):
"""Deployment failed"""
class PlanEnvWorkflowError(Base):
"""Plan Environment workflow has failed"""
class ConfigDownloadInProgress(Base):
"""Unable to deploy as config download already in progress"""
msg_format = ("Config download already in progress with "
"execution id {} for stack {}")
def __init__(self, execution_id='', stack=''):
message = self.msg_format.format(execution_id, stack)
super(ConfigDownloadInProgress, self).__init__(message)
class RootUserExecution(Base):
"""Command was executed by a root user"""
class RootDeviceDetectionError(Base):
"""Failed to detect the root device"""
class InvalidConfiguration(Base, ValueError):
"""Invalid parameters were specified for the deployment"""
class IntrospectionError(Base):
"""Introspection failed"""
class RegisterOrUpdateError(WorkflowServiceError):
"""Introspection failed"""
class NodeProvideError(WorkflowServiceError):
"""Node Provide failed."""
class NodeConfigurationError(WorkflowServiceError):
"""Node Configuration failed."""
class ProfileMatchingError(Base):
"""Failed to validate or assign node profiles"""
class PlanCreationError(Base):
"""Plan creation failed"""
class PlanExportError(Base):
"""Plan export failed"""
class WorkflowActionError(Base):
"""Workflow action failed"""
msg_format = "Action {} execution failed: {}"
def __init__(self, action='', output=''):
message = self.msg_format.format(action, output)
super(WorkflowActionError, self).__init__(message)
class DownloadError(Base):
"""Download attempt failed"""
class LogFetchError(Base):
"""Fetching logs failed"""
class ContainerDeleteFailed(Base):
"""Container deletion failed"""
class UndercloudUpgradeNotConfirmed(Base):
"""Undercloud upgrade security question not confirmed."""
class OvercloudUpdateNotConfirmed(Base):
"""Overcloud Update security question not confirmed."""
class OvercloudUpgradeNotConfirmed(Base):
"""Overcloud Update security question not confirmed."""
class CellExportError(Base):
"""Cell export failed"""
class BannedParameters(Base):
"""Some of the environment parameters provided should be removed"""
class HeatPodMessageQueueException(Base):
"""Heat messaging queue not created"""
class InvalidPlaybook(Base):
"""Invalid playbook path specified"""
| 23.493151
| 77
| 0.714869
|
bf57511adaf8c9df7860d6b3eb64add771c54091
| 44,925
|
py
|
Python
|
models/Trading.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | 1
|
2021-06-14T18:21:49.000Z
|
2021-06-14T18:21:49.000Z
|
models/Trading.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | null | null | null |
models/Trading.py
|
Vashiru/pycryptobot
|
a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe
|
[
"Apache-2.0"
] | null | null | null |
"""Technical analysis on a trading Pandas DataFrame"""
import warnings
from re import compile
from numpy import floor, maximum, mean, minimum, nan, ndarray, round
from numpy import sum as np_sum
from numpy import where
from pandas import DataFrame, Series
from datetime import datetime, timedelta
from statsmodels.tsa.statespace.sarimax import SARIMAX, SARIMAXResultsWrapper
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from models.helper.LogHelper import Logger
warnings.simplefilter('ignore', ConvergenceWarning)
class TechnicalAnalysis():
def __init__(self, data=DataFrame()) -> None:
"""Technical Analysis object model
Parameters
----------
data : Pandas Time Series
data[ts] = [ 'date', 'market', 'granularity', 'low', 'high', 'open', 'close', 'volume' ]
"""
if not isinstance(data, DataFrame):
raise TypeError('Data is not a Pandas dataframe.')
if 'date' not in data or 'market' not in data or 'granularity' not in data \
or 'low' not in data or 'high' not in data \
or 'open' not in data or 'close' not in data \
or 'volume' not in data:
raise ValueError('Data does not contain date, market, granularity, low, high, open, close, volume')
if not 'close' in data.columns:
raise AttributeError("Pandas DataFrame 'close' column required.")
if not data['close'].dtype == 'float64' and not data['close'].dtype == 'int64':
raise AttributeError("Pandas DataFrame 'close' column not int64 or float64.")
self.df = data
self.levels = []
def getDataFrame(self) -> DataFrame:
"""Returns the Pandas DataFrame"""
return self.df
def addAll(self) -> None:
"""Adds analysis to the DataFrame"""
self.addChangePct()
self.addCMA()
self.addSMA(20)
self.addSMA(50)
self.addSMA(200)
self.addEMA(8)
self.addEMA(12)
self.addEMA(26)
self.addGoldenCross()
self.addDeathCross()
self.addFibonacciBollingerBands()
self.addRSI(14)
self.addStochasticRSI(14)
self.addWilliamsR(14)
self.addMACD()
self.addOBV()
self.addElderRayIndex()
self.addEMABuySignals()
self.addSMABuySignals()
self.addMACDBuySignals()
self.addCandleAstralBuy()
self.addCandleAstralSell()
self.addCandleHammer()
self.addCandleInvertedHammer()
self.addCandleShootingStar()
self.addCandleHangingMan()
self.addCandleThreeWhiteSoldiers()
self.addCandleThreeBlackCrows()
self.addCandleDoji()
self.addCandleThreeLineStrike()
self.addCandleTwoBlackGapping()
self.addCandleMorningStar()
self.addCandleEveningStar()
self.addCandleAbandonedBaby()
self.addCandleMorningDojiStar()
self.addCandleEveningDojiStar()
"""Candlestick References
https://commodity.com/technical-analysis
https://www.investopedia.com
https://github.com/SpiralDevelopment/candlestick-patterns
https://www.incrediblecharts.com/candlestick_patterns/candlestick-patterns-strongest.php
"""
def candleHammer(self) -> Series:
"""* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up"""
return ((self.df['high'] - self.df['low']) > 3 * (self.df['open'] - self.df['close'])) \
& (((self.df['close'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) > 0.6) \
& (((self.df['open'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) > 0.6)
def addCandleHammer(self) -> None:
self.df['hammer'] = self.candleHammer()
def candleShootingStar(self) -> Series:
"""* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")"""
return ((self.df['open'].shift(1) < self.df['close'].shift(1)) & (self.df['close'].shift(1) < self.df['open'])) \
& (self.df['high'] - maximum(self.df['open'], self.df['close']) >= (abs(self.df['open'] - self.df['close']) * 3)) \
& ((minimum(self.df['close'], self.df['open']) - self.df['low']) <= abs(self.df['open'] - self.df['close']))
def addCandleShootingStar(self) -> None:
self.df['shooting_star'] = self.candleShootingStar()
def candleHangingMan(self) -> Series:
"""* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")"""
return ((self.df['high'] - self.df['low']) > (4 * (self.df['open'] - self.df['close']))) \
& (((self.df['close'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) >= 0.75) \
& (((self.df['open'] - self.df['low']) / (.001 + self.df['high'] - self.df['low'])) >= 0.75) \
& (self.df['high'].shift(1) < self.df['open']) \
& (self.df['high'].shift(2) < self.df['open'])
def addCandleHangingMan(self) -> None:
self.df['hanging_man'] = self.candleHangingMan()
def candleInvertedHammer(self) -> Series:
"""* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")"""
return (((self.df['high'] - self.df['low']) > 3 * (self.df['open'] - self.df['close'])) \
& ((self.df['high'] - self.df['close']) / (.001 + self.df['high'] - self.df['low']) > 0.6) \
& ((self.df['high'] - self.df['open']) / (.001 + self.df['high'] - self.df['low']) > 0.6))
def addCandleInvertedHammer(self) -> None:
self.df['inverted_hammer'] = self.candleInvertedHammer()
def candleThreeWhiteSoldiers(self) -> Series:
"""*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")"""
return ((self.df['open'] > self.df['open'].shift(1)) & (self.df['open'] < self.df['close'].shift(1))) \
& (self.df['close'] > self.df['high'].shift(1)) \
& (self.df['high'] - maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \
& ((self.df['open'].shift(1) > self.df['open'].shift(2)) & (self.df['open'].shift(1) < self.df['close'].shift(2))) \
& (self.df['close'].shift(1) > self.df['high'].shift(2)) \
& (self.df['high'].shift(1) - maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1))))
def addCandleThreeWhiteSoldiers(self) -> None:
self.df['three_white_soldiers'] = self.candleThreeWhiteSoldiers()
def candleThreeBlackCrows(self) -> Series:
"""* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")"""
return ((self.df['open'] < self.df['open'].shift(1)) & (self.df['open'] > self.df['close'].shift(1))) \
& (self.df['close'] < self.df['low'].shift(1)) \
& (self.df['low'] - maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \
& ((self.df['open'].shift(1) < self.df['open'].shift(2)) & (self.df['open'].shift(1) > self.df['close'].shift(2))) \
& (self.df['close'].shift(1) < self.df['low'].shift(2)) \
& (self.df['low'].shift(1) - maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1))))
def addCandleThreeBlackCrows(self) -> None:
self.df['three_black_crows'] = self.candleThreeBlackCrows()
def candleDoji(self) -> Series:
"""! Candlestick Detected: Doji ("Indecision")"""
return ((abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low'])) < 0.1) \
& ((self.df['high'] - maximum(self.df['close'], self.df['open'])) > (3 * abs(self.df['close'] - self.df['open']))) \
& ((minimum(self.df['close'], self.df['open']) - self.df['low']) > (3 * abs(self.df['close'] - self.df['open'])))
def addCandleDoji(self) -> None:
self.df['doji'] = self.candleDoji()
def candleThreeLineStrike(self) -> Series:
"""** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")"""
return ((self.df['open'].shift(1) < self.df['open'].shift(2)) & (self.df['open'].shift(1) > self.df['close'].shift(2))) \
& (self.df['close'].shift(1) < self.df['low'].shift(2)) \
& (self.df['low'].shift(1) - maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < (abs(self.df['open'].shift(1) - self.df['close'].shift(1)))) \
& ((self.df['open'].shift(2) < self.df['open'].shift(3)) & (self.df['open'].shift(2) > self.df['close'].shift(3))) \
& (self.df['close'].shift(2) < self.df['low'].shift(3)) \
& (self.df['low'].shift(2) - maximum(self.df['open'].shift(2), self.df['close'].shift(2)) < (abs(self.df['open'].shift(2) - self.df['close'].shift(2)))) \
& ((self.df['open'] < self.df['low'].shift(1)) & (self.df['close'] > self.df['high'].shift(3)))
def addCandleThreeLineStrike(self) -> None:
self.df['three_line_strike'] = self.candleThreeLineStrike()
def candleTwoBlackGapping(self) -> Series:
"""*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")"""
return ((self.df['open'] < self.df['open'].shift(1)) & (self.df['open'] > self.df['close'].shift(1))) \
& (self.df['close'] < self.df['low'].shift(1)) \
& (self.df['low'] - maximum(self.df['open'], self.df['close']) < (abs(self.df['open'] - self.df['close']))) \
& (self.df['high'].shift(1) < self.df['low'].shift(2))
def addCandleTwoBlackGapping(self) -> None:
self.df['two_black_gapping'] = self.candleTwoBlackGapping()
def candleMorningStar(self) -> Series:
"""*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")"""
return ((maximum(self.df['open'].shift(1), self.df['close'].shift(1)) < self.df['close'].shift(2)) & (self.df['close'].shift(2) < self.df['open'].shift(2))) \
& ((self.df['close'] > self.df['open']) & (self.df['open'] > maximum(self.df['open'].shift(1), self.df['close'].shift(1))))
def addCandleMorningStar(self) -> None:
self.df['morning_star'] = self.candleMorningStar()
def candleEveningStar(self) -> ndarray:
"""*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")"""
return ((minimum(self.df['open'].shift(1), self.df['close'].shift(1)) > self.df['close'].shift(2)) & (self.df['close'].shift(2) > self.df['open'].shift(2))) \
& ((self.df['close'] < self.df['open']) & (self.df['open'] < minimum(self.df['open'].shift(1), self.df['close'].shift(1))))
def addCandleEveningStar(self) -> None:
self.df['evening_star'] = self.candleEveningStar()
def candleAbandonedBaby(self):
"""** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")"""
return (self.df['open'] < self.df['close']) \
& (self.df['high'].shift(1) < self.df['low']) \
& (self.df['open'].shift(2) > self.df['close'].shift(2)) \
& (self.df['high'].shift(1) < self.df['low'].shift(2))
def addCandleAbandonedBaby(self) -> None:
self.df['abandoned_baby'] = self.candleAbandonedBaby()
def candleMorningDojiStar(self) -> Series:
"""** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")"""
return (self.df['close'].shift(2) < self.df['open'].shift(2)) \
& (abs(self.df['close'].shift(2) - self.df['open'].shift(2)) / (self.df['high'].shift(2) - self.df['low'].shift(2)) >= 0.7) \
& (abs(self.df['close'].shift(1) - self.df['open'].shift(1)) / (self.df['high'].shift(1) - self.df['low'].shift(1)) < 0.1) \
& (self.df['close'] > self.df['open']) \
& (abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low']) >= 0.7) \
& (self.df['close'].shift(2) > self.df['close'].shift(1)) \
& (self.df['close'].shift(2) > self.df['open'].shift(1)) \
& (self.df['close'].shift(1) < self.df['open']) \
& (self.df['open'].shift(1) < self.df['open']) \
& (self.df['close'] > self.df['close'].shift(2)) \
& ((self.df['high'].shift(1) - maximum(self.df['close'].shift(1), self.df['open'].shift(1))) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))) \
& (minimum(self.df['close'].shift(1), self.df['open'].shift(1)) - self.df['low'].shift(1)) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))
def addCandleMorningDojiStar(self) -> None:
self.df['morning_doji_star'] = self.candleMorningDojiStar()
def candleEveningDojiStar(self) -> Series:
"""** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")"""
return (self.df['close'].shift(2) > self.df['open'].shift(2)) \
& (abs(self.df['close'].shift(2) - self.df['open'].shift(2)) / (self.df['high'].shift(2) - self.df['low'].shift(2)) >= 0.7) \
& (abs(self.df['close'].shift(1) - self.df['open'].shift(1)) / (self.df['high'].shift(1) - self.df['low'].shift(1)) < 0.1) \
& (self.df['close'] < self.df['open']) \
& (abs(self.df['close'] - self.df['open']) / (self.df['high'] - self.df['low']) >= 0.7) \
& (self.df['close'].shift(2) < self.df['close'].shift(1)) \
& (self.df['close'].shift(2) < self.df['open'].shift(1)) \
& (self.df['close'].shift(1) > self.df['open']) \
& (self.df['open'].shift(1) > self.df['open']) \
& (self.df['close'] < self.df['close'].shift(2)) \
& ((self.df['high'].shift(1) - maximum(self.df['close'].shift(1), self.df['open'].shift(1))) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))) \
& (minimum(self.df['close'].shift(1), self.df['open'].shift(1)) - self.df['low'].shift(1)) > (3 * abs(self.df['close'].shift(1) - self.df['open'].shift(1)))
def addCandleEveningDojiStar(self) -> None:
self.df['evening_doji_star'] = self.candleEveningDojiStar()
def candleAstralBuy(self) -> Series:
"""*** Candlestick Detected: Astral Buy (Fibonacci 3, 5, 8)"""
return (self.df['close'] < self.df['close'].shift(3)) & (self.df['low'] < self.df['low'].shift(5)) \
& (self.df['close'].shift(1) < self.df['close'].shift(4)) & (self.df['low'].shift(1) < self.df['low'].shift(6)) \
& (self.df['close'].shift(2) < self.df['close'].shift(5)) & (self.df['low'].shift(2) < self.df['low'].shift(7)) \
& (self.df['close'].shift(3) < self.df['close'].shift(6)) & (self.df['low'].shift(3) < self.df['low'].shift(8)) \
& (self.df['close'].shift(4) < self.df['close'].shift(7)) & (self.df['low'].shift(4) < self.df['low'].shift(9)) \
& (self.df['close'].shift(5) < self.df['close'].shift(8)) & (self.df['low'].shift(5) < self.df['low'].shift(10)) \
& (self.df['close'].shift(6) < self.df['close'].shift(9)) & (self.df['low'].shift(6) < self.df['low'].shift(11)) \
& (self.df['close'].shift(7) < self.df['close'].shift(10)) & (self.df['low'].shift(7) < self.df['low'].shift(12))
def addCandleAstralBuy(self) -> None:
self.df['astral_buy'] = self.candleAstralBuy()
def candleAstralSell(self) -> Series:
"""*** Candlestick Detected: Astral Sell (Fibonacci 3, 5, 8)"""
return (self.df['close'] > self.df['close'].shift(3)) & (self.df['high'] > self.df['high'].shift(5)) \
& (self.df['close'].shift(1) > self.df['close'].shift(4)) & (self.df['high'].shift(1) > self.df['high'].shift(6)) \
& (self.df['close'].shift(2) > self.df['close'].shift(5)) & (self.df['high'].shift(2) > self.df['high'].shift(7)) \
& (self.df['close'].shift(3) > self.df['close'].shift(6)) & (self.df['high'].shift(3) > self.df['high'].shift(8)) \
& (self.df['close'].shift(4) > self.df['close'].shift(7)) & (self.df['high'].shift(4) > self.df['high'].shift(9)) \
& (self.df['close'].shift(5) > self.df['close'].shift(8)) & (self.df['high'].shift(5) > self.df['high'].shift(10)) \
& (self.df['close'].shift(6) > self.df['close'].shift(9)) & (self.df['high'].shift(6) > self.df['high'].shift(11)) \
& (self.df['close'].shift(7) > self.df['close'].shift(10)) & (self.df['high'].shift(7) > self.df['high'].shift(12))
def addCandleAstralSell(self) -> None:
self.df['astral_sell'] = self.candleAstralSell()
def changePct(self) -> DataFrame:
"""Close change percentage"""
close_pc = self.df['close'] / self.df['close'].shift(1) - 1
close_pc = close_pc.fillna(0)
return close_pc
def addChangePct(self) -> None:
"""Adds the close percentage to the DataFrame"""
self.df['close_pc'] = self.changePct()
# cumulative returns
self.df['close_cpc'] = (1 + self.df['close_pc']).cumprod()
def cumulativeMovingAverage(self) -> float:
"""Calculates the Cumulative Moving Average (CMA)"""
return self.df.close.expanding().mean()
def addCMA(self) -> None:
"""Adds the Cumulative Moving Average (CMA) to the DataFrame"""
self.df['cma'] = self.cumulativeMovingAverage()
def exponentialMovingAverage(self, period: int) -> float:
"""Calculates the Exponential Moving Average (EMA)"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 5 or period > 200:
raise ValueError('Period is out of range')
if len(self.df) < period:
raise Exception('Data range too small.')
return self.df.close.ewm(span=period, adjust=False).mean()
def addEMA(self, period: int) -> None:
"""Adds the Exponential Moving Average (EMA) the DateFrame"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 5 or period > 200:
raise ValueError('Period is out of range')
if len(self.df) < period:
raise Exception('Data range too small.')
self.df['ema' + str(period)] = self.exponentialMovingAverage(period)
def calculateRelativeStrengthIndex(self, series: Series, interval: int=14) -> float:
"""Calculates the RSI on a Pandas series of closing prices."""
if not isinstance(series, Series):
raise TypeError('Pandas Series required.')
if not isinstance(interval, int):
raise TypeError('Interval integer required.')
if(len(series) < interval):
raise IndexError('Pandas Series smaller than interval.')
diff = series.diff(1).dropna()
sum_gains = 0 * diff
sum_gains[diff > 0] = diff[diff > 0]
avg_gains = sum_gains.ewm(com=interval-1, min_periods=interval).mean()
sum_losses = 0 * diff
sum_losses[diff < 0] = diff[diff < 0]
avg_losses = sum_losses.ewm(com=interval-1, min_periods=interval).mean()
rs = abs(avg_gains / avg_losses)
rsi = 100 - 100 / (1 + rs)
return rsi
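# Descriptive note on the calculation above (added for clarity, not original code):
# RS  = average gain / average loss over the interval
# RSI = 100 - 100 / (1 + RS)
# e.g. an average gain of 1.0 against an average loss of 0.5 gives RS = 2 and
# RSI = 100 - 100 / 3, roughly 66.67.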
def calculateStochasticRelativeStrengthIndex(self, series: Series, interval: int=14) -> float:
"""Calculates the Stochastic RSI on a Pandas series of RSI"""
if not isinstance(series, Series):
raise TypeError('Pandas Series required.')
if not isinstance(interval, int):
raise TypeError('Interval integer required.')
if(len(series) < interval):
raise IndexError('Pandas Series smaller than interval.')
return (series - series.rolling(interval).min()) / (series.rolling(interval).max() - series.rolling(interval).min())
def addFibonacciBollingerBands(self, interval: int=20, multiplier: int=3) -> None:
"""Adds Fibonacci Bollinger Bands."""
if not isinstance(interval, int):
raise TypeError('Interval integer required.')
if not isinstance(multiplier, int):
raise TypeError('Multiplier integer required.')
tp = (self.df['high'] + self.df['low'] + self.df['close']) / 3
sma = tp.rolling(interval).mean()
sd = multiplier * tp.rolling(interval).std()
sma = sma.fillna(0)
sd = sd.fillna(0)
self.df['fbb_mid'] = sma
self.df['fbb_upper0_236'] = sma + (0.236 * sd)
self.df['fbb_upper0_382'] = sma + (0.382 * sd)
self.df['fbb_upper0_5'] = sma + (0.5 * sd)
self.df['fbb_upper0_618'] = sma + (0.618 * sd)
self.df['fbb_upper0_764'] = sma + (0.764 * sd)
self.df['fbb_upper1'] = sma + (1 * sd)
self.df['fbb_lower0_236'] = sma - (0.236 * sd)
self.df['fbb_lower0_382'] = sma - (0.382 * sd)
self.df['fbb_lower0_5'] = sma - (0.5 * sd)
self.df['fbb_lower0_618'] = sma - (0.618 * sd)
self.df['fbb_lower0_764'] = sma - (0.764 * sd)
self.df['fbb_lower1'] = sma - (1 * sd)
def movingAverageConvergenceDivergence(self) -> DataFrame:
"""Calculates the Moving Average Convergence Divergence (MACD)"""
if len(self.df) < 26:
raise Exception('Data range too small.')
if not self.df['ema12'].dtype == 'float64' and not self.df['ema12'].dtype == 'int64':
raise AttributeError("Pandas DataFrame 'ema12' column not int64 or float64.")
if not self.df['ema26'].dtype == 'float64' and not self.df['ema26'].dtype == 'int64':
raise AttributeError("Pandas DataFrame 'ema26' column not int64 or float64.")
df = DataFrame()
df['macd'] = self.df['ema12'] - self.df['ema26']
df['signal'] = df['macd'].ewm(span=9, adjust=False).mean()
return df
def addMACD(self) -> None:
"""Adds the Moving Average Convergence Divergence (MACD) to the DataFrame"""
df = self.movingAverageConvergenceDivergence()
self.df['macd'] = df['macd']
self.df['signal'] = df['signal']
def onBalanceVolume(self) -> ndarray:
"""Calculate On-Balance Volume (OBV)"""
return where(self.df['close'] == self.df['close'].shift(1), 0, where(self.df['close'] > self.df['close'].shift(1), self.df['volume'],
where(self.df['close'] < self.df['close'].shift(1), -self.df['volume'], self.df.iloc[0]['volume']))).cumsum()
def addOBV(self) -> None:
"""Add the On-Balance Volume (OBV) to the DataFrame"""
self.df['obv'] = self.onBalanceVolume()
self.df['obv_pc'] = self.df['obv'].pct_change() * 100
self.df['obv_pc'] = round(self.df['obv_pc'].fillna(0), 2)
def relativeStrengthIndex(self, period) -> DataFrame:
"""Calculate the Relative Strength Index (RSI)"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
# calculate relative strength index
rsi = self.calculateRelativeStrengthIndex(self.df['close'], period)
# default to midway-50 for first entries
rsi = rsi.fillna(50)
return rsi
def stochasticRelativeStrengthIndex(self, period) -> DataFrame:
"""Calculate the Stochastic Relative Strength Index (Stochastic RSI)"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
if 'rsi' + str(period) not in self.df:
self.addRSI(period)
# calculate relative strength index
stochrsi = self.calculateStochasticRelativeStrengthIndex(self.df['rsi' + str(period)], period)
# default to midway (0.5) for first entries
stochrsi = stochrsi.fillna(0.5)
return stochrsi
def williamsR(self, period) -> DataFrame:
"""Calculate the Williams %R"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
return (self.df['high'].rolling(14).max() - self.df['close']) / (self.df['high'].rolling(14).max() - self.df['low'].rolling(14).min())
def addRSI(self, period: int) -> None:
"""Adds the Relative Strength Index (RSI) to the DataFrame"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
self.df['rsi' + str(period)] = self.relativeStrengthIndex(period)
self.df['rsi' + str(period)] = self.df['rsi' + str(period)].replace(nan, 50)
def addStochasticRSI(self, period: int) -> None:
"""Adds the Stochastic Relative Strength Index (RSI) to the DataFrame"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
self.df['stochrsi' + str(period)] = self.stochasticRelativeStrengthIndex(period)
self.df['stochrsi' + str(period)] = self.df['stochrsi' + str(period)].replace(nan, 0.5)
def addWilliamsR(self, period: int) -> None:
"""Adds the Willams %R to the DataFrame"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 7 or period > 21:
raise ValueError('Period is out of range')
self.df['williamsr' + str(period)] = self.williamsR(period)
self.df['williamsr' + str(period)] = self.df['williamsr' + str(period)].replace(nan, -50)
def seasonalARIMAModel(self) -> SARIMAXResultsWrapper:
"""Returns the Seasonal ARIMA Model for price predictions"""
# hyperparameters for SARIMAX
model = SARIMAX(self.df['close'], trend='n', order=(0,1,0), seasonal_order=(1,1,1,12))
return model.fit(disp=-1)
def seasonalARIMAModelFittedValues(self): # TODO: annotate return type
"""Returns the Seasonal ARIMA Model for price predictions"""
return self.seasonalARIMAModel().fittedvalues
def seasonalARIMAModelPrediction(self, minutes: int=180) -> tuple:
"""Returns seasonal ARIMA model prediction
Parameters
----------
minutes : int
Number of minutes to predict
"""
if not isinstance(minutes, int):
raise TypeError('Prediction minutes is not numeric.')
if minutes < 1 or minutes > 4320:
raise ValueError('Prediction minutes is out of range')
results_ARIMA = self.seasonalARIMAModel()
start_ts = self.df.last_valid_index()
end_ts = start_ts + timedelta(minutes=minutes)
pred = results_ARIMA.predict(start=str(start_ts), end=str(end_ts), dynamic=True)
try:
if len(pred) == 0:
df_last = self.df['close'].tail(1)
return (str(df_last.index.values[0]).replace('T', ' ').replace('.000000000', ''), df_last.values[0])
else:
df_last = pred.tail(1)
return (str(df_last.index.values[0]).replace('T', ' ').replace('.000000000', ''), df_last.values[0])
except Exception:
return None
return None
def simpleMovingAverage(self, period: int) -> float:
"""Calculates the Simple Moving Average (SMA)"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 5 or period > 200:
raise ValueError('Period is out of range')
if len(self.df) < period:
raise Exception('Data range too small.')
return self.df.close.rolling(period, min_periods=1).mean()
def addSMA(self, period: int) -> None:
"""Add the Simple Moving Average (SMA) to the DataFrame"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 5 or period > 200:
raise ValueError('Period is out of range')
if len(self.df) < period:
raise Exception('Data range too small.')
self.df['sma' + str(period)] = self.simpleMovingAverage(period)
def addGoldenCross(self) -> None:
"""Add Golden Cross SMA50 over SMA200"""
if 'sma50' not in self.df:
self.addSMA(50)
if 'sma200' not in self.df:
self.addSMA(200)
self.df['goldencross'] = self.df['sma50'] > self.df['sma200']
def addDeathCross(self) -> None:
"""Add Death Cross SMA50 over SMA200"""
if 'sma50' not in self.df:
self.addSMA(50)
if 'sma200' not in self.df:
self.addSMA(200)
self.df['deathcross'] = self.df['sma50'] < self.df['sma200']
def addElderRayIndex(self) -> None:
"""Add Elder Ray Index"""
if 'ema13' not in self.df:
self.addEMA(13)
self.df['elder_ray_bull'] = self.df['high'] - self.df['ema13']
self.df['elder_ray_bear'] = self.df['low'] - self.df['ema13']
# bear power’s value is negative but increasing (i.e. becoming less bearish)
# bull power’s value is increasing (i.e. becoming more bullish)
self.df['eri_buy'] = ((self.df['elder_ray_bear'] < 0) & (self.df['elder_ray_bear'] > self.df['elder_ray_bear'].shift(1))) | ((self.df['elder_ray_bull'] > self.df['elder_ray_bull'].shift(1)))
# bull power’s value is positive but decreasing (i.e. becoming less bullish)
# bear power’s value is decreasing (i.e., becoming more bearish)
self.df['eri_sell'] = ((self.df['elder_ray_bull'] > 0) & (self.df['elder_ray_bull'] < self.df['elder_ray_bull'].shift(1))) | ((self.df['elder_ray_bear'] < self.df['elder_ray_bear'].shift(1)))
def getSupportResistanceLevels(self) -> Series:
"""Calculate the Support and Resistance Levels"""
self.levels = []
self.__calculateSupportResistenceLevels()
levels_ts = {}
for level in self.levels:
levels_ts[self.df.index[level[0]]] = level[1]
# add the support levels to the DataFrame
return Series(levels_ts)
def printSupportResistanceLevel(self, price: float=0) -> None:
if isinstance(price, int) or isinstance(price, float):
df = self.getSupportResistanceLevels()
if len(df) > 0:
df_last = df.tail(1)
if float(df_last[0]) < price:
Logger.info(' Support level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]))
elif float(df_last[0]) > price:
Logger.info(' Resistance level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]))
else:
Logger.info(' Support/Resistance level of ' + str(df_last[0]) + ' formed at ' + str(df_last.index[0]))
def getResistance(self, price: float=0) -> float:
if isinstance(price, int) or isinstance(price, float):
if price > 0:
sr = self.getSupportResistanceLevels()
for r in sr.sort_values():
if r > price:
return r
return price
def getFibonacciUpper(self, price: float=0) -> float:
if isinstance(price, int) or isinstance(price, float):
if price > 0:
fb = self.getFibonacciRetracementLevels()
for f in fb.values():
if f > price:
return f
return price
def getTradeExit(self, price: float=0) -> float:
if isinstance(price, int) or isinstance(price, float):
if price > 0:
r = self.getResistance(price)
f = self.getFibonacciUpper(price)
if price < r and price < f:
r_margin = ((r - price) / price) * 100
f_margin = ((f - price) / price) * 100
if r_margin > 1 and f_margin > 1 and r <= f:
return r
elif r_margin > 1 and f_margin > 1 and f <= r:
return f
elif r_margin > 1 and f_margin < 1:
return r
elif f_margin > 1 and r_margin < 1:
return f
return price
def printSupportResistanceFibonacciLevels(self, price: float=0) -> str:
if isinstance(price, int) or isinstance(price, float):
if price > 0:
sr = self.getSupportResistanceLevels()
s = price
for r in sr.sort_values():
if r > price:
fb = self.getFibonacciRetracementLevels()
l = price
for b in fb.values():
if b > price:
return 'support: ' + str(s) + ', resistance: ' + str(r) + ', fibonacci (l): ' + str(l) + ', fibonacci (u): ' + str(b)
else:
l = b
break
else:
s = r
if len(sr) > 1 and sr.iloc[-1] < price:
fb = self.getFibonacciRetracementLevels()
l = price
for b in fb.values():
if b > price:
return 'support: ' + str(sr.iloc[-1]) + ', fibonacci (l): ' + str(l) + ', fibonacci (u): ' + str(b)
else:
l = b
return ''
def addEMABuySignals(self) -> None:
"""Adds the EMA12/EMA26 buy and sell signals to the DataFrame"""
if not isinstance(self.df, DataFrame):
raise TypeError('Pandas DataFrame required.')
if not 'close' in self.df.columns:
raise AttributeError("Pandas DataFrame 'close' column required.")
if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':
raise AttributeError(
"Pandas DataFrame 'close' column not int64 or float64.")
if not 'ema8' in self.df.columns:
self.addEMA(8)
if not 'ema12' in self.df.columns:
self.addEMA(12)
if not 'ema26' in self.df.columns:
self.addEMA(26)
# true if EMA8 is above the EMA12
self.df['ema8gtema12'] = self.df.ema8 > self.df.ema12
# true if the current frame is where EMA8 crosses over above
self.df['ema8gtema12co'] = self.df.ema8gtema12.ne(self.df.ema8gtema12.shift())
self.df.loc[self.df['ema8gtema12'] == False, 'ema8gtema12co'] = False
# true if the EMA8 is below the EMA12
self.df['ema8ltema12'] = self.df.ema8 < self.df.ema12
# true if the current frame is where EMA8 crosses over below
self.df['ema8ltema12co'] = self.df.ema8ltema12.ne(self.df.ema8ltema12.shift())
self.df.loc[self.df['ema8ltema12'] == False, 'ema8ltema12co'] = False
# true if EMA12 is above the EMA26
self.df['ema12gtema26'] = self.df.ema12 > self.df.ema26
# true if the current frame is where EMA12 crosses over above
self.df['ema12gtema26co'] = self.df.ema12gtema26.ne(self.df.ema12gtema26.shift())
self.df.loc[self.df['ema12gtema26'] == False, 'ema12gtema26co'] = False
# true if the EMA12 is below the EMA26
self.df['ema12ltema26'] = self.df.ema12 < self.df.ema26
# true if the current frame is where EMA12 crosses over below
self.df['ema12ltema26co'] = self.df.ema12ltema26.ne(self.df.ema12ltema26.shift())
self.df.loc[self.df['ema12ltema26'] == False, 'ema12ltema26co'] = False
def addSMABuySignals(self) -> None:
"""Adds the SMA50/SMA200 buy and sell signals to the DataFrame"""
if not isinstance(self.df, DataFrame):
raise TypeError('Pandas DataFrame required.')
if not 'close' in self.df.columns:
raise AttributeError("Pandas DataFrame 'close' column required.")
if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':
raise AttributeError(
"Pandas DataFrame 'close' column not int64 or float64.")
if 'sma50' not in self.df.columns or 'sma200' not in self.df.columns:
self.addSMA(50)
self.addSMA(200)
# true if SMA50 is above the SMA200
self.df['sma50gtsma200'] = self.df.sma50 > self.df.sma200
# true if the current frame is where SMA50 crosses over above
self.df['sma50gtsma200co'] = self.df.sma50gtsma200.ne(self.df.sma50gtsma200.shift())
self.df.loc[self.df['sma50gtsma200'] == False, 'sma50gtsma200co'] = False
# true if the SMA50 is below the SMA200
self.df['sma50ltsma200'] = self.df.sma50 < self.df.sma200
# true if the current frame is where SMA50 crosses over below
self.df['sma50ltsma200co'] = self.df.sma50ltsma200.ne(self.df.sma50ltsma200.shift())
self.df.loc[self.df['sma50ltsma200'] == False, 'sma50ltsma200co'] = False
def addMACDBuySignals(self) -> None:
"""Adds the MACD/Signal buy and sell signals to the DataFrame"""
if not isinstance(self.df, DataFrame):
raise TypeError('Pandas DataFrame required.')
if not 'close' in self.df.columns:
raise AttributeError("Pandas DataFrame 'close' column required.")
if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':
raise AttributeError("Pandas DataFrame 'close' column not int64 or float64.")
if 'macd' not in self.df.columns or 'signal' not in self.df.columns:
self.addMACD()
self.addOBV()
# true if MACD is above the Signal
self.df['macdgtsignal'] = self.df.macd > self.df.signal
# true if the current frame is where MACD crosses over above
self.df['macdgtsignalco'] = self.df.macdgtsignal.ne(self.df.macdgtsignal.shift())
self.df.loc[self.df['macdgtsignal'] == False, 'macdgtsignalco'] = False
# true if the MACD is below the Signal
self.df['macdltsignal'] = self.df.macd < self.df.signal
# true if the current frame is where MACD crosses over below
self.df['macdltsignalco'] = self.df.macdltsignal.ne(self.df.macdltsignal.shift())
self.df.loc[self.df['macdltsignal'] == False, 'macdltsignalco'] = False
def getFibonacciRetracementLevels(self, price: float=0) -> dict:
# validates price is numeric
if not isinstance(price, int) and not isinstance(price, float):
raise TypeError('Optional price is not numeric.')
price_min = self.df.close.min()
price_max = self.df.close.max()
diff = price_max - price_min
data = {}
if price != 0 and (price <= price_min):
data['ratio1'] = float(self.__truncate(price_min, 2))
elif price == 0:
data['ratio1'] = float(self.__truncate(price_min, 2))
if price != 0 and (price > price_min) and (price <= (price_max - 0.768 * diff)):
data['ratio1'] = float(self.__truncate(price_min, 2))
data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2))
elif price == 0:
data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2))
if price != 0 and (price > (price_max - 0.768 * diff)) and (price <= (price_max - 0.618 * diff)):
data['ratio0_768'] = float(self.__truncate(price_max - 0.768 * diff, 2))
data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2))
elif price == 0:
data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2))
if price != 0 and (price > (price_max - 0.618 * diff)) and (price <= (price_max - 0.5 * diff)):
data['ratio0_618'] = float(self.__truncate(price_max - 0.618 * diff, 2))
data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))
elif price == 0:
data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))
if price != 0 and (price > (price_max - 0.5 * diff)) and (price <= (price_max - 0.382 * diff)):
data['ratio0_5'] = float(self.__truncate(price_max - 0.5 * diff, 2))
data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))
elif price == 0:
data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))
if price != 0 and (price > (price_max - 0.382 * diff)) and (price <= (price_max - 0.286 * diff)):
data['ratio0_382'] = float(self.__truncate(price_max - 0.382 * diff, 2))
data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2))
elif price == 0:
data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2))
if price != 0 and (price > (price_max - 0.286 * diff)) and (price <= price_max):
data['ratio0_286'] = float(self.__truncate(price_max - 0.286 * diff, 2))
data['ratio0'] = float(self.__truncate(price_max, 2))
elif price == 0:
data['ratio0'] = float(self.__truncate(price_max, 2))
if price != 0 and (price < (price_max + 0.272 * diff)) and (price >= price_max):
data['ratio0'] = float(self.__truncate(price_max, 2))
data['ratio1_272'] = float(self.__truncate(price_max + 0.272 * diff, 2))
elif price == 0:
data['ratio1_272'] = float(self.__truncate(price_max + 0.272 * diff, 2))
if price != 0 and (price < (price_max + 0.414 * diff)) and (price >= (price_max + 0.272 * diff)):
data['ratio1_272'] = float(self.__truncate(price_max + 0.272 * diff, 2))
data['ratio1_414'] = float(self.__truncate(price_max + 0.414 * diff, 2))
elif price == 0:
data['ratio1_414'] = float(self.__truncate(price_max + 0.414 * diff, 2))
if price != 0 and (price < (price_max + 0.618 * diff)) and (price >= (price_max + 0.414 * diff)):
data['ratio1_618'] = float(self.__truncate(price_max + 0.618 * diff, 2))
elif price == 0:
data['ratio1_618'] = float(self.__truncate(price_max + 0.618 * diff, 2))
return data
def saveCSV(self, filename: str='tradingdata.csv') -> None:
"""Saves the DataFrame to an uncompressed CSV."""
p = compile(r"^[\w\-. ]+$")
if not p.match(filename):
raise TypeError('Filename required.')
if not isinstance(self.df, DataFrame):
raise TypeError('Pandas DataFrame required.')
try:
self.df.to_csv(filename)
except OSError:
Logger.critical(f'Unable to save: {filename}')
def __calculateSupportResistenceLevels(self):
"""Support and Resistance levels. (private function)"""
for i in range(2, self.df.shape[0] - 2):
if self.__isSupport(self.df, i):
l = self.df['low'][i]
if self.__isFarFromLevel(l):
self.levels.append((i, l))
elif self.__isResistance(self.df, i):
l = self.df['high'][i]
if self.__isFarFromLevel(l):
self.levels.append((i, l))
return self.levels
def __isSupport(self, df, i) -> bool:
"""Is support level? (private function)"""
c1 = df['low'][i] < df['low'][i - 1]
c2 = df['low'][i] < df['low'][i + 1]
c3 = df['low'][i + 1] < df['low'][i + 2]
c4 = df['low'][i - 1] < df['low'][i - 2]
support = c1 and c2 and c3 and c4
return support
def __isResistance(self, df, i) -> bool:
"""Is resistance level? (private function)"""
c1 = df['high'][i] > df['high'][i - 1]
c2 = df['high'][i] > df['high'][i + 1]
c3 = df['high'][i + 1] > df['high'][i + 2]
c4 = df['high'][i - 1] > df['high'][i - 2]
resistance = c1 and c2 and c3 and c4
return resistance
def __isFarFromLevel(self, l) -> float:
"""Is far from support level? (private function)"""
s = mean(self.df['high'] - self.df['low'])
return np_sum([abs(l-x) < s for x in self.levels]) == 0
def __truncate(self, f, n) -> float:
return floor(f * 10 ** n) / 10 ** n
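# Minimal usage sketch (candles_df is a placeholder for a caller-supplied OHLCV
# DataFrame with the columns required by __init__; only methods defined above
# are used):
# >>> ta = TechnicalAnalysis(candles_df)
# >>> ta.addEMA(12); ta.addEMA(26); ta.addMACD()
# >>> ta.getDataFrame()[['close', 'macd', 'signal']].tail()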
| 45.701933
| 199
| 0.571263
|
c89cb6684d0be0cacbc2a151b33f7b9b6cac9277
| 2,638
|
py
|
Python
|
examples/cifar3/inference.py
|
riyasavla/ml601_hw9
|
dcec2e5644b41d85303bdaf29da8e81f8a5e29c3
|
[
"BSD-2-Clause"
] | null | null | null |
examples/cifar3/inference.py
|
riyasavla/ml601_hw9
|
dcec2e5644b41d85303bdaf29da8e81f8a5e29c3
|
[
"BSD-2-Clause"
] | null | null | null |
examples/cifar3/inference.py
|
riyasavla/ml601_hw9
|
dcec2e5644b41d85303bdaf29da8e81f8a5e29c3
|
[
"BSD-2-Clause"
] | null | null | null |
import caffe
import lmdb
import numpy as np
from caffe.proto import caffe_pb2
import scipy
from scipy import io
import csv
import sys, getopt
# mean_file = '/Users/riya/Downloads/mean.binaryproto' #'/Users/riya/caffe/examples/cifar3/mean.binaryproto'
# model = "/Users/riya/Downloads/cifar10_full_iter_60000.caffemodel.h5" #'/Users/riya/caffe/examples/cifar3/cifar10_quick_iter_5000.caffemodel.h5'
# deploy_net = '/Users/riya/caffe/examples/cifar3/deploy.prototxt'
ch = 3
h_in = 32
w_in = 32
# Per channel ZCA whitening
def my_zca(data, cov_dict):
eps = 0.000000001
new_data = np.zeros(data.shape)
for c in xrange(ch):
channel = data[:, c, :]
#print "channel" , channel.shape
cov = cov_dict[str(c)]
U,S,V = np.linalg.svd(cov)
channel_rot = np.dot(channel, U)
channel_rot_cov = (np.dot(channel_rot.T, channel_rot)/ channel_rot.shape[0])
#plt.show(imshow(np.dot(channel_rot.T, channel_rot)/ channel_rot.shape[0]))
#print "CHECK DIAG ", np.array_equal(np.argmax(channel_rot_cov, axis =1), np.argmax(channel_rot_cov, axis =1))
channel_white = channel_rot / (np.sqrt(S) + eps)
channel_white = np.dot(channel_white, U.T)
new_data[:, c, :] = channel_white
return new_data
def main(argv):
mean_file = ''
zca_file = ''
model = ''
deploy_net = ''
opts, args = getopt.getopt(argv,"n:z:m:d:",["mean=", "zca=", "model=","deploy="])
for opt, arg in opts:
if opt in ("-n", "--mean"):
mean_file = str(arg)
elif opt in ("-z", "--zca"):
zca_file = str(arg)
elif opt in ("-m", "--model"):
model = str(arg)
elif opt in ("-d", "--deploy"):
deploy_net = str(arg)
ch = 3
h_in = 32
w_in = 32
mean = (scipy.io.loadmat(mean_file)['data'])
cov_dict = scipy.io.loadmat(zca_file)
net = caffe.Net(deploy_net, model, caffe.TEST)
print net.blobs['data'].data.shape
path = '/Users/riya/Downloads/data_mat/test_data.mat'
data = scipy.io.loadmat(path)['data']
N = data.shape[0]
test_data = np.zeros((N, ch*h_in*w_in))
for i in xrange(N):
test_data[i] = data[i]
test_data -= mean
test_data = test_data.reshape((N, ch, h_in*w_in))
# Per channel ZCA whitening
test_data = my_zca(test_data, cov_dict)
with open('results.csv', 'wb') as csvfile:
filewriter = csv.writer(csvfile, delimiter=',')
for i in xrange(N):
img = X = test_data[i].reshape((ch, h_in, w_in))
net.blobs['data'].data[...] = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
out = net.forward()
pred_probas = out['prob']
filewriter.writerow([pred_probas.argmax()])
if __name__ == "__main__":
main(sys.argv[1:])
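# Illustrative invocation (every path below is a placeholder):
# python inference.py -n mean.mat -z zca.mat -m model.caffemodel.h5 -d deploy.prototxt
# The short flags map to --mean, --zca, --model and --deploy as parsed by getopt above.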
| 28.673913
| 146
| 0.655042
|
10973174d5a35a21757b354168be7aaf62c0df8a
| 5,625
|
py
|
Python
|
src/protean/adapters/cache/memory.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-09-26T04:54:09.000Z
|
2022-03-30T01:01:45.000Z
|
src/protean/adapters/cache/memory.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 261
|
2018-09-20T09:53:33.000Z
|
2022-03-08T17:43:04.000Z
|
src/protean/adapters/cache/memory.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-07-22T07:09:15.000Z
|
2021-02-02T05:17:23.000Z
|
import collections
import re
import time
from threading import RLock
from typing import Optional, Union
from protean.core.view import BaseView
from protean.reflection import id_field
try:
# Python 3.8+
collectionsAbc = collections.abc
except AttributeError: # pragma: no cover
# Until Python 3.7
collectionsAbc = collections
from protean.port.cache import BaseCache
from protean.utils import Cache
from protean.utils.inflection import underscore
class TTLDict(collectionsAbc.MutableMapping):
def __init__(self, default_ttl, *args, **kwargs):
self._default_ttl = default_ttl
self._values = {}
self._lock = RLock()
self.update(*args, **kwargs)
def __repr__(self):
return "<TTLDict@%#08x; ttl=%r, v=%r;>" % (
id(self),
self._default_ttl,
self._values,
)
def set_ttl(self, key, ttl, now=None):
""" Set TTL for the given key """
if now is None:
now = time.time()
with self._lock:
_expire, value = self._values[key]
self._values[key] = (now + ttl, value)
def get_ttl(self, key, now=None):
""" Return remaining TTL for a key """
if now is None:
now = time.time()
with self._lock:
expire, _value = self._values[key]
return expire - now
def expire_at(self, key, timestamp):
""" Set the key expire timestamp """
with self._lock:
_expire, value = self._values[key]
self._values[key] = (timestamp, value)
def is_expired(self, key, now=None, remove=False):
""" Check if key has expired """
with self._lock:
if now is None:
now = time.time()
expire, _value = self._values[key]
if expire is None:
return False
expired = expire < now
if expired and remove:
self.__delitem__(key)
return expired
def __len__(self):
with self._lock:
for key in self._values.keys():
self.is_expired(key, remove=True)
return len(self._values)
def __iter__(self):
with self._lock:
for key in self._values.keys():
if not self.is_expired(key):
yield key
def __setitem__(self, key, value):
with self._lock:
if self._default_ttl is None:
expire = None
else:
expire = time.time() + self._default_ttl
self._values[key] = (expire, value)
def __delitem__(self, key):
with self._lock:
del self._values[key]
def __getitem__(self, key):
with self._lock:
self.is_expired(key, remove=True)
return self._values[key][1]
class MemoryCache(BaseCache):
def __init__(self, name, domain, conn_info: dict):
"""Initialize Cache with Connection/Adapter details"""
# In case of `MemoryCache`, the `CACHE` value will always be `MEMORY`.
conn_info["CACHE"] = Cache.MEMORY.value
super().__init__(name, domain, conn_info)
# The Data Cache
self._db = TTLDict(self.conn_info.get("TTL") or 300)
self._lock = RLock()
def ping(self):
"""Always returns True for memory cache"""
return True
def get_connection(self):
"""Get the connection object for the repository"""
return self._db._values
def add(self, view: BaseView, ttl: Optional[Union[int, float]] = None) -> None:
"""Add view record to cache
KEY: View ID
Value: View Data (derived from `to_dict()`)
TTL is in seconds.
Args:
view (BaseView): View Instance containing data
ttl (int, float, optional): Timeout in seconds. Defaults to None.
"""
identifier = getattr(view, id_field(view).field_name)
key = f"{underscore(view.__class__.__name__)}:::{identifier}"
self._db[key] = view.to_dict()
if ttl:
self._db.set_ttl(key, ttl)
def get(self, key):
view_name = key.split(":::")[0]
view_cls = self._views[view_name]
value = self._db.get(key)
return view_cls(value) if value else None
def get_all(self, key_pattern, last_position=0, size=25):
# FIXME Handle Pagination with Last Position
# FIXME Handle Pagination with Size
view_name = key_pattern.split(":::")[0]
view_cls = self._views[view_name]
key_list = self._db.keys()
regex = re.compile(key_pattern)
results = list(filter(regex.match, key_list))
return [view_cls(self._db.get(key)) for key in results]
def count(self, key_pattern):
key_list = self._db.keys()
regex = re.compile(key_pattern)
return len(list(filter(regex.match, key_list)))
def remove(self, view):
identifier = getattr(view, id_field(view).field_name)
key = f"{underscore(view.__class__.__name__)}:::{identifier}"
del self._db[key]
def remove_by_key(self, key):
del self._db[key]
def remove_by_key_pattern(self, key_pattern):
full_key_list = self._db.keys()
regex = re.compile(key_pattern)
keys_to_delete = list(filter(regex.match, full_key_list))
for key in keys_to_delete:
del self._db[key]
def flush_all(self):
self._db = TTLDict(self.conn_info.get("TTL") or 300)
def set_ttl(self, key, ttl):
self._db.set_ttl(key, ttl)
def get_ttl(self, key):
return self._db.get_ttl(key)
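# Minimal TTLDict sketch (illustrative; the 5 second TTL is an arbitrary example):
# >>> d = TTLDict(default_ttl=5)
# >>> d['token'] = 'abc'
# >>> round(d.get_ttl('token'))
# 5
# Expired entries are removed lazily on access, as in __getitem__ above.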
| 29.605263
| 83
| 0.588267
|
f7efda68c3bccc9bc78431210b23be4a7caade05
| 110
|
py
|
Python
|
src/inbus/shared/__init__.py
|
mlos/inbus
|
51778800740b18337b540e193a52f6faa7451067
|
[
"BSD-2-Clause"
] | 1
|
2017-11-09T20:09:31.000Z
|
2017-11-09T20:09:31.000Z
|
src/inbus/shared/__init__.py
|
mlos/inbus
|
51778800740b18337b540e193a52f6faa7451067
|
[
"BSD-2-Clause"
] | null | null | null |
src/inbus/shared/__init__.py
|
mlos/inbus
|
51778800740b18337b540e193a52f6faa7451067
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Maarten Los
# See LICENSE.rst for details.
| 22
| 32
| 0.654545
|
25e4bb46e67e751dc723c95800225006202e4f65
| 9,556
|
py
|
Python
|
sugarui/widgets/textfields.py
|
sugarsack/sugar-ui
|
0ba7b62f300adbd3290e0ef8d5cb45e26f44a446
|
[
"MIT"
] | null | null | null |
sugarui/widgets/textfields.py
|
sugarsack/sugar-ui
|
0ba7b62f300adbd3290e0ef8d5cb45e26f44a446
|
[
"MIT"
] | null | null | null |
sugarui/widgets/textfields.py
|
sugarsack/sugar-ui
|
0ba7b62f300adbd3290e0ef8d5cb45e26f44a446
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Text fields class.
These are used to display some data in the tables.
"""
import curses
import npyscreen
from sugarui.windows.floating import ErrorMessageForm
class ColoredTextField(npyscreen.Textfield):
"""
Colorised text field (highlighted data)
"""
syntax_highlighting = True
def __init__(self, *args, **kwargs):
npyscreen.Textfield.__init__(self, *args, **kwargs)
self.syntax_highlighting = True
self.highlight_map = {}
def colorise(self):
"""
Highlight every substring of the current value that is a key in
`highlight_map`, using the theme color it maps to.
:return:
"""
if self.value:
hldata = [curses.A_NORMAL for _ in range(len(self.value))]
for value in self.highlight_map:
offset = self.value.find(value)
if offset > -1:
hl_colorc = self.parent.theme_manager.findPair(self, self.highlight_map[value])
hldata = hldata[:offset] + [hl_colorc for _ in range(len(value))] + hldata[offset + len(value):]
self._highlightingdata = hldata
del hldata
def update(self, clear=True, cursor=True):
"""
On update.
:param clear:
:param cursor:
:return:
"""
self.colorise()
super(npyscreen.Textfield, self).update(clear, cursor)
class VisualTextField(npyscreen.Textfield):
"""
Text field with colored background.
"""
def __init__(self, *args, **kwargs):
kwargs["color"] = "CAUTIONHL"
kwargs["max_height"] = 1
npyscreen.Textfield.__init__(self, *args, **kwargs)
def _print(self):
"""
Create text content within the field.
:return:
"""
self._field_space()
width_of_char_to_print = 0
string_to_print = self._get_string_to_print()
if not string_to_print:
return None
string_to_print = self.display_value(self.value)[
self.begin_at:self.maximum_string_length + self.begin_at - self.left_margin]
column = 0
place_in_string = 0
if self.syntax_highlighting:
self.update_highlighting(start=self.begin_at,
end=self.maximum_string_length + self.begin_at - self.left_margin)
while column <= (self.maximum_string_length - self.left_margin):
if not string_to_print or place_in_string > len(string_to_print) - 1:
break
width_of_char_to_print = self.find_width_of_char(string_to_print[place_in_string])
if column - 1 + width_of_char_to_print > self.maximum_string_length:
break
try:
highlight = self._highlightingdata[self.begin_at + place_in_string]
except IndexError:
highlight = curses.A_NORMAL
self.parent.curses_pad.addstr(self.rely, self.relx + column + self.left_margin,
self._print_unicode_char(string_to_print[place_in_string]),
highlight)
column += self.find_width_of_char(string_to_print[place_in_string])
place_in_string += 1
else:
color = self._get_color()
while column <= (self.maximum_string_length - self.left_margin):
if not string_to_print or place_in_string > len(string_to_print) - 1:
if self.highlight_whole_widget:
self.parent.curses_pad.addstr(self.rely, self.relx + column + self.left_margin, ' ', color)
column += width_of_char_to_print
place_in_string += 1
continue
else:
break
width_of_char_to_print = self.find_width_of_char(string_to_print[place_in_string])
if column - 1 + width_of_char_to_print > self.maximum_string_length:
break
self.parent.curses_pad.addstr(self.rely, self.relx + column + self.left_margin,
self._print_unicode_char(string_to_print[place_in_string]), color)
column += width_of_char_to_print
place_in_string += 1
def _field_space(self):
"""
Create a field space.
:return:
"""
line = " " * self.width
self.add_line(self.rely, self.relx, line, self.make_attributes_list(
line, self.parent.theme_manager.findPair(
self, self.color) | curses.A_STANDOUT | curses.A_BOLD | curses.A_DIM), self.width)
def _get_color(self):
"""
Get color.
:return:
"""
if self.do_colors():
if self.show_bold and self.color == 'DEFAULT':
color = self.parent.theme_manager.findPair(self, 'BOLD') | curses.A_BOLD
elif self.show_bold:
color = self.parent.theme_manager.findPair(self, self.color) | curses.A_BOLD
elif self.important:
color = self.parent.theme_manager.findPair(self, 'IMPORTANT') | curses.A_BOLD
else:
color = self.parent.theme_manager.findPair(self)
else:
if self.important or self.show_bold:
color = curses.A_BOLD
else:
color = curses.A_NORMAL
return color | curses.A_STANDOUT | curses.A_BOLD | curses.A_DIM
class RangeVisualTextField(VisualTextField):
"""
Visual text field that supports integer/float ranges.
"""
def __init__(self, screen, value, *args, range=(0, 100), **kwargs):
self.range = range
self._value = value
kwargs["screen"] = screen
VisualTextField.__init__(self, *args, **kwargs)
self.screen = screen
def get_value(self):
"""
Get numeric value.
:return:
"""
if self.value:
try:
value = (int if "." in self.value else float)(self.value)
except ValueError:
msg = "Value should be a number."
alert = ErrorMessageForm(msg, len(msg) + 6, 6, passive_text=True, name="Error")
alert.center_on_display()
alert.edit()
value = 0
else:
value = 0
return value
def check_value(self):
"""
Check if value is within the range.
:return:
"""
value = self.get_value()
if self.range:
low, high = self.range
if not low <= value <= high:
if not self.editing:
msg = "Value should be within the range from {} to {}.".format(*self.range)
alert = ErrorMessageForm(msg, len(msg) + 6, 6, passive_text=True, name="Error")
alert.center_on_display()
alert.edit()
def h_exit_down(self, _input):
"""
Exit editing mode downwards.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_down(_input)
self.check_value()
def h_exit_up(self, _input):
"""
Exit editing mode upwards.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_up(_input)
self.check_value()
def h_exit_left(self, _input):
"""
Exit editing mode left.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_left(_input)
self.check_value()
def h_exit_right(self, _input):
"""
Exit editing mode right.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_right(_input)
self.check_value()
def h_exit_escape(self, _input):
"""
Exit editing mode at ESC key.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_escape(_input)
self.check_value()
def h_exit_mouse(self, _input):
"""
Exit editing mode at mouse handler clicked elsewhere.
:param _input:
:return:
"""
super(RangeVisualTextField, self).h_exit_mouse(_input)
self.check_value()
class ConcealedVisualTextField(VisualTextField):
"""
Concealed text field (for passwords and sensitive info)
"""
MASK_CHAR = "\u25CF"
def __init__(self, screen, *args, mask=MASK_CHAR, **kwargs):
self.screen = screen
kwargs["screen"] = screen
VisualTextField.__init__(self, *args, **kwargs)
self._mask = mask or self.MASK_CHAR
        assert len(self._mask) == 1, "Mask should be one character."
def _print(self):
"""
Display value in the entry field.
:return:
"""
self._field_space()
color = self._get_color()
strlen = len(self.value)
if self.maximum_string_length < strlen:
tmp_x = self.relx
for i in range(self.maximum_string_length):
self.parent.curses_pad.addch(self.rely, tmp_x, self._mask, color)
tmp_x += 1
else:
tmp_x = self.relx
for i in range(strlen):
self.parent.curses_pad.addstr(self.rely, tmp_x, self._mask, color)
tmp_x += 1
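# A minimal standalone sketch of the per-character highlighting scheme that
# ColoredTextField.colorise() builds above (assumption: plain string markers
# stand in for the curses attributes used by the real widget).
if __name__ == "__main__":
    value = "state: running"
    highlight_map = {"running": "GOOD"}
    attrs = ["NORMAL"] * len(value)
    for token, marker in highlight_map.items():
        offset = value.find(token)
        if offset > -1:
            attrs = attrs[:offset] + [marker] * len(token) + attrs[offset + len(token):]
    print(attrs.count("GOOD"))  # 7 -> the characters of "running" get the marker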
| 32.838488
| 116
| 0.559962
|
ea41638d8a9db4519a4876b680772d3c3b29436f
| 3,112
|
py
|
Python
|
DataApp.py
|
DaniloGR91/Semana_DataScience_MinerandoDados
|
99b198c29b0d97e6ccaf1b07c633d0e641dae0eb
|
[
"MIT"
] | 1
|
2020-05-16T22:57:32.000Z
|
2020-05-16T22:57:32.000Z
|
DataApp.py
|
DaniloGR91/Semana_DataScience_MinerandoDados
|
99b198c29b0d97e6ccaf1b07c633d0e641dae0eb
|
[
"MIT"
] | 7
|
2020-05-16T17:50:23.000Z
|
2020-05-27T20:49:09.000Z
|
DataApp.py
|
DaniloGR91/Semana_DataScience_MinerandoDados
|
99b198c29b0d97e6ccaf1b07c633d0e641dae0eb
|
[
"MIT"
] | 1
|
2020-05-31T14:07:51.000Z
|
2020-05-31T14:07:51.000Z
|
import pandas as pd
import streamlit as st
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sklearn.ensemble import RandomForestRegressor
# Header
st.title('Boston House Prices')
st.markdown('A DataApp to show and predict Boston house prices')
# Load the dataframe
data = pd.read_csv('dataMLfinal.csv', index_col=0)
# DataFrame
st.header('DataFrame')
# Choice of dataset columns
cols = list(data.columns)
selCols = st.multiselect('Parameters for DataFrame: ',
cols,
default=['RM', 'LSTAT', 'MEDV'])
# Checkbox to show each column's description
paramDescr = open('boston.descr.txt', 'r')
if st.checkbox('Show parameters descriptions'):
st.text(paramDescr.read())
else:
paramDescr.close()
# Dataset preview
st.dataframe(data[selCols])
#### Distribution ####
st.header('Distribution')
# Histogram and boxplot visualization
selHist = st.selectbox(
'Data for distribution visualization:', cols)
# Function that builds the histogram and boxplot
def histBox(df, col):
fig = make_subplots(rows=1, cols=2)
fig.add_trace(go.Histogram(
        x=df[col], name='Histogram'),
row=1, col=1)
fig.add_trace(go.Box(
y=df[col], name='Boxplot'),
row=1, col=2)
fig.update_layout(height=600, width=800,
title_text=f'Distribution of {col}')
return fig
figHist = histBox(data, selHist)
st.write(figHist)
#### ScatterPlot ####
st.header('ScatterPlot')
# Scatterplot axis selection
selScat = []
selScat.append(st.selectbox('X axis of ScatterPlot', cols, index=4))
selScat.append(st.selectbox('Y axis of ScatterPlot', cols, index=1))
# Function that builds the scatterplot
def scatplot(df, col):
col = col
fig = go.Figure(layout={'xaxis': {'title': {'text': col[0]}},
'yaxis': {'title': {'text': col[1]}}})
fig.add_trace(go.Scatter(
x=df[col[0]],
y=df[col[1]],
mode='markers',
name='ScatterPlot'))
fig.update_layout(height=600, width=800,
title_text=f'ScatterPlot of {col[0]}x{col[1]}')
return fig
figScat = scatplot(data, selScat)
st.write(figScat)
#### Machine Learning Model ####
# Split x and y
x = data.drop('MEDV', axis=1)
y = data['MEDV']
# Train the model
model = RandomForestRegressor()
model.fit(x, y)
#### Sidebar for Predictions ####
st.sidebar.header('Price Prediction')
predCols = cols.copy()
predCols.remove('MEDV')
paramInput = []
maxParam = {}
for c in predCols:
if c == 'MEDV':
continue
maxcol = data[c].max() + data[c].quantile(0.25)
param = st.sidebar.number_input(c,
min_value=0.0,
max_value=maxcol,
value=data[c].median())
paramInput.append(param)
btnPredict = st.sidebar.button('Price Predict')
if btnPredict:
result = model.predict([paramInput])
st.sidebar.subheader('The predicted value is:')
result *= 1000
st.sidebar.markdown(f'US$ {result[0]:.2f}')
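# A minimal sanity-check sketch (an assumption, not part of the original app):
# predict the price of the "median house", mirroring the sidebar defaults above.
medianHouse = x.median().values
baselinePrice = model.predict([medianHouse])[0] * 1000
print(f'Baseline prediction for the median house: US$ {baselinePrice:.2f}')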
| 25.933333
| 69
| 0.633997
|
3445f04708fd647fe45afc07a37460751568d7e9
| 3,840
|
py
|
Python
|
E03 - Learning programs and models/Architectures/models/denseaspp.py
|
mialona/Stomatal-segmentation
|
149d469ec572c41a13d62149d7d62d6805d19697
|
[
"MIT"
] | null | null | null |
E03 - Learning programs and models/Architectures/models/denseaspp.py
|
mialona/Stomatal-segmentation
|
149d469ec572c41a13d62149d7d62d6805d19697
|
[
"MIT"
] | null | null | null |
E03 - Learning programs and models/Architectures/models/denseaspp.py
|
mialona/Stomatal-segmentation
|
149d469ec572c41a13d62149d7d62d6805d19697
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .segbase import SegBaseModel
from .fcn import _FCNHead
__all__ = ['DenseASPP']
class DenseASPP(SegBaseModel):
def __init__(self, nclass, backbone_name="resnet101"):
self.backbone_name = backbone_name
self.nclass = nclass
super(DenseASPP, self).__init__(backbone_name=self.backbone_name, nclass=self.nclass, need_backbone=True)
in_channels = self.backbone.last_inp_channels
self.head = _DenseASPPHead(in_channels, self.nclass, norm_layer=self.norm_layer)
self.__setattr__('decoder', ['head'])
def forward(self, x):
size = x.size()[2:]
_, _, c3, c4 = self.backbone(x)
x = self.head(c4)
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
return x
class _DenseASPPHead(nn.Module):
def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPHead, self).__init__()
self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs)
self.block = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(in_channels + 5 * 64, nclass, 1)
)
def forward(self, x):
x = self.dense_aspp_block(x)
return self.block(x)
class _DenseASPPConv(nn.Sequential):
def __init__(self, in_channels, inter_channels, out_channels, atrous_rate,
drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPConv, self).__init__()
self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)),
self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu1', nn.ReLU(True)),
self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)),
self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu2', nn.ReLU(True)),
self.drop_rate = drop_rate
def forward(self, x):
features = super(_DenseASPPConv, self).forward(x)
if self.drop_rate > 0:
features = F.dropout(features, p=self.drop_rate, training=self.training)
return features
class _DenseASPPBlock(nn.Module):
def __init__(self, in_channels, inter_channels1, inter_channels2,
norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPBlock, self).__init__()
self.aspp_3 = _DenseASPPConv(in_channels, inter_channels1, inter_channels2, 3, 0.1,
norm_layer, norm_kwargs)
self.aspp_6 = _DenseASPPConv(in_channels + inter_channels2 * 1, inter_channels1, inter_channels2, 6, 0.1,
norm_layer, norm_kwargs)
self.aspp_12 = _DenseASPPConv(in_channels + inter_channels2 * 2, inter_channels1, inter_channels2, 12, 0.1,
norm_layer, norm_kwargs)
self.aspp_18 = _DenseASPPConv(in_channels + inter_channels2 * 3, inter_channels1, inter_channels2, 18, 0.1,
norm_layer, norm_kwargs)
self.aspp_24 = _DenseASPPConv(in_channels + inter_channels2 * 4, inter_channels1, inter_channels2, 24, 0.1,
norm_layer, norm_kwargs)
def forward(self, x):
aspp3 = self.aspp_3(x)
x = torch.cat([aspp3, x], dim=1)
aspp6 = self.aspp_6(x)
x = torch.cat([aspp6, x], dim=1)
aspp12 = self.aspp_12(x)
x = torch.cat([aspp12, x], dim=1)
aspp18 = self.aspp_18(x)
x = torch.cat([aspp18, x], dim=1)
aspp24 = self.aspp_24(x)
x = torch.cat([aspp24, x], dim=1)
return x
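# A minimal shape-check sketch for the dense ASPP block alone (assumption: toy
# channel sizes chosen here; the full DenseASPP model additionally needs the
# SegBaseModel backbone, which is not exercised).
if __name__ == "__main__":
    block = _DenseASPPBlock(in_channels=32, inter_channels1=16, inter_channels2=8)
    dummy = torch.randn(2, 32, 16, 16)
    out = block(dummy)
    print(out.shape)  # torch.Size([2, 72, 16, 16]) -> 32 + 5 * 8 concatenated maps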
| 39.183673
| 120
| 0.636198
|
521d2a8dd91ada4ec091e0ce2ca790e5f9564dfe
| 8,906
|
py
|
Python
|
apero/recipes/nirps_ha/cal_shape_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | 1
|
2021-03-09T17:49:31.000Z
|
2021-03-09T17:49:31.000Z
|
apero/recipes/nirps_ha/cal_shape_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | 43
|
2020-10-06T18:42:24.000Z
|
2022-03-28T21:23:10.000Z
|
apero/recipes/nirps_ha/cal_shape_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-03-23 at 13:01
@author: cook
"""
from apero import core
from apero import lang
from apero.core import constants
from apero.core.core import drs_database
from apero.io import drs_fits
from apero.science.calib import general
from apero.science.calib import shape
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_shape_nirps_ha.py'
__INSTRUMENT__ = 'NIRPS_HA'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(directory=None, files=None, **kwargs):
"""
    Main function for cal_shape_nirps_ha.py
:param directory: string, the night name sub-directory
:param files: list of strings or string, the list of fp files
:param kwargs: any additional keywords
:type directory: str
:type files: list[str]
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(directory=directory, files=files, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = core.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return core.end_main(params, llmain, recipe, success)
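# A hypothetical call from another apero script (the night directory and file
# name below are placeholders; a real run needs a configured NIRPS_HA setup):
#
#     ll = main(directory='2020-01-01', files=['NIRPS_GEN_FP_FP_001.fits'])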
def __main__(recipe, params):
"""
Main code: should only call recipe and params (defined from main)
:param recipe:
:param params:
:return:
"""
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
mainname = __NAME__ + '._main()'
# get files
infiles = params['INPUTS']['FILES'][1]
# get list of filenames (for output)
rawfiles = []
for infile in infiles:
rawfiles.append(infile.basename)
# deal with input data from function
if 'files' in params['DATA_DICT']:
infiles = params['DATA_DICT']['files']
rawfiles = params['DATA_DICT']['rawfiles']
combine = params['DATA_DICT']['combine']
# combine input images if required
elif params['INPUT_COMBINE_IMAGES']:
# get combined file
infiles = [drs_fits.combine(params, recipe, infiles, math='median')]
combine = True
else:
combine = False
# get the number of infiles
num_files = len(infiles)
# ----------------------------------------------------------------------
# Loop around input files
# ----------------------------------------------------------------------
for it in range(num_files):
# ------------------------------------------------------------------
# add level to recipe log
log1 = recipe.log.add_level(params, 'num', it)
# ------------------------------------------------------------------
# set up plotting (no plotting before this)
recipe.plot.set_location(it)
# print file iteration progress
core.file_processing_update(params, it, num_files)
        # get this iteration's file
infile = infiles[it]
# get header from file instance
header = infile.header
# get calibrations for this data
drs_database.copy_calibrations(params, header)
# ------------------------------------------------------------------
# Correction of file
# ------------------------------------------------------------------
props, image = general.calibrate_ppfile(params, recipe, infile)
# ------------------------------------------------------------------
# Load master fp, shape dxmap and dymap
# ------------------------------------------------------------------
masterfp_file, masterfp_image = shape.get_master_fp(params, header)
dxmap_file, dxmap = shape.get_shapex(params, header)
dymap_file, dymap = shape.get_shapey(params, header)
# ----------------------------------------------------------------------
# Get transform parameters (transform image onto fpmaster)
# ----------------------------------------------------------------------
# log progress
WLOG(params, '', TextEntry('40-014-00033'))
# transform
targs = [params, recipe, masterfp_image, image]
transform, xres, yres = shape.get_linear_transform_params(*targs)
# ----------------------------------------------------------------------
# For debug purposes straighten the image
# ----------------------------------------------------------------------
image2 = shape.ea_transform(params, image, transform, dxmap=dxmap,
dymap=dymap)
# ----------------------------------------------------------------------
# Quality control
# ----------------------------------------------------------------------
qc_params, passed = shape.shape_local_qc(params, transform, xres, yres)
# update recipe log
log1.add_qc(params, qc_params, passed)
# ------------------------------------------------------------------
# Writing shape to file
# ------------------------------------------------------------------
outfile = shape.write_shape_local_files(params, recipe, infile, combine,
rawfiles, props, transform,
image, image2, qc_params)
# ------------------------------------------------------------------
# Move to calibDB and update calibDB
# ------------------------------------------------------------------
if passed:
# add shapel transforms
drs_database.add_file(params, outfile)
# ------------------------------------------------------------------
# plot a zoom in of non-shifted vs shifted
# ------------------------------------------------------------------
pkwargs = dict(params=params, image=image, simage=image2)
# debug plot
recipe.plot('SHAPEL_ZOOM_SHIFT', **pkwargs)
# summary plot
recipe.plot('SUM_SHAPEL_ZOOM_SHIFT', **pkwargs)
# ------------------------------------------------------------------
# Construct summary document
# ------------------------------------------------------------------
shape.write_shape_local_summary(recipe, params, qc_params, it,
transform)
# ------------------------------------------------------------------
# update recipe log file
# ------------------------------------------------------------------
log1.end(params)
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, locals())
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
| 42.61244
| 80
| 0.422636
|
68071996310f09b7403db61d1e139c8acd0ec10d
| 4,903
|
py
|
Python
|
jupyter_russian/topic06_features/demo.py
|
salman394/AI-ml--course
|
2ed3a1382614dd00184e5179026623714ccc9e8c
|
[
"Unlicense"
] | 1
|
2018-10-24T08:35:29.000Z
|
2018-10-24T08:35:29.000Z
|
jupyter_russian/topic06_features/demo.py
|
salman394/AI-ml--course
|
2ed3a1382614dd00184e5179026623714ccc9e8c
|
[
"Unlicense"
] | null | null | null |
jupyter_russian/topic06_features/demo.py
|
salman394/AI-ml--course
|
2ed3a1382614dd00184e5179026623714ccc9e8c
|
[
"Unlicense"
] | 3
|
2019-10-03T22:32:24.000Z
|
2021-01-13T10:09:22.000Z
|
import numpy as np
import pandas as pd
import json
from sklearn.base import TransformerMixin
EPSILON = 1e-5
class FeatureEngineer(TransformerMixin):
def apply(self, df, k, condition):
df[k] = df['features'].apply(condition)
df[k] = df[k].astype(np.int8)
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, y=None):
df = X.copy()
df.features = df.features.apply(lambda x: ' '.join([y.replace(' ', '_') for y in x]))
df.features = df.features.apply(lambda x: x.lower())
df.features = df.features.apply(lambda x: x.replace('-', '_'))
for k, condition in (('dishwasher', lambda x: 'dishwasher' in x),
('doorman', lambda x: 'doorman' in x or 'concierge' in x),
                             ('pets', lambda x: ("pets" in x or "pet" in x or "dog" in x or "cats" in x) and "no_pets" not in x),
('air_conditioning', lambda x: 'air_conditioning' in x or 'central' in x),
('parking', lambda x: 'parking' in x),
('balcony', lambda x: 'balcony' in x or 'deck' in x or 'terrace' in x or 'patio' in x),
('bike', lambda x: 'bike' in x),
('storage', lambda x: 'storage' in x),
('outdoor', lambda x: 'outdoor' in x or 'courtyard' in x or 'garden' in x),
('roof', lambda x: 'roof' in x),
('gym', lambda x: 'gym' in x or 'fitness' in x),
('pool', lambda x: 'pool' in x),
('backyard', lambda x: 'backyard' in x),
('laundry', lambda x: 'laundry' in x),
('hardwood_floors', lambda x: 'hardwood_floors' in x),
('new_construction', lambda x: 'new_construction' in x),
('dryer', lambda x: 'dryer' in x),
('elevator', lambda x: 'elevator' in x),
('garage', lambda x: 'garage' in x),
('pre_war', lambda x: 'pre_war' in x or 'prewar' in x),
('post_war', lambda x: 'post_war' in x or 'postwar' in x),
('no_fee', lambda x: 'no_fee' in x),
('low_fee', lambda x: 'reduced_fee' in x or 'low_fee' in x),
('fire', lambda x: 'fireplace' in x),
('private', lambda x: 'private' in x),
('wheelchair', lambda x: 'wheelchair' in x),
('internet', lambda x: 'wifi' in x or 'wi_fi' in x or 'internet' in x),
('yoga', lambda x: 'yoga' in x),
('furnished', lambda x: 'furnished' in x),
('multi_level', lambda x: 'multi_level' in x),
('exclusive', lambda x: 'exclusive' in x),
('high_ceil', lambda x: 'high_ceil' in x),
('green', lambda x: 'green_b' in x),
('stainless', lambda x: 'stainless_' in x),
('simplex', lambda x: 'simplex' in x),
('public', lambda x: 'public' in x),
):
self.apply(df, k, condition)
df['bathrooms'] = df['bathrooms'].apply(lambda x: x if x < 5 else 5)
df['bedrooms'] = df['bedrooms'].apply(lambda x: x if x < 5 else 5)
df["num_photos"] = df["photos"].apply(len)
df["num_features"] = df["features"].apply(len)
created = pd.to_datetime(df.pop("created"))
df["listing_age"] = (pd.to_datetime('today') - created).apply(lambda x: x.days)
df["room_dif"] = df["bedrooms"] - df["bathrooms"]
df["room_sum"] = df["bedrooms"] + df["bathrooms"]
df["price_per_room"] = df["price"] / df["room_sum"].apply(lambda x: max(x, .5))
df["bedrooms_share"] = df["bedrooms"] / df["room_sum"].apply(lambda x: max(x, .5))
df['price'] = df['price'].apply(lambda x: np.log(x + EPSILON))
key_types = df.dtypes.to_dict()
for k in key_types:
if key_types[k].name not in ('int64', 'float64', 'int8'):
df.pop(k)
for k in ('latitude', 'longitude', 'listing_id'):
df.pop(k)
return df
def encode(x):
if x == 'low':
return 0
elif x == 'medium':
return 1
elif x == 'high':
return 2
def get_data():
with open('train.json', 'r') as raw_data:
data = json.load(raw_data)
df = pd.DataFrame(data)
target = df.pop('interest_level').apply(encode)
df = FeatureEngineer().fit_transform(df)
return df, target
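# A minimal usage sketch with a hand-made two-row frame instead of the
# train.json file that get_data() expects (all values below are hypothetical).
if __name__ == "__main__":
    sample = pd.DataFrame({
        'features': [['Doorman', 'Dishwasher'], ['No Fee']],
        'photos': [['a.jpg', 'b.jpg'], []],
        'created': ['2016-06-01 12:00:00', '2016-06-15 08:30:00'],
        'bathrooms': [1.0, 2.0],
        'bedrooms': [2, 3],
        'price': [3000, 4500],
        'latitude': [40.71, 40.73],
        'longitude': [-73.99, -73.95],
        'listing_id': [1, 2],
    })
    engineered = FeatureEngineer().fit_transform(sample)
    print(engineered[['doorman', 'no_fee', 'price_per_room']])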
| 47.144231
| 127
| 0.470936
|
627e1e102b5559e7cb11f8422215158fddd6f40f
| 28,946
|
py
|
Python
|
run_experiment_pcgan.py
|
daverics/pcmelgan
|
a8b193a0601354c9b2da7c2dd64cd3e2549a9275
|
[
"MIT"
] | 4
|
2020-06-18T02:02:53.000Z
|
2022-01-22T15:54:08.000Z
|
run_experiment_pcgan.py
|
daverics/pcmelgan
|
a8b193a0601354c9b2da7c2dd64cd3e2549a9275
|
[
"MIT"
] | null | null | null |
run_experiment_pcgan.py
|
daverics/pcmelgan
|
a8b193a0601354c9b2da7c2dd64cd3e2549a9275
|
[
"MIT"
] | 4
|
2020-06-18T02:02:54.000Z
|
2022-02-02T11:54:47.000Z
|
import dataset
import librosa
from torch.utils.data import DataLoader, random_split
import torch
import torch.nn.functional as F
from utils import *
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
import argparse
import yaml
from pathlib import Path
import pandas as pd
import time
from networks import *
# from models import *
# from filter import *
from torch.autograd import Variable
import glob
from mel2wav.modules import MelGAN_Generator, Audio2Mel
LongTensor = torch.cuda.LongTensor
FloatTensor = torch.cuda.FloatTensor
def parse_args():
parser = argparse.ArgumentParser()
# Training parameters
parser.add_argument("--device", type = str, default = '0')
parser.add_argument("--experiment_name", type = str, required = True)
parser.add_argument("--epochs", type = int, default = 2)
parser.add_argument("--batch_size", type = int, default = 128)
parser.add_argument("--save_interval", type = int, default = 1)
parser.add_argument("--checkpoint_interval", type = int, default = 1)
parser.add_argument("--load_path", type = str, default = None)
parser.add_argument("--resume_experiment", type = bool, default = False)
parser.add_argument("--D_real_loss_weight", type = float, default = 1)
parser.add_argument("--FD_lr", type = float, default = 4e-4)
parser.add_argument("--F_lr", type = float, default = 1e-4)
parser.add_argument("--G_lr", type = float, default = 1e-4)
parser.add_argument("--GD_lr", type = float, default = 4e-4)
parser.add_argument("--utility_loss", type = bool, default = False)
# Model and loss parameters
parser.add_argument("--loss", type = str, default = None)
parser.add_argument("--eps", type = float, default = 1e-3)
parser.add_argument("--lamb", type = float, default = 100)
parser.add_argument("--entropy_loss", type = bool, default = False)
parser.add_argument("--filter_receptive_field", type = int, default = 3)
parser.add_argument("--n_mel_channels", type = int, default = 80)
parser.add_argument("--ngf", type = int, default = 32)
parser.add_argument("--n_residual_layers", type = int, default = 3)
# Data parameters
parser.add_argument("--sampling_rate", type = int, default = 8000)
parser.add_argument("--segment_length", type = int, default = 8192)
parser.add_argument("--data_path", type = str, default = '/home/edvinli/thesis_spring_2020/audio_mnist/')
parser.add_argument("--meta_data_file", type = str, default = '/home/edvinli/thesis_spring_2020/audio_mnist/audioMNIST_meta.json')
# Experiment parameters
parser.add_argument("--seeds", type = int, nargs = '+', default = None)
parser.add_argument("--num_runs", type = int, default = 3)
parser.add_argument("--n_completed_runs", type = int, default = None)
args = parser.parse_args()
return args
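# A hypothetical invocation built from the flags declared above (paths and
# values are placeholders, not the authors' settings):
#
#   python run_experiment_pcgan.py --experiment_name pcgan_test --epochs 100 \
#       --batch_size 64 --data_path /data/audio_mnist/ \
#       --meta_data_file /data/audio_mnist/audioMNIST_meta.json --num_runs 1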
def main():
args = parse_args()
root = Path(os.getcwd())
experiment_name = args.experiment_name
dist_loss = args.loss
num_runs = args.num_runs
device = 'cuda:' + str(args.device)
log_dir = os.path.join(root,'logs')
experiment_dir = os.path.join(log_dir, experiment_name)
if os.path.exists(experiment_dir) and not args.resume_experiment:
print("Experiment with this name already exists, use --resume_experiment to continue.")
exit()
    os.makedirs(experiment_dir, exist_ok=True)
# Some hyper parameters
num_genders = 2
num_digits = 10
lamb = args.lamb
eps = args.eps
# If manual seed, check the number is same as number of runs
if not args.seeds == None and len(args.seeds) != num_runs:
print("Number of provided seeds is not the same as number of training runs")
# Meta data and list of data files
annotation_file = args.meta_data_file
train_file_index = librosa.util.find_files(args.data_path)
# annotation_file = '/home/edvinli/thesis_spring_2020/audio_mnist/audioMNIST_meta.json'
# train_file_index = librosa.util.find_files('/home/edvinli/thesis_spring_2020/audio_mnist/')
split_ratio = 5
# Build indices for the data
file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id = dataset.build_annotation_index(
train_file_index, annotation_file, balanced_genders = False)
test_annotation_index, train_annotation_index, test_ids, train_ids = dataset.balanced_annotation_split(file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id, split_ratio)
# Create the dataset
train_data = dataset.AnnotatedAudioDataset(
train_annotation_index, args.sampling_rate, args.segment_length
)
test_data = dataset.AnnotatedAudioDataset(
test_annotation_index, args.sampling_rate, args.segment_length
)
n_train = train_data.__len__()
n_test = test_data.__len__()
# Dataloaders
train_loader = DataLoader(train_data, batch_size = args.batch_size, num_workers = 1, shuffle = True)
test_loader = DataLoader(test_data, batch_size = 1, num_workers = 1)
# Set up models that are not trained
fft = Audio2Mel(sampling_rate = args.sampling_rate)
Mel2Audio = MelGAN_Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
fix_digit_spec_classfier = load_modified_AlexNet(num_digits).to(device)
fix_gender_spec_classfier = load_modified_AlexNet(num_genders).to(device)
# Pretrained Mel spectrogram inversion and digit classification
Mel2Audio.load_state_dict(torch.load('mel2wav/best_netG_epoch_2120.pt'))
fix_digit_spec_classfier.load_state_dict(torch.load('fixed_classifier_checkpoints/best_digit_alexnet_spectrograms_epoch_26.pt'))
fix_gender_spec_classfier.load_state_dict(torch.load('fixed_classifier_checkpoints/best_gender_alexnet_epoch_29.pt'))
fix_digit_spec_classfier.eval()
fix_gender_spec_classfier.eval()
# Loss functions
dist_loss = 'l1'
distortion_loss = nn.L1Loss()
entropy_loss = HLoss()
adversarial_loss = nn.CrossEntropyLoss()
adversarial_loss_rf = nn.CrossEntropyLoss()
for run in range(num_runs):
run_dir = os.path.join(experiment_dir,'run_' + str(run))
checkpoint_dir = os.path.join(run_dir,'checkpoints')
visuals_dir = os.path.join(run_dir,'visuals')
example_dir = os.path.join(run_dir,'examples')
example_audio_dir = os.path.join(example_dir, 'audio')
example_spec_dir = os.path.join(example_dir, 'spectrograms')
if not args.resume_experiment:
os.mkdir(run_dir)
os.mkdir(example_dir)
os.mkdir(checkpoint_dir)
os.mkdir(example_audio_dir)
os.mkdir(example_spec_dir)
os.mkdir(visuals_dir)
# Set random seed
if args.seeds == None:
manualSeed = random.randint(1, 10000) # use if you want new results
else:
            manualSeed = args.seeds[run]
random.seed(manualSeed)
torch.manual_seed(manualSeed)
np.random.seed(manualSeed)
####################################
# Dump arguments and create logger #
####################################
with open(Path(run_dir) / "args.yml", "w") as f:
yaml.dump(args, f)
yaml.dump({'Seed used' : manualSeed}, f)
yaml.dump({'Run number' : run}, f)
writer = SummaryWriter(str(run_dir))
# Set up trainable models and optimizers
netF = UNetFilter(1, 1, chs=[8, 16, 32, 64, 128], kernel_size = args.filter_receptive_field, image_width=32, image_height=80, noise_dim=10, nb_classes=2, embedding_dim=16, use_cond = False).to(device)
netFD = load_modified_AlexNet(num_genders).to(device)
netG = UNetFilter(1, 1, chs=[8, 16, 32, 64, 128], kernel_size = args.filter_receptive_field, image_width=32, image_height=80, noise_dim=10, nb_classes=2, embedding_dim=16, use_cond = True).to(device)
netGD = load_modified_AlexNet(num_genders + 1).to(device)
# Optimizers
optF = torch.optim.Adam(netF.parameters(), args.F_lr, betas = (0.5, 0.9))
optFD = torch.optim.Adam(netFD.parameters(), args.FD_lr, betas = (0.5, 0.9))
optG = torch.optim.Adam(netG.parameters(), args.G_lr, betas = (0.5, 0.9))
optGD = torch.optim.Adam(netGD.parameters(), args.GD_lr, betas = (0.5, 0.9))
# Put training objects into list for loading and saving state dicts
training_objects = []
training_objects.append(('netF', netF))
training_objects.append(('optF', optF))
training_objects.append(('netFD', netFD))
training_objects.append(('optFD', optFD))
training_objects.append(('netG', netG))
training_objects.append(('optG', optG))
training_objects.append(('netGD', netGD))
training_objects.append(('optGD', optGD))
training_objects.sort(key = lambda x : x[0])
# Load from checkpoints
start_epoch = 0
if args.resume_experiment or not args.load_path == None:
if args.resume_experiment:
if args.n_completed_runs <= run:
checkpoints = sorted(glob.glob(os.path.join(checkpoint_dir, '*latest*')))
start_epoch = int(checkpoints[0].split('_')[-1][:-3])
print('Resuming experiment {} from checkpoint, {} epochs completed.'.format(args.experiment_name, start_epoch))
else:
checkpoint_path = os.path.join(args.load_path,'checkpoints')
checkpoints = sorted(glob.glob(os.path.join(checkpoint_path,'*latest*')))
completed_epochs = int(checkpoints[0].split('_')[-1][:-3])
print('Starting from checkpoint in {}, {} epochs completed.'.format(args.load_path, completed_epochs))
for i, (_, object) in enumerate(training_objects):
object.load_state_dict(torch.load(checkpoints[i]))
print("GAN training initiated, {} epochs".format(args.epochs))
for epoch in range(start_epoch, args.epochs + start_epoch):
# Add counters for number of correct classifications
correct_FD = 0
correct_fake_GD = 0
correct_real_GD = 0
correct_gender_fake_GD = 0
correct_digit = 0
fixed_correct_gender = 0
# Add variables to add batch losses to
F_distortion_loss_accum = 0
F_adversary_loss_accum = 0
FD_adversary_loss_accum = 0
G_distortion_loss_accum = 0
G_adversary_loss_accum = 0
GD_real_loss_accum = 0
GD_fake_loss_accum = 0
netF.train()
netFD.train()
netG.train()
netGD.train()
epoch_start = time.time()
for i, (x, gender, digit, _) in enumerate(train_loader):
digit = digit.to(device)
gender = gender.to(device)
x = torch.unsqueeze(x,1)
spectrograms = fft(x).detach()
spectrograms, means, stds = preprocess_spectrograms(spectrograms)
spectrograms = torch.unsqueeze(spectrograms,1).to(device)
# -----------------
# Train Filter
# -----------------
optF.zero_grad()
z = torch.randn(spectrograms.shape[0], 10).to(device)
filter_mel = netF(spectrograms,z, gender.long())
pred_secret = netFD(filter_mel)
ones = Variable(FloatTensor(gender.shape).fill_(1.0), requires_grad = True).to(device)
target = ones - gender.float()
target = target.view(target.size(0))
filter_distortion_loss = distortion_loss(filter_mel, spectrograms)
if not args.entropy_loss:
filter_adversary_loss = adversarial_loss(pred_secret, target.long())
else:
filter_adversary_loss = entropy_loss(pred_secret)
netF_loss = filter_adversary_loss + lamb * torch.pow(torch.relu(filter_distortion_loss - eps),2)
netF_loss.backward()
optF.step()
# ------------------------
# Train Generator (Real/Fake)
# ------------------------
optG.zero_grad()
z1 = torch.randn(spectrograms.shape[0], 10).to(device)
filter_mel = netF(spectrograms,z1,gender.long())
z2 = torch.randn(spectrograms.shape[0], 10).to(device)
gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], spectrograms.shape[0]))).to(device)
gen_mel = netG(filter_mel, z2, gen_secret)
pred_secret = netGD(gen_mel)
pred_digit = fix_digit_spec_classfier(gen_mel)
fixed_pred_secret = fix_gender_spec_classfier(gen_mel)
generator_distortion_loss = distortion_loss(gen_mel, spectrograms)
generator_adversary_loss = adversarial_loss(pred_secret, gen_secret)
netG_loss = generator_adversary_loss + lamb * torch.pow(torch.relu(generator_distortion_loss - eps),2)
netG_loss.backward()
optG.step()
# ---------------------
# Train Discriminator
# ---------------------
optFD.zero_grad()
pred_secret = netFD(filter_mel.detach())
netFD_loss = adversarial_loss(pred_secret, gender.long())
netFD_loss.backward()
optFD.step()
# --------------------------------
# Train Discriminator (Real/Fake)
# --------------------------------
optGD.zero_grad()
real_pred_secret = netGD(spectrograms)
fake_pred_secret = netGD(gen_mel.detach())
fake_secret = Variable(LongTensor(fake_pred_secret.size(0)).fill_(2.0), requires_grad=False).to(device)
GD_real_loss = adversarial_loss_rf(real_pred_secret, gender.long().to(device)).to(device)
GD_fake_loss = adversarial_loss_rf(fake_pred_secret, fake_secret).to(device)
netGD_loss = (GD_real_loss + GD_fake_loss)/2
netGD_loss.backward()
optGD.step()
# ----------------------------------------------
# Compute accuracies
# ----------------------------------------------
# FD accuracy on original gender
predicted_gender_FD = torch.argmax(pred_secret, 1)
correct_FD += (predicted_gender_FD == gender.long()).sum()
# GD accuracy on original gender in real and generated (fake) data,
# and sampled gender in generated (fake) data
predicted_fake_GD = torch.argmax(fake_pred_secret, 1)
predicted_real_GD = torch.argmax(real_pred_secret,1)
correct_fake_GD += (predicted_fake_GD == fake_secret).sum()
correct_real_GD += (predicted_real_GD == gender).sum()
correct_gender_fake_GD += (predicted_fake_GD == gen_secret).sum()
# Calculate number of correct classifications for the fixed classifiers on the training set
predicted_digit = torch.argmax(pred_digit.data, 1)
correct_digit += (predicted_digit == digit).sum()
fixed_predicted = torch.argmax(fixed_pred_secret.data, 1)
fixed_correct_gender += (fixed_predicted == gender.long()).sum()
# ----------------------------------------------
# Record losses
# ----------------------------------------------
F_distortion_loss_accum += filter_distortion_loss.item()
F_adversary_loss_accum += filter_adversary_loss.item()
FD_adversary_loss_accum += netFD_loss.item()
G_distortion_loss_accum += generator_distortion_loss.item()
G_adversary_loss_accum += generator_adversary_loss.item()
GD_real_loss_accum += GD_real_loss.item()
GD_fake_loss_accum += GD_fake_loss.item()
writer.add_scalar("F_distortion_loss", F_distortion_loss_accum/(i+1), epoch + 1)
writer.add_scalar("F_adversary_loss", F_adversary_loss_accum/(i+1), epoch + 1)
writer.add_scalar("G_distortion_loss", G_distortion_loss_accum/(i+1), epoch + 1)
writer.add_scalar("G_adversary_loss", G_adversary_loss_accum/(i+1), epoch + 1)
writer.add_scalar("FD_adversary_loss", FD_adversary_loss_accum/(i+1), epoch + 1)
writer.add_scalar("GD_real_loss", GD_real_loss_accum/(i+1), epoch + 1)
writer.add_scalar("GD_fake_loss", GD_fake_loss_accum/(i+1), epoch + 1)
# ----------------------------------------------
# Record accuracies
# ----------------------------------------------
FD_accuracy = 100 * correct_FD / n_train
GD_accuracy_fake = 100 * correct_fake_GD / n_train
GD_accuracy_real = 100 * correct_real_GD / n_train
GD_accuracy_gender_fake = 100 * correct_gender_fake_GD / n_train
fix_digit_spec_classfier_accuracy = 100 * correct_digit / n_train
fix_gender_spec_classfier_accuracy = 100 * fixed_correct_gender / n_train
writer.add_scalar("FD_accuracy", FD_accuracy, epoch + 1)
writer.add_scalar("GD_accuracy_fake", GD_accuracy_fake, epoch + 1)
writer.add_scalar("GD_accuracy_real", GD_accuracy_real, epoch + 1)
writer.add_scalar("GD_accuracy_gender_fake", GD_accuracy_gender_fake, epoch + 1)
writer.add_scalar("digit_accuracy", fix_digit_spec_classfier_accuracy, epoch + 1)
writer.add_scalar("fixed_gender_accuracy_fake", fix_gender_spec_classfier_accuracy, epoch + 1)
print('__________________________________________________________________________')
print("Epoch {} completed | Time: {:5.2f} s ".format(epoch+1, time.time() - epoch_start))
print("netF | Adversarial loss: {:5.5f} | Distortion loss: {:5.5f}".format(F_adversary_loss_accum/(i + 1), F_distortion_loss_accum/(i + 1)))
print("netFD | Filtered sample accuracy: {} %".format(FD_accuracy))
print("netG | Advsarial loss: {:5.5f} | Distortion loss: {:5.5f}".format(G_adversary_loss_accum/(i + 1), G_distortion_loss_accum/(i + 1)))
print("netGD | Real samples: {} % | Fake samples: {} % | Sampled gender accuracy: {} % ".format(
GD_accuracy_real, GD_accuracy_fake, GD_accuracy_gender_fake
))
print("Fix Digit accuracy: {} % | Fix gender accuracy: {} %".format(fix_digit_spec_classfier_accuracy, fix_gender_spec_classfier_accuracy))
# ----------------------------------------------
# Compute test accuracy
# ----------------------------------------------
if epoch % 10 == 0:
test_correct_digit = 0
test_fixed_original_gender = 0
test_fixed_sampled_gender = 0
for i, (x, gender, digit, speaker_id) in enumerate(test_loader):
x = torch.unsqueeze(x,1)
spectrograms = fft(x).detach()
spectrograms, means, stds = preprocess_spectrograms(spectrograms)
spectrograms = torch.unsqueeze(spectrograms,1).to(device)
gender = gender.to(device)
digit = digit.to(device)
z1 = torch.randn(spectrograms.shape[0], 10).to(device)
filter_mel = netF(spectrograms,z1, gender.long())
z2 = torch.randn(filter_mel.shape[0], 10).to(device)
gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], filter_mel.shape[0]))).to(device)
gen_mel = netG(filter_mel,z2,gen_secret)
pred_digit = fix_digit_spec_classfier(gen_mel)
fixed_pred_secret = fix_gender_spec_classfier(gen_mel)
# Calculate utility accuracy
predicted = torch.argmax(pred_digit.data,1)
test_correct_digit += (predicted == digit).sum()
# Calculate gender accuracy for fixed net
fixed_predicted = torch.argmax(fixed_pred_secret.data,1)
test_fixed_original_gender += (fixed_predicted == gender.long()).sum()
test_fixed_sampled_gender += (fixed_predicted == gen_secret).sum()
test_digit_accuracy = 100*test_correct_digit / n_test
test_fixed_original_gender_accuracy_fake = 100*test_fixed_original_gender / n_test
test_fixed_sampled_gender_accuracy_fake = 100*test_fixed_sampled_gender / n_test
writer.add_scalar("test_set_digit_accuracy", test_digit_accuracy, epoch + 1)
writer.add_scalar("test_set_fixed_original_gender_accuracy_fake", test_fixed_original_gender_accuracy_fake, epoch + 1)
writer.add_scalar("test_set_fixed_sampled_gender_accuracy_fake", test_fixed_sampled_gender_accuracy_fake, epoch + 1)
print('__________________________________________________________________________')
print("## Test set statistics ##")
print("Utility | Digit accuracy: {} % | Fixed sampled gender accuracy: {} % | Fixed original gender accuracy: {} % ".format(test_digit_accuracy,test_fixed_sampled_gender_accuracy_fake, test_fixed_original_gender_accuracy_fake))
# ----------------------------------------------
# Save test samples
# ----------------------------------------------
if (epoch + 1) % args.save_interval == 0:
print("Saving audio and spectrogram samples.")
netF.eval()
netG.eval()
for i, (x, gender, digit, speaker_id) in enumerate(test_loader):
if i % 50 == 0:
x = torch.unsqueeze(x,1)
spectrograms = fft(x).detach()
spec_original = spectrograms
spectrograms, means, stds = preprocess_spectrograms(spectrograms)
spectrograms = torch.unsqueeze(spectrograms,1).to(device)
gender = gender.to(device)
digit = digit.to(device)
z1 = torch.randn(spectrograms.shape[0], 10).to(device)
filtered = netF(spectrograms,z1,gender.long()).detach()
z2 = torch.randn(spectrograms.shape[0], 10).to(device)
male = Variable(LongTensor(spectrograms.size(0)).fill_(1.0), requires_grad=False).to(device)
female = Variable(LongTensor(spectrograms.size(0)).fill_(0.0), requires_grad=False).to(device)
generated_male = netG(filtered, z2, male).detach()
generated_female = netG(filtered, z2, female).detach()
#Predict digit
digit_male = fix_digit_spec_classfier(generated_male)
pred_digit_male = torch.argmax(digit_male.data,1)
digit_female = fix_digit_spec_classfier(generated_female)
pred_digit_female = torch.argmax(digit_female.data,1)
#Predict gender
gender_male = fix_gender_spec_classfier(generated_male)
pred_gender_male = torch.argmax(gender_male.data,1)
gender_female = fix_gender_spec_classfier(generated_female)
pred_gender_female = torch.argmax(gender_female.data,1)
if pred_gender_male == 0:
pred_gender_male = 'female'
else:
pred_gender_male = 'male'
if pred_gender_female == 0:
pred_gender_female = 'female'
else:
pred_gender_female = 'male'
# Distortions
filtered_distortion = distortion_loss(spectrograms,filtered)
male_distortion = distortion_loss(spectrograms,generated_male).item()
female_distortion = distortion_loss(spectrograms,generated_female).item()
sample_distortion = distortion_loss(generated_male, generated_female).item()
filtered = torch.squeeze(filtered,1).to(device) * 3 * stds.to(device) + means.to(device)
generated_male = torch.squeeze(generated_male,1).to(device) * 3 * stds.to(device) + means.to(device)
generated_female = torch.squeeze(generated_female,1).to(device) * 3 * stds.to(device) + means.to(device)
spectrograms = spectrograms.to(device) * 3 * stds.to(device) + means.to(device)
inverted_filtered = Mel2Audio(filtered).squeeze().detach().cpu()
inverted_male = Mel2Audio(generated_male).squeeze().detach().cpu()
inverted_female = Mel2Audio(generated_female).squeeze().detach().cpu()
f_name_filtered_audio = os.path.join(example_audio_dir, 'speaker_{}_digit_{}_epoch_{}_filtered.wav'.format(speaker_id.item(), digit.item(), epoch + 1))
f_name_male_audio = os.path.join(example_audio_dir, 'speaker_{}_digit_{}_epoch_{}_sampled_gender_male_predicted_digit_{}.wav'.format(speaker_id.item(), digit.item(), epoch + 1, pred_digit_male.item()))
f_name_female_audio = os.path.join(example_audio_dir, 'speaker_{}_digit_{}_epoch_{}_sampled_gender_female_predicted_digit_{}.wav'.format(speaker_id.item(), digit.item(), epoch + 1, pred_digit_female.item()))
f_name_original_audio = os.path.join(example_audio_dir, 'speaker_{}_digit_{}_.wav'.format(speaker_id.item(), digit.item()))
save_sample(f_name_filtered_audio, args.sampling_rate, inverted_filtered)
save_sample(f_name_male_audio, args.sampling_rate, inverted_male)
save_sample(f_name_female_audio, args.sampling_rate, inverted_female)
save_sample(f_name_original_audio,args.sampling_rate,torch.squeeze(x))
if gender == 0:
gender_title = 'female'
else:
gender_title = 'male'
orig_title = 'Original spectrogram - Gender: {} - Digit: {}'.format(gender_title, digit.item())
filtered_title = 'Filtered spectrogram'
male_title = 'Sampled/predicted gender: male / {} | Predicted digit: {} \n Distortion loss: {:5.5f} (original) | {:5.5f} (female) ({}_loss)'.format(pred_gender_male, pred_digit_male.item(), male_distortion, sample_distortion, dist_loss)
female_title = 'Sampled/predicted gender: female / {} | Predicted digit: {} \n Distortion loss: {:5.5f} (original) | {:5.5f} (male) ({}_loss)'.format(pred_gender_female,pred_digit_female.item(), female_distortion, sample_distortion, dist_loss)
f_name = os.path.join(example_spec_dir, 'speaker_{}_digit_{}_epoch_{}'.format(
speaker_id.item(), digit.item(), epoch + 1
))
comparison_plot_pcgan(f_name, spec_original, filtered, generated_male, generated_female, orig_title, filtered_title, male_title, female_title)
print("Success!")
if (epoch + 1) % args.checkpoint_interval == 0:
save_epoch = epoch + 1
old_checkpoints = sorted(glob.glob(os.path.join(checkpoint_dir, '*latest*')))
if old_checkpoints:
for i, _ in enumerate(old_checkpoints):
os.remove(old_checkpoints[i])
for name, object in training_objects:
torch.save(object.state_dict(), os.path.join(checkpoint_dir, name + '_epoch_{}.pt'.format(save_epoch)))
torch.save(object.state_dict(), os.path.join(checkpoint_dir, name + '_latest_epoch_{}.pt'.format(save_epoch)))
print("Run number {} completed.".format(run+1))
print('__________________________________________________________________________')
if __name__ == "__main__":
main()
| 53.307551
| 267
| 0.603676
|
766bb0a4f6c1fd4efb75cb5f376bc4c7c224b885
| 263
|
py
|
Python
|
crossref/errors.py
|
ScholarTools/crossref_api_python
|
49282503bda1e013a6b1bddd171f1ef960bec3f5
|
[
"MIT"
] | null | null | null |
crossref/errors.py
|
ScholarTools/crossref_api_python
|
49282503bda1e013a6b1bddd171f1ef960bec3f5
|
[
"MIT"
] | 11
|
2016-03-01T21:15:33.000Z
|
2021-12-13T19:43:02.000Z
|
crossref/errors.py
|
ScholarTools/crossref_api_python
|
49282503bda1e013a6b1bddd171f1ef960bec3f5
|
[
"MIT"
] | 1
|
2018-02-05T15:22:37.000Z
|
2018-02-05T15:22:37.000Z
|
# -*- coding: utf-8 -*-
"""
"""
class CrossrefAPIError(Exception):
"""User errors in usage of the Crossref API"""
pass
class RequestError(Exception):
pass
class InvalidDOI(Exception):
pass
class UnhandledHTTPResonseFailure(Exception):
pass
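# A minimal usage sketch showing how a caller might signal and handle a bad
# DOI with these exceptions (the helper and DOI string below are hypothetical).
def _require_doi(doi):
    if not doi.startswith('10.'):
        raise InvalidDOI('Not a valid DOI: %s' % doi)
    return doi
if __name__ == "__main__":
    try:
        _require_doi('not-a-doi')
    except InvalidDOI as exc:
        print('Rejected: %s' % exc)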
| 16.4375
| 50
| 0.684411
|
cce60870685cc8bed5ae5ffb2a62a752407e9461
| 8,626
|
py
|
Python
|
mingpt/model.py
|
mircean/minGPT
|
e080e9a6deb8579ca0dd5a67fc42ae54db4478e9
|
[
"MIT"
] | null | null | null |
mingpt/model.py
|
mircean/minGPT
|
e080e9a6deb8579ca0dd5a67fc42ae54db4478e9
|
[
"MIT"
] | null | null | null |
mingpt/model.py
|
mircean/minGPT
|
e080e9a6deb8579ca0dd5a67fc42ae54db4478e9
|
[
"MIT"
] | null | null | null |
"""
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
explicit implementation here to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, batch):
idx = batch["X"]
targets = batch["Y"] if "Y" in batch else None
b, t = idx.size()
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
# loss = None
# if targets is not None:
# loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
#
# return logits, loss
return {"Y_pred": logits, "Y_pred_softmax": nn.functional.softmax(logits, 2)}
| 42.70297
| 127
| 0.623348
|
fca1cd946326176453973ccdee2903c8957cb60b
| 1,392
|
py
|
Python
|
modules/dbnd-airflow/src/dbnd_airflow/compat/kubernetes_executor.py
|
kalebinn/dbnd
|
89b6ac3537f861784be73ffe8989bf63fca7401c
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd-airflow/src/dbnd_airflow/compat/kubernetes_executor.py
|
kalebinn/dbnd
|
89b6ac3537f861784be73ffe8989bf63fca7401c
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd-airflow/src/dbnd_airflow/compat/kubernetes_executor.py
|
kalebinn/dbnd
|
89b6ac3537f861784be73ffe8989bf63fca7401c
|
[
"Apache-2.0"
] | null | null | null |
from dbnd_airflow._vendor import kubernetes_utils
from dbnd_airflow.constants import AIRFLOW_ABOVE_9, AIRFLOW_ABOVE_10
if AIRFLOW_ABOVE_10:
from airflow.executors.kubernetes_executor import (
AirflowKubernetesScheduler,
KubernetesJobWatcher,
KubernetesExecutor,
KubeConfig,
)
else:
from airflow.contrib.executors.kubernetes_executor import (
AirflowKubernetesScheduler,
KubernetesJobWatcher,
KubernetesExecutor,
KubeConfig,
)
def make_safe_label_value(value):
return kubernetes_utils.make_safe_label_value(value)
def get_tuple_for_watcher_queue(pod_id, namespace, state, labels, resource_version):
if AIRFLOW_ABOVE_9:
return pod_id, namespace, state, labels, resource_version
return pod_id, state, labels, resource_version
def get_job_watcher_kwargs(dbnd_kubernetes_scheduler):
kwargs = {
"namespace": dbnd_kubernetes_scheduler.namespace,
"watcher_queue": dbnd_kubernetes_scheduler.watcher_queue,
"resource_version": dbnd_kubernetes_scheduler.current_resource_version,
"worker_uuid": dbnd_kubernetes_scheduler.worker_uuid,
"kube_config": dbnd_kubernetes_scheduler.kube_config,
"kube_dbnd": dbnd_kubernetes_scheduler.kube_dbnd,
}
if AIRFLOW_ABOVE_10:
kwargs.update({"multi_namespace_mode": False})
return kwargs
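# A hedged usage sketch: the two helpers above normalize differences between Airflow
# releases, so calling code builds watcher-queue items and job-watcher kwargs the same
# way regardless of the installed version. The scheduler object and values are assumed.
#
#   item = get_tuple_for_watcher_queue(
#       pod_id="example-pod", namespace="default", state="Running",
#       labels={"dag_id": "example_dag"}, resource_version="12345",
#   )  # a 5-tuple when AIRFLOW_ABOVE_9, a 4-tuple (namespace dropped) otherwise
#   kwargs = get_job_watcher_kwargs(dbnd_kubernetes_scheduler)
#   # kwargs then feed the constructor of dbnd's KubernetesJobWatcher subclass.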
| 31.636364
| 84
| 0.751437
|
48ef10a6201deae718f7dde78c8ab18c48ca08a0
| 1,341
|
py
|
Python
|
venv/lib/python3.9/site-packages/google/cloud/dlp_v2/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/google/cloud/dlp_v2/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
venv/lib/python3.9/site-packages/google/cloud/dlp_v2/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import warnings
from google.cloud.dlp_v2 import types
from google.cloud.dlp_v2.gapic import dlp_service_client
from google.cloud.dlp_v2.gapic import enums
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class DlpServiceClient(dlp_service_client.DlpServiceClient):
__doc__ = dlp_service_client.DlpServiceClient.__doc__
enums = enums
__all__ = ("enums", "types", "DlpServiceClient")
| 31.928571
| 79
| 0.747204
|
b56d1728fad9fe7bbe0bd9a97bb5a46b1f5d6075
| 7,821
|
py
|
Python
|
data_loader.py
|
feelpp/brits
|
f1dffd6ed6d92c3a70a287155d1db3a8d4eac84f
|
[
"MIT"
] | null | null | null |
data_loader.py
|
feelpp/brits
|
f1dffd6ed6d92c3a70a287155d1db3a8d4eac84f
|
[
"MIT"
] | null | null | null |
data_loader.py
|
feelpp/brits
|
f1dffd6ed6d92c3a70a287155d1db3a8d4eac84f
|
[
"MIT"
] | null | null | null |
import os
import time
import ujson as json
import csv
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# Seems to be unused in the rest of the code.
# class MySet(Dataset):
# def __init__(self):
# super(MySet, self).__init__()
# self.content = open('./json/json').readlines()
# indices = np.arange(len(self.content))
# val_indices = np.random.choice(indices, len(self.content) // 5)
# self.val_indices = set(val_indices.tolist())
# def __len__(self):
# return len(self.content)
# def __getitem__(self, idx):
# rec = json.loads(self.content[idx])
# if idx in self.val_indices:
# rec['is_train'] = 0
# else:
# rec['is_train'] = 1
# return rec
# Original
# class MyTrainSet(Dataset):
# def __init__(self):
# super(MyTrainSet, self).__init__()
# self.content = open('./json/EMS/USA/USA_nitrate_2train1012.json').readlines()
# indices = np.arange(len(self.content))
# val_indices = np.random.choice(indices, len(self.content) // 5)
# self.val_indices = set(val_indices.tolist())
# def __len__(self):
# return len(self.content)
# def __getitem__(self, idx):
# rec = json.loads(self.content[idx])
# # if idx in self.val_indices:
# # rec['is_train'] = 0
# # else:
# # rec['is_train'] = 1
# return rec
# Modif
class MyTrainSet(Dataset):
def __init__(self):
super(MyTrainSet, self).__init__()
# self.content = open('./csv/ibat/preprocess/train_raw_results_demo.csv').readlines()
self.path = './csv/ibat/preprocess/train_raw_results_demo.csv'
self.content = pd.read_csv(self.path, header=0) # , chunksize=self.chunksize)
indices = np.arange(len(self.content))
val_indices = np.random.choice(indices, len(self.content) // 5)
self.val_indices = set(val_indices.tolist())
def __len__(self):
return len(self.content)
def __getitem__(self, idx):
# rec = csv.reader (self.content[idx])
rec = self.content.iloc[[idx]]
# if idx in self.val_indices:
# rec['is_train'] = 0
# else:
# rec['is_train'] = 1
return rec
# my_train_set = MyTrainSet()
# print(my_train_set[3])
# print(len(my_train_set))
# input("waiting")
# Original
# class MyTestSet(Dataset):
# def __init__(self):
# super(MyTestSet, self).__init__()
# self.content = open('./json/EMS/USA/USA_nitrate_2test1012.json').readlines()
# indices = np.arange(len(self.content))
# val_indices = np.random.choice(indices, len(self.content) // 5)
# self.val_indices = set(val_indices.tolist())
# def __len__(self):
# return len(self.content)
# def __getitem__(self, idx):
# rec = json.loads(self.content[idx])
# # if idx in self.val_indices:
# # rec['is_train'] = 0
# # else:
# # rec['is_train'] = 1
# return rec
# Modif
class MyTestSet(Dataset):
def __init__(self):
super(MyTestSet, self).__init__()
# self.content = open('./json/EMS/USA/USA_nitrate_2test1012.json').readlines()
self.path = './csv/ibat/preprocess/test_raw_results_demo.csv'
self.content = pd.read_csv(self.path, header=0) # , chunksize=self.chunksize)
indices = np.arange(len(self.content))
val_indices = np.random.choice(indices, len(self.content) // 5)
self.val_indices = set(val_indices.tolist())
def __len__(self):
return len(self.content)
def __getitem__(self, idx):
# rec = json.loads(self.content[idx])
rec = self.content.iloc[[idx]]
# if idx in self.val_indices:
# rec['is_train'] = 0
# else:
# rec['is_train'] = 1
return rec
# my_test_set = MyTestSet()
# print(my_test_set[4])
# print(len(my_test_set))
# input("waiting")
def collate_fn(recs):
forward = list(map(lambda x: x['forward'], recs))
backward = list(map(lambda x: x['backward'], recs))
def to_tensor_dict(recs):
# values = torch.FloatTensor(list(map(lambda r: r['values'], recs)))
# masks = torch.FloatTensor(list(map(lambda r: r['masks'], recs)))
# deltas = torch.FloatTensor(list(map(lambda r: r['deltas'], recs)))
# evals = torch.FloatTensor(list(map(lambda r: r['evals'], recs)))
# eval_masks = torch.FloatTensor(
# list(map(lambda r: r['eval_masks'], recs)))
# forwards = torch.FloatTensor(list(map(lambda r: r['forwards'], recs)))
values = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['values'], r)), recs)))
masks = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['masks'], r)), recs)))
deltas = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['deltas'], r)), recs)))
forwards = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['forwards'], r)), recs)))
evals = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['evals'], r)), recs)))
eval_masks = torch.FloatTensor(list(map(lambda r: list(map(lambda x: x['eval_masks'], r)), recs)))
# print('values:{}'.format(values.size()))
# print('!!')
# print('masks:{}'.format(masks.size()))
# print('deltas:{}'.format(deltas.size()))
# print('forwards:{}'.format(forwards.size()))
# print('evals:{}'.format(evals.size()))
# print('eval_masks:{}'.format(eval_masks.size()))
return {
'values': values.permute(0, 2, 1),
'forwards': forwards.permute(0, 2, 1),
'masks': masks.permute(0, 2, 1),
'deltas': deltas.permute(0, 2, 1),
'evals': evals.permute(0, 2, 1),
'eval_masks': eval_masks.permute(0, 2, 1)
}
ret_dict = {
'forward': to_tensor_dict(forward),
'backward': to_tensor_dict(backward)
}
ret_dict['labels'] = torch.FloatTensor(list(map(lambda x: x['label'], recs)))
ret_dict['is_train'] = torch.FloatTensor(list(map(lambda x: x['is_train'], recs)))
# print('values:{}'.format(ret_dict['forward']['values'].size()))
# print('!!')
# print('masks:{}'.format(masks.size()))
# print('deltas:{}'.format(deltas.size()))
# print('forwards:{}'.format(forwards.size()))
# print('evals:{}'.format(evals.size()))
# print('eval_masks:{}'.format(eval_masks.size()))
return ret_dict
# For now, get_loader is not used in the rest of the code
# def get_loader(batch_size=64, shuffle=False):
# data_set = MySet()
# data_iter = DataLoader(dataset=data_set,
# batch_size=batch_size,
# num_workers=1,
# shuffle=shuffle,
# pin_memory=True,
# collate_fn=collate_fn)
# return data_iter
def get_train_loader(batch_size=100, shuffle=False):
data_set = MyTrainSet()
data_iter = DataLoader(dataset=data_set,
batch_size=batch_size,
num_workers=1,
shuffle=shuffle,
pin_memory=True,
collate_fn=collate_fn)
return data_iter
def get_test_loader(batch_size=100, shuffle=False):
data_set = MyTestSet()
data_iter = DataLoader(dataset=data_set,
batch_size=batch_size,
num_workers=1,
shuffle=shuffle,
pin_memory=True,
collate_fn=collate_fn)
return data_iter
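# A hedged usage sketch of the loaders above. Note that collate_fn expects records shaped
# like the original JSON lines (dicts with 'forward'/'backward' sequences); the CSV-backed
# datasets above yield DataFrame rows, so an intermediate conversion step is assumed here.
#
#   train_iter = get_train_loader(batch_size=32, shuffle=True)
#   for batch in train_iter:
#       values = batch['forward']['values']   # permuted to (batch, feature, time) above
#       labels = batch['labels']
#       # ... feed the model ...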
| 31.663968
| 106
| 0.577803
|
7a6e76424eb6916e7d73e2f1d0cad23f98ffd0f7
| 730
|
py
|
Python
|
1-100/41-50/49-groupAnagram/groupAnagram.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/41-50/49-groupAnagram/groupAnagram.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/41-50/49-groupAnagram/groupAnagram.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53, \
59,61,67,71,73,79,83,89,97,101]
dictionary = {}
result = []
groups = 0
        for string in strs:
            # anagrams share the same multiset of letters, so the product of one
            # prime per letter is identical for every anagram (lowercase a-z assumed)
            key = 1
            for char in string:
                key *= primes[ord(char) - 97]
            index = dictionary.get(key, -1)
            if index == -1:
                # first time this key is seen: open a new group
                index = groups
                dictionary[key] = groups
                result.append([])
                groups += 1
            result[index].append(string)
        return result
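# A short usage sketch; inputs are assumed to be lowercase ASCII words, matching the
# ord(char) - 97 prime lookup above:
#
#   Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
#   # -> [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]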
| 28.076923
| 64
| 0.420548
|
72dae11ac60386a53c9e102e2b45bf9967ce298d
| 893
|
py
|
Python
|
maro/cli/grass/image.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 1
|
2020-09-30T09:31:05.000Z
|
2020-09-30T09:31:05.000Z
|
maro/cli/grass/image.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 2
|
2020-12-15T09:13:43.000Z
|
2020-12-16T08:02:41.000Z
|
maro/cli/grass/image.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 1
|
2021-10-01T09:17:43.000Z
|
2021-10-01T09:17:43.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
@check_details_validity(mode='grass')
@lock
def push_image(cluster_name: str, image_name: str, image_path: str, remote_context_path: str, remote_image_name: str,
**kwargs):
cluster_details = load_cluster_details(cluster_name=cluster_name)
if cluster_details['cloud']['infra'] == 'azure':
executor = GrassAzureExecutor(cluster_name=cluster_name)
executor.push_image(
image_name=image_name,
image_path=image_path,
remote_context_path=remote_context_path,
remote_image_name=remote_image_name
)
| 35.72
| 117
| 0.743561
|
b56bc8c26d37134baa1122e5fcbd8286e6068f7a
| 1,231
|
py
|
Python
|
data_structures/tree_based/binary_tree/test.py
|
kwahome/data-structures-and-algos
|
535b23c63bf384d63c1ebc08d1c32d3dd808297c
|
[
"Apache-2.0"
] | null | null | null |
data_structures/tree_based/binary_tree/test.py
|
kwahome/data-structures-and-algos
|
535b23c63bf384d63c1ebc08d1c32d3dd808297c
|
[
"Apache-2.0"
] | null | null | null |
data_structures/tree_based/binary_tree/test.py
|
kwahome/data-structures-and-algos
|
535b23c63bf384d63c1ebc08d1c32d3dd808297c
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from data_structures.tree_based.binary_tree.tree import Tree
class BinaryTreeTests(unittest.TestCase):
def setUp(self):
self.binary_tree = Tree()
def tearDown(self):
pass
def test_tree_insert(self):
data = 5
self.assertEqual(None, self.binary_tree.get_data())
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_data())
data = 2
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_left().get_data())
data = 1
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_left().get_left().get_data())
data = 3
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_left().get_right().get_data())
data = 10
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_right().get_data())
data = 15
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_right().get_right().get_data())
data = 9
self.binary_tree.insert(data)
self.assertEqual(data, self.binary_tree.get_right().get_left().get_data())
| 33.27027
| 83
| 0.658002
|
f8a41afd5381220afe0ccd93208961196c143a6d
| 2,177
|
py
|
Python
|
openquake/hazardlib/tests/gsim/utils.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T00:28:24.000Z
|
2019-08-01T00:28:24.000Z
|
openquake/hazardlib/tests/gsim/utils.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 4
|
2018-08-31T14:14:35.000Z
|
2021-10-11T12:53:13.000Z
|
openquake/hazardlib/tests/gsim/utils.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T14:11:00.000Z
|
2019-07-17T10:06:02.000Z
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012-2018, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import inspect
import numpy
from openquake.hazardlib.tests.gsim.check_gsim import check_gsim
class BaseGSIMTestCase(unittest.TestCase):
BASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
GSIM_CLASS = None
def get_context_attributes(self, ctx):
att = inspect.getmembers(ctx, lambda a: not inspect.isroutine(a))
att = [
k for k, v in att if not ('_abc' in k)
and not ((k.startswith('_') and k.endswith('_')))
]
return set(att)
def check(self, filename, max_discrep_percentage):
gsim = self.GSIM_CLASS()
filename = os.path.join(self.BASE_DATA_PATH, filename)
errors, stats, sctx, rctx, dctx, ctxs = check_gsim(
gsim.__class__, open(filename), max_discrep_percentage)
s_att = self.get_context_attributes(sctx)
r_att = self.get_context_attributes(rctx)
d_att = self.get_context_attributes(dctx)
self.assertEqual(gsim.REQUIRES_SITES_PARAMETERS, s_att)
self.assertEqual(gsim.REQUIRES_RUPTURE_PARAMETERS, r_att)
self.assertEqual(gsim.REQUIRES_DISTANCES, d_att)
self.assertTrue(
numpy.all(ctxs),
msg='Contexts objects have been changed by method '
'get_mean_and_stddevs')
if errors:
raise AssertionError(stats)
print()
print(stats)
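# A hedged sketch of how the base class above is typically used by concrete GSIM tests;
# the subclass name, GSIM class and CSV file path are illustrative only.
#
#   class SomeGsimTestCase(BaseGSIMTestCase):
#       GSIM_CLASS = SomeGsim  # a GMPE class from openquake.hazardlib.gsim
#
#       def test_mean(self):
#           self.check('SOME_GSIM/SOME_GSIM_MEAN.csv', max_discrep_percentage=0.1)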
| 38.192982
| 75
| 0.687184
|
d0bddc90deb43ede3aa2ab4fa1f587e59ee90a11
| 1,271
|
py
|
Python
|
test/functional/wallet_disableprivatekeys.py
|
farmscoins/farmcoin
|
dcfa0e7b33de314cfa1e4d3de1d8e2d894821d43
|
[
"MIT"
] | 1
|
2019-06-11T08:21:52.000Z
|
2019-06-11T08:21:52.000Z
|
test/functional/wallet_disableprivatekeys.py
|
farmscoins/farmcoin
|
dcfa0e7b33de314cfa1e4d3de1d8e2d894821d43
|
[
"MIT"
] | null | null | null |
test/functional/wallet_disableprivatekeys.py
|
farmscoins/farmcoin
|
dcfa0e7b33de314cfa1e4d3de1d8e2d894821d43
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test disable-privatekeys mode.
"""
from test_framework.test_framework import FarmcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
)
class DisablePrivateKeysTest(FarmcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.log.info("Test disableprivatekeys creation.")
self.nodes[0].createwallet('w1', True)
self.nodes[0].createwallet('w2')
w1 = node.get_wallet_rpc('w1')
w2 = node.get_wallet_rpc('w2')
assert_raises_rpc_error(-4,"Error: Private keys are disabled for this wallet", w1.getnewaddress)
assert_raises_rpc_error(-4,"Error: Private keys are disabled for this wallet", w1.getrawchangeaddress)
w1.importpubkey(w2.getaddressinfo(w2.getnewaddress())['pubkey'])
if __name__ == '__main__':
DisablePrivateKeysTest().main()
| 35.305556
| 110
| 0.715185
|
a82800f27ae80859d7c7339910172ff5a691e3d7
| 868
|
py
|
Python
|
setup.py
|
torchbox/django-tagging-autocomplete
|
43d9604cb2510cf5ba13a533660b7d14d1d5702b
|
[
"MIT"
] | 2
|
2015-09-23T23:19:31.000Z
|
2015-09-24T00:17:45.000Z
|
setup.py
|
torchbox/django-tagging-autocomplete
|
43d9604cb2510cf5ba13a533660b7d14d1d5702b
|
[
"MIT"
] | null | null | null |
setup.py
|
torchbox/django-tagging-autocomplete
|
43d9604cb2510cf5ba13a533660b7d14d1d5702b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_description = open('README.txt').read()
setup(
name='django-tagging-autocomplete',
version='0.3.1',
description='Autocompletion for django-tagging',
long_description=long_description,
author='Ludwik Trammer',
author_email='ludwik@gmail.com',
url='http://code.google.com/p/django-tagging-autocomplete/',
packages=find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
zip_safe=False,
#package_data={},
#test_suite='tagging_autocomplete.tests.runtests.runtests'
)
| 31
| 64
| 0.653226
|
66a84223179cfca341487381bf4f95676e3fa606
| 2,497
|
py
|
Python
|
HeVanderbiltModel/PBC[noBloch]/LatticeSolutions.py
|
Bellomia/pyDrude
|
cd2d6980008ddbe247f1aa50dc238e0d7cf0904f
|
[
"MIT"
] | null | null | null |
HeVanderbiltModel/PBC[noBloch]/LatticeSolutions.py
|
Bellomia/pyDrude
|
cd2d6980008ddbe247f1aa50dc238e0d7cf0904f
|
[
"MIT"
] | null | null | null |
HeVanderbiltModel/PBC[noBloch]/LatticeSolutions.py
|
Bellomia/pyDrude
|
cd2d6980008ddbe247f1aa50dc238e0d7cf0904f
|
[
"MIT"
] | null | null | null |
def LatticeSolutions(L, a, U0, W, cutoff): # L: Total length | a: Lattice parameter | U0: Perturbation strength | W: Perturbation width | cutoff: Energy cut-off
print('Diagonalizing the perturbed Hamiltonian...')
import numpy
import pylab
from cmath import sqrt, sin, cos, exp, pi
from numBraKet import numBraKet as BraKet
s = 10*L # Sampling grain for integrals
## Imperturbed eigenproblem: PLANE WAVES ##
A = sqrt(1/L) # Normalization factor
def q(n): return (2*pi*n)/L
def psi0_n(x): return A*exp(1j*q(n)*x)
def psi0_m(x): return A*exp(1j*q(m)*x)
def eigenE0(n): return (4 * pi**2 * n**2)/(2 * L**2)
n = 0
while eigenE0(n) <= cutoff:
n += 1
basisDIM = n # We've established the basis dimension from the energy cut-off
print('Basis dimension: ', 2*basisDIM+1)
## Periodic potential definition ##
def U_lat(x):
return U0*exp(-x**2/W**2)
"""NB: x is centered on a lattice *barrier*,
not on the atomic site!!!"""
def U(x):
U = 0
for x_lat in range(0,L+a,a):
U += U_lat(x-x_lat)
return U
"""NB: We have choosed this particular implemention
for the lattice because of the ''realistic''
surface behaviour of the solutions.
x = numpy.linspace(0,L,s)
f = numpy.vectorize(U, otypes=[numpy.complex])
y = f(x)
pylab.fill_between(x,y, facecolor='gray', edgecolor='black', alpha=0.3)"""
## Now we proceed constructing the hamiltionian matrix H_{nm} ##
H = [[0 for i in range(2*basisDIM+1)] for j in range(2*basisDIM+1)]
def Ket_m(x): return U(x)*psi0_m(x)
Bra_n = numpy.vectorize(psi0_n, otypes=[numpy.complex])
Ket_m = numpy.vectorize(Ket_m, otypes=[numpy.complex])
for i in range(-basisDIM, basisDIM+1):
for j in range(-basisDIM, basisDIM+1):
n, m = i+1, j+1 # Dumb python indexing...
## Kinetic (diagonal) part ##
if i==j: H[i][i] += eigenE0(n)
## Periodic potential part ##
x = numpy.linspace(0,L,s)
H[i][j] += BraKet(Bra_n(x), Ket_m(x), 0, L, x)
## Perturbed solutions ##
eigenvalues, eigenvectors = numpy.linalg.eigh(H)
return eigenvalues, eigenvectors
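# A hedged usage sketch; the numerical values below are placeholders, not ones used by
# the project:
#
#   eigenvalues, eigenvectors = LatticeSolutions(L=40, a=4, U0=1.0, W=0.5, cutoff=50)
#   ground_state_energy = eigenvalues[0]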
| 29.376471
| 165
| 0.545855
|
484cc1a9025a7631393afd97aa749d7fe77e08c4
| 651
|
py
|
Python
|
common/BaseModel.py
|
maoxuelin083/restful-Blog
|
29a08613d71ee6c369dcec9ffb09a2a2ed837431
|
[
"Apache-2.0"
] | null | null | null |
common/BaseModel.py
|
maoxuelin083/restful-Blog
|
29a08613d71ee6c369dcec9ffb09a2a2ed837431
|
[
"Apache-2.0"
] | null | null | null |
common/BaseModel.py
|
maoxuelin083/restful-Blog
|
29a08613d71ee6c369dcec9ffb09a2a2ed837431
|
[
"Apache-2.0"
] | null | null | null |
from FlaskProject.extendsions import db
class BaseModels(db.Model):
__abstract__ = True
def save(self):
try:
db.session.add(self)
db.session.commit()
except Exception as e:
print(e)
return False
return True
def delete(self):
try:
db.session.delete(self)
db.session.commit()
except Exception as e:
print(e)
return False
return True
class BaseModelPrimaryKey(BaseModels):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
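# A hedged usage sketch; the model name and column are illustrative, not part of this project:
#
#   class Article(BaseModelPrimaryKey):
#       title = db.Column(db.String(128))
#
#   article = Article(title="hello")
#   article.save()    # commits via the BaseModels.save helper above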
| 23.25
| 68
| 0.551459
|
b1fa28d7c67325025738d91a2a05dda49060c688
| 228
|
py
|
Python
|
shoppingcart/admin.py
|
sa1if3/django_ecommerce
|
ef6a0a79d4aa0cf518f68227beb6d15fa7729458
|
[
"BSD-3-Clause"
] | 16
|
2021-07-08T10:42:38.000Z
|
2022-02-24T05:32:59.000Z
|
shoppingcart/admin.py
|
RussQuan/django_ecommerce
|
84f090dd2471355a513bb13827ea17e18ce2310f
|
[
"BSD-3-Clause"
] | null | null | null |
shoppingcart/admin.py
|
RussQuan/django_ecommerce
|
84f090dd2471355a513bb13827ea17e18ce2310f
|
[
"BSD-3-Clause"
] | 4
|
2021-07-08T10:44:56.000Z
|
2021-07-12T03:39:40.000Z
|
from django.contrib import admin
from .models import *
# Register your models here.
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
list_display = ("listing","total_quantity","created_by","created_at","updated_at")
| 32.571429
| 86
| 0.767544
|
0ed5d31d6bb118b64618572ffa2026febbecd246
| 5,616
|
py
|
Python
|
poetry/templates/template_executor.py
|
bennylut/poetry
|
f35e55d697260ebcd4de03f2272eb40afc450e1d
|
[
"MIT"
] | 10
|
2021-10-10T02:02:14.000Z
|
2022-03-01T16:45:28.000Z
|
poetry/templates/template_executor.py
|
bennylut/poetry
|
f35e55d697260ebcd4de03f2272eb40afc450e1d
|
[
"MIT"
] | 2
|
2021-10-31T07:27:54.000Z
|
2021-12-05T14:14:30.000Z
|
poetry/templates/template_executor.py
|
bennylut/poetry
|
f35e55d697260ebcd4de03f2272eb40afc450e1d
|
[
"MIT"
] | 1
|
2021-12-05T15:14:26.000Z
|
2021-12-05T15:14:26.000Z
|
import zipfile
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Dict, Optional, ContextManager
from poetry.core.pyproject.project import Project
from poetry.core.vcs import Git
from poetry.core.utils.props_ext import cached_property
from poetry.core.vcs.git import GitConfig
# noinspection PyPackageRequirements
from protopy import doc_generator
# noinspection PyPackageRequirements
from protopy.engine import ProtopyEngine
from poetry.console import console
from poetry.managed_project import ManagedProject
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from poetry.app.relaxed_poetry import RelaxedPoetry
class TemplateExecutor:
def __init__(self, rp: "RelaxedPoetry"):
self._rp = rp
self._pengine = ProtopyEngine(console.io)
@cached_property
def _git(self) -> Git:
return Git()
def document(self, descriptor: str) -> str:
with self._locate_template(descriptor) as template:
if not template or not template.exists():
raise FileNotFoundError(f"could not locate template according to descriptor: {descriptor}")
return doc_generator.generate(template / "proto.py", descriptor, "rp new")
def execute(
self,
descriptor: str,
out_path: Path,
args: List[str],
kwargs: Dict[str, str],
allow_override: bool
):
with self._locate_template(descriptor) as template:
if not template or not template.exists():
raise FileNotFoundError(f"could not locate template according to descriptor: {descriptor}")
rp_ctx = _RelaxedPoetryTemplateContext(self._rp)
self._pengine.render(
template, out_path, args,
kwargs, {"rp": rp_ctx}, allow_overwrite=allow_override)
@contextmanager
def _locate_template(self, descriptor: str) -> ContextManager[Path]:
if descriptor.startswith("git+"): # this is a git descriptor
with _use_git_template(self._git, descriptor[len("git+"):]) as path:
yield path
else:
descriptor_path = Path(descriptor)
if descriptor_path.exists() or descriptor_path.with_suffix(".zip").exists(): # this is a path descriptor
with _use_file_system(descriptor_path) as path:
yield path
else:
with _use_template_ref(descriptor, self._rp.active_project) as path:
yield path
@contextmanager
def _use_git_template(git: Git, repo: str):
with TemporaryDirectory() as tmp:
path = Path(tmp)
git.clone(repo, path)
yield path
@contextmanager
def _use_file_system(path: Path):
if path.suffix == ".zip":
with TemporaryDirectory() as tmp:
tmp_path = Path(tmp)
zipfile.ZipFile(path).extractall(tmp_path)
yield tmp_path
else:
yield path
@contextmanager
def _use_builtin(name: str):
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
with pkg_resources.path(__package__, name + ".zip") as resource_path:
if not resource_path.exists():
yield None
else:
with _use_file_system(resource_path) as path:
yield path
@contextmanager
def _use_template_ref(name: str, prj: ManagedProject):
if prj is not None:
template_path = prj.path / "etc/rp/templates" / name
if template_path.exists() or template_path.with_suffix(".zip").exists():
with _use_file_system(template_path) as path:
yield path
return
else:
parent = prj.parent
if parent:
with _use_template_ref(name, parent) as path:
yield path
return
with _use_builtin(name) as path:
yield path
class _RelaxedPoetryProjectDefaults:
@cached_property
def _git_config(self):
return GitConfig()
@cached_property
def author_name(self) -> str:
"""
:return: the default author name
"""
config = self._git_config
author = None
if config.get("user.name"):
author = config["user.name"]
author_email = config.get("user.email")
if author_email:
author += " <{}>".format(author_email)
return author
@cached_property
def python_requirements(self) -> str:
"""
:return: the default python version requirements (e.g., ^3.6)
"""
import platform
mj, mn, _ = platform.python_version_tuple()
return f"^{mj}.{mn}"
# noinspection PyCompatibility
@cached_property
def buildsys_requirements(self) -> str:
"""
:return: the default build-sys (relaxed-poetry-core) requirements (e.g., >=0.1)
"""
try:
import importlib.metadata as mtd
except ModuleNotFoundError:
import importlib_metadata as mtd
version = mtd.version("relaxed-poetry-core")
return f">={version}"
class _RelaxedPoetryTemplateContext:
def __init__(self, rp: "RelaxedPoetry"):
self._rp = rp
self.project_defaults = _RelaxedPoetryProjectDefaults()
@property
def active_project(self) -> Optional[Project]:
if not self._rp.active_project:
return None
return self._rp.active_project.pyproject
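# A hedged sketch of the descriptor forms _locate_template understands; the executor
# instance and the concrete descriptors below are illustrative only.
#
#   executor = TemplateExecutor(rp)
#   executor.document("git+https://example.org/templates/app.git")  # cloned to a temp dir
#   executor.document("./my-template")    # a directory, or ./my-template.zip, on disk
#   executor.document("app")              # resolved via project templates, then built-ins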
| 30.356757
| 117
| 0.633725
|
2a9974268b8baf372ce5c8bbc06cff8ae82e7860
| 46,206
|
py
|
Python
|
kitsune/wiki/views.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
kitsune/wiki/views.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
kitsune/wiki/views.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
import json
import logging
import time
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db import connection
from django.http import (HttpResponse, HttpResponseRedirect,
Http404, HttpResponseBadRequest)
from django.shortcuts import get_object_or_404, render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import (require_GET, require_POST,
require_http_methods)
import jingo
import waffle
from mobility.decorators import mobile_template
from ratelimit.decorators import ratelimit
from statsd import statsd
from tower import ugettext_lazy as _lazy
from tower import ugettext as _
from kitsune.access.decorators import login_required
from kitsune.products.models import Product, Topic
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import (paginate, smart_int, get_next_url, user_or_ip,
truncated_json_dumps)
from kitsune.wiki import DOCUMENTS_PER_PAGE
from kitsune.wiki.config import CATEGORIES, TEMPLATES_CATEGORY
from kitsune.wiki.events import (
EditDocumentEvent, ReviewableRevisionInLocaleEvent,
ApproveRevisionInLocaleEvent, ApprovedOrReadyUnion,
ReadyRevisionEvent)
from kitsune.wiki.forms import (
AddContributorForm, DocumentForm, RevisionForm, RevisionFilterForm,
ReviewForm)
from kitsune.wiki.models import Document, Revision, HelpfulVote, ImportantDate
from kitsune.wiki.parser import wiki_to_html
from kitsune.wiki.showfor import showfor_data
from kitsune.wiki.tasks import (
send_reviewed_notification, schedule_rebuild_kb,
send_contributor_notification)
log = logging.getLogger('k.wiki')
@require_GET
@mobile_template('wiki/{mobile/}document.html')
def document(request, document_slug, template=None):
"""View a wiki document."""
fallback_reason = None
# If a slug isn't available in the requested locale, fall back to en-US:
try:
doc = Document.objects.get(locale=request.LANGUAGE_CODE,
slug=document_slug)
if (not doc.current_revision and doc.parent and
doc.parent.current_revision):
# This is a translation but its current_revision is None
# and OK to fall back to parent (parent is approved).
fallback_reason = 'translation_not_approved'
elif not doc.current_revision:
# No current_revision, no parent with current revision, so
# nothing to show.
fallback_reason = 'no_content'
except Document.DoesNotExist:
# Look in default language:
doc = get_object_or_404(Document,
locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=document_slug)
# If there's a translation to the requested locale, take it:
translation = doc.translated_to(request.LANGUAGE_CODE)
if translation:
url = translation.get_absolute_url()
url = urlparams(url, query_dict=request.GET)
return HttpResponseRedirect(url)
elif doc.current_revision:
# There is no translation
# and OK to fall back to parent (parent is approved).
fallback_reason = 'no_translation'
# Obey explicit redirect pages:
# Don't redirect on redirect=no (like Wikipedia), so we can link from a
# redirected-to-page back to a "Redirected from..." link, so you can edit
# the redirect.
redirect_url = (None if request.GET.get('redirect') == 'no'
else doc.redirect_url(request.LANGUAGE_CODE))
if redirect_url:
url = urlparams(redirect_url, query_dict=request.GET,
redirectslug=doc.slug, redirectlocale=doc.locale)
return HttpResponseRedirect(url)
# Get "redirected from" doc if we were redirected:
redirect_slug = request.GET.get('redirectslug')
redirect_locale = request.GET.get('redirectlocale')
redirected_from = None
if redirect_slug and redirect_locale:
try:
redirected_from = Document.objects.get(locale=redirect_locale,
slug=redirect_slug)
except Document.DoesNotExist:
pass
related_documents = doc.related_documents
related_questions = doc.related_questions
contributors = doc.contributors.all()
products = doc.get_products()
if len(products) < 1:
product = Product.objects.filter(visible=True)[0]
else:
product = products[0]
topics = Topic.objects.filter(product=product, visible=True, parent=None)
ga_push = []
if fallback_reason is not None:
ga_push.append(
['_trackEvent', 'Incomplete L10n', 'Not Localized',
'%s/%s' % (doc.slug, request.LANGUAGE_CODE)])
elif doc.is_outdated():
ga_push.append(
['_trackEvent', 'Incomplete L10n', 'Not Updated',
'%s/%s' % (doc.parent.slug, request.LANGUAGE_CODE)])
hide_voting = False
if (doc.category == TEMPLATES_CATEGORY or
waffle.switch_is_active('hide-voting')):
hide_voting = True
data = {'document': doc, 'redirected_from': redirected_from,
'related_documents': related_documents,
'related_questions': related_questions,
'contributors': contributors,
'fallback_reason': fallback_reason,
'is_aoa_referral': request.GET.get('ref') == 'aoa',
'topics': topics, 'product': product, 'products': products,
'hide_voting': hide_voting, 'ga_push': ga_push}
data.update(showfor_data(products))
return render(request, template, data)
def revision(request, document_slug, revision_id):
"""View a wiki document revision."""
rev = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
data = {'document': rev.document, 'revision': rev}
data.update(showfor_data())
return render(request, 'wiki/revision.html', data)
@require_GET
def list_documents(request, category=None):
"""List wiki documents."""
docs = Document.objects.filter(locale=request.LANGUAGE_CODE).order_by('title')
if category:
docs = docs.filter(category=category)
try:
category_id = int(category)
except ValueError:
raise Http404
try:
category = unicode(dict(CATEGORIES)[category_id])
except KeyError:
raise Http404
docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
return render(request, 'wiki/list_documents.html', {
'documents': docs,
'category': category})
@login_required
def new_document(request):
"""Create a new wiki document."""
if request.method == 'GET':
doc_form = DocumentForm(initial_title=request.GET.get('title'))
rev_form = RevisionForm()
return render(request, 'wiki/new_document.html', {
'document_form': doc_form,
'revision_form': rev_form})
post_data = request.POST.copy()
post_data.update({'locale': request.LANGUAGE_CODE})
doc_form = DocumentForm(post_data)
rev_form = RevisionForm(post_data)
if doc_form.is_valid() and rev_form.is_valid():
doc = doc_form.save(None)
_save_rev_and_notify(rev_form, request.user, doc)
return HttpResponseRedirect(reverse('wiki.document_revisions',
args=[doc.slug]))
return render(request, 'wiki/new_document.html', {
'document_form': doc_form,
'revision_form': rev_form})
_document_lock_key = 'sumo::wiki::document::{id}::lock'
def _document_lock_check(document_id):
"""Check for a lock on a document.
Returns the username of the user that has the page locked, or ``None`` if
no user has a lock.
"""
try:
redis = redis_client(name='default')
key = _document_lock_key.format(id=document_id)
return redis.get(key)
except RedisError as e:
statsd.incr('redis.errror')
log.error('Redis error: %s' % e)
return None
def _document_lock_steal(document_id, user_name, expire_time=60 * 15):
"""Lock a document for a user.
Note that this does not check if the page is already locked, and simply
sets the lock on the page.
"""
try:
redis = redis_client(name='default')
key = _document_lock_key.format(id=document_id)
it_worked = redis.set(key, user_name)
redis.expire(key, expire_time)
return it_worked
except RedisError as e:
statsd.incr('redis.errror')
log.error('Redis error: %s' % e)
return False
def _document_lock_clear(document_id, user_name):
"""Remove a lock from a document.
This would be used to indicate the given user no longer wants the page
locked, so the lock should be cleared.
If the `user` parameter does not match the current lock, the lock remains
in place.
Returns true if the lock was removed, false otherwise.
"""
try:
redis = redis_client(name='default')
key = _document_lock_key.format(id=document_id)
locked_by = redis.get(key)
if locked_by == user_name:
return redis.delete(key)
else:
return False
except RedisError as e:
statsd.incr('redis.errror')
log.error('Redis error: %s' % e)
return False
def _document_lock(doc_id, username):
"""If there is no lock, take one. Return the current state of the lock."""
locked_by = _document_lock_check(doc_id)
if locked_by == username:
locked = False
if locked_by:
try:
locked = not (locked_by == username)
locked_by = User.objects.get(username=locked_by)
except User.DoesNotExist:
# If the user doesn't exist, they shouldn't be able to enforce a lock.
locked = False
locked_by = None
else:
locked_by = username
locked = False
_document_lock_steal(doc_id, username)
return locked, locked_by
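# A hedged sketch of the lock lifecycle the helpers above implement (arguments illustrative):
#
#   locked, locked_by = _document_lock(doc.id, request.user.username)   # inspect / take the lock
#   _document_lock_steal(doc.id, request.user.username)                 # force-take an existing lock
#   _document_lock_clear(doc.id, request.user.username)                 # release it when editing ends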
@login_required
def steal_lock(request, document_slug, revision_id=None):
doc = get_object_or_404(
Document, locale=request.LANGUAGE_CODE, slug=document_slug)
user = request.user
ok = _document_lock_steal(doc.id, user.username)
return HttpResponse("", status=200 if ok else 400)
@require_http_methods(['GET', 'POST'])
@login_required
def edit_document(request, document_slug, revision_id=None):
"""Create a new revision of a wiki document, or edit document metadata."""
doc = get_object_or_404(
Document, locale=request.LANGUAGE_CODE, slug=document_slug)
user = request.user
can_edit_needs_change = doc.allows(user, 'edit_needs_change')
can_archive = doc.allows(user, 'archive')
# If this document has a parent, then the edit is handled by the
# translate view. Pass it on.
if doc.parent:
return translate(request, doc.parent.slug, revision_id)
if revision_id:
rev = get_object_or_404(Revision, pk=revision_id, document=doc)
else:
rev = doc.current_revision or doc.revisions.order_by('-created',
'-id')[0]
disclose_description = bool(request.GET.get('opendescription'))
doc_form = rev_form = None
if doc.allows(user, 'create_revision'):
rev_form = RevisionForm(
instance=rev,
initial={'based_on': rev.id, 'comment': ''})
if doc.allows(user, 'edit'):
doc_form = DocumentForm(
initial=_document_form_initial(doc),
can_archive=can_archive,
can_edit_needs_change=can_edit_needs_change)
if request.method == 'GET':
if not (rev_form or doc_form):
# You can't do anything on this page, so get lost.
raise PermissionDenied
else: # POST
# Comparing against localized names for the Save button bothers me, so
# I embedded a hidden input:
which_form = request.POST.get('form')
_document_lock_clear(doc.id, user.username)
if which_form == 'doc':
if doc.allows(user, 'edit'):
post_data = request.POST.copy()
post_data.update({'locale': request.LANGUAGE_CODE})
doc_form = DocumentForm(
post_data,
instance=doc,
can_archive=can_archive,
can_edit_needs_change=can_edit_needs_change)
if doc_form.is_valid():
# Get the possibly new slug for the imminent redirection:
doc = doc_form.save(None)
# Do we need to rebuild the KB?
_maybe_schedule_rebuild(doc_form)
return HttpResponseRedirect(
urlparams(reverse('wiki.edit_document',
args=[doc.slug]),
opendescription=1))
disclose_description = True
else:
raise PermissionDenied
elif which_form == 'rev':
if doc.allows(user, 'create_revision'):
rev_form = RevisionForm(request.POST)
rev_form.instance.document = doc # for rev_form.clean()
if rev_form.is_valid():
_save_rev_and_notify(rev_form, user, doc, base_rev=rev)
if 'notify-future-changes' in request.POST:
EditDocumentEvent.notify(request.user, doc)
return HttpResponseRedirect(
reverse('wiki.document_revisions',
args=[document_slug]))
else:
raise PermissionDenied
show_revision_warning = _show_revision_warning(doc, rev)
locked, locked_by = _document_lock(doc.id, user.username)
return render(request, 'wiki/edit.html', {
'revision_form': rev_form,
'document_form': doc_form,
'disclose_description': disclose_description,
'document': doc,
'show_revision_warning': show_revision_warning,
'locked': locked,
'locked_by': locked_by})
@login_required
@require_POST
def preview_revision(request):
"""Create an HTML fragment preview of the posted wiki syntax."""
wiki_content = request.POST.get('content', '')
statsd.incr('wiki.preview')
# TODO: Get doc ID from JSON.
data = {'content': wiki_to_html(wiki_content, request.LANGUAGE_CODE)}
data.update(showfor_data())
return render(request, 'wiki/preview.html', data)
@require_GET
def document_revisions(request, document_slug, contributor_form=None):
"""List all the revisions of a given document."""
locale = request.GET.get('locale', request.LANGUAGE_CODE)
doc = get_object_or_404(
Document, locale=locale, slug=document_slug)
revs = Revision.objects.filter(document=doc).order_by('-created', '-id')
if request.is_ajax():
template = 'wiki/includes/revision_list.html'
else:
template = 'wiki/history.html'
form = contributor_form or AddContributorForm()
return render(request, template, {
'revisions': revs, 'document': doc,
'contributor_form': form})
@login_required
def review_revision(request, document_slug, revision_id):
"""Review a revision of a wiki document."""
rev = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
doc = rev.document
if not doc.allows(request.user, 'review_revision'):
raise PermissionDenied
form = ReviewForm(
initial={'needs_change': doc.needs_change,
'needs_change_comment': doc.needs_change_comment})
# Don't ask significance if this doc is a translation or if it has no
# former approved versions:
should_ask_significance = not doc.parent and doc.current_revision
based_on_revs = doc.revisions.all()
last_approved_date = getattr(doc.current_revision, 'created',
datetime.fromordinal(1))
based_on_revs = based_on_revs.filter(created__gt=last_approved_date)
revision_contributors = list(set(
based_on_revs.values_list('creator__username', flat=True)))
# Don't include the reviewer in the recent contributors list.
if request.user.username in revision_contributors:
revision_contributors.remove(request.user.username)
if request.method == 'POST':
form = ReviewForm(request.POST)
if form.is_valid() and not rev.reviewed:
# Don't allow revisions to be reviewed twice
rev.is_approved = 'approve' in request.POST
rev.reviewer = request.user
rev.reviewed = datetime.now()
if should_ask_significance and form.cleaned_data['significance']:
rev.significance = form.cleaned_data['significance']
# If document is localizable and revision was approved and
# user has permission, set the is_ready_for_localization value.
if (doc.allows(request.user, 'mark_ready_for_l10n') and
rev.is_approved and rev.can_be_readied_for_localization()):
rev.is_ready_for_localization = form.cleaned_data[
'is_ready_for_localization']
# If the revision is ready for l10n, store the date
# and the user.
if rev.is_ready_for_localization:
rev.readied_for_localization = rev.reviewed
rev.readied_for_localization_by = rev.reviewer
rev.save()
# Update the needs change bit (if approved, default language and
# user has permission).
if (doc.locale == settings.WIKI_DEFAULT_LANGUAGE and
doc.allows(request.user, 'edit_needs_change') and
rev.is_approved):
doc.needs_change = form.cleaned_data['needs_change']
doc.needs_change_comment = \
form.cleaned_data['needs_change_comment']
doc.save()
# Send notifications of approvedness and readiness:
if rev.is_ready_for_localization or rev.is_approved:
events = [ApproveRevisionInLocaleEvent(rev)]
if rev.is_ready_for_localization:
events.append(ReadyRevisionEvent(rev))
ApprovedOrReadyUnion(*events).fire(exclude=[rev.creator,
request.user])
# Send an email (not really a "notification" in the sense that
# there's a Watch table entry) to revision creator.
msg = form.cleaned_data['comment']
send_reviewed_notification.delay(rev, doc, msg)
send_contributor_notification(based_on_revs, rev, doc, msg)
# Schedule KB rebuild?
statsd.incr('wiki.review')
schedule_rebuild_kb()
return HttpResponseRedirect(reverse('wiki.document_revisions',
args=[document_slug]))
if doc.parent: # A translation
# For diffing the based_on revision against, to help the user see if he
# translated all the recent changes:
parent_revision = (rev.based_on or
doc.parent.localizable_or_latest_revision())
template = 'wiki/review_translation.html'
else:
parent_revision = None
template = 'wiki/review_revision.html'
data = {'revision': rev, 'document': doc, 'form': form,
'parent_revision': parent_revision,
'revision_contributors': list(revision_contributors),
'should_ask_significance': should_ask_significance}
data.update(showfor_data())
return render(request, template, data)
@require_GET
def compare_revisions(request, document_slug):
"""Compare two wiki document revisions.
The ids are passed as query string parameters (to and from).
"""
locale = request.GET.get('locale', request.LANGUAGE_CODE)
doc = get_object_or_404(
Document, locale=locale, slug=document_slug)
if 'from' not in request.GET or 'to' not in request.GET:
raise Http404
from_id = smart_int(request.GET.get('from'))
to_id = smart_int(request.GET.get('to'))
revision_from = get_object_or_404(Revision, document=doc, id=from_id)
revision_to = get_object_or_404(Revision, document=doc, id=to_id)
if request.is_ajax():
template = 'wiki/includes/revision_diff.html'
else:
template = 'wiki/compare_revisions.html'
return render(request, template, {
'document': doc, 'revision_from': revision_from,
'revision_to': revision_to})
@login_required
def select_locale(request, document_slug):
"""Select a locale to translate the document to."""
doc = get_object_or_404(
Document, locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug)
return render(request, 'wiki/select_locale.html', {'document': doc})
@require_http_methods(['GET', 'POST'])
@login_required
def translate(request, document_slug, revision_id=None):
"""Create a new translation of a wiki document.
* document_slug is for the default locale
* translation is to the request.LANGUAGE_CODE
"""
# TODO: Refactor this view into two views? (new, edit)
# That might help reduce the headache-inducing branchiness.
parent_doc = get_object_or_404(
Document, locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug)
user = request.user
if settings.WIKI_DEFAULT_LANGUAGE == request.LANGUAGE_CODE:
# Don't translate to the default language.
return HttpResponseRedirect(reverse(
'wiki.edit_document', locale=settings.WIKI_DEFAULT_LANGUAGE,
args=[parent_doc.slug]))
if not parent_doc.is_localizable:
message = _lazy(u'You cannot translate this document.')
return render(request, 'handlers/400.html', {
'message': message},
status=400)
based_on_rev = parent_doc.localizable_or_latest_revision(
include_rejected=True)
disclose_description = bool(request.GET.get('opendescription'))
try:
doc = parent_doc.translations.get(locale=request.LANGUAGE_CODE)
except Document.DoesNotExist:
doc = None
disclose_description = True
user_has_doc_perm = not doc or doc.allows(user, 'edit')
user_has_rev_perm = not doc or doc.allows(user, 'create_revision')
if not user_has_doc_perm and not user_has_rev_perm:
# User has no perms, bye.
raise PermissionDenied
doc_form = rev_form = None
base_rev = None
if user_has_doc_perm:
doc_initial = _document_form_initial(doc) if doc else None
doc_form = DocumentForm(initial=doc_initial)
if user_has_rev_perm:
initial = {'based_on': based_on_rev.id, 'comment': ''}
if revision_id:
base_rev = Revision.objects.get(pk=revision_id)
initial.update(content=base_rev.content,
summary=base_rev.summary,
keywords=base_rev.keywords)
elif not doc:
initial.update(content=based_on_rev.content,
summary=based_on_rev.summary,
keywords=based_on_rev.keywords)
# Get a revision of the translation to plonk into the page as a
# starting point. Since translations are never "ready for
# localization", this will first try to find an approved revision, then
# an unrejected one, then give up.
instance = doc and doc.localizable_or_latest_revision()
rev_form = RevisionForm(instance=instance, initial=initial)
base_rev = base_rev or instance
if request.method == 'POST':
which_form = request.POST.get('form', 'both')
doc_form_invalid = False
if doc is not None:
_document_lock_clear(doc.id, user.username)
if user_has_doc_perm and which_form in ['doc', 'both']:
disclose_description = True
post_data = request.POST.copy()
post_data.update({'locale': request.LANGUAGE_CODE})
doc_form = DocumentForm(post_data, instance=doc)
doc_form.instance.locale = request.LANGUAGE_CODE
doc_form.instance.parent = parent_doc
if which_form == 'both':
rev_form = RevisionForm(request.POST)
# If we are submitting the whole form, we need to check that
# the Revision is valid before saving the Document.
if doc_form.is_valid() and (which_form == 'doc' or
rev_form.is_valid()):
doc = doc_form.save(parent_doc)
# Possibly schedule a rebuild.
_maybe_schedule_rebuild(doc_form)
if which_form == 'doc':
url = urlparams(reverse('wiki.edit_document',
args=[doc.slug]),
opendescription=1)
return HttpResponseRedirect(url)
doc_slug = doc_form.cleaned_data['slug']
else:
doc_form_invalid = True
else:
doc_slug = doc.slug
if doc and user_has_rev_perm and which_form in ['rev', 'both']:
rev_form = RevisionForm(request.POST)
rev_form.instance.document = doc # for rev_form.clean()
if rev_form.is_valid() and not doc_form_invalid:
if 'no-update' in request.POST:
# Keep the old based_on.
based_on_id = base_rev.based_on_id
else:
# Keep what was in the form.
based_on_id = None
_save_rev_and_notify(
rev_form, request.user, doc, based_on_id, base_rev=base_rev)
if 'notify-future-changes' in request.POST:
EditDocumentEvent.notify(request.user, doc)
url = reverse('wiki.document_revisions',
args=[doc_slug])
return HttpResponseRedirect(url)
show_revision_warning = _show_revision_warning(doc, base_rev)
# A list of the revisions that have been approved since the last
# translation.
recent_approved_revs = parent_doc.revisions.filter(
is_approved=True, id__lte=based_on_rev.id)
if doc and doc.current_revision and doc.current_revision.based_on_id:
recent_approved_revs = recent_approved_revs.filter(
id__gt=doc.current_revision.based_on_id)
if doc:
locked, locked_by = _document_lock(doc.id, user.username)
else:
locked, locked_by = False, None
return render(request, 'wiki/translate.html', {
'parent': parent_doc, 'document': doc,
'document_form': doc_form, 'revision_form': rev_form,
'locale': request.LANGUAGE_CODE, 'based_on': based_on_rev,
'disclose_description': disclose_description,
'show_revision_warning': show_revision_warning,
'recent_approved_revs': recent_approved_revs,
'locked': locked,
'locked_by': locked_by})
@require_POST
@login_required
def watch_document(request, document_slug):
"""Start watching a document for edits."""
document = get_object_or_404(
Document, locale=request.LANGUAGE_CODE, slug=document_slug)
EditDocumentEvent.notify(request.user, document)
statsd.incr('wiki.watches.document')
return HttpResponseRedirect(document.get_absolute_url())
@require_POST
@login_required
def unwatch_document(request, document_slug):
"""Stop watching a document for edits."""
document = get_object_or_404(
Document, locale=request.LANGUAGE_CODE, slug=document_slug)
EditDocumentEvent.stop_notifying(request.user, document)
return HttpResponseRedirect(document.get_absolute_url())
@require_POST
@login_required
def watch_locale(request):
"""Start watching a locale for revisions ready for review."""
ReviewableRevisionInLocaleEvent.notify(request.user, locale=request.LANGUAGE_CODE)
statsd.incr('wiki.watches.locale')
# A 200 so jQuery interprets it as success
return HttpResponse()
@require_POST
@login_required
def unwatch_locale(request):
"""Stop watching a locale for revisions ready for review."""
ReviewableRevisionInLocaleEvent.stop_notifying(request.user,
locale=request.LANGUAGE_CODE)
return HttpResponse()
@require_POST
@login_required
def watch_approved(request):
"""Start watching approved revisions in a locale."""
if request.LANGUAGE_CODE not in settings.SUMO_LANGUAGES:
raise Http404
ApproveRevisionInLocaleEvent.notify(request.user, locale=request.LANGUAGE_CODE)
statsd.incr('wiki.watches.approved')
return HttpResponse()
@require_POST
@login_required
def unwatch_approved(request):
"""Stop watching approved revisions."""
if request.LANGUAGE_CODE not in settings.SUMO_LANGUAGES:
raise Http404
ApproveRevisionInLocaleEvent.stop_notifying(request.user,
locale=request.LANGUAGE_CODE)
return HttpResponse()
@require_POST
@login_required
def watch_ready(request):
"""Start watching ready-for-l10n revisions."""
if request.LANGUAGE_CODE != settings.WIKI_DEFAULT_LANGUAGE:
raise Http404
ReadyRevisionEvent.notify(request.user)
statsd.incr('wiki.watches.ready')
return HttpResponse()
@require_POST
@login_required
def unwatch_ready(request):
"""Stop watching ready-for-l10n revisions."""
if request.LANGUAGE_CODE != settings.WIKI_DEFAULT_LANGUAGE:
raise Http404
ReadyRevisionEvent.stop_notifying(request.user)
return HttpResponse()
@require_GET
def json_view(request):
"""Return some basic document info in a JSON blob."""
kwargs = {'locale': request.LANGUAGE_CODE, 'current_revision__isnull': False}
if 'title' in request.GET:
kwargs['title'] = request.GET['title']
elif 'slug' in request.GET:
kwargs['slug'] = request.GET['slug']
else:
return HttpResponseBadRequest()
document = get_object_or_404(Document, **kwargs)
data = json.dumps({
'id': document.id,
'locale': document.locale,
'slug': document.slug,
'title': document.title,
'summary': document.current_revision.summary,
'url': document.get_absolute_url(),
})
return HttpResponse(data, mimetype='application/json')
@require_POST
@csrf_exempt
@ratelimit(keys=user_or_ip('document-vote'), ip=False, rate='10/d')
def helpful_vote(request, document_slug):
"""Vote for Helpful/Not Helpful document"""
if 'revision_id' not in request.POST:
return HttpResponseBadRequest()
revision = get_object_or_404(
Revision, id=smart_int(request.POST['revision_id']))
survey = None
if revision.document.category == TEMPLATES_CATEGORY:
return HttpResponseBadRequest()
if not revision.has_voted(request):
ua = request.META.get('HTTP_USER_AGENT', '')[:1000] # 1000 max_length
vote = HelpfulVote(revision=revision, user_agent=ua)
if 'helpful' in request.POST:
vote.helpful = True
message = _('Glad to hear it — thanks for the feedback!')
else:
message = _('Sorry to hear that.')
# If user is over the limit, don't save but pretend everything is ok.
if not request.limited:
if request.user.is_authenticated():
vote.creator = request.user
else:
vote.anonymous_id = request.anonymous.anonymous_id
vote.save()
statsd.incr('wiki.vote')
# Send a survey if flag is enabled and vote wasn't helpful.
if 'helpful' not in request.POST:
survey = jingo.render_to_string(
request, 'wiki/includes/unhelpful_survey.html',
{'vote_id': vote.id})
# Save vote metadata: referrer and search query (if available)
for name in ['referrer', 'query', 'source']:
val = request.POST.get(name)
if val:
vote.add_metadata(name, val)
else:
message = _('You already voted on this Article.')
if request.is_ajax():
r = {'message': message}
if survey:
r.update(survey=survey)
return HttpResponse(json.dumps(r))
return HttpResponseRedirect(revision.document.get_absolute_url())
@require_POST
@csrf_exempt
def unhelpful_survey(request):
"""Ajax only view: Unhelpful vote survey processing."""
vote = get_object_or_404(
HelpfulVote, id=smart_int(request.POST.get('vote_id')))
# Only save the survey if it was for a not helpful vote and a survey
# doesn't exist for it already.
if not vote.helpful and not vote.metadata.filter(key='survey').exists():
# The survey is the posted data, minus the vote_id and button value.
survey = request.POST.copy()
survey.pop('vote_id')
survey.pop('button')
# Save the survey in JSON format, taking care not to exceed 1000 chars.
vote.add_metadata(
'survey', truncated_json_dumps(survey, 1000, 'comment'))
return HttpResponse(
json.dumps({'message': _('Thanks for making us better!')}))
@require_GET
def get_helpful_votes_async(request, document_slug):
document = get_object_or_404(
Document, locale=request.LANGUAGE_CODE, slug=document_slug)
datums = []
flag_data = []
rev_data = []
revisions = set()
created_list = []
dates_with_data = set()
cursor = connection.cursor()
cursor.execute('SELECT wiki_helpfulvote.revision_id, '
' SUM(wiki_helpfulvote.helpful), '
' SUM(NOT(wiki_helpfulvote.helpful)), '
' wiki_helpfulvote.created '
'FROM wiki_helpfulvote '
'INNER JOIN wiki_revision ON '
' wiki_helpfulvote.revision_id=wiki_revision.id '
'WHERE wiki_revision.document_id=%s '
'GROUP BY DATE(wiki_helpfulvote.created)', [document.id])
results = cursor.fetchall()
for res in results:
revisions.add(int(res[0]))
created_list.append(res[3])
date = int(time.mktime(res[3].timetuple()) / 86400) * 86400
datums.append({
'yes': int(res[1]),
'no': int(res[2]),
'date': date,
})
dates_with_data.add(date)
if not created_list:
send = {'datums': [], 'annotations': []}
return HttpResponse(json.dumps(send), mimetype='application/json')
min_created = min(created_list)
max_created = max(created_list)
# Zero fill data
timestamp = int(time.mktime(min_created.timetuple()) / 86400) * 86400
end = time.mktime(datetime.now().timetuple())
while timestamp <= end:
if timestamp not in dates_with_data:
datums.append({
'yes': 0,
'no': 0,
'date': timestamp,
})
dates_with_data.add(timestamp)
timestamp += 24 * 60 * 60
for flag in ImportantDate.uncached.filter(date__gte=min_created,
date__lte=max_created):
flag_data.append({
'x': int(time.mktime(flag.date.timetuple())),
'text': _(flag.text)
})
for rev in Revision.objects.filter(pk__in=revisions,
created__gte=min_created,
created__lte=max_created):
rdate = rev.reviewed or rev.created
rev_data.append({
'x': int(time.mktime(rdate.timetuple())),
'text': unicode(_('Revision %s')) % rev.created
})
# Rickshaw wants data like
# [{'name': 'series1', 'data': [{'x': 1362774285, 'y': 100}, ...]},]
send = {'datums': datums, 'annotations': []}
if flag_data:
send['annotations'].append({
'name': _('Firefox Releases'),
'slug': 'releases',
'data': flag_data,
})
if rev_data:
send['annotations'].append({
'name': _('Article Revisions'),
'slug': 'revisions',
'data': rev_data,
})
return HttpResponse(json.dumps(send), mimetype='application/json')
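# Illustrative sketch (not part of the original view): the client side is
# expected to reshape `datums` into the Rickshaw series format noted above;
# a minimal reshape, with hypothetical series names, could look like:
#
#   def to_rickshaw_series(datums):
#       return [
#           {'name': 'yes', 'data': [{'x': d['date'], 'y': d['yes']} for d in datums]},
#           {'name': 'no', 'data': [{'x': d['date'], 'y': d['no']} for d in datums]},
#       ]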
@login_required
def delete_revision(request, document_slug, revision_id):
"""Delete a revision."""
revision = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
document = revision.document
if not document.allows(request.user, 'delete_revision'):
raise PermissionDenied
only_revision = document.revisions.count() == 1
helpful_votes = HelpfulVote.objects.filter(revision=revision.id)
has_votes = helpful_votes.exists()
if request.method == 'GET':
# Render the confirmation page
return render(request, 'wiki/confirm_revision_delete.html', {
'revision': revision, 'document': document,
'only_revision': only_revision,
'has_votes': has_votes})
# Don't delete the only revision of a document
if only_revision:
return HttpResponseBadRequest()
log.warning('User %s is deleting revision with id=%s' %
(request.user, revision.id))
revision.delete()
return HttpResponseRedirect(reverse('wiki.document_revisions',
args=[document.slug]))
@login_required
@require_POST
def mark_ready_for_l10n_revision(request, document_slug, revision_id):
"""Mark a revision as ready for l10n."""
revision = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
if not revision.document.allows(request.user, 'mark_ready_for_l10n'):
raise PermissionDenied
if revision.can_be_readied_for_localization():
# We don't use update(), because that wouldn't update
# Document.latest_localizable_revision.
revision.is_ready_for_localization = True
revision.readied_for_localization = datetime.now()
revision.readied_for_localization_by = request.user
revision.save()
ReadyRevisionEvent(revision).fire(exclude=request.user)
return HttpResponse(json.dumps({'message': revision_id}))
return HttpResponseBadRequest()
@login_required
def delete_document(request, document_slug):
"""Delete a revision."""
document = get_object_or_404(Document, locale=request.LANGUAGE_CODE,
slug=document_slug)
# Check permission
if not document.allows(request.user, 'delete'):
raise PermissionDenied
if request.method == 'GET':
# Render the confirmation page
return render(request, 'wiki/confirm_document_delete.html', {
'document': document})
# Handle confirm delete form POST
log.warning('User %s is deleting document: %s (id=%s)' %
(request.user, document.title, document.id))
document.delete()
return render(request, 'wiki/confirm_document_delete.html', {
'document': document, 'delete_confirmed': True})
@login_required
@require_POST
def add_contributor(request, document_slug):
"""Add a contributor to a document."""
document = get_object_or_404(Document, locale=request.LANGUAGE_CODE,
slug=document_slug)
if not document.allows(request.user, 'edit'):
raise PermissionDenied
form = AddContributorForm(request.POST)
if form.is_valid():
for user in form.cleaned_data['users']:
document.contributors.add(user)
msg = _('{users} added to the contributors successfully!').format(
users=request.POST.get('users'))
messages.add_message(request, messages.SUCCESS, msg)
return HttpResponseRedirect(reverse('wiki.document_revisions',
args=[document_slug]))
msg = _('There were errors adding new contributors, see below.')
messages.add_message(request, messages.ERROR, msg)
return document_revisions(request, document_slug, contributor_form=form)
@login_required
@require_http_methods(['GET', 'POST'])
def remove_contributor(request, document_slug, user_id):
"""Remove a contributor from a document."""
document = get_object_or_404(Document, locale=request.LANGUAGE_CODE,
slug=document_slug)
if not document.allows(request.user, 'edit'):
raise PermissionDenied
user = get_object_or_404(User, id=user_id)
if request.method == 'POST':
document.contributors.remove(user)
msg = _('{user} removed from the contributors successfully!').format(
user=user.username)
messages.add_message(request, messages.SUCCESS, msg)
return HttpResponseRedirect(reverse('wiki.document_revisions',
args=[document_slug]))
return render(request, 'wiki/confirm_remove_contributor.html', {
'document': document, 'contributor': user})
def show_translations(request, document_slug):
document = get_object_or_404(
Document, locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug)
translated_locales = []
untranslated_locales = []
translated_locales.append(document.locale)
translated_locales.extend(document.translations.all().values_list(
'locale', flat=True))
for locale in settings.LANGUAGE_CHOICES:
if not locale[0] in translated_locales:
untranslated_locales.append(locale[0])
return render(request, 'wiki/show_translations.html', {
'document': document,
'translated_locales': translated_locales,
'untranslated_locales': untranslated_locales})
def _document_form_initial(document):
"""Return a dict with the document data pertinent for the form."""
return {'title': document.title,
'slug': document.slug,
'category': document.category,
'is_localizable': document.is_localizable,
'is_archived': document.is_archived,
'topics': Topic.uncached.filter(
document=document).values_list('id', flat=True),
'products': Product.uncached.filter(
document=document).values_list('id', flat=True),
'allow_discussion': document.allow_discussion,
'needs_change': document.needs_change,
'needs_change_comment': document.needs_change_comment}
def _save_rev_and_notify(rev_form, creator, document, based_on_id=None,
base_rev=None):
"""Save the given RevisionForm and send notifications."""
new_rev = rev_form.save(creator, document, based_on_id, base_rev)
statsd.incr('wiki.revision')
# Enqueue notifications
ReviewableRevisionInLocaleEvent(new_rev).fire(exclude=new_rev.creator)
EditDocumentEvent(new_rev).fire(exclude=new_rev.creator)
def _maybe_schedule_rebuild(form):
"""Try to schedule a KB rebuild if a title or slug has changed."""
if 'title' in form.changed_data or 'slug' in form.changed_data:
schedule_rebuild_kb()
def _get_next_url_fallback_localization(request):
return get_next_url(request) or reverse('dashboards.localization')
def _show_revision_warning(document, revision):
if revision:
return document.revisions.filter(created__gt=revision.created,
reviewed=None).exists()
return False
def recent_revisions(request):
# Make writable
request.GET = request.GET.copy()
fragment = request.GET.pop('fragment', None)
form = RevisionFilterForm(request.GET)
revs = Revision.objects.order_by('-created')
# We are going to ignore validation errors for the most part, but
# this is needed to call the functions that generate `cleaned_data`.
# This helps in particular when bad user names are typed in.
form.is_valid()
# If something has gone very wrong, `cleaned_data` won't be there.
if hasattr(form, 'cleaned_data'):
if form.cleaned_data.get('locale'):
revs = revs.filter(document__locale=form.cleaned_data['locale'])
if form.cleaned_data.get('users'):
revs = revs.filter(creator__in=form.cleaned_data['users'])
if form.cleaned_data.get('start'):
revs = revs.filter(created__gte=form.cleaned_data['start'])
if form.cleaned_data.get('end'):
revs = revs.filter(created__lte=form.cleaned_data['end'])
revs = paginate(request, revs)
c = {
'revisions': revs,
'form': form,
}
if fragment:
template = 'wiki/includes/recent_revisions_fragment.html'
else:
template = 'wiki/recent_revisions.html'
return render(request, template, c)
@require_GET
def what_links_here(request, document_slug):
"""List all documents that link to a document."""
locale = request.GET.get('locale', request.LANGUAGE_CODE)
doc = get_object_or_404(Document, locale=locale, slug=document_slug)
links = {}
for l in doc.links_to():
if doc.locale == l.linked_from.locale:
if l.kind not in links:
links[l.kind] = []
links[l.kind].append(l.linked_from)
c = {
'document': doc,
'relations': links
}
return render(request, 'wiki/what_links_here.html', c)
| 36.788217
| 86
| 0.641432
|
c9f99a89abb8aad7636656b84593912ac65862a3
| 1,118
|
py
|
Python
|
cohesity_management_sdk/models/type_sql_protection_source_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/type_sql_protection_source_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/type_sql_protection_source_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class TypeSqlProtectionSourceEnum(object):
"""Implementation of the 'Type_SqlProtectionSource' enum.
Specifies the type of the managed Object in a SQL Protection Source.
Examples of SQL Objects include 'kInstance' and 'kDatabase'.
'kInstance' indicates that SQL server instance is being protected.
'kDatabase' indicates that SQL server database is being protected.
'kAAG' indicates that SQL AAG (AlwaysOn Availability Group) is being
protected.
'kAAGRootContainer' indicates that SQL AAG's root container is being
protected.
'kRootContainer' indicates root container for SQL sources.
Attributes:
KINSTANCE: TODO: type description here.
KDATABASE: TODO: type description here.
KAAG: TODO: type description here.
KAAGROOTCONTAINER: TODO: type description here.
KROOTCONTAINER: TODO: type description here.
"""
KINSTANCE = 'kInstance'
KDATABASE = 'kDatabase'
KAAG = 'kAAG'
KAAGROOTCONTAINER = 'kAAGRootContainer'
KROOTCONTAINER = 'kRootContainer'
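# Illustrative usage sketch (not part of the original module): a raw API value
# can be compared against the class attributes defined above, e.g.
#
#   source_type = 'kDatabase'
#   if source_type == TypeSqlProtectionSourceEnum.KDATABASE:
#       print('protecting a SQL Server database')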
| 30.216216
| 72
| 0.711986
|
a15203fce68e0225a09484576eb0c5044acde100
| 1,098
|
py
|
Python
|
examples/animations/warping.py
|
colinmford/coldtype
|
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
|
[
"Apache-2.0"
] | null | null | null |
examples/animations/warping.py
|
colinmford/coldtype
|
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
|
[
"Apache-2.0"
] | null | null | null |
examples/animations/warping.py
|
colinmford/coldtype
|
8462dbd5f65f3ef8f3cbc8662a866b7e20ec5985
|
[
"Apache-2.0"
] | null | null | null |
from coldtype import *
from coldtype.warping import warp_fn
from coldtype.fx.skia import phototype
Style.RegisterShorthandPrefix("≈", "~/Type/fonts/fonts")
peshka = Font.Cacheable("≈/CoFoPeshkaVariableV0.5.ttf")
loop = Loop(150, 15, [ # some keyframes
dict(wdth=0, wght=0, rotate=-15, leading=200,
font_size=700, warp=0, blur=15),
dict(wdth=1, wght=1, rotate=0, leading=10,
font_size=50, warp=200, blur=5),
dict(wdth=0, wght=1, rotate=15, leading=100,
font_size=500, warp=50, blur=3),
dict(wdth=0.5, wght=0.5, rotate=0, leading=-470,
font_size=330, warp=0, blur=1)
])
@animation(timeline=loop)
def warp(f):
state = f.a.t.current_state(f.i, e="eeio")
return ((ß:=StSt("WARP\nBLUR", peshka, ro=1, **state))
.align(f.a.r).pen() # a single, centered vector
.f(Gradient.V(ß.ambit(), hsl(0.7), hsl(0.9)))
#.flatten(5) # slower but preserves curves across warp
.nlt(warp_fn(f.i*30, f.i, mult=int(state["warp"])))
.f(1)
-.ch(phototype(f.a.r,
state["blur"], cutw=50, fill=hsl(0.75))))
| 37.862069
| 62
| 0.615665
|
91274dd79740393be58a86e7b52975d4460a763b
| 8,823
|
py
|
Python
|
mlir/test/Bindings/Python/dialects/linalg/opdsl/emit_structured_generic.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
mlir/test/Bindings/Python/dialects/linalg/opdsl/emit_structured_generic.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
mlir/test/Bindings/Python/dialects/linalg/opdsl/emit_structured_generic.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
# RUN: %PYTHON %s | FileCheck %s
from typing import Optional, Sequence
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.dialects.linalg.opdsl.lang import *
@linalg_structured_op
def matmul_mono(A=TensorDef(T, S.M, S.K),
B=TensorDef(T, S.K, S.N),
C=TensorDef(T, S.M, S.N, output=True)):
C[D.m, D.n] += A[D.m, D.k] * B[D.k, D.n]
@linalg_structured_op
def matmul_poly(A=TensorDef(TV.T1, S.M, S.K),
B=TensorDef(TV.T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
@linalg_structured_op
def fill_rng_2d(A=TensorDef(T, S.M, S.N, output=True),
min=CaptureDef(F64),
max=CaptureDef(F64),
seed=CaptureDef(I32)):
multiplier = const(I32, 1103515245)
increment = const(I32, 12345)
temp1 = (cast(I32, index(D.m)) + seed) * multiplier + increment
temp2 = (cast(I32, index(D.n)) + temp1) * multiplier + increment
inv_randmax = const(F64, 2.3283064e-10)
scaling = (max - min) * inv_randmax
A[D.m, D.n] = cast(T, cast(F64, temp2) * scaling + min)
with Context() as ctx, Location.unknown():
module = Module.create()
f16 = F16Type.get()
f32 = F32Type.get()
f64 = F64Type.get()
i8 = IntegerType.get_signless(8)
i16 = IntegerType.get_signless(16)
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
# Note that these all have the same indexing maps. We verify the first and
# then do more permutation tests on casting and body generation
# behavior.
# CHECK: #[[$MAPA:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
# CHECK: #[[$MAPB:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
# CHECK: #[[$MAPC:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
# CHECK-LABEL: func @test_matmul_mono
# CHECK-SAME: %[[A:.+]]: tensor<4x16xf32>
# CHECK-SAME: %[[B:.+]]: tensor<16x8xf32>
# CHECK: %[[INITC:.+]] = linalg.init_tensor [4, 8] : tensor<4x8xf32>
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$MAPA]], #[[$MAPB]], #[[$MAPC]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
# CHECK-SAME: ins(%[[A]], %[[B]]
# CHECK-SAME: outs(%[[INITC]]
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), f32),
RankedTensorType.get((16, 8), f32))
def test_matmul_mono(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
return matmul_mono(lhs, rhs, outs=[init_result.result])
# CHECK-LABEL: @test_i8i8i32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: i32)
# CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32
# CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i8 to i32
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32
# CHECK-NEXT: linalg.yield %[[ADD]] : i32
# CHECK-NEXT: -> tensor<4x8xi32>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i8),
RankedTensorType.get((16, 8), i8),
RankedTensorType.get((4, 8), i32))
def test_i8i8i32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i8i16i32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i16, %[[C_ARG:.+]]: i32)
# CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32
# CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i16 to i32
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32
# CHECK-NEXT: linalg.yield %[[ADD]] : i32
# CHECK-NEXT: -> tensor<4x8xi32>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i8),
RankedTensorType.get((16, 8), i16),
RankedTensorType.get((4, 8), i32))
def test_i8i16i32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i32i32i16_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i32, %[[B_ARG:.+]]: i32, %[[C_ARG:.+]]: i16)
# CHECK-NEXT: %[[A_CAST:.+]] = trunci %[[A_ARG]] : i32 to i16
# CHECK-NEXT: %[[B_CAST:.+]] = trunci %[[B_ARG]] : i32 to i16
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i16
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i16
# CHECK-NEXT: linalg.yield %[[ADD]] : i16
# CHECK-NEXT: -> tensor<4x8xi16>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i32),
RankedTensorType.get((16, 8), i32),
RankedTensorType.get((4, 8), i16))
def test_i32i32i16_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i8i8f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = sitofp %[[A_ARG]] : i8 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = sitofp %[[B_ARG]] : i8 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i8),
RankedTensorType.get((16, 8), i8),
RankedTensorType.get((4, 8), f32))
def test_i8i8f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_f16f16f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: f16, %[[B_ARG:.+]]: f16, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = fpext %[[A_ARG]] : f16 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = fpext %[[B_ARG]] : f16 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), f16),
RankedTensorType.get((16, 8), f16),
RankedTensorType.get((4, 8), f32))
def test_f16f16f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_f64f64f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: f64, %[[B_ARG:.+]]: f64, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = fptrunc %[[A_ARG]] : f64 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = fptrunc %[[B_ARG]] : f64 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), f64),
RankedTensorType.get((16, 8), f64),
RankedTensorType.get((4, 8), f32))
def test_f64f64f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_fill_rng_2d
# CHECK-SAME: %{{.*}} tensor<4x16xi32>, %[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32
# CHECK-DAG: %[[IDX0:.+]] = linalg.index 0 : index
# CHECK-DAG: %[[IDX1:.+]] = linalg.index 1 : index
# CHECK-DAG: %[[IDX0_CAST:.+]] = index_cast %[[IDX0]] : index to i32
# CHECK-DAG: %[[IDX1_CAST:.+]] = index_cast %[[IDX1]] : index to i32
# CHECK-DAG: %[[RND0:.+]] = addi %[[IDX0_CAST]], %[[SEED]] : i32
# CHECK-DAG: %[[CST0:.+]] = constant 1103515245 : i32
# CHECK-DAG: %[[CST1:.+]] = constant 12345 : i32
# CHECK-DAG: %[[RND1:.+]] = muli %[[RND0]], %[[CST0]] : i32
# CHECK-DAG: %[[RND2:.+]] = addi %[[RND1]], %[[CST1]] : i32
# CHECK: %[[RND3:.+]] = sitofp %{{.*}} : i32 to f64
# CHECK-DAG: %[[DIFF:.+]] = subf %[[MAX]], %[[MIN]] : f64
# CHECK-DAG: %[[CST2:.+]] = constant 2.3283063999999999E-10 : f64
# CHECK-DAG: %[[FACT:.+]] = mulf %[[DIFF]], %[[CST2]] : f64
# CHECK-DAG: %[[RND4:.+]] = mulf %[[RND3]], %[[FACT]] : f64
# CHECK-DAG: %[[RND5:.+]] = addf %[[RND4]], %[[MIN]] : f64
# CHECK-DAG: %{{.*}} = fptosi %[[RND5]] : f64 to i32
@builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i32),
f64, f64, i32)
def test_fill_rng_2d(init_result, min, max, seed):
return fill_rng_2d(outs=[init_result], captures=[min, max, seed])
print(module)
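# Illustrative sketch (not part of the original test): the opdsl symbols used
# above could declare other contractions in the same style; the op below is
# hypothetical and is left commented out so it does not affect FileCheck.
#
# @linalg_structured_op
# def matvec_mono(A=TensorDef(T, S.M, S.K),
#                 x=TensorDef(T, S.K),
#                 y=TensorDef(T, S.M, output=True)):
#   y[D.m] += A[D.m, D.k] * x[D.k]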
| 48.745856
| 98
| 0.535872
|
ff41f41402ac874f70fc7c285d5ba81ee35ff4ff
| 655
|
py
|
Python
|
app/main/forms.py
|
agladyshev/FSND-brewlocker
|
17e8c0411b0f1f87c2875e7622f30a23ccf8b75c
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
agladyshev/FSND-brewlocker
|
17e8c0411b0f1f87c2875e7622f30a23ccf8b75c
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
agladyshev/FSND-brewlocker
|
17e8c0411b0f1f87c2875e7622f30a23ccf8b75c
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import Required, Length, URL
from wtforms import ValidationError
from .. import images
class ItemForm(FlaskForm):
header = StringField("Your pitch here", validators=[Required()])
body = TextAreaField("Tell me everything", validators=[Required()])
img = FileField('Upload photos', validators=[
FileAllowed(images, 'Images only!')])
phone = StringField('Phone', validators=[Required(), Length(1, 20), ])
submit = SubmitField('Submit')
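# Illustrative usage sketch (not part of the original module): in a view the
# form would typically be validated and the uploaded photo stored through the
# `images` upload set imported above; the surrounding route is hypothetical.
#
#   form = ItemForm()
#   if form.validate_on_submit():
#       filename = images.save(form.img.data) if form.img.data else None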
| 40.9375
| 74
| 0.732824
|
e73f3790370ad96d8c899fb14c1fbd21739e3196
| 4,410
|
py
|
Python
|
aleph/logic/profiles.py
|
aaronarnold2/aleph
|
1728f4db8863554d5b0722546838970e53ec72bd
|
[
"MIT"
] | 2
|
2021-01-09T17:27:23.000Z
|
2021-01-09T17:27:25.000Z
|
aleph/logic/profiles.py
|
nabla-c0d3/aleph
|
d0e4e04e23cb7ee3971298e33ccb1c5171ae0779
|
[
"MIT"
] | null | null | null |
aleph/logic/profiles.py
|
nabla-c0d3/aleph
|
d0e4e04e23cb7ee3971298e33ccb1c5171ae0779
|
[
"MIT"
] | null | null | null |
import logging
from datetime import datetime
from sqlalchemy.orm import aliased
from aleph.util import PairwiseDict
from aleph.core import db
from aleph.model import Collection, EntitySet, EntitySetItem, Judgement
log = logging.getLogger(__name__)
def collection_profiles(collection_id, judgements=None, deleted=False):
if judgements is not None:
judgements = list(map(Judgement, judgements))
entity_sets = EntitySet.by_collection_id(collection_id, types=[EntitySet.PROFILE])
for entity_set in entity_sets:
items = entity_set.profile(judgements=judgements, deleted=deleted).all()
if items:
yield (entity_set, items)
def pairwise_decisions(pairs, collection_id):
left = aliased(EntitySetItem)
right = aliased(EntitySetItem)
q = db.session.query(left, right)
q = q.filter(left.deleted_at == None, right.deleted_at == None) # noqa
q = q.filter(EntitySet.collection_id == collection_id)
q = q.filter(left.entityset_id == right.entityset_id)
q = q.filter(db.tuple_(left.entity_id, right.entity_id).in_(pairs))
return PairwiseDict(
((l.entity_id, r.entity_id), (l.judgement + r.judgement)) for l, r in q.all()
)
def profile_add_entities(
entityset, entity_id, collection_id, compared_to_entity_id, judgement, authz
):
pq = db.session.query(EntitySetItem)
pq = pq.filter(EntitySetItem.entityset_id == entityset.id)
pq = pq.filter(EntitySetItem.entity_id == entity_id)
pq = pq.filter(EntitySetItem.deleted_at == None) # noqa
pq.update({EntitySetItem.deleted_at: datetime.utcnow()}, synchronize_session=False)
esi = EntitySetItem(
entityset=entityset,
entity_id=entity_id,
compared_to_entity_id=compared_to_entity_id,
collection_id=collection_id,
added_by_id=authz.id,
judgement=judgement,
)
db.session.add(esi)
return esi
def create_profile(collection, authz):
data = {"type": EntitySet.PROFILE, "label": "profile"}
return EntitySet.create(data, collection, authz)
def decide_xref(xref, judgement, authz):
"""Store user feedback from an Xref result as an profile-type EntitySet
The problem here is that we're trying to translate a single pair-wise
user judgement into a merge or split judgement regarding a cluster of
entities.
This works for most cases, with the exception that a profile, once
established, cannot be split in a way that preserves what entities
were linked to what other entities originally."""
if not isinstance(judgement, Judgement):
judgement = Judgement(judgement)
entity_id = xref.get("entity_id")
collection = Collection.by_id(xref.get("collection_id"))
entity_profile = EntitySet.by_entity_id(
entity_id,
judgements=[Judgement.POSITIVE],
collection_id=collection.id,
types=[EntitySet.PROFILE],
).first()
match_id = xref.get("match_id")
match_collection_id = xref.get("match_collection_id")
match_profile = EntitySet.by_entity_id(
match_id,
judgements=[Judgement.POSITIVE],
collection_id=collection.id,
types=[EntitySet.PROFILE],
).first()
# If we are undecided, and we stay undecided, not much to change.
if entity_profile is None or match_profile is None:
if judgement == Judgement.NO_JUDGEMENT:
return
if entity_profile is None:
entity_profile = create_profile(collection, authz)
profile_add_entities(
entity_profile, entity_id, collection.id, None, Judgement.POSITIVE, authz
)
if judgement is Judgement.POSITIVE and match_profile is not None:
# Case 1: both entities have profiles and the match is positive
entity_profile = entity_profile.merge(match_profile, authz.id)
else:
# Case 2: any other judgement
# NOTE: Another case of NEGATIVE judgements triggering a
# `split_profile` could be useful, however it isn't implemented
# here so that we don't lose judgements. This however should be
# strongly considered in order to reverse profile mergers. The question
# is: what to do with old judgements on a pair when we do this?
profile_add_entities(
entity_profile, match_id, match_collection_id, entity_id, judgement, authz
)
db.session.commit()
return entity_profile
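# Illustrative sketch (not part of the original module; the identifiers are
# hypothetical): recording a positive xref decision for a pair of entities.
#
#   xref = {"entity_id": "ent.a", "collection_id": 1,
#           "match_id": "ent.b", "match_collection_id": 2}
#   profile = decide_xref(xref, Judgement.POSITIVE, authz)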
| 37.058824
| 87
| 0.703855
|
d863b4396abb0ffe59d743d211d8edb93c482b2e
| 1,071
|
py
|
Python
|
smsAlert/smsAlert.py
|
brijkishor7828/API-Integration-Sample-Code-Python
|
df8c276eeb221fc605848fd2af9f1795c0d31b2d
|
[
"0BSD"
] | null | null | null |
smsAlert/smsAlert.py
|
brijkishor7828/API-Integration-Sample-Code-Python
|
df8c276eeb221fc605848fd2af9f1795c0d31b2d
|
[
"0BSD"
] | null | null | null |
smsAlert/smsAlert.py
|
brijkishor7828/API-Integration-Sample-Code-Python
|
df8c276eeb221fc605848fd2af9f1795c0d31b2d
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import requests
class smsAlertException(Exception):
def __init__(self, message):
self.message = message
def get_message(self):
return self.message
class smsAlertMsg(object):
"""A simple Python API for the smsAlert
It includes methods for calling sms api of smsAlert
"""
def __init__(self, api_key, **kwargs):
self.auth_key = api_key
self.sender_id = kwargs.get('sender_id') or 'CVTECH'
self.route = kwargs.get('route') or 'nondnd'
self.sms_url = 'http://smsalert.co.in/api/push.json?'
def send_sms(self, message, mobile):
res = requests.get(self.sms_url,
params={'apikey': self.auth_key,
'mobileno': mobile,
'text': message,
'sender': self.sender_id,
'route': self.route,
'response': 'json'})
return json.loads(res.content)
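# Illustrative usage sketch (not part of the original module; the API key and
# phone number are placeholders):
#
#   client = smsAlertMsg('your-api-key', sender_id='CVTECH', route='nondnd')
#   result = client.send_sms('Hello from smsAlert', '919999999999')
#   print(result)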
| 26.775
| 61
| 0.524743
|
a671dd7a9004808f6531650fc6d23bb866931e2e
| 525
|
py
|
Python
|
rover/core/servers/ArduinoSocketServer/misc/sqlFunctions.py
|
CSUFTitanRover/TitanRover2018
|
4926d377322a37ba644d7e852faa305fb8bb9b55
|
[
"Apache-2.0"
] | 16
|
2017-09-01T23:33:17.000Z
|
2021-01-04T02:41:19.000Z
|
rover/core/servers/ArduinoSocketServer/misc/sqlFunctions.py
|
CSUFTitanRover/TitanRover2018
|
4926d377322a37ba644d7e852faa305fb8bb9b55
|
[
"Apache-2.0"
] | 56
|
2017-08-30T01:14:46.000Z
|
2021-02-28T22:18:44.000Z
|
rover/core/servers/ArduinoSocketServer/misc/sqlFunctions.py
|
CSUFTitanRover/TitanRover2018
|
4926d377322a37ba644d7e852faa305fb8bb9b55
|
[
"Apache-2.0"
] | 15
|
2017-09-14T19:55:55.000Z
|
2020-05-03T19:44:39.000Z
|
import sqlite3
from sqlite3 import Error
def storeMessage(time, distance, messageType):
conn = sqlite3.connect('telemetry.db', check_same_thread=False)
c = conn.cursor()
try:
c.execute('''CREATE TABLE IF NOT EXISTS telemetry
(time double PRIMARY KEY,
distance double NOT NULL,
messageType integer NOT NULL)''')
args = [time, distance, messageType]
c.execute('''INSERT INTO telemetry (time, distance, messageType) VALUES (?, ?, ?)''', args)
conn.commit()
except Error as e:
print(e)
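# Illustrative usage sketch (not part of the original module; the values are
# hypothetical): store a reading of 12.5 with message type 1 at the current
# UNIX timestamp.
#
#   import time
#   storeMessage(time.time(), 12.5, 1)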
| 29.166667
| 95
| 0.685714
|
7fa0b7416a01cba57603209b516a2f4ea3a0dfcb
| 2,333
|
py
|
Python
|
tests-python/test_batch_waveforms_from_files.py
|
babycat-io/babycat
|
39ecba8469e698a990bc9dc52e5de9ae78492a60
|
[
"MIT"
] | 8
|
2021-05-10T23:12:14.000Z
|
2022-02-23T06:54:31.000Z
|
tests-python/test_batch_waveforms_from_files.py
|
babycat-io/babycat
|
39ecba8469e698a990bc9dc52e5de9ae78492a60
|
[
"MIT"
] | 13
|
2021-06-01T05:31:17.000Z
|
2022-03-25T22:24:18.000Z
|
tests-python/test_batch_waveforms_from_files.py
|
babycat-io/babycat
|
39ecba8469e698a990bc9dc52e5de9ae78492a60
|
[
"MIT"
] | 1
|
2021-06-01T05:24:52.000Z
|
2021-06-01T05:24:52.000Z
|
"""
Tests loading batches of waveforms from files.
These tests mirror the ones in ``../tests/test_waveform_batch_from_files.rs``
"""
from fixtures import *
import babycat
ALL_SAME_FILENAMES = [COF_FILENAME, COF_FILENAME, COF_FILENAME]
def test_all_same_file_1():
batch = babycat.batch.waveforms_from_files(ALL_SAME_FILENAMES)
for named_result in batch:
assert named_result.exception is None
waveform = named_result.waveform
assert waveform.num_channels == COF_NUM_CHANNELS
assert waveform.num_frames == COF_NUM_FRAMES
assert waveform.frame_rate_hz == COF_FRAME_RATE_HZ
def test_all_same_file_2():
batch = babycat.batch.waveforms_from_files(
ALL_SAME_FILENAMES, end_time_milliseconds=15000
)
for named_result in batch:
assert named_result.exception is None
waveform = named_result.waveform
assert waveform.num_channels == COF_NUM_CHANNELS
assert waveform.num_frames == 661500
assert waveform.frame_rate_hz == COF_FRAME_RATE_HZ
def test_all_same_file_single_threaded_1():
batch = babycat.batch.waveforms_from_files(
ALL_SAME_FILENAMES,
num_workers=1,
)
for named_result in batch:
assert named_result.exception is None
waveform = named_result.waveform
assert waveform.num_channels == COF_NUM_CHANNELS
assert waveform.num_frames == COF_NUM_FRAMES
assert waveform.frame_rate_hz == COF_FRAME_RATE_HZ
def test_different_filenames_1():
batch = babycat.batch.waveforms_from_files(ALL_FILENAMES)
for i, named_result in enumerate(batch):
assert named_result.exception is None
waveform = named_result.waveform
assert ALL_NUM_CHANNELS[i] == waveform.num_channels
assert ALL_NUM_FRAMES[i] == waveform.num_frames
assert ALL_FRAME_RATE_HZ[i] == waveform.frame_rate_hz
def test_file_not_found_error_1():
batch = babycat.batch.waveforms_from_files([COF_FILENAME, "asdfasdf"])
assert 2 == len(batch)
assert batch[0].exception is None
assert batch[0].waveform.num_channels == COF_NUM_CHANNELS
assert batch[0].waveform.num_frames == COF_NUM_FRAMES
assert batch[0].waveform.frame_rate_hz == COF_FRAME_RATE_HZ
assert batch[1].waveform is None
assert isinstance(batch[1].exception, FileNotFoundError)
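# Illustrative sketch (not part of the original tests; the file name is
# hypothetical): the batch decoder exercised above can also be called directly.
#
#   batch = babycat.batch.waveforms_from_files(["song.mp3"], num_workers=2)
#   for named_result in batch:
#       if named_result.exception is None:
#           print(named_result.waveform.num_frames)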
| 34.820896
| 77
| 0.737248
|
b50ff04ac894b1b7f8cd4eae732e6c03667ca500
| 647
|
py
|
Python
|
thenewboston/utils/files.py
|
rajat4665/thenewboston-python
|
df842c793fe7bfd8731fd8746abf25747c9e569e
|
[
"MIT"
] | null | null | null |
thenewboston/utils/files.py
|
rajat4665/thenewboston-python
|
df842c793fe7bfd8731fd8746abf25747c9e569e
|
[
"MIT"
] | 3
|
2021-03-30T14:03:41.000Z
|
2021-09-22T19:30:31.000Z
|
thenewboston/utils/files.py
|
rajat4665/thenewboston-python
|
df842c793fe7bfd8731fd8746abf25747c9e569e
|
[
"MIT"
] | null | null | null |
import json
from hashlib import sha3_256 as sha3
def get_file_hash(file):
"""
Return the SHA3-256 hex digest of a file.
"""
h = sha3()
with open(file, 'rb') as file:
chunk = 0
while chunk != b'':
chunk = file.read(1024)
h.update(chunk)
return h.hexdigest()
def read_json(file):
"""
Read JSON file
"""
try:
with open(file, 'r') as f:
data = json.load(f)
except FileNotFoundError:
data = None
return data
def write_json(file, data):
"""
Write JSON file
"""
with open(file, 'w') as f:
json.dump(data, f, indent=2)
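# Illustrative usage sketch (not part of the original module; the file name is
# hypothetical):
#
#   write_json('config.json', {'node': 'primary'})
#   data = read_json('config.json')
#   digest = get_file_hash('config.json')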
| 15.404762
| 36
| 0.525502
|
948c15483503842770b133c829cdad2605f2a828
| 6,633
|
py
|
Python
|
test/unitTests/stlOperatorTests/testTimedUntil.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
test/unitTests/stlOperatorTests/testTimedUntil.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
test/unitTests/stlOperatorTests/testTimedUntil.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
""" Test the timed until operation (both the efficient and syntax variants) """
import unittest
from stl.signals import Signal, BooleanSignal
from stl.utility import Interval
from stl.operators import computeTimedUntil, computeSyntaxUntil, computeBooleanUntil
from .untilTestData import TESTCASE1_INTERVAL_LOWERBOUND, TESTCASE1_INTERVAL_UPPERBOUND, TESTCASE1_LEFTCHILD_SIGNAL, TESTCASE1_RIGHTCHILD_SIGNAL
from .untilTestData import TESTCASE2_INTERVAL_LOWERBOUND, TESTCASE2_INTERVAL_UPPERBOUND, TESTCASE2_LEFTCHILD_SIGNAL, TESTCASE2_RIGHTCHILD_SIGNAL
class TimedUntilTest(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
def testEmptySignal(self):
empty = Signal()
nonEmpty = Signal("test", [0, 1, 2], [0, 1, 2], [1, 1, 0])
expectedResult = Signal("timedUntil")
interval = Interval(0, 5) # Shouldn't matter
# Test empty both
self.assertEqual(computeTimedUntil(empty, empty, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(empty, empty, interval), expectedResult)
# Test empty rhs
self.assertEqual(computeTimedUntil(empty, nonEmpty, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(empty, nonEmpty, interval), expectedResult)
# Test empty lhs
self.assertEqual(computeTimedUntil(nonEmpty, empty, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(nonEmpty, empty, interval), expectedResult)
def testBooleanEmptySignal(self):
empty = BooleanSignal()
nonEmpty = BooleanSignal("test", [0, 1, 2], [0, 1, 1])
expectedResult = BooleanSignal("booleanTimedUntil")
interval = Interval(0, 5) # Shouldn't matter
# Test empty both
self.assertEqual(computeBooleanUntil(empty, empty, interval), expectedResult)
# Test empty rhs
self.assertEqual(computeBooleanUntil(empty, nonEmpty, interval), expectedResult)
# Test empty lhs
self.assertEqual(computeBooleanUntil(nonEmpty, empty, interval), expectedResult)
def testSimpleSignalBoolean(self):
signal = BooleanSignal('test', [0, 1], [0, 1], [1, 0])
interval = Interval(0, 1)
expectedResult = BooleanSignal('booleanTimedUntil', [0], [0], [0])
self.assertEqual(computeBooleanUntil(signal, signal, interval), expectedResult)
# This signal should be equal to the previous one - negative value should be converted to 0
signal = BooleanSignal('test', [0, 1], [-1, 1], [2, 0])
self.assertEqual(computeBooleanUntil(signal, signal, interval), expectedResult)
# This signal should be equal to the previous one - big positive value should be converted to 1
signal = BooleanSignal('test', [0, 1], [0, 5], [5, 0])
self.assertEqual(computeBooleanUntil(signal, signal, interval), expectedResult)
def testComplexSignalAlgorithmEquality(self):
# Compare the efficient algorithm to the syntax algorithm - we don't have a
# predicted result for this case that we expect to be entirely correct.
efficientResult = computeTimedUntil(
TESTCASE1_LEFTCHILD_SIGNAL, TESTCASE1_RIGHTCHILD_SIGNAL,
Interval(TESTCASE1_INTERVAL_LOWERBOUND, TESTCASE1_INTERVAL_UPPERBOUND)
)
syntaxResult = computeSyntaxUntil(
TESTCASE1_LEFTCHILD_SIGNAL, TESTCASE1_RIGHTCHILD_SIGNAL,
Interval(TESTCASE1_INTERVAL_LOWERBOUND, TESTCASE1_INTERVAL_UPPERBOUND)
)
self.assertEqual(efficientResult, syntaxResult)
efficientResult = computeTimedUntil(
TESTCASE2_LEFTCHILD_SIGNAL, TESTCASE2_RIGHTCHILD_SIGNAL,
Interval(TESTCASE2_INTERVAL_LOWERBOUND, TESTCASE2_INTERVAL_UPPERBOUND)
)
syntaxResult = computeSyntaxUntil(
TESTCASE2_LEFTCHILD_SIGNAL, TESTCASE2_RIGHTCHILD_SIGNAL,
Interval(TESTCASE2_INTERVAL_LOWERBOUND, TESTCASE2_INTERVAL_UPPERBOUND)
)
self.assertEqual(efficientResult, syntaxResult)
def testSimpleSignal(self):
signal = Signal('test', [0, 1], [0, 1], [1, 0])
expectedResult = Signal('timedUntil', [0], [0], [0])
interval = Interval(0, 1)
self.assertEqual(computeTimedUntil(signal, signal, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(signal, signal, interval), expectedResult)
def testSmallSignal(self):
left = Signal('test', [0, 1, 2, 3, 4], [2, 7, 5, 4, -1], [5, -2, -1, -5, 0])
right = Signal('test', [0, 1, 2, 3, 4], [-1, -1, -1, 1, 1], [0, 0, 2, 0, 0])
interval = Interval(0, 4)
expectedResult = Signal('timedUntil', [0], [1], [0])
self.assertEqual(computeTimedUntil(left, right, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(left, right, interval), expectedResult)
interval = Interval(0, 2)
expectedResult = Signal('timedUntil', [0, 1, 1.6, 2], [-1, 1, 1, 1], [2, 0, 0, 0])
self.assertEqual(computeTimedUntil(left, right, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(left, right, interval), expectedResult)
interval = Interval(2, 4)
expectedResult = Signal('timedUntil', [0], [1], [0])
self.assertEqual(computeTimedUntil(left, right, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(left, right, interval), expectedResult)
interval = Interval(1, 2)
expectedResult = Signal('timedUntil', [0, 1, 1.6, 2], [-1, 1, 1, 1], [2, 0, 0, 0])
self.assertEqual(computeTimedUntil(left, right, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(left, right, interval), expectedResult)
interval = Interval(1, 3)
expectedResult = Signal('timedUntil', [0, 0.6, 1], [1, 1, 1], [0, 0, 0])
self.assertEqual(computeTimedUntil(left, right, interval), expectedResult)
self.assertEqual(computeSyntaxUntil(left, right, interval), expectedResult)
def testBooleanUntilLhsTimes(self):
lhs = BooleanSignal("l", [0, 1], [1, 0])
rhs = BooleanSignal("r", [0, 1], [0, 1])
interval = Interval(0, 1)
expected = BooleanSignal("booleanTimedUntil", [0], [1])
self.assertEqual(computeBooleanUntil(lhs, rhs, interval), expected)
def testBooleanUntilRhsTimes(self):
lhs = BooleanSignal("l", [0, 1], [0, 1])
rhs = BooleanSignal("r", [0, 1], [1, 0])
interval = Interval(0, 1)
expected = BooleanSignal("booleanTimedUntil", [0], [0])
self.assertEqual(computeBooleanUntil(lhs, rhs, interval), expected)
def testBooleanUntilLargeTimegap(self):
lhs = BooleanSignal("l", [0, 0.5, 2], [1, 1, 0])
rhs = BooleanSignal("r", [0, 1.5, 2], [0, 0, 1])
interval = Interval(0, 1)
expected = BooleanSignal("booleanTimedUntil", [0, 0.5, 1], [0, 0, 1])
self.assertEqual(computeBooleanUntil(lhs, rhs, interval), expected)
lhs = BooleanSignal("l", [0, 0.5, 2], [1, 0, 0])
rhs = BooleanSignal("r", [0, 1.5, 2], [0, 0, 1])
interval = Interval(0, 1)
expected = BooleanSignal("booleanTimedUntil", [0, 0.5, 1], [0, 0, 0])
self.assertEqual(computeBooleanUntil(lhs, rhs, interval), expected)
if __name__ == "__main__":
unittest.main()
| 46.711268
| 144
| 0.732248
|
83438ffa0de9a825d8bc295785047481855f9182
| 3,223
|
py
|
Python
|
aiopyfix/message.py
|
qwhelan/AIOPyFix
|
b7d172db9d30d1023e7161c35d30347bea33a085
|
[
"CC0-1.0"
] | 12
|
2020-02-11T09:16:17.000Z
|
2021-12-29T06:31:31.000Z
|
aiopyfix/message.py
|
qwhelan/AIOPyFix
|
b7d172db9d30d1023e7161c35d30347bea33a085
|
[
"CC0-1.0"
] | 2
|
2021-08-30T15:09:10.000Z
|
2021-09-16T12:55:27.000Z
|
aiopyfix/message.py
|
qwhelan/AIOPyFix
|
b7d172db9d30d1023e7161c35d30347bea33a085
|
[
"CC0-1.0"
] | 8
|
2019-02-04T21:02:07.000Z
|
2021-11-07T15:14:51.000Z
|
from collections import OrderedDict
from enum import Enum
class MessageDirection(Enum):
INBOUND = 0
OUTBOUND = 1
class _FIXRepeatingGroupContainer:
def __init__(self):
self.groups = []
def addGroup(self, group, index):
if index == -1:
self.groups.append(group)
else:
self.groups.insert(index, group)
def removeGroup(self, index):
del self.groups[index]
def getGroup(self, index):
return self.groups[index]
def __str__(self):
return str(len(self.groups)) + "=>" + str(self.groups)
__repr__ = __str__
class FIXContext(object):
def __init__(self):
self.tags = OrderedDict()
def setField(self, tag, value):
self.tags[tag] = value
def removeField(self, tag):
try:
del self.tags[tag]
except KeyError:
pass
def getField(self, tag):
return self.tags[tag]
def addRepeatingGroup(self, tag, group, index=-1):
if tag in self.tags:
groupContainer = self.tags[tag]
groupContainer.addGroup(group, index)
else:
groupContainer = _FIXRepeatingGroupContainer()
groupContainer.addGroup(group, index)
self.tags[tag] = groupContainer
def removeRepeatingGroupByIndex(self, tag, index=-1):
if self.isRepeatingGroup(tag):
try:
if index == -1:
del self.tags[tag]
else:
groups = self.tags[tag]
groups.removeGroup(index)
except KeyError:
pass
def getRepeatingGroup(self, tag):
if self.isRepeatingGroup(tag):
return (len(self.tags[tag].groups), self.tags[tag].groups)
return None
def getRepeatingGroupByTag(self, tag, identifierTag, identifierValue):
if self.isRepeatingGroup(tag):
for group in self.tags[tag].groups:
if identifierTag in group.tags:
if group.getField(identifierTag) == identifierValue:
return group
return None
def getRepeatingGroupByIndex(self, tag, index):
if self.isRepeatingGroup(tag):
return self.tags[tag].groups[index]
return None
def __getitem__(self, tag):
return self.getField(tag)
def __setitem__(self, tag, value):
self.setField(tag, value)
def isRepeatingGroup(self, tag):
return type(self.tags[tag]) is _FIXRepeatingGroupContainer
def __contains__(self, item):
return item in self.tags
def __str__(self):
r = ""
allTags = []
for tag in self.tags:
allTags.append("%s=%s" % (tag, self.tags[tag]))
r += "|".join(allTags)
return r
def __eq__(self, other):
# if our string representation looks the same, the objects are equivalent
return self.__str__() == other.__str__()
__repr__ = __str__
class FIXMessage(FIXContext):
def __init__(self, msgType):
self.msgType = msgType
FIXContext.__init__(self)
def setMsgType(self, msgType):
self.msgType = msgType
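# Illustrative usage sketch (not part of the original module; the FIX tag
# numbers and values are hypothetical):
#
#   msg = FIXMessage("D")
#   msg.setField(55, "EURUSD")
#   party = FIXContext()
#   party.setField(448, "BROKER1")
#   msg.addRepeatingGroup(453, party)
#   print(msg)   # -> 55=EURUSD|453=1=>[448=BROKER1]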
| 27.547009
| 81
| 0.585479
|
09dc5c91e56457ca79126b900d31720e868778e2
| 10,888
|
py
|
Python
|
tests/test_api.py
|
YixinLin294/flask_demo
|
34b92947632bb01e65f700a6ac175841d966e3a2
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
YixinLin294/flask_demo
|
34b92947632bb01e65f700a6ac175841d966e3a2
|
[
"MIT"
] | 4
|
2020-03-24T16:46:50.000Z
|
2021-06-01T23:27:42.000Z
|
tests/test_api.py
|
YixinLin294/flask_demo
|
34b92947632bb01e65f700a6ac175841d966e3a2
|
[
"MIT"
] | null | null | null |
import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role, Post, Comment
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
return {
'Authorization': 'Basic ' + b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_404(self):
response = self.client.get(
'/wrong/url',
headers=self.get_api_headers('email', 'password'))
self.assertTrue(response.status_code == 404)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['error'] == 'not found')
def test_no_auth(self):
response = self.client.get(url_for('api.get_posts'), content_type='application/json')
self.assertTrue(response.status_code == 200)
def test_bad_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='john@example.com', password='cat', confirmed=True, role=r)
db.session.add(u)
db.session.commit()
# authenticate with bad password
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('john@example.com', 'dog'))
self.assertTrue(response.status_code == 401)
def test_token_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='john@example.com', password='cat', confirmed=True, role=r)
db.session.add(u)
db.session.commit()
# issue a request with a bad token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('bad-token', ''))
self.assertTrue(response.status_code == 401)
# get a token
response = self.client.get(
url_for('api.get_token'),
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('token'))
token = json_response['token']
# issue a request with the token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers(token, ''))
self.assertTrue(response.status_code == 200)
def test_anonymous(self):
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('', ''))
self.assertTrue(response.status_code == 200)
def test_unconfirmed_account(self):
# add an unconfirmed user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='john@example.com', password='cat', confirmed=False,
role=r)
db.session.add(u)
db.session.commit()
# get list of posts with the unconfirmed account
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 403)
def test_posts(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='john@example.com', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# write an empty post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('john@example.com', 'cat'),
data=json.dumps({'body': ''}))
self.assertTrue(response.status_code == 400)
# write a post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers(
'john@example.com', 'cat'),
data=json.dumps({'body': 'body of the *blog* post'}))
self.assertTrue(response.status_code == 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
# get the new post
response = self.client.get(
url,
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'body of the *blog* post')
self.assertTrue(json_response['body_html'] ==
'<p>body of the <em>blog</em> post</p>')
json_post = json_response
# get the post from the user
response = self.client.get(
url_for('api.get_user_posts', id=u.id),
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# get the post from the user as a follower
response = self.client.get(
url_for('api.get_user_followed_posts', id=u.id),
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
# edit post
response = self.client.put(
url,
headers=self.get_api_headers('john@example.com', 'cat'),
data=json.dumps({'body': 'updated body'}))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'updated body')
self.assertTrue(json_response['body_html'] == '<p>updated body</p>')
def test_user(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='john@example.com', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='susan@example.com', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# get users
response = self.client.get(
url_for('api.get_user', id=u1.id),
headers=self.get_api_headers('susan@example.com', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'john')
response = self.client.get(
url_for('api.get_user', id=u2.id),
headers=self.get_api_headers('susan@example.com', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'susan')
def test_comments(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='john@example.com', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='susan@example.com', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# add a post
post = Post(body='body of the post', author=u1)
db.session.add(post)
db.session.commit()
# write a comment
response = self.client.post(
url_for('api.new_post_comment', id=post.id),
headers=self.get_api_headers('susan@example.com', 'dog'),
data=json.dumps({'body': 'Good [post](http://example.com)!'}))
self.assertTrue(response.status_code == 201)
json_response = json.loads(response.data.decode('utf-8'))
url = response.headers.get('Location')
self.assertIsNotNone(url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
self.assertTrue(
re.sub('<.*?>', '', json_response['body_html']) == 'Good post!')
# get the new comment
response = self.client.get(
url,
headers=self.get_api_headers('john@example.com', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'Good [post](http://example.com)!')
# add another comment
comment = Comment(body='Thank you!,', author=u1, post=post)
db.session.add(comment)
db.session.commit()
# get the two comments from the post
response = self.client.get(
url_for('api.get_post_comments', id=post.id),
headers=self.get_api_headers('susan@example.com', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('comments'))
self.assertTrue(json_response.get('count', 0) == 2)
# get all the comments
response = self.client.get(
url_for('api.get_comments', id=post.id),
headers=self.get_api_headers('susan@example.com', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('comments'))
self.assertTrue(json_response.get('count', 0) == 2)
| 41.086792
| 94
| 0.58165
|
42ad7c7ab75d523718c244b944c1efdc79535ee0
| 347
|
py
|
Python
|
CyberPi V1/Python with CyberPi 037(mkcloud 聊天机器人).py
|
SCSZCC/PythonWithHardware
|
3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2
|
[
"MIT"
] | 2
|
2020-08-15T02:49:19.000Z
|
2020-08-15T02:49:31.000Z
|
CyberPi V1/Python with CyberPi 037(mkcloud 聊天机器人).py
|
SCSZCC/PythonWithHardware
|
3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2
|
[
"MIT"
] | null | null | null |
CyberPi V1/Python with CyberPi 037(mkcloud 聊天机器人).py
|
SCSZCC/PythonWithHardware
|
3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2
|
[
"MIT"
] | 1
|
2022-02-24T05:30:30.000Z
|
2022-02-24T05:30:30.000Z
|
"""
Name: 037 Chatbot on mkcloud - Xiaosi
Hardware: CyberPi
Description: a simple chat session with the mkcloud chatbot
Difficulty: ⭐⭐
Supported modes: both Upload and Live modes are supported
APIs used and what they do:
1. mkcloud.robot.chat(str)
str: a string containing the message to send; the return value is a string with the chatbot's reply.
"""
# --------- program divider ---------------- program divider ---------------- program divider ----------
import mkcloud
while True:
say = input("请输入要聊天内容:")
response = mkcloud.robot.chat(say)
print(response)
| 14.458333
| 68
| 0.605187
|
ae37a8a8de9daeb124bc8ecdc88b259511f64bef
| 1,684
|
py
|
Python
|
pangea/core/migrations/0006_auto_20200905_2245.py
|
LongTailBio/pangea-django
|
630551dded7f9e38f95eda8c36039e0de46961e7
|
[
"MIT"
] | null | null | null |
pangea/core/migrations/0006_auto_20200905_2245.py
|
LongTailBio/pangea-django
|
630551dded7f9e38f95eda8c36039e0de46961e7
|
[
"MIT"
] | 27
|
2020-03-26T02:55:12.000Z
|
2022-03-12T00:55:04.000Z
|
pangea/core/migrations/0006_auto_20200905_2245.py
|
LongTailBio/pangea-django
|
630551dded7f9e38f95eda8c36039e0de46961e7
|
[
"MIT"
] | 1
|
2021-09-14T08:15:54.000Z
|
2021-09-14T08:15:54.000Z
|
# Generated by Django 3.0.3 on 2020-09-05 22:45
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import pangea.core.encrypted_fields
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20200822_0140'),
]
operations = [
migrations.AddField(
model_name='sampleanalysisresult',
name='metadata',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='samplegroupanalysisresult',
name='metadata',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.CreateModel(
name='Project',
fields=[
('created_at', models.DateTimeField(blank=True, db_index=True, null=True)),
('updated_at', models.DateTimeField(blank=True, db_index=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.TextField(unique=True)),
('description', models.TextField(default='')),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Organization')),
('sample_groups', models.ManyToManyField(null=True, to='core.SampleGroup')),
('sub_projects', models.ManyToManyField(null=True, related_name='super_projects', to='core.Project')),
],
options={
'abstract': False,
},
),
]
| 38.272727
| 121
| 0.61342
|
46a5ec12bf67039b685787a9cb47e0727daee2f1
| 1,132
|
py
|
Python
|
src/app/modules/examples/module_random_volume.py
|
balintfodor/Build3D
|
b735129e380a414d62a1d91556f5f52674f1f6f9
|
[
"MIT"
] | null | null | null |
src/app/modules/examples/module_random_volume.py
|
balintfodor/Build3D
|
b735129e380a414d62a1d91556f5f52674f1f6f9
|
[
"MIT"
] | 5
|
2021-03-19T09:28:07.000Z
|
2022-03-12T00:09:14.000Z
|
src/app/modules/examples/module_random_volume.py
|
balintfodor/Build3D
|
b735129e380a414d62a1d91556f5f52674f1f6f9
|
[
"MIT"
] | 1
|
2019-12-23T16:44:49.000Z
|
2019-12-23T16:44:49.000Z
|
import a3dc_module_interface as a3
import numpy as np
def module_main(ctx):
w = a3.inputs['width']
h = a3.inputs['height']
d = a3.inputs['depth']
seed = a3.inputs['seed']
np.random.seed(seed)
vol = np.random.rand(w, h, d)
print('your volume is ready! 🍻')
a3.outputs['volume'] = a3.MultiDimImageFloat_from_ndarray(vol)
config = [
a3.Parameter('width', a3.types.uint16).setIntHint('min', 1)
.setIntHint('max', 2048)
.setIntHint('default', 64),
a3.Parameter('height', a3.types.uint16).setIntHint('min', 1)
.setIntHint('max', 2048)
.setIntHint('default', 64),
a3.Parameter('depth', a3.types.uint16).setIntHint('min', 1)
.setIntHint('max', 2048)
.setIntHint('default', 16),
a3.Parameter('seed', a3.types.uint16).setIntHint('default', 42),
a3.Output('volume', a3.types.ImageFloat)]
a3.def_process_module(config, module_main)
| 35.375
| 70
| 0.519435
|
418dd604251352a11dc8cb504b8aa278f4c82bff
| 10,775
|
py
|
Python
|
cohorts_proj/api/adapters/unm.py
|
gellerjulia/harmonization-website
|
c47b109d9110e34520ef43469b6b5ccac01cc178
|
[
"MIT"
] | 1
|
2020-09-20T02:32:01.000Z
|
2020-09-20T02:32:01.000Z
|
cohorts_proj/api/adapters/unm.py
|
gellerjulia/harmonization-website
|
c47b109d9110e34520ef43469b6b5ccac01cc178
|
[
"MIT"
] | 20
|
2020-04-17T14:01:41.000Z
|
2022-03-12T00:30:23.000Z
|
cohorts_proj/api/adapters/unm.py
|
gellerjulia/harmonization-website
|
c47b109d9110e34520ef43469b6b5ccac01cc178
|
[
"MIT"
] | 3
|
2020-10-08T00:24:51.000Z
|
2021-06-02T20:07:30.000Z
|
import pandas as pd
import numpy as np
from datasets.models import RawUNM
from api.dilutionproc import predict_dilution
from api.analysis import add_confound
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels
def get_dataframe():
"""Returns a pandas DataFrame with the correct
format for the generic plotting functions."""
    # First it is necessary to pivot the raw UNM dataset so it matches
# the requested features.
# This queries the RawUNM dataset and excludes some of the values
# TODO - Should we drop NaN here?
df = pd.DataFrame.from_records(
RawUNM.objects.
# exclude(Creat_Corr_Result__lt=-1000).
# exclude(Creat_Corr_Result__isnull=True).
values()
)
df['creatininemgdl'] = df['creatininemgdl'].astype(float)
df = df[~df['creatininemgdl'].isna()]
covars = ['Outcome_weeks', 'age', 'ethnicity',
'race', 'education', 'BMI', 'income', 'smoking', 'parity',
'preg_complications', 'folic_acid_supp', 'fish', 'babySex',
'birthWt', 'headCirc',
'birthLen','WeightCentile',
'LGA','SGA','ga_collection','birth_year']
df['ga_collection'] = df['gestAge_collection']
# RAW SAMPLE
# id PIN_Patient Member_c TimePeriod Analyte Result Creat_Corr_Result
# 1 A0000M 1 1 BCD 1.877245 -99999.0
# 2 A0001M 1 1 BCD 1.458583 -99999.0
# 3 A0002M 1 1 BCD 1.694041 -99999.0
# 4 A0002M 1 1 BCD 1.401296 -99999.0
# 5 A0003M 1 1 BCD 0.763068 -99999.0
    # Pivoting the table and resetting the index
# TODO - Do we want to plot Result or Creat_Corr_Result
numerical_values = 'Result'
columns_to_indexes = ['PIN_Patient', 'TimePeriod', 'Member_c', 'Outcome'] + covars
categorical_to_columns = ['Analyte']
indexes_to_columns = ['PIN_Patient','Member_c', 'TimePeriod', 'Outcome'] + covars
df = pd.pivot_table(df, values=numerical_values,
index=columns_to_indexes,
columns=categorical_to_columns)
df = df.reset_index(level=indexes_to_columns)
# TODO - Should we drop NaN here?
# After pivot
# Analyte TimePeriod Member_c BCD ... UTMO UTU UUR
# PIN_Patient ...
# A0000M 1 1 1.877245 ... 0.315638 1.095520 0.424221
# A0000M 3 1 1.917757 ... 0.837639 4.549155 0.067877
# A0001M 1 1 1.458583 ... 0.514317 1.262910 1.554346
# A0001M 3 1 1.365789 ... 0.143302 1.692582 0.020716
# A0002M 1 1 1.547669 ... 0.387643 0.988567 1.081877
df['CohortType'] = 'UNM'
df['TimePeriod'] = pd.to_numeric(df['TimePeriod'], errors='coerce')
return df
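# Illustrative sketch, not part of the original adapter: a tiny self-contained
# demo of the long-to-wide reshape that get_dataframe() performs above with
# pd.pivot_table. The PIN/Analyte values are lifted from the commented samples;
# the helper name and everything else here are assumptions made only for the demo.
def _pivot_demo():
    demo = pd.DataFrame({
        'PIN_Patient': ['A0000M', 'A0000M', 'A0001M', 'A0001M'],
        'TimePeriod': [1, 1, 1, 1],
        'Analyte': ['BCD', 'UTU', 'BCD', 'UTU'],
        'Result': [1.877245, 1.095520, 1.458583, 1.262910],
    })
    # One row per (PIN_Patient, TimePeriod), one column per analyte.
    wide = pd.pivot_table(demo, values='Result',
                          index=['PIN_Patient', 'TimePeriod'],
                          columns=['Analyte'])
    # reset_index turns the index levels back into ordinary columns,
    # mirroring the reset_index(level=...) call in get_dataframe() above.
    return wide.reset_index()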
def get_dataframe_nofish():
"""Returns a pandas DataFrame with fish removed for cohort"""
df = get_dataframe()
df['fish'] = df['fish'].astype(float)
neu_logic = (df['fish'] == 0)
df_nofish = df[neu_logic]
return df_nofish
def get_dataframe_orig():
"""Returns a pandas DataFrame with the correct
format for the generic plotting functions."""
    # First it is necessary to pivot the raw UNM dataset so it matches
# the requested features.
# This queries the RawUNM dataset and excludes some of the values
# TODO - Should we drop NaN here?
df = pd.DataFrame.from_records(
RawUNM.objects.
# exclude(Creat_Corr_Result__lt=-1000).
# exclude(Creat_Corr_Result__isnull=True).
values()
)
df['creatininemgdl'] = df['creatininemgdl'].astype(float)
df = df[~df['creatininemgdl'].isna()]
covars = ['Outcome_weeks', 'age', 'ethnicity',
'race', 'education', 'BMI', 'income', 'smoking', 'parity',
'preg_complications', 'folic_acid_supp', 'fish', 'babySex',
'birthWt', 'headCirc',
'birthLen','WeightCentile',
'LGA','SGA','ga_collection','birth_year']
df['ga_collection'] = df['gestAge_collection']
# RAW SAMPLE
# id PIN_Patient Member_c TimePeriod Analyte Result Creat_Corr_Result
# 1 A0000M 1 1 BCD 1.877245 -99999.0
# 2 A0001M 1 1 BCD 1.458583 -99999.0
# 3 A0002M 1 1 BCD 1.694041 -99999.0
# 4 A0002M 1 1 BCD 1.401296 -99999.0
# 5 A0003M 1 1 BCD 0.763068 -99999.0
    # Pivoting the table and resetting the index
# TODO - Do we want to plot Result or Creat_Corr_Result
numerical_values = 'Result'
columns_to_indexes = ['PIN_Patient', 'TimePeriod', 'Member_c']
categorical_to_columns = ['Analyte']
indexes_to_columns = ['PIN_Patient','Member_c', 'TimePeriod']
df = pd.pivot_table(df, values=numerical_values,
index=columns_to_indexes,
columns=categorical_to_columns)
df = df.reset_index()
# TODO - Should we drop NaN here?
# After pivot
# Analyte TimePeriod Member_c BCD ... UTMO UTU UUR
# PIN_Patient ...
# A0000M 1 1 1.877245 ... 0.315638 1.095520 0.424221
# A0000M 3 1 1.917757 ... 0.837639 4.549155 0.067877
# A0001M 1 1 1.458583 ... 0.514317 1.262910 1.554346
# A0001M 3 1 1.365789 ... 0.143302 1.692582 0.020716
# A0002M 1 1 1.547669 ... 0.387643 0.988567 1.081877
df['CohortType'] = 'UNM'
df['TimePeriod'] = pd.to_numeric(df['TimePeriod'], errors='coerce')
return df
def get_dataframe_covars():
    # First it is necessary to pivot the raw UNM dataset so it matches
# the requested features.
# This queries the RawUNM dataset and excludes some of the values
# TODO - Should we drop NaN here?
df = pd.DataFrame.from_records(
RawUNM.objects.
# exclude(Creat_Corr_Result__lt=-1000).
# exclude(Creat_Corr_Result__isnull=True).
values()
)
df['creatininemgdl'] = df['creatininemgdl'].astype(float)
df = df[~df['creatininemgdl'].isna()]
df['ga_collection'] = df['gestAge_collection']
# RAW SAMPLE
# id PIN_Patient Member_c TimePeriod Analyte Result Creat_Corr_Result
# 1 A0000M 1 1 BCD 1.877245 -99999.0
# 2 A0001M 1 1 BCD 1.458583 -99999.0
# 3 A0002M 1 1 BCD 1.694041 -99999.0
# 4 A0002M 1 1 BCD 1.401296 -99999.0
# 5 A0003M 1 1 BCD 0.763068 -99999.0
df['CohortType'] = 'UNM'
df['TimePeriod'] = pd.to_numeric(df['TimePeriod'], errors='coerce')
df['Outcome'] = df['Outcome'].astype(float)
df['LGA'] = df['LGA'].astype(float)
df['SGA'] = df['SGA'].astype(float)
covars = ['CohortType','TimePeriod','Outcome_weeks', 'age', 'ethnicity',
'race', 'education', 'BMI', 'income', 'smoking', 'parity', 'creatininemgdl',
'preg_complications', 'folic_acid_supp', 'fish', 'babySex',
'birthWt', 'headCirc', 'Outcome',
'birthLen','WeightCentile',
'LGA','SGA','ga_collection','birth_year']
adjust_types = ['age', 'ethnicity',
'race', 'education', 'BMI', 'income', 'smoking', 'parity', 'creatininemgdl',
'preg_complications', 'folic_acid_supp', 'fish', 'babySex']
for var in adjust_types:
try:
df[var] = df[var].astype(float)
except:
pass
    ## drop_duplicates is required because the data was initially in long format per analyte/visit/participant
return df[['PIN_Patient'] + covars].drop_duplicates()
def get_dataframe_imputed():
"""Returns a pandas DataFrame with the correct
format for the generic plotting functions."""
    # First it is necessary to pivot the raw UNM dataset so it matches
# the requested features.
# This queries the RawUNM dataset and excludes some of the values
# TODO - Should we drop NaN here?
df = pd.DataFrame.from_records(
RawUNM.objects.
# exclude(Creat_Corr_Result__lt=-1000).
# exclude(Creat_Corr_Result__isnull=True).
values()
)
covars = ['Outcome_weeks', 'age', 'ethnicity',
'race', 'education', 'BMI', 'income', 'smoking', 'parity',
'preg_complications', 'folic_acid_supp', 'fish', 'babySex',
'birthWt', 'headCirc',
'birthLen','WeightCentile',
'LGA','SGA','ga_collection','birth_year']
df['ga_collection'] = df['gestAge_collection']
# RAW SAMPLE
# id PIN_Patient Member_c TimePeriod Analyte Result Creat_Corr_Result
# 1 A0000M 1 1 BCD 1.877245 -99999.0
# 2 A0001M 1 1 BCD 1.458583 -99999.0
# 3 A0002M 1 1 BCD 1.694041 -99999.0
# 4 A0002M 1 1 BCD 1.401296 -99999.0
# 5 A0003M 1 1 BCD 0.763068 -99999.0
    # Pivoting the table and resetting the index
# TODO - Do we want to plot Result or Creat_Corr_Result
numerical_values = 'imputed'
columns_to_indexes = ['PIN_Patient', 'TimePeriod', 'Member_c', 'Outcome']
categorical_to_columns = ['Analyte']
indexes_to_columns = ['PIN_Patient','Member_c', 'TimePeriod', 'Outcome'] + covars
df = pd.pivot_table(df, values=numerical_values,
index=columns_to_indexes,
columns=categorical_to_columns)
df = df.reset_index()
# TODO - Should we drop NaN here?
# After pivot
# Analyte TimePeriod Member_c BCD ... UTMO UTU UUR
# PIN_Patient ...
# A0000M 1 1 1.877245 ... 0.315638 1.095520 0.424221
# A0000M 3 1 1.917757 ... 0.837639 4.549155 0.067877
# A0001M 1 1 1.458583 ... 0.514317 1.262910 1.554346
# A0001M 3 1 1.365789 ... 0.143302 1.692582 0.020716
# A0002M 1 1 1.547669 ... 0.387643 0.988567 1.081877
df['CohortType'] = 'UNM'
df['TimePeriod'] = pd.to_numeric(df['TimePeriod'], errors='coerce')
return df
| 37.940141
| 100
| 0.560278
|
e5c70e753723b9a97cfa6bf00cfc91cacef066a7
| 862
|
py
|
Python
|
Math/python/leetcode67_add_Binary.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
Math/python/leetcode67_add_Binary.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
Math/python/leetcode67_add_Binary.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
# Given two binary strings, return their sum (also a binary string).
# The input strings are both non-empty and contain only the characters 1 or 0.
# Example 1:
# Input: a = "11", b = "1"
# Output: "100"
# Example 2:
# Input: a = "1010", b = "1011"
# Output: "10101"
class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
m, n = len(a) - 1, len(b) - 1
carry = 0
res = []
while m >= 0 or n >= 0 or carry:
if m >= 0 :
carry += int(a[m])
m -= 1
if n >= 0:
carry += int(b[n])
n -= 1
res.append(str(carry % 2))
carry = carry // 2
ans = ("").join(res[::-1])
return ans
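    # Worked trace (illustrative) for a="11", b="1" from Example 1 above: the loop
    # appends "0" (carry 2), "0" (carry 2), then "1" (carry 1); joining the reversed
    # digits yields "100".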
# Time: O(n)
# Space: O(n)
# Difficulty: easy
| 22.102564
| 75
| 0.433875
|
ea23f1e1511e9420d2fabf753649b8833b8dfdbf
| 8,827
|
py
|
Python
|
intersight/model/storage_flex_flash_controller_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/storage_flex_flash_controller_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/storage_flex_flash_controller_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.storage_flex_flash_controller import StorageFlexFlashController
globals()['StorageFlexFlashController'] = StorageFlexFlashController
class StorageFlexFlashControllerListAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'count': (int,), # noqa: E501
'results': ([StorageFlexFlashController], none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StorageFlexFlashControllerListAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'storage.FlexFlashController' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional] # noqa: E501
results ([StorageFlexFlashController], none_type): The array of 'storage.FlexFlashController' resources matching the request.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 50.153409
| 1,678
| 0.643594
|
a1defa967f02bbb8375847a9403e6d4d5512cc2c
| 1,912
|
py
|
Python
|
crosswalk_client/methods/entity/best_match_or_create.py
|
The-Politico/django-crosswalk-client
|
57d7658ed9b91be5d1c5d48398187c7b05acfffe
|
[
"MIT"
] | 3
|
2020-05-26T16:31:19.000Z
|
2021-12-22T16:42:10.000Z
|
crosswalk_client/methods/entity/best_match_or_create.py
|
The-Politico/django-crosswalk-client
|
57d7658ed9b91be5d1c5d48398187c7b05acfffe
|
[
"MIT"
] | 6
|
2019-04-05T17:20:24.000Z
|
2019-04-13T13:33:01.000Z
|
crosswalk_client/methods/entity/best_match_or_create.py
|
The-Politico/django-crosswalk-client
|
57d7658ed9b91be5d1c5d48398187c7b05acfffe
|
[
"MIT"
] | null | null | null |
from urllib.parse import urljoin
import requests
from crosswalk_client.encoder import encode
from crosswalk_client.exceptions import BadResponse
from crosswalk_client.objects.entity import EntityObject
from crosswalk_client.validators.entity import (
validate_block_attrs_kwarg,
validate_create_attrs_kwarg,
validate_domain_kwarg,
validate_required_query_arg,
validate_threshold_kwarg,
)
class BestMatchOrCreate(object):
@validate_required_query_arg
@validate_block_attrs_kwarg
@validate_create_attrs_kwarg
@validate_domain_kwarg
@validate_threshold_kwarg
def best_match_or_create(
self,
query,
block_attrs={},
create_attrs={},
domain=None,
threshold=None,
scorer=None,
return_canonical=True,
):
if domain is None:
domain = self.domain
if threshold is None:
threshold = self.threshold
if scorer is None:
scorer = self.scorer
query_field = list(query.keys())[0]
data = {
"query_field": query_field,
"query_value": query[query_field],
"threshold": threshold,
"block_attrs": block_attrs,
"create_attrs": create_attrs,
"scorer": scorer,
"return_canonical": return_canonical,
}
response = requests.post(
urljoin(
self.service_address,
"domains/{}/entities/best-match-or-create/".format(domain),
),
headers=self.headers,
data=encode(data),
)
if response.status_code != requests.codes.ok:
raise BadResponse(
"The service responded with a {}: {}".format(
response.status_code, response.content
)
)
return EntityObject(response.json(), client=self)
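# Illustrative usage, with hypothetical client/domain/attribute values: assuming
# `client` is a configured crosswalk client exposing this mixin, a call such as
#   client.best_match_or_create({"name": "Springfield"}, block_attrs={"state": "IL"})
# POSTs the encoded query to domains/<domain>/entities/best-match-or-create/ and
# returns an EntityObject built from the service's JSON response.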
| 29.875
| 75
| 0.611925
|
bcbdb3021669a72639707df7af5231c14fe70cb2
| 257
|
py
|
Python
|
django/stock/urls.py
|
nah990/StockF
|
58f719728f4072186459d0ca5651624eab820c5e
|
[
"MIT"
] | null | null | null |
django/stock/urls.py
|
nah990/StockF
|
58f719728f4072186459d0ca5651624eab820c5e
|
[
"MIT"
] | 3
|
2021-12-27T02:05:58.000Z
|
2022-03-23T00:06:14.000Z
|
django/stock/urls.py
|
nah990/StockF
|
58f719728f4072186459d0ca5651624eab820c5e
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.views.generic import TemplateView
app_name = 'stock'
urlpatterns = [
path('test', TemplateView.as_view(template_name="stock/index.html")),
path('', TemplateView.as_view(template_name="stock/index.html")),
]
| 28.555556
| 73
| 0.747082
|
dc06ade5272e3478f34ba3f0439ac9ff43111ca0
| 1,676
|
py
|
Python
|
auv_control_pi/tests/test_simulator.py
|
adrienemery/auv-control-pi
|
633fe89b652b07eb6ebe03c0550daa211b122297
|
[
"MIT"
] | 9
|
2016-10-02T06:59:37.000Z
|
2020-09-24T15:36:10.000Z
|
auv_control_pi/tests/test_simulator.py
|
adrienemery/auv-control-pi
|
633fe89b652b07eb6ebe03c0550daa211b122297
|
[
"MIT"
] | null | null | null |
auv_control_pi/tests/test_simulator.py
|
adrienemery/auv-control-pi
|
633fe89b652b07eb6ebe03c0550daa211b122297
|
[
"MIT"
] | 4
|
2019-01-12T23:09:34.000Z
|
2020-11-05T14:52:42.000Z
|
import pytest
from pygc import great_circle
from ..simulator import Navitgator, GPS, Motor, AHRS
from ..components.navigation import Point, distance_to_point
@pytest.fixture
def sim():
starting_point = Point(50, 120)
return Navitgator(gps=GPS(),
current_location=starting_point,
update_period=1)
def test_simulator_move_to_waypoint(sim):
waypoint = Point(49, 120)
sim.move_to_waypoint(waypoint)
assert sim._compass.heading == 180
def test_simulator_update(sim):
# generate a waypoint 100 meters away due South
heading = 140.0
distance = 100
result = great_circle(distance=distance,
azimuth=heading,
latitude=sim._current_location.lat,
longitude=sim._current_location.lng)
waypoint = Point(result['latitude'], result['longitude'])
sim.move_to_waypoint(waypoint)
sim.speed = 10
starting_point = sim._current_location
# since we have an update period of 1s and speed of 10 m/s
# after one update cycle we should have moved 10 meters
# from our last point
sim._update()
distance_moved = distance_to_point(starting_point, sim._current_location)
assert sim.speed == pytest.approx(distance_moved)
assert heading == pytest.approx(sim._compass.heading)
assert sim.arrived is False
# should take 8 updates total to get within 20 meters
# since we have already moved 10 meters we should only need
# to move another 70 meters
for x in range(7):
sim._update()
if x < 6:
assert sim.arrived is False
assert sim.arrived is True
| 31.622642
| 77
| 0.671241
|
fd45e02f1535a505a9a431d5f9baf5fec189eda6
| 4,688
|
py
|
Python
|
tamr_unify_client/base_collection.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 9
|
2019-08-13T11:07:06.000Z
|
2022-01-14T18:15:13.000Z
|
tamr_unify_client/base_collection.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 166
|
2019-08-09T18:51:05.000Z
|
2021-12-02T15:24:15.000Z
|
tamr_unify_client/base_collection.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 21
|
2019-08-12T15:37:31.000Z
|
2021-06-15T14:06:23.000Z
|
from abc import abstractmethod
from collections.abc import Iterable
class BaseCollection(Iterable):
"""Base class for client-side collections.
:param client: Delegate underlying API calls to this client.
:type client: :class:`~tamr_unify_client.Client`
:param api_path: API path for this collection. E.g. ``"projects/1/inputDatasets"``.
:type api_path: str
"""
def __init__(self, client, api_path):
self.client = client
self.api_path = api_path
@abstractmethod
def by_resource_id(self, canonical_path, resource_id):
"""Retrieve an item in this collection by resource ID.
Subclasses should override this method and pass in the specific
``canonical_path`` for that collection.
:param canonical_path: The canonical (i.e. unaliased) API path for this collection.
:type canonical_path: str
:param resource_id: The resource ID. E.g. "1"
:type resource_id: str
:returns: The specified item.
:rtype: The ``resource_class`` for this collection. See :func:`~tamr_unify_client.base_collection.BaseCollection.by_relative_id`.
"""
relative_id = canonical_path + "/" + resource_id
return self.by_relative_id(relative_id)
@abstractmethod
def by_relative_id(self, resource_class, relative_id):
"""Retrieve an item in this collection by relative ID.
Subclasses should override this method and pass in the specific
``resource_class`` for that collection.
:param resource_class: Resource class corresponding to items in this collection.
:type resource_class: Python class
:param relative_id: The relative ID. E.g. "projects/1"
:type relative_id: str
:returns: The specified item.
:rtype: ``resource_class``
"""
resource_json = self.client.get(relative_id).successful().json()
return resource_class.from_json(
self.client, resource_json, api_path=relative_id
)
@abstractmethod
def stream(self, resource_class):
"""Stream items in this collection.
Subclasses should override this method and pass in the specific
``resource_class`` for that collection.
:param resource_class: Resource class corresponding to items in this collection.
:type resource_class: Python class
:returns: Generator that yields each item.
:rtype: Python generator of ``resource_class``
"""
resources = self.client.get(self.api_path).successful().json()
for resource_json in resources:
yield resource_class.from_json(self.client, resource_json)
def __iter__(self):
return self.stream()
@abstractmethod
def by_external_id(self, resource_class, external_id):
"""Retrieve an item in this collection by external ID.
Subclasses should override this method and pass in the specific
``resource_class`` for that collection.
:param resource_class: Resource class corresponding to items in this collection.
:type resource_class: Python class
:param external_id: The external ID.
:type external_id: str
:returns: The specified item, if found.
:rtype: ``resource_class``
:raises KeyError: If no resource with the specified external_id is found
:raises LookupError: If multiple resources with the specified external_id are found
"""
params = {"filter": "externalId==" + external_id}
resources = self.client.get(self.api_path, params=params).successful().json()
items = [
resource_class.from_json(self.client, resource_json)
for resource_json in resources
]
if len(items) == 0:
raise KeyError(f'No item found with external ID "{external_id}"')
elif len(items) > 1:
raise LookupError(
f'More than one item found with external ID "{external_id}"'
)
return items[0]
def delete_by_resource_id(self, resource_id):
"""Deletes a resource from this collection by resource ID.
:param resource_id: The resource ID of the resource that will be deleted.
:type resource_id: str
:return: HTTP response from the server.
:rtype: :class:`requests.Response`
"""
path = f"{self.api_path}/{resource_id}"
response = self.client.delete(path).successful()
return response
def __repr__(self):
return (
f"{self.__class__.__module__}."
f"{self.__class__.__qualname__}("
f"api_path={self.api_path!r})"
)
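# Illustrative sketch, not part of tamr_unify_client: one way a concrete collection
# can wire the abstract hooks above to a specific resource class. `Dataset` and the
# "datasets" canonical path are hypothetical names used only for this demo.
class Dataset:
    """Minimal stand-in resource; real resources provide a from_json factory."""
    def __init__(self, client, data, api_path=None):
        self.client, self.data, self.api_path = client, data, api_path
    @classmethod
    def from_json(cls, client, resource_json, api_path=None):
        return cls(client, resource_json, api_path=api_path)
class DatasetCollection(BaseCollection):
    """Hypothetical collection that fixes the resource class and canonical path."""
    def by_resource_id(self, resource_id):
        return super().by_resource_id("datasets", resource_id)
    def by_relative_id(self, relative_id):
        return super().by_relative_id(Dataset, relative_id)
    def by_external_id(self, external_id):
        return super().by_external_id(Dataset, external_id)
    def stream(self):
        return super().stream(Dataset)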
| 38.113821
| 138
| 0.656143
|
1d0f8c64fe8122e852ac6ba76fea974737e10080
| 672
|
py
|
Python
|
ExerciciosPYTHON/PythonCeV/053.py
|
Samuel-Melo890/Python-Desafios
|
2abc7734d6a6c1f5ab67421f792d6889d93bac94
|
[
"MIT"
] | null | null | null |
ExerciciosPYTHON/PythonCeV/053.py
|
Samuel-Melo890/Python-Desafios
|
2abc7734d6a6c1f5ab67421f792d6889d93bac94
|
[
"MIT"
] | 2
|
2022-03-18T16:06:07.000Z
|
2022-03-18T16:55:29.000Z
|
ExerciciosPYTHON/PythonCeV/053.py
|
Samuel-Melo890/Python-Desafios
|
2abc7734d6a6c1f5ab67421f792d6889d93bac94
|
[
"MIT"
] | null | null | null |
print('='*8, 'Palindrome Detector', '='*8)
f = str(input('Enter a sentence: ')).strip().upper().split()
fj = ''.join(f)
r = fj[::-1]
print('The sentence {}{}{} read backwards becomes {}{}{}.'.format('\033[4m', fj, '\033[m', '\033[4m', r, '\033[m'))
if fj == r:
    print('Therefore this sentence is a {}PALINDROME{}!'.format('\033[36m', '\033[m'))
else:
    print('Therefore this sentence is {}NOT{} a {}PALINDROME{}!'.format('\033[31m', '\033[m', '\033[36m', '\033[m'))
#r = ''
#for l in range(len(fj) -1, -1, -1):
    #r += fj[l]
#print('The reverse of {} is {}.'.format(fj, r))
#if fj == r:
    #print('We have a palindrome!')
#else:
    #print('The sentence is not a palindrome.')
| 35.368421
| 115
| 0.543155
|
d6e810f6e9e19d9716f8e921bd30fe5676f69f23
| 812
|
py
|
Python
|
example/speech-demo/tests/test_nothing.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | 399
|
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
example/speech-demo/tests/test_nothing.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | 58
|
2017-05-30T23:25:32.000Z
|
2019-11-18T09:30:54.000Z
|
example/speech-demo/tests/test_nothing.py
|
IIMarch/mxnet
|
64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b
|
[
"Apache-2.0"
] | 107
|
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def test_nothing():
pass
| 40.6
| 62
| 0.770936
|
1383b333bce4199ba0c3d53c7b33217efeb2a454
| 268
|
py
|
Python
|
tests/testapp/models.py
|
sen-den/django-link-shortener
|
9dcfcf362a14b11a1fa203b376a4bd2840450b09
|
[
"MIT"
] | 24
|
2018-06-09T10:00:53.000Z
|
2022-03-04T19:49:49.000Z
|
tests/testapp/models.py
|
sen-den/django-link-shortener
|
9dcfcf362a14b11a1fa203b376a4bd2840450b09
|
[
"MIT"
] | 8
|
2018-06-10T20:45:20.000Z
|
2022-02-20T14:50:07.000Z
|
tests/testapp/models.py
|
sen-den/django-link-shortener
|
9dcfcf362a14b11a1fa203b376a4bd2840450b09
|
[
"MIT"
] | 7
|
2019-05-22T06:25:38.000Z
|
2022-03-04T01:03:17.000Z
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class CustomUser(AbstractUser):
email = models.EmailField(unique=True, error_messages={
'unique': "A user with that email already exists."
})
| 26.8
| 59
| 0.738806
|
5d06f31ba2bf9f1fb58640d75929604c576c9827
| 616
|
py
|
Python
|
bin/startup.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-11-16T00:58:43.000Z
|
2021-11-16T00:58:43.000Z
|
bin/startup.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 44
|
2022-01-20T01:31:32.000Z
|
2022-03-31T01:50:41.000Z
|
bin/startup.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-05-12T19:11:52.000Z
|
2021-05-12T19:11:52.000Z
|
from os import path, sys
# Good overview of what is going on here:
# https://stackoverflow.com/questions/11536764/how-to-fix-attempted-relative-import-in-non-package-even-with-init-py
# Once we have a stable package name for core, it should be easier to do away with something like this.
# For now we add the core component path to sys.path when we are running these scripts.
component_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
# Load the 'core' module as though this script were being run from
# the parent component (either circulation or metadata).
sys.path.append(component_dir)
| 51.333333
| 116
| 0.784091
|
8d53d257e96cfb17ba8b88d7de2e5c41fa65a818
| 6,628
|
py
|
Python
|
ews-fetch-calendar.py
|
chckyn/ews-orgmode
|
a63ee0f703912812702c17b139ba6eabc441ddfd
|
[
"MIT"
] | 50
|
2015-01-19T16:45:13.000Z
|
2021-11-19T13:57:44.000Z
|
ews-fetch-calendar.py
|
chckyn/ews-orgmode
|
a63ee0f703912812702c17b139ba6eabc441ddfd
|
[
"MIT"
] | 6
|
2015-03-03T07:54:23.000Z
|
2020-06-16T13:20:21.000Z
|
ews-fetch-calendar.py
|
chckyn/ews-orgmode
|
a63ee0f703912812702c17b139ba6eabc441ddfd
|
[
"MIT"
] | 20
|
2015-01-10T20:41:31.000Z
|
2021-07-13T18:16:57.000Z
|
#!/usr/bin/python
# This script was inspired by:
# http://blogs.msdn.com/b/exchangedev/archive/2009/02/05/quick-and-dirty-unix-shell-scripting-with-ews.aspx
# http://ewsmacwidget.codeplex.com/
import os
from lxml import etree
from datetime import datetime
from datetime import date
from datetime import timedelta
from pytz import timezone
from StringIO import StringIO
import pytz
import pycurl
import base64
import ConfigParser
# Read the config file
timezoneLocation = os.getenv('TZ', 'UTC')
config = ConfigParser.RawConfigParser({
'path': '/ews/Exchange.asmx',
'username': '',
'password': '',
'auth_type': 'any',
'cainfo': '',
'timezone': timezoneLocation,
'days_history': 7,
'days_future': 30,
'max_entries': 100})
dir = os.path.dirname(os.path.realpath(__file__))
config.read(os.path.join(dir, 'config.cfg'))
# Exchange user and password
ewsHost = config.get('ews-orgmode', 'host')
ewsUrl = config.get('ews-orgmode', 'path')
ewsUser = config.get('ews-orgmode', 'username')
ewsPassword = config.get('ews-orgmode', 'password')
ewsAuthType = config.get('ews-orgmode', 'auth_type').lower()
ewsCAInfo = config.get('ews-orgmode', 'cainfo')
timezoneLocation = config.get('ews-orgmode', 'timezone')
daysHistory = config.getint('ews-orgmode', 'days_history')
daysFuture = config.getint('ews-orgmode', 'days_future')
maxEntries = config.getint('ews-orgmode', 'max_entries')
def parse_ews_date(dateStr):
d = datetime.strptime(dateStr, "%Y-%m-%dT%H:%M:%SZ")
exchangeTz = pytz.utc
localTz = timezone(timezoneLocation)
return exchangeTz.localize(d).astimezone(localTz);
def format_orgmode_date(dateObj):
return dateObj.strftime("%Y-%m-%d %H:%M")
def format_orgmode_time(dateObj):
return dateObj.strftime("%H:%M")
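# Worked example (illustrative values): with timezoneLocation set to "Europe/Berlin",
# parse_ews_date("2020-01-10T20:41:31Z") converts the EWS UTC timestamp to
# 2020-01-10 21:41 local time (UTC+1 in January), which format_orgmode_date
# renders as "2020-01-10 21:41".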
# Helper function to write an orgmode entry
def print_orgmode_entry(subject, start, end, location, response):
startDate = parse_ews_date(start);
endDate = parse_ews_date(end);
# Check if the appointment starts and ends on the same day and use proper formatting
dateStr = ""
if startDate.date() == endDate.date():
dateStr = "<" + format_orgmode_date(startDate) + "-" + format_orgmode_time(endDate) + ">"
else:
dateStr = "<" + format_orgmode_date(startDate) + ">--<" + format_orgmode_date(endDate) + ">"
if subject is not None:
if dateStr != "":
print "* " + dateStr + " " + subject.encode('ascii', 'ignore')
else:
print "* " + subject.encode('ascii', 'ignore')
if location is not None:
print ":PROPERTIES:"
print ":LOCATION: " + location.encode('utf-8')
print ":RESPONSE: " + response.encode('utf-8')
print ":END:"
print ""
# Debug code
# print_orgmode_entry("subject", "2012-07-27T11:10:53Z", "2012-07-27T11:15:53Z", "location", "participants")
# exit(0)
# Build the soap request
# For CalendarItem documentation, http://msdn.microsoft.com/en-us/library/exchange/aa564765(v=exchg.140).aspx
start = date.today() - timedelta(days=daysHistory)
end = date.today() + timedelta(days=daysFuture)
request = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<soap:Body>
<FindItem Traversal="Shallow" xmlns="http://schemas.microsoft.com/exchange/services/2006/messages">
<ItemShape>
<t:BaseShape>Default</t:BaseShape>
<t:AdditionalProperties>
<t:FieldURI FieldURI="calendar:MyResponseType"/>
</t:AdditionalProperties>
</ItemShape>
<CalendarView MaxEntriesReturned="{2}" StartDate="{0}T00:00:00-08:00" EndDate="{1}T00:00:00-08:00"/>
<ParentFolderIds>
<t:DistinguishedFolderId Id="calendar"/>
</ParentFolderIds>
</FindItem>
</soap:Body>
</soap:Envelope>""".format(start, end, maxEntries)
request_len = len(request)
request = StringIO(request)
# Debug code
# print request.getvalue()
# exit(0)
h = []
h.append('Content-Type: text/xml; charset=UTF-8')
h.append('Content-Length: %d' % request_len);
header = StringIO()
body = StringIO()
c = pycurl.Curl()
# Debug code
# c.setopt(c.VERBOSE, 1)
c.setopt(c.URL, 'https://%s%s' % (ewsHost, ewsUrl))
c.setopt(c.POST, 1)
c.setopt(c.HTTPHEADER, h)
if ewsAuthType == 'digest':
c.setopt(c.HTTPAUTH, c.HTTPAUTH_DIGEST)
elif ewsAuthType == 'basic':
c.setopt(c.HTTPAUTH, c.HTTPAUTH_BASIC)
elif ewsAuthType == 'ntlm':
c.setopt(c.HTTPAUTH, c.HTTPAUTH_NTLM)
elif ewsAuthType == 'negotiate':
c.setopt(c.HTTPAUTH, c.HTTPAUTH_GSSNEGOTIATE)
elif ewsAuthType == 'any':
c.setopt(c.HTTPAUTH, c.HTTPAUTH_ANYSAFE)
c.setopt(c.USERPWD, '%s:%s' % (ewsUser, ewsPassword))
if len(ewsCAInfo) > 0:
c.setopt(c.CAINFO, ewsCAInfo)
# http://stackoverflow.com/questions/27808835/fail-to-assign-io-object-to-writedata-pycurl
c.setopt(c.WRITEFUNCTION, body.write)
c.setopt(c.HEADERFUNCTION, header.write)
c.setopt(c.READFUNCTION, request.read)
c.perform()
c.close()
# Read the webservice response
data = body.getvalue()
# Debug code
# print data
# exit(0)
# Parse the result xml
root = etree.fromstring(data)
xpathStr = "/s:Envelope/s:Body/m:FindItemResponse/m:ResponseMessages/m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem"
namespaces = {
's': 'http://schemas.xmlsoap.org/soap/envelope/',
't': 'http://schemas.microsoft.com/exchange/services/2006/types',
'm': 'http://schemas.microsoft.com/exchange/services/2006/messages',
}
# Print calendar elements
elements = root.xpath(xpathStr, namespaces=namespaces)
for element in elements:
subjectElem = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}Subject')
if subjectElem is not None:
subject = subjectElem.text
else:
subject = ""
locationElem = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}Location')
if locationElem is not None:
location = locationElem.text
else:
location = ""
startElem = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}Start')
if startElem is not None:
start = startElem.text
else:
start = ""
endElem = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}End')
if endElem is not None:
end = endElem.text
else:
end = ""
responseElem = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}MyResponseType')
if responseElem is not None:
response = responseElem.text
else:
response = ""
print_orgmode_entry(subject, start, end, location, response)
| 32.174757
| 131
| 0.702625
|
d9f4fda51b20e161332b506d6241e4f74b1703e1
| 2,880
|
py
|
Python
|
test/test_volume.py
|
jonazpiazu/rocker
|
37a60a046fd858f4a0b812de70f88444ddae56cc
|
[
"Apache-2.0"
] | 259
|
2019-01-09T06:39:04.000Z
|
2022-03-30T11:29:04.000Z
|
test/test_volume.py
|
jonazpiazu/rocker
|
37a60a046fd858f4a0b812de70f88444ddae56cc
|
[
"Apache-2.0"
] | 150
|
2019-01-03T00:06:16.000Z
|
2022-03-17T01:44:20.000Z
|
test/test_volume.py
|
osrf/crocker
|
556596dc099f823a7364b61e68e68b9831eed4b3
|
[
"Apache-2.0"
] | 37
|
2019-02-21T20:39:00.000Z
|
2022-01-03T20:02:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from rocker.core import list_plugins
from rocker.volume_extension import Volume
class VolumeTest(unittest.TestCase):
def setUp(self):
self._instance = Volume()
self._curr_path = os.path.abspath(os.path.curdir)
self._virtual_path = "/path/in/container"
def _test_equals_args(self, mock_cliargs, expected):
"""
@type mock_cliargs: { str: [[str]] }
@type expected: [[str]]
"""
print("DEBUG: 'mock_cliargs' {}\n\t'expected': {}".format(mock_cliargs, expected))
docker_args = self._instance.get_docker_args(mock_cliargs)
print("DEBUG: Resulted docker_args: {}".format(docker_args))
for arg_expected in expected:
# Whitespace at the beginning is needed.
complete_expected = " {} {}".format(Volume.ARG_DOCKER_VOLUME, arg_expected[0])
self.assertTrue(complete_expected in docker_args)
def test_args_single(self):
"""Passing source path"""
arg = [[self._curr_path]]
expected = [['{}:{}'.format(self._curr_path, self._curr_path)]]
mock_cliargs = {Volume.name: arg}
self._test_equals_args(mock_cliargs, expected)
def test_args_twopaths(self):
"""Passing source path, dest path"""
arg = ["{}:{}".format(self._curr_path, self._virtual_path)]
mock_cliargs = {Volume.name: [arg]}
self._test_equals_args(mock_cliargs, arg)
def test_args_twopaths_opt(self):
"""Passing source path, dest path, and Docker's volume option"""
arg = ["{}:{}:ro".format(self._curr_path, self._virtual_path)]
mock_cliargs = {Volume.name: [arg]}
self._test_equals_args(mock_cliargs, arg)
def test_args_two_volumes(self):
"""Multiple volume points"""
arg_first = ["{}:{}:ro".format(self._curr_path, self._virtual_path)]
arg_second = ["/tmp:{}".format(os.path.join(self._virtual_path, "tmp"))]
args = [arg_first, arg_second]
mock_cliargs = {Volume.name: args}
self._test_equals_args(mock_cliargs, args)
| 41.142857
| 90
| 0.678472
|
514dbdb6ed886c1e6bed7d80c4bba264cb875e5d
| 3,319
|
py
|
Python
|
goodbadcomment/HcpChapingGet.py
|
MyCodeBattle/ManyPython
|
eee07c96e3048248ce281a9203fc8cd1a38f3f2a
|
[
"MIT"
] | null | null | null |
goodbadcomment/HcpChapingGet.py
|
MyCodeBattle/ManyPython
|
eee07c96e3048248ce281a9203fc8cd1a38f3f2a
|
[
"MIT"
] | null | null | null |
goodbadcomment/HcpChapingGet.py
|
MyCodeBattle/ManyPython
|
eee07c96e3048248ce281a9203fc8cd1a38f3f2a
|
[
"MIT"
] | null | null | null |
import pandas as pd
from loguru import logger
import arrow
import time
import json
import requests
import tqdm
from retrying import retry
headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'G_zj_gsid=08890c40500a4a8ab21e0b2b9e9e47b1-gsid-', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
index = 1
df = pd.DataFrame()
while True:
reqBody = {"pageNum": index, "pageSize": 20, "createTimeFrom": 1609430400000, "createTimeTo": 1623227340000, "channelList": [], "areaList": ["ff8080815d551320015d589cc1da0014", "ff8080815d551320015d589cc1da0014$0", "ff8080815df350d4015df3d64603057a", "ff8080815df350d4015df4333f0006cb", "ff8080815df350d4015df44090fd06f5", "ff8080815df350d4015df44e7d100706", "ff8080815de36663015de3e0d0380000", "ff8080815de36663015de3ef492c0015", "ff8080815df45323015df461a0e6000b", "ff8080815df482ba015df7dceec40326", "ff8080815df45323015df46e7610001d", "ff8080815df482ba015df7eb3a8c0367", "ff8080815df45323015df47feebf0116", "ff8080815dfd3779015dfdc8850c06c5", "ff8080815dfc9cb1015dfcf0c19f109f", "bb4a9da3634a58f601636c22dc381288d", "bb4a9da3634a58f601636c22583f125b", "ff8080816f12372d016f1d5959bb7545"], "statusList": []}
res = json.loads(requests.post(url = 'https://opt.zjzwfw.gov.cn/rest/api/evaluation/case/historyForHandle/list?ctoken=4300b7fc-1f14-4591-a460-b58da21841d3', json=reqBody, headers = headers).text)
# print(res)
if not res['data']['list']:
break
df = df.append(pd.DataFrame(res['data']['list']), ignore_index=True)
index += 1
time.sleep(0.5)
df.to_excel('好差评结果.xls', index=False)
df = pd.read_excel('好差评结果.xls', dtype=str)
resDf = pd.DataFrame()
process = tqdm.tqdm(total=df.shape[0], ncols=200)
for idx, row in df.iterrows():
res = json.loads(requests.get(url='https://opt.zjzwfw.gov.cn/rest/api/evaluation/detail?ctoken=598cd51b-5ecf-49f6-8e1d-e503d20df4de&id={}&isQueryCaseInfo=true&portalTyp=G'.format(row['evaluationId']), headers=headers).text)['data']
    # appeal reason (申诉理由)
appealReason = res['caseDTO']['appealReason']
    # rejection reason (驳回理由)
failedReason = res['caseDTO']['failedReason']
    # contact info of the reviewer who left the bad rating (差评人联系方式)
phone = res['raterInfoDTO']['phoneNum']
    # rectification reason (整改理由)
solution = res['caseDTO']['solution']
    # follow-up review level (追评等级)
plusCommentDTO = res['feedbackItemDTOList']
row['申诉理由'] = appealReason
row['驳回理由'] = failedReason
row['整改回复'] = solution
row['差评人手机'] = phone
if plusCommentDTO:
plusCommentDTO = plusCommentDTO[0]
row['追评等级'] = plusCommentDTO['levelDesc']
row['追评文本'] = plusCommentDTO['rateText']
resDf = resDf.append(row)
time.sleep(0.3)
process.update(1)
resDf.to_excel('原始表咯.xls', index=False)
resDf.rename(columns={'caseTime': '评价时间', 'channelDesc': '评价渠道', 'dealTime': '办结时间', 'evalutionLevel': '评价等级', 'location': '地区', 'matterName': '事项名称', 'name': '差评人姓名', 'raterText': '差评内容', 'reformTime': '整改时间', 'reformer': '整改人', 'statusString': '工单状态', 'departmentName': '部门名称', 'code': '工单编号'}, inplace=True)
resDf = resDf[['工单编号', '地区', '部门名称', '事项名称', '办结时间', '评价时间', '差评内容', '评价渠道', '评价等级', '差评人姓名', '差评人手机', '申诉理由', '驳回理由', '整改回复', '整改人', '工单状态', '追评等级', '追评文本']]
process.close()
resDf.to_excel(f'{arrow.now().strftime("%m%d")}差评信息.xls', index=False)
| 46.097222
| 814
| 0.712263
|
02439bc423929cf57d69d44d014931c16a74ad5e
| 1,717
|
py
|
Python
|
spinoffs/inference_gym/targets/banana_test.py
|
KonstantinKlepikov/probability
|
0cc6c5febf3b10ece5bb2b9877bd695137a420ea
|
[
"Apache-2.0"
] | 1
|
2020-08-28T21:01:19.000Z
|
2020-08-28T21:01:19.000Z
|
spinoffs/inference_gym/targets/banana_test.py
|
KonstantinKlepikov/probability
|
0cc6c5febf3b10ece5bb2b9877bd695137a420ea
|
[
"Apache-2.0"
] | null | null | null |
spinoffs/inference_gym/targets/banana_test.py
|
KonstantinKlepikov/probability
|
0cc6c5febf3b10ece5bb2b9877bd695137a420ea
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.banana."""
import tensorflow.compat.v2 as tf
from spinoffs.inference_gym.internal import test_util
from spinoffs.inference_gym.targets import banana
@test_util.multi_backend_test(globals(), 'targets.banana_test')
class BananaTest(test_util.InferenceGymTestCase):
def testBasic(self):
"""Checks that you get finite values given unconstrained samples.
We check `unnormalized_log_prob` as well as the values of the sample
transformations.
"""
model = banana.Banana(ndims=3)
self.validate_log_prob_and_transforms(
model,
sample_transformation_shapes=dict(identity=[3]),
check_ground_truth_mean=True,
check_ground_truth_standard_deviation=True,
)
def testMC(self):
"""Checks true samples from the model against the ground truth."""
model = banana.Banana(ndims=3)
self.validate_ground_truth_using_monte_carlo(
model,
num_samples=int(1e6),
)
if __name__ == '__main__':
tf.test.main()
| 32.396226
| 78
| 0.70763
|
4b92b5d50fddc227448a20d9adddc59cc2a73c4c
| 2,134
|
py
|
Python
|
pyjobs/core/management/commands/send_weekly_mailing.py
|
Mdslino/PyJobs
|
d2496d58067503c3304a6c59052238b1f097472b
|
[
"BSD-3-Clause"
] | 132
|
2017-10-27T23:54:47.000Z
|
2022-03-15T12:10:10.000Z
|
pyjobs/core/management/commands/send_weekly_mailing.py
|
Mdslino/PyJobs
|
d2496d58067503c3304a6c59052238b1f097472b
|
[
"BSD-3-Clause"
] | 129
|
2017-09-05T04:22:50.000Z
|
2022-03-12T01:06:49.000Z
|
pyjobs/core/management/commands/send_weekly_mailing.py
|
Mdslino/PyJobs
|
d2496d58067503c3304a6c59052238b1f097472b
|
[
"BSD-3-Clause"
] | 82
|
2017-10-28T00:14:04.000Z
|
2021-07-27T20:00:40.000Z
|
import os
from datetime import datetime
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
from django.template import Context
from pyjobs.core.models import Job
from pyjobs.marketing.models import MailingList
from pyjobs.marketing.utils import post_telegram_channel
from django.conf import settings
def format_owner_email(email):
splited_email = email.split("@")
splited_owner_email = settings.WEBSITE_OWNER_EMAIL.split("@")
return "{}+{}@{}".format(
splited_owner_email[0], splited_email[0], splited_owner_email[1]
)
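# Worked example (illustrative addresses, not from the project settings): with
# settings.WEBSITE_OWNER_EMAIL = "owner@pyjobs.example",
# format_owner_email("alice@gmail.com") returns "owner+alice@pyjobs.example",
# the plus-addressed alias the command below uses as the From address for each
# subscriber's message.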
def check_today_is_the_right_day():
if datetime.today().weekday() == 2:
return True
return False
class Command(BaseCommand):
def handle(self, *args, **options):
if not check_today_is_the_right_day():
return "False"
emails_mailing_lists = [mailing.email for mailing in MailingList.objects.all()]
if len(emails_mailing_lists) == 0:
return "False"
emails_mailing_replies = [
format_owner_email(email) for email in emails_mailing_lists
]
to_emails = emails_mailing_replies
from_emails = emails_mailing_lists
jobs = list(Job.get_premium_jobs())
missing_jobs = 10 - len(jobs)
jobs += list(Job.get_feed_jobs())[:missing_jobs]
if len(jobs) == 0:
return "False"
plain_text = get_template("emails/weekly_summary.txt")
subject = "[Vagas][PyJobs] Vagas Python da semana passada"
context = {
"dono_do_site": settings.WEBSITE_OWNER_NAME,
"nome_do_site": settings.WEBSITE_NAME,
"url_do_site": settings.WEBSITE_HOME_URL,
"jobs": jobs,
}
text_content = plain_text.render(context)
for email_tup in zip(to_emails, from_emails):
msg = EmailMultiAlternatives(
subject, text_content, email_tup[0], [email_tup[1]]
)
msg.send()
return "True"
| 27.714286
| 87
| 0.66448
|
b1df6c42c5729736b50609dcb895f9b2216f8075
| 58,316
|
py
|
Python
|
angr/calling_conventions.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
angr/calling_conventions.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
angr/calling_conventions.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import claripy
import archinfo
from typing import Union, Optional, List
from .sim_type import SimTypeChar
from .sim_type import SimTypePointer
from .sim_type import SimTypeFixedSizeArray
from .sim_type import SimTypeArray
from .sim_type import SimTypeString
from .sim_type import SimTypeFunction
from .sim_type import SimTypeFloat
from .sim_type import SimTypeDouble
from .sim_type import SimTypeReg
from .sim_type import SimStruct
from .sim_type import parse_file
from .sim_type import SimTypeTop
from .state_plugins.sim_action_object import SimActionObject
l = logging.getLogger(name=__name__)
from .engines.soot.engine import SootMixin
# TODO: This file contains explicit and implicit byte size assumptions all over. A good attempt to fix them was made.
# If your architecture hails from the astral plane, and you're reading this, start fixing here.
class PointerWrapper:
def __init__(self, value):
self.value = value
class AllocHelper:
def __init__(self, ptrsize, reverse_result):
self.base = claripy.BVS('alloc_base', ptrsize)
self.ptr = self.base
self.reverse_result = reverse_result
self.stores = {}
def dump(self, val, state, endness='Iend_BE'):
self.stores[self.ptr.cache_key] = (val, endness)
out = self.ptr
self.ptr += val.length // state.arch.byte_width
return out.reversed if self.reverse_result else out
def translate(self, val, base):
return val.replace(self.base, base)
def apply(self, state, base):
for ptr, (val, endness) in self.stores.items():
state.memory.store(self.translate(ptr.ast, base), self.translate(val, base), endness=endness)
def size(self):
val = self.translate(self.ptr, claripy.BVV(0, len(self.ptr)))
assert val.op == 'BVV'
return abs(val.args[0])
class SimFunctionArgument:
"""
Represent a generic function argument.
:ivar int size: The size of the argument, in number of bytes.
"""
def __init__(self, size):
self.size = size
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(('function_argument', self.size))
def check_value(self, value):
if not isinstance(value, claripy.ast.Base) and self.size is None:
raise TypeError("Only claripy objects may be stored through SimFunctionArgument when size is not provided")
if self.size is not None and isinstance(value, claripy.ast.Base) and self.size*8 < value.length:
raise TypeError("%s doesn't fit in an argument of size %d" % (value, self.size))
def set_value(self, state, value, **kwargs):
raise NotImplementedError
def get_value(self, state, **kwargs):
raise NotImplementedError
class SimRegArg(SimFunctionArgument):
"""
Represents a function argument that has been passed in a register.
:ivar string reg_name: The name of the represented register.
:ivar int size: The size of the register, in number of bytes.
"""
def __init__(self, reg_name, size, alt_offsets=None):
SimFunctionArgument.__init__(self, size)
self.reg_name = reg_name
self.alt_offsets = {} if alt_offsets is None else alt_offsets
def __repr__(self):
return "<%s>" % self.reg_name
def __eq__(self, other):
return type(other) is SimRegArg and self.reg_name == other.reg_name
def __hash__(self):
return hash((self.size, self.reg_name, tuple(self.alt_offsets)))
def _fix_offset(self, state, size, arch=None):
"""
This is a hack to deal with small values being stored at offsets into large registers unpredictably
"""
if state is not None:
arch = state.arch
if arch is None:
raise ValueError('Either "state" or "arch" must be specified.')
offset = arch.registers[self.reg_name][0]
if size in self.alt_offsets:
return offset + self.alt_offsets[size]
elif size < self.size and arch.register_endness == 'Iend_BE':
return offset + (self.size - size)
return offset
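    # Worked example (illustrative, not from the original source): for an 8-byte
    # register on a big-endian architecture with no alt_offsets entry, storing a
    # 4-byte value resolves to offset + (8 - 4), i.e. the slice that holds the
    # register's least-significant bytes, which is what the branch above computes.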
def set_value(self, state, value, endness=None, size=None, **kwargs): # pylint: disable=unused-argument,arguments-differ
self.check_value(value)
if endness is None: endness = state.arch.register_endness
if isinstance(value, int): value = claripy.BVV(value, self.size*8)
if size is None: size = min(self.size, value.length // 8)
offset = self._fix_offset(state, size)
state.registers.store(offset, value, endness=endness, size=size)
def get_value(self, state, endness=None, size=None, **kwargs): # pylint: disable=unused-argument,arguments-differ
if endness is None: endness = state.arch.register_endness
if size is None: size = self.size
offset = self._fix_offset(state, size)
return state.registers.load(offset, endness=endness, size=size)
class SimStackArg(SimFunctionArgument):
"""
Represents a function argument that has been passed on the stack.
:var int stack_offset: The position of the argument relative to the stack pointer after the function prelude.
:ivar int size: The size of the argument, in number of bytes.
"""
def __init__(self, stack_offset, size):
SimFunctionArgument.__init__(self, size)
self.stack_offset = stack_offset
def __repr__(self):
return "[%#x]" % self.stack_offset
def __eq__(self, other):
return type(other) is SimStackArg and self.stack_offset == other.stack_offset
def __hash__(self):
return hash((self.size, self.stack_offset))
def set_value(self, state, value, endness=None, stack_base=None): # pylint: disable=arguments-differ
self.check_value(value)
if endness is None: endness = state.arch.memory_endness
if stack_base is None: stack_base = state.regs.sp
if isinstance(value, int): value = claripy.BVV(value, self.size*8)
state.memory.store(stack_base + self.stack_offset, value, endness=endness, size=value.length//8)
def get_value(self, state, endness=None, stack_base=None, size=None): # pylint: disable=arguments-differ
if endness is None: endness = state.arch.memory_endness
if stack_base is None: stack_base = state.regs.sp
return state.memory.load(stack_base + self.stack_offset, endness=endness, size=size or self.size)
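# Illustrative sketch (not part of the original module): using the argument
# locations above directly. It assumes `state` is an existing angr SimState
# for an x86-like architecture; the register name and stack offset are only
# examples.
def _example_argument_locations(state):
    reg_loc = SimRegArg('eax', 4)        # an argument/return slot in a register
    stack_loc = SimStackArg(4, 4)        # an argument slot at [sp + 4]
    reg_loc.set_value(state, 0x1337)                   # ints are wrapped in BVVs
    stack_loc.set_value(state, claripy.BVV(0x42, 32))  # claripy ASTs work too
    return reg_loc.get_value(state), stack_loc.get_value(state)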
class SimComboArg(SimFunctionArgument):
def __init__(self, locations):
super(SimComboArg, self).__init__(sum(x.size for x in locations))
self.locations = locations
def __repr__(self):
return 'SimComboArg(%s)' % repr(self.locations)
def __eq__(self, other):
return type(other) is SimComboArg and all(a == b for a, b in zip(self.locations, other.locations))
def set_value(self, state, value, endness=None, **kwargs): # pylint:disable=arguments-differ
# TODO: This code needs to be reworked for variable byte width and the Third Endness
self.check_value(value)
if endness is None: endness = state.arch.memory_endness
if isinstance(value, int):
value = claripy.BVV(value, self.size*state.arch.byte_width)
elif isinstance(value, float):
if self.size not in (4, 8):
raise ValueError("What do I do with a float %d bytes long" % self.size)
value = claripy.FPV(value, claripy.FSORT_FLOAT if self.size == 4 else claripy.FSORT_DOUBLE)
cur = 0
# TODO: I have no idea if this reversed is only supposed to be applied in LE situations
for loc in reversed(self.locations):
loc.set_value(state, value[cur*state.arch.byte_width + loc.size*state.arch.byte_width - 1:cur*state.arch.byte_width], endness=endness, **kwargs)
cur += loc.size
def get_value(self, state, endness=None, **kwargs): # pylint:disable=arguments-differ
if endness is None: endness = state.arch.memory_endness
vals = []
for loc in reversed(self.locations):
vals.append(loc.get_value(state, endness, **kwargs))
return state.solver.Concat(*vals)
class ArgSession:
"""
A class to keep track of the state accumulated in laying parameters out into memory
"""
__slots__ = ('cc', 'real_args', 'fp_iter', 'int_iter', 'both_iter', )
def __init__(self, cc):
self.cc = cc
self.real_args = None
self.fp_iter = None
self.int_iter = None
self.both_iter = None
# these iters should only be used if real_args are not set or real_args are intentionally ignored (e.g., when
# variadic arguments are used).
self.fp_iter = cc.fp_args
self.int_iter = cc.int_args
self.both_iter = cc.both_args
if cc.args is not None:
self.real_args = iter(cc.args)
# TODO: use safer errors than TypeError and ValueError
def next_arg(self, is_fp, size=None, ignore_real_args=False):
if self.real_args is not None and not ignore_real_args:
try:
arg = next(self.real_args)
if is_fp and self.cc.is_fp_arg(arg) is False:
raise TypeError("Can't put a float here - concrete arg positions are specified")
elif not is_fp and self.cc.is_fp_arg(arg) is True:
raise TypeError("Can't put an int here - concrete arg positions are specified")
except StopIteration:
raise TypeError("Accessed too many arguments - concrete number are specified")
else:
try:
if is_fp:
arg = next(self.fp_iter)
else:
arg = next(self.int_iter)
except StopIteration:
try:
arg = next(self.both_iter)
except StopIteration:
raise TypeError("Accessed too many arguments - exhausted all positions?")
if size is not None and size > arg.size:
arg = self.upsize_arg(arg, is_fp, size)
return arg
def upsize_arg(self, arg, is_fp, size):
if not is_fp:
raise ValueError("You can't fit a integral value of size %d into an argument of size %d!" % (size, arg.size))
if not isinstance(arg, SimStackArg):
raise ValueError("I don't know how to handle this? please report to @rhelmot")
arg_size = arg.size
locations = [arg]
while arg_size < size:
next_arg = self.next_arg(is_fp, None)
arg_size += next_arg.size
locations.append(next_arg)
return SimComboArg(locations)
class SimCC:
"""
A calling convention allows you to extract from a state the data passed from function to
function by calls and returns. Most of the methods provided by SimCC that operate on a state
assume that the program is just after a call but just before stack frame allocation, though
this may be overridden with the `stack_base` parameter to each individual method.
This is the base class for all calling conventions.
    An instance of this class can be tweaked to describe how a specific function should be called.
"""
def __init__(self,
arch: archinfo.Arch,
args: Optional[List[SimFunctionArgument]]=None,
ret_val: Optional[SimFunctionArgument]=None,
sp_delta: Optional[int]=None,
func_ty: Optional[Union[SimTypeFunction, str]]=None):
"""
:param arch: The Archinfo arch for this CC
:param args: A list of SimFunctionArguments describing where the arguments go
:param ret_val: A SimFunctionArgument describing where the return value goes
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:param func_ty: A SimTypeFunction for the function itself, or a string that can be parsed into a
SimTypeFunction instance.
Example func_ty strings:
>>> "int func(char*, int)"
>>> "int f(int, int, int*);"
Function names are ignored.
"""
if func_ty is not None:
if isinstance(func_ty, str):
if not func_ty.endswith(";"):
func_ty += ";" # Make pycparser happy
parsed = parse_file(func_ty)
parsed_decl = parsed[0]
if not parsed_decl:
raise ValueError('Cannot parse the provided function prototype.')
_, func_ty = next(iter(parsed_decl.items()))
if not isinstance(func_ty, SimTypeFunction):
raise TypeError("Function prototype must be a SimTypeFunction instance or a string that can be parsed "
"into a SimTypeFunction instance.")
self.arch = arch
self.args = args
self.ret_val = ret_val
self.sp_delta = sp_delta
self.func_ty: Optional[SimTypeFunction] = func_ty if func_ty is None else func_ty.with_arch(arch)
@classmethod
def from_arg_kinds(cls, arch, fp_args, ret_fp=False, sizes=None, sp_delta=None, func_ty=None):
"""
Get an instance of the class that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
        :param func_ty:  A SimType for the function itself
"""
basic = cls(arch, sp_delta=sp_delta, func_ty=func_ty)
basic.args = basic.arg_locs(fp_args, sizes)
basic.ret_val = basic.fp_return_val if ret_fp else basic.return_val
return basic
#
# Here are all the things a subclass needs to specify!
#
ARG_REGS: List[SimFunctionArgument] = None # A list of all the registers used for integral args, in order (names or offsets)
FP_ARG_REGS: List[SimFunctionArgument] = None # A list of all the registers used for floating point args, in order
STACKARG_SP_BUFF = 0 # The amount of stack space reserved between the saved return address
# (if applicable) and the arguments. Probably zero.
STACKARG_SP_DIFF = 0 # The amount of stack space reserved for the return address
CALLER_SAVED_REGS: List[SimFunctionArgument] = None # Caller-saved registers
RETURN_ADDR: SimFunctionArgument = None # The location where the return address is stored, as a SimFunctionArgument
RETURN_VAL: SimFunctionArgument = None # The location where the return value is stored, as a SimFunctionArgument
OVERFLOW_RETURN_VAL: Optional[SimFunctionArgument] = None # The second half of the location where a double-length return value is stored
FP_RETURN_VAL: Optional[SimFunctionArgument] = None # The location where floating-point argument return values are stored
ARCH = None # The archinfo.Arch class that this CC must be used for, if relevant
CALLEE_CLEANUP = False # Whether the callee has to deallocate the stack space for the arguments
STACK_ALIGNMENT = 1 # the alignment requirement of the stack pointer at function start BEFORE call
#
# Here are several things you MAY want to override to change your cc's convention
#
@property
def int_args(self):
"""
Iterate through all the possible arg positions that can only be used to store integer or pointer values
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
if self.ARG_REGS is None:
raise NotImplementedError()
for reg in self.ARG_REGS: # pylint: disable=not-an-iterable
yield SimRegArg(reg, self.arch.bytes)
@property
def both_args(self):
"""
Iterate through all the possible arg positions that can be used to store any kind of argument
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
turtle = self.STACKARG_SP_BUFF + self.STACKARG_SP_DIFF
while True:
yield SimStackArg(turtle, self.arch.bytes)
turtle += self.arch.bytes
@property
def fp_args(self):
"""
Iterate through all the possible arg positions that can only be used to store floating point values
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
if self.FP_ARG_REGS is None:
raise NotImplementedError()
for reg in self.FP_ARG_REGS: # pylint: disable=not-an-iterable
yield SimRegArg(reg, self.arch.registers[reg][1])
def is_fp_arg(self, arg):
"""
This should take a SimFunctionArgument instance and return whether or not that argument is a floating-point
argument.
Returns True for MUST be a floating point arg,
False for MUST NOT be a floating point arg,
None for when it can be either.
"""
if arg in self.int_args:
return False
if arg in self.fp_args or arg == self.FP_RETURN_VAL:
return True
return None
ArgSession = ArgSession # import this from global scope so SimCC subclasses can subclass it if they like
@property
def arg_session(self):
"""
Return an arg session.
A session provides the control interface necessary to describe how integral and floating-point arguments are
laid out into memory. The default behavior is that there are a finite list of int-only and fp-only argument
        slots, and an infinite number of generic slots, and when an argument of a given type is requested, the next
        available slot is used. If you need different behavior, subclass ArgSession.
"""
return self.ArgSession(self)
def stack_space(self, args):
"""
:param args: A list of SimFunctionArguments
:returns: The number of bytes that should be allocated on the stack to store all these args,
NOT INCLUDING the return address.
"""
out = self.STACKARG_SP_DIFF
for arg in args:
if isinstance(arg, SimStackArg):
out = max(out, arg.stack_offset + self.arch.bytes)
out += self.STACKARG_SP_BUFF
return out
@property
def return_val(self):
"""
The location the return value is stored.
"""
# pylint: disable=unsubscriptable-object
if self.ret_val is not None:
return self.ret_val
if self.func_ty is not None and \
self.func_ty.returnty is not None and \
self.OVERFLOW_RETURN_VAL is not None and \
self.func_ty.returnty.size is not None and \
self.func_ty.returnty.size > self.RETURN_VAL.size * self.arch.byte_width:
return SimComboArg([self.RETURN_VAL, self.OVERFLOW_RETURN_VAL])
return self.RETURN_VAL
@property
def fp_return_val(self):
return self.FP_RETURN_VAL if self.ret_val is None else self.ret_val
@property
def return_addr(self):
"""
The location the return address is stored.
"""
return self.RETURN_ADDR
#
# Useful functions!
#
@staticmethod
def is_fp_value(val):
return isinstance(val, (float, claripy.ast.FP)) or \
(isinstance(val, claripy.ast.Base) and val.op.startswith('fp')) or \
(isinstance(val, claripy.ast.Base) and val.op == 'Reverse' and val.args[0].op.startswith('fp'))
def arg_locs(self, is_fp=None, sizes=None):
"""
Pass this a list of whether each parameter is floating-point or not, and get back a list of
SimFunctionArguments. Optionally, pass a list of argument sizes (in bytes) as well.
If you've customized this CC, this will sanity-check the provided locations with the given list.
"""
session = self.arg_session
if self.func_ty is None and self.args is None:
# No function prototype is provided, no args is provided. `is_fp` must be provided.
if is_fp is None:
raise ValueError('"is_fp" must be provided when no function prototype is available.')
else:
# let's rely on the func_ty or self.args for the number of arguments and whether each argument is FP or not
if self.func_ty is not None:
args = self.func_ty.args
else:
args = self.args
is_fp = [ True if isinstance(arg, (SimTypeFloat, SimTypeDouble)) or self.is_fp_arg(arg) else False
for arg in args ]
if sizes is None: sizes = [self.arch.bytes] * len(is_fp)
return [session.next_arg(ifp, size=sz) for ifp, sz in zip(is_fp, sizes)]
def arg(self, state, index, stack_base=None):
"""
Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC.
"""
session = self.arg_session
if self.args is None or index >= len(self.args):
            # self.args may not be provided, or it may be incomplete (e.g. when variadic arguments are involved), so
            # fall back to the default calling convention to find the proper argument location
arg_loc = [session.next_arg(False, ignore_real_args=True) for _ in range(index + 1)][-1]
else:
arg_loc = self.args[index]
return arg_loc.get_value(state, stack_base=stack_base)
def get_args(self, state, is_fp=None, sizes=None, stack_base=None):
"""
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function.
"""
if sizes is None and self.func_ty is not None:
sizes = [arg.size for arg in self.func_ty.args]
if is_fp is None:
if self.args is None:
if self.func_ty is None:
raise ValueError("You must either customize this CC or pass a value to is_fp!")
else:
arg_locs = self.arg_locs([False]*len(self.func_ty.args))
else:
arg_locs = self.args
elif type(is_fp) is int:
if self.args is not None and len(self.args) != is_fp:
raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args)))
arg_locs = self.arg_locs([False]*is_fp, sizes)
else:
arg_locs = self.arg_locs(is_fp, sizes)
return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs]
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
"""
This function performs the actions of the caller getting ready to jump into a function.
:param state: The SimState to operate on
:param ret_addr: The address to return to when the called function finishes
        :param args: The list of arguments that the called function will see
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
that can't fit in a register will be automatically put in a PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses.
"""
# STEP 0: clerical work
if isinstance(self, SimCCSoot):
SootMixin.setup_callsite(state, args, ret_addr)
return
allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE')
#
# STEP 1: convert all values into serialized form
# this entails creating the vals list of simple values to store and also populating the allocator's
# understanding of what aux data needs to be stored
# This is also where we compute arg locations (arg_locs)
#
if self.func_ty is not None:
vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)]
else:
vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args]
arg_session = self.arg_session
arg_locs = [None]*len(args)
for i, (arg, val) in enumerate(zip(args, vals)):
if self.is_fp_value(arg) or \
(self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width)
continue
if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))):
vals[i] = allocator.dump(val, state)
elif val.length < state.arch.bits:
if self.arch.memory_endness == 'Iend_LE':
vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
else:
vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width)
#
# STEP 2: decide on memory storage locations
# implement the contract for stack_base/alloc_base/grow_like_stack
# after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,
# and the stack pointer should be updated
#
if stack_base is None:
if alloc_base is None:
alloc_size = allocator.size()
state.regs.sp -= alloc_size
alloc_base = state.regs.sp
grow_like_stack = False
state.regs.sp -= self.stack_space(arg_locs)
# handle alignment
alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT
state.regs.sp -= alignment
else:
state.regs.sp = stack_base
if alloc_base is None:
alloc_base = stack_base + self.stack_space(arg_locs)
grow_like_stack = False
if grow_like_stack:
alloc_base -= allocator.size()
if type(alloc_base) is int:
alloc_base = claripy.BVV(alloc_base, state.arch.bits)
for i, val in enumerate(vals):
vals[i] = allocator.translate(val, alloc_base)
#
# STEP 3: store everything!
#
allocator.apply(state, alloc_base)
for loc, val in zip(arg_locs, vals):
if val.length > loc.size * 8:
raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc)))
loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)
self.return_addr.set_value(state, ret_addr, stack_base=stack_base)
def teardown_callsite(self, state, return_val=None, arg_types=None, force_callee_cleanup=False):
"""
This function performs the actions of the callee as it's getting ready to return.
It returns the address to return to.
:param state: The state to mutate
:param return_val: The value to return
:param arg_types: The fp-ness of each of the args. Used to calculate sizes to clean up
:param force_callee_cleanup: If we should clean up the stack allocation for the arguments even if it's not
the callee's job to do so
TODO: support the stack_base parameter from setup_callsite...? Does that make sense in this context?
Maybe it could make sense by saying that you pass it in as something like the "saved base pointer" value?
"""
if return_val is not None:
self.set_return_val(state, return_val)
ret_addr = self.return_addr.get_value(state)
if state.arch.sp_offset is not None:
if force_callee_cleanup or self.CALLEE_CLEANUP:
if arg_types is not None:
session = self.arg_session
state.regs.sp += self.stack_space([session.next_arg(x) for x in arg_types])
elif self.args is not None:
state.regs.sp += self.stack_space(self.args)
else:
l.warning("Can't perform callee cleanup when I have no idea how many arguments there are! Assuming 0")
state.regs.sp += self.STACKARG_SP_DIFF
else:
state.regs.sp += self.STACKARG_SP_DIFF
return ret_addr
# pylint: disable=unused-argument
def get_return_val(self, state, is_fp=None, size=None, stack_base=None):
"""
Get the return value out of the given state
"""
ty = self.func_ty.returnty if self.func_ty is not None else None
if self.ret_val is not None:
loc = self.ret_val
elif is_fp is not None:
loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL
elif ty is not None:
loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL
else:
loc = self.RETURN_VAL
if loc is None:
raise NotImplementedError("This SimCC doesn't know how to get this value - should be implemented")
val = loc.get_value(state, stack_base=stack_base, size=None if ty is None else ty.size//state.arch.byte_width)
if self.is_fp_arg(loc) or self.is_fp_value(val) or isinstance(ty, SimTypeFloat):
val = val.raw_to_fp()
return val
def set_return_val(self, state, val, is_fp=None, size=None, stack_base=None):
"""
Set the return value into the given state
"""
ty = self.func_ty.returnty if self.func_ty is not None else None
try:
betterval = self._standardize_value(val, ty, state, None)
except AttributeError:
raise ValueError("Can't fit value %s into a return value" % repr(val))
if self.ret_val is not None:
loc = self.ret_val
elif is_fp is not None:
loc = self.fp_return_val if is_fp else self.return_val
elif ty is not None:
loc = self.fp_return_val if isinstance(ty, SimTypeFloat) else self.return_val
else:
loc = self.fp_return_val if self.is_fp_value(val) else self.return_val
if loc is None:
raise NotImplementedError("This SimCC doesn't know how to store this value - should be implemented")
loc.set_value(state, betterval, endness='Iend_BE', stack_base=stack_base)
#
# Helper functions
#
@staticmethod
def _standardize_value(arg, ty, state, alloc):
check = ty is not None
if check:
ty = ty.with_arch(state.arch)
if isinstance(arg, SimActionObject):
return SimCC._standardize_value(arg.ast, ty, state, alloc)
elif isinstance(arg, PointerWrapper):
if check and not isinstance(ty, SimTypePointer):
raise TypeError("Type mismatch: expected %s, got pointer-wrapper" % ty.name)
real_value = SimCC._standardize_value(arg.value, ty.pts_to if check else None, state, alloc)
return alloc(real_value, state)
elif isinstance(arg, (str, bytes)):
if type(arg) is str:
arg = arg.encode()
arg += b'\0'
ref = False
if check:
if isinstance(ty, SimTypePointer) and \
isinstance(ty.pts_to, SimTypeChar):
ref = True
elif isinstance(ty, SimTypeFixedSizeArray) and \
isinstance(ty.elem_type, SimTypeChar):
ref = False
if len(arg) > ty.length:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length, b'\0')
elif isinstance(ty, SimTypeArray) and \
isinstance(ty.elem_type, SimTypeChar):
ref = True
if ty.length is not None:
if len(arg) > ty.length:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length, b'\0')
elif isinstance(ty, SimTypeString):
ref = False
if len(arg) > ty.length + 1:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length + 1, b'\0')
else:
raise TypeError("Type mismatch: Expected %s, got char*" % ty.name)
val = SimCC._standardize_value(list(arg), SimTypeFixedSizeArray(SimTypeChar(), len(arg)), state, alloc)
if ref:
val = alloc(val, state)
return val
elif isinstance(arg, list):
ref = False
subty = None
if check:
if isinstance(ty, SimTypePointer):
ref = True
subty = ty.pts_to
elif isinstance(ty, SimTypeFixedSizeArray):
ref = False
subty = ty.elem_type
if len(arg) != ty.length:
raise TypeError("Array %s is the wrong length for %s" % (repr(arg), ty.name))
elif isinstance(ty, SimTypeArray):
ref = True
subty = ty.elem_type
if ty.length is not None:
if len(arg) != ty.length:
raise TypeError("Array %s is the wrong length for %s" % (repr(arg), ty.name))
else:
raise TypeError("Type mismatch: Expected %s, got char*" % ty.name)
else:
types = list(map(type, arg))
if types[1:] != types[:-1]:
raise TypeError("All elements of list must be of same type")
val = claripy.Concat(*[SimCC._standardize_value(sarg, subty, state, alloc) for sarg in arg])
if ref:
val = alloc(val, state)
return val
elif isinstance(arg, tuple):
if check:
if not isinstance(ty, SimStruct):
raise TypeError("Type mismatch: Expected %s, got tuple (i.e. struct)" % ty.name)
if len(arg) != len(ty.fields):
raise TypeError("Wrong number of fields in struct, expected %d got %d" % (len(ty.fields), len(arg)))
return claripy.Concat(*[SimCC._standardize_value(sarg, sty, state, alloc)
for sarg, sty
in zip(arg, ty.fields.values())])
else:
return claripy.Concat(*[SimCC._standardize_value(sarg, None, state, alloc) for sarg in arg])
elif isinstance(arg, int):
if check and isinstance(ty, SimTypeFloat):
return SimCC._standardize_value(float(arg), ty, state, alloc)
val = state.solver.BVV(arg, ty.size if check else state.arch.bits)
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed
return val
elif isinstance(arg, float):
sort = claripy.FSORT_FLOAT
if check:
if isinstance(ty, SimTypeDouble):
sort = claripy.FSORT_DOUBLE
elif isinstance(ty, SimTypeFloat):
pass
else:
raise TypeError("Type mismatch: expectd %s, got float" % ty.name)
else:
sort = claripy.FSORT_DOUBLE if state.arch.bits == 64 else claripy.FSORT_FLOAT
val = claripy.fpToIEEEBV(claripy.FPV(arg, sort))
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed # pylint: disable=no-member
return val
elif isinstance(arg, claripy.ast.FP):
val = claripy.fpToIEEEBV(arg)
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed # pylint: disable=no-member
return val
elif isinstance(arg, claripy.ast.Base):
endswap = False
bypass_sizecheck = False
if check:
if isinstance(ty, SimTypePointer):
# we have been passed an AST as a pointer argument. is this supposed to be the pointer or the
# content of the pointer?
# in the future (a breaking change) we should perhaps say it ALWAYS has to be the pointer itself
# but for now use the heuristic that if it's the right size for the pointer it is the pointer
endswap = True
elif isinstance(ty, SimTypeReg):
# definitely endswap.
# TODO: should we maybe pad the value to the type size here?
endswap = True
bypass_sizecheck = True
else:
# if we know nothing about the type assume it's supposed to be an int if it looks like an int
endswap = True
# yikes
if endswap and state.arch.memory_endness == 'Iend_LE' and (bypass_sizecheck or arg.length == state.arch.bits):
arg = arg.reversed
return arg
else:
raise TypeError("I don't know how to serialize %s." % repr(arg))
def __repr__(self):
return "<" + self.__class__.__name__ + '>'
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
def _compare_args(args0, args1):
if args0 is None and args1 is None:
return True
if args0 is None or args1 is None:
return False
return set(args0) == set(args1)
return _compare_args(self.args, other.args) and \
self.ret_val == other.ret_val and \
self.sp_delta == other.sp_delta
@classmethod
def _match(cls, arch, args, sp_delta):
if cls.ARCH is not None and not isinstance(arch, cls.ARCH):
return False
if sp_delta != cls.STACKARG_SP_DIFF:
return False
sample_inst = cls(arch)
all_fp_args = list(sample_inst.fp_args)
all_int_args = list(sample_inst.int_args)
both_iter = sample_inst.both_args
some_both_args = [next(both_iter) for _ in range(len(args))]
for arg in args:
if arg not in all_fp_args and arg not in all_int_args and arg not in some_both_args:
return False
return True
@staticmethod
def find_cc(arch, args, sp_delta):
"""
Pinpoint the best-fit calling convention and return the corresponding SimCC instance, or None if no fit is
found.
:param Arch arch: An ArchX instance. Can be obtained from archinfo.
:param list args: A list of arguments.
:param int sp_delta: The change of stack pointer before and after the call is made.
:return: A calling convention instance, or None if none of the SimCC subclasses seems to fit the
arguments provided.
:rtype: SimCC or None
"""
if arch.name not in CC:
return None
possible_cc_classes = CC[arch.name]
for cc_cls in possible_cc_classes:
if cc_cls._match(arch, args, sp_delta):
return cc_cls(arch, args=args, sp_delta=sp_delta)
return None
def get_arg_info(self, state, is_fp=None, sizes=None):
"""
        This is just a simple wrapper that collects the information from various locations.
        is_fp and sizes are passed to self.arg_locs and self.get_args.
:param angr.SimState state: The state to evaluate and extract the values from
:return: A list of tuples, where the nth tuple is (type, name, location, value) of the nth argument
"""
argument_locations = self.arg_locs(is_fp=is_fp, sizes=sizes)
argument_values = self.get_args(state, is_fp=is_fp, sizes=sizes)
if self.func_ty:
argument_types = self.func_ty.args
argument_names = self.func_ty.arg_names if self.func_ty.arg_names else ['unknown'] * len(self.func_ty.args)
else:
argument_types = [SimTypeTop] * len(argument_locations)
argument_names = ['unknown'] * len(argument_locations)
return list(zip(argument_types, argument_names, argument_locations, argument_values))
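# Illustrative sketch (not part of the original module): the read side of the
# SimCC API above. It assumes `cc` is an instance of one of the concrete
# subclasses defined below, matching `state.arch`, and that `state` sits at
# the entry point of the called function (just after the call instruction).
def _example_read_args_and_return(cc, state):
    first_arg = cc.arg(state, 0)             # bitvector for argument 0
    both_args = cc.get_args(state, is_fp=2)  # shorthand: two integral arguments
    cc.set_return_val(state, 0)              # make the callee "return 0"
    return first_arg, both_args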
class SimLyingRegArg(SimRegArg):
"""
A register that LIES about the types it holds
"""
def __init__(self, name):
# TODO: This looks byte-related. Make sure to use Arch.byte_width
super(SimLyingRegArg, self).__init__(name, 8)
def get_value(self, state, size=None, endness=None, **kwargs): # pylint:disable=arguments-differ
#val = super(SimLyingRegArg, self).get_value(state, **kwargs)
val = getattr(state.regs, self.reg_name)
if endness and endness != state.arch.register_endness:
val = val.reversed
if size == 4:
val = claripy.fpToFP(claripy.fp.RM.RM_NearestTiesEven, val.raw_to_fp(), claripy.FSORT_FLOAT)
return val
def set_value(self, state, val, size=None, endness=None, **kwargs): # pylint:disable=arguments-differ
if size == 4:
            if state.arch.register_endness == 'Iend_LE' and endness == 'Iend_BE':
# pylint: disable=no-member
val = claripy.fpToFP(claripy.fp.RM.RM_NearestTiesEven, val.reversed.raw_to_fp(), claripy.FSORT_DOUBLE).reversed
else:
val = claripy.fpToFP(claripy.fp.RM.RM_NearestTiesEven, val.raw_to_fp(), claripy.FSORT_DOUBLE)
if endness and endness != state.arch.register_endness:
val = val.reversed
setattr(state.regs, self.reg_name, val)
#super(SimLyingRegArg, self).set_value(state, val, endness=endness, **kwargs)
class SimCCCdecl(SimCC):
ARG_REGS = [] # All arguments are passed in stack
FP_ARG_REGS = []
STACKARG_SP_DIFF = 4 # Return address is pushed on to stack by call
CALLER_SAVED_REGS = ['eax', 'ecx', 'edx']
RETURN_VAL = SimRegArg('eax', 4)
OVERFLOW_RETURN_VAL = SimRegArg('edx', 4)
FP_RETURN_VAL = SimLyingRegArg('st0')
RETURN_ADDR = SimStackArg(0, 4)
ARCH = archinfo.ArchX86
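# Illustrative sketch (not part of the original module): with cdecl, every
# argument lives on the stack, so laying out two integer arguments yields two
# SimStackArg slots just above the saved return address.
def _example_cdecl_layout():
    cc = SimCCCdecl(archinfo.ArchX86())
    locs = cc.arg_locs(is_fp=[False, False])   # -> [[0x4], [0x8]]
    return locs, cc.stack_space(locs)          # 12 bytes incl. the return address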
class SimCCStdcall(SimCCCdecl):
CALLEE_CLEANUP = True
class SimCCMicrosoftAMD64(SimCC):
ARG_REGS = ['rcx', 'rdx', 'r8', 'r9']
FP_ARG_REGS = ['xmm0', 'xmm1', 'xmm2', 'xmm3']
STACKARG_SP_DIFF = 8 # Return address is pushed on to stack by call
STACKARG_SP_BUFF = 32 # 32 bytes of shadow stack space
RETURN_VAL = SimRegArg('rax', 8)
OVERFLOW_RETURN_VAL = SimRegArg('rdx', 8)
FP_RETURN_VAL = SimRegArg('xmm0', 32)
RETURN_ADDR = SimStackArg(0, 8)
ARCH = archinfo.ArchAMD64
class SimCCX86LinuxSyscall(SimCC):
ARG_REGS = ['ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp']
FP_ARG_REGS = []
RETURN_VAL = SimRegArg('eax', 4)
RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
ARCH = archinfo.ArchX86
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.eax
class SimCCX86WindowsSyscall(SimCC):
# TODO: Make sure the information is correct
ARG_REGS = [ ]
FP_ARG_REGS = [ ]
RETURN_VAL = SimRegArg('eax', 4)
RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
ARCH = archinfo.ArchX86
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.eax
class SimCCSystemVAMD64(SimCC):
ARG_REGS = ['rdi', 'rsi', 'rdx', 'rcx', 'r8', 'r9']
FP_ARG_REGS = ['xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', 'xmm6', 'xmm7']
STACKARG_SP_DIFF = 8 # Return address is pushed on to stack by call
CALLER_SAVED_REGS = [ 'rdi', 'rsi', 'rdx', 'rcx', 'r8', 'r9', 'r10', 'r11', 'rax', ]
RETURN_ADDR = SimStackArg(0, 8)
RETURN_VAL = SimRegArg('rax', 8)
OVERFLOW_RETURN_VAL = SimRegArg('rdx', 8)
FP_RETURN_VAL = SimRegArg('xmm0', 32)
ARCH = archinfo.ArchAMD64
STACK_ALIGNMENT = 16
def __init__(self, arch, args=None, ret_val=None, sp_delta=None, func_ty=None):
super(SimCCSystemVAMD64, self).__init__(arch, args, ret_val, sp_delta, func_ty)
# Remove the ret address on stack
if self.args is not None:
self.args = [ i for i in self.args if not (isinstance(i, SimStackArg) and i.stack_offset == 0x0) ]
@classmethod
def _match(cls, arch, args, sp_delta):
if cls.ARCH is not None and not isinstance(arch, cls.ARCH):
return False
#if sp_delta != cls.STACKARG_SP_DIFF:
# return False
sample_inst = cls(arch)
all_fp_args = list(sample_inst.fp_args)
all_int_args = list(sample_inst.int_args)
both_iter = sample_inst.both_args
some_both_args = [next(both_iter) for _ in range(len(args))]
for arg in args:
if arg not in all_fp_args and arg not in all_int_args and arg not in some_both_args:
if isinstance(arg, SimStackArg) and arg.stack_offset == 0:
continue # ignore return address?
return False
return True
class SimCCAMD64LinuxSyscall(SimCC):
ARG_REGS = ['rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9']
RETURN_VAL = SimRegArg('rax', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchAMD64
@staticmethod
def _match(arch, args, sp_delta): # pylint: disable=unused-argument
# doesn't appear anywhere but syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.rax
class SimCCAMD64WindowsSyscall(SimCC):
# TODO: Make sure the information is correct
ARG_REGS = [ ]
FP_ARG_REGS = [ ]
RETURN_VAL = SimRegArg('rax', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchAMD64
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.rax
class SimCCARM(SimCC):
ARG_REGS = [ 'r0', 'r1', 'r2', 'r3' ]
FP_ARG_REGS = [] # TODO: ???
CALLER_SAVED_REGS = [ 'r0', 'r1', 'r2', 'r3' ]
RETURN_ADDR = SimRegArg('lr', 4)
RETURN_VAL = SimRegArg('r0', 4)
ARCH = archinfo.ArchARM
class SimCCARMLinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = [ 'r0', 'r1', 'r2', 'r3' ]
FP_ARG_REGS = [] # TODO: ???
RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
RETURN_VAL = SimRegArg('r0', 4)
ARCH = archinfo.ArchARM
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.r7
class SimCCAArch64(SimCC):
ARG_REGS = [ 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7' ]
FP_ARG_REGS = [] # TODO: ???
RETURN_ADDR = SimRegArg('lr', 8)
RETURN_VAL = SimRegArg('x0', 8)
ARCH = archinfo.ArchAArch64
class SimCCAArch64LinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = [ 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7' ]
FP_ARG_REGS = [] # TODO: ???
RETURN_VAL = SimRegArg('x0', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchAArch64
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.x8
class SimCCO32(SimCC):
ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
FP_ARG_REGS = [] # TODO: ???
STACKARG_SP_BUFF = 16
CALLER_SAVED_REGS = [] # TODO: ???
RETURN_ADDR = SimRegArg('lr', 4)
RETURN_VAL = SimRegArg('v0', 4)
ARCH = archinfo.ArchMIPS32
class SimCCO32LinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
FP_ARG_REGS = [] # TODO: ???
RETURN_VAL = SimRegArg('v0', 4)
RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
ARCH = archinfo.ArchMIPS32
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.v0
class SimCCO64(SimCC): # TODO: add n32 and n64
ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
FP_ARG_REGS = [] # TODO: ???
STACKARG_SP_BUFF = 32
RETURN_ADDR = SimRegArg('lr', 8)
RETURN_VAL = SimRegArg('v0', 8)
ARCH = archinfo.ArchMIPS64
class SimCCO64LinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
FP_ARG_REGS = [] # TODO: ???
RETURN_VAL = SimRegArg('v0', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchMIPS64
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.v0
class SimCCPowerPC(SimCC):
ARG_REGS = [ 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10' ]
FP_ARG_REGS = [] # TODO: ???
STACKARG_SP_BUFF = 8
RETURN_ADDR = SimRegArg('lr', 4)
RETURN_VAL = SimRegArg('r3', 4)
ARCH = archinfo.ArchPPC32
class SimCCPowerPCLinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = ['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10']
FP_ARG_REGS = [ ]
RETURN_VAL = SimRegArg('r3', 4)
RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
ARCH = archinfo.ArchPPC32
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.r0
class SimCCPowerPC64(SimCC):
ARG_REGS = [ 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10' ]
FP_ARG_REGS = [] # TODO: ???
STACKARG_SP_BUFF = 0x70
RETURN_ADDR = SimRegArg('lr', 8)
RETURN_VAL = SimRegArg('r3', 8)
ARCH = archinfo.ArchPPC64
class SimCCPowerPC64LinuxSyscall(SimCC):
# TODO: Make sure all the information is correct
ARG_REGS = [ ]
FP_ARG_REGS = [ ]
RETURN_VAL = SimRegArg('r3', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchPPC64
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.r0
class SimCCSoot(SimCC):
ARCH = archinfo.ArchSoot
ARG_REGS = []
class SimCCUnknown(SimCC):
"""
Represent an unknown calling convention.
"""
@staticmethod
def _match(arch, args, sp_delta): # pylint: disable=unused-argument
# It always returns True
return True
def __repr__(self):
return "<SimCCUnknown - %s %s sp_delta=%d>" % (self.arch.name, self.args, self.sp_delta)
class SimCCS390X(SimCC):
ARG_REGS = ['r2', 'r3', 'r4', 'r5', 'r6']
FP_ARG_REGS = ['f0', 'f2', 'f4', 'f6']
STACKARG_SP_BUFF = 0xa0
RETURN_ADDR = SimRegArg('r14', 8)
RETURN_VAL = SimRegArg('r2', 8)
ARCH = archinfo.ArchS390X
class SimCCS390XLinuxSyscall(SimCC):
ARG_REGS = ['r2', 'r3', 'r4', 'r5', 'r6', 'r7']
FP_ARG_REGS = []
RETURN_VAL = SimRegArg('r2', 8)
RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
ARCH = archinfo.ArchS390X
@classmethod
def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
# never appears anywhere except syscalls
return False
@staticmethod
def syscall_num(state):
return state.regs.r1
CC = {
'AMD64': [
SimCCSystemVAMD64,
],
'X86': [
SimCCCdecl,
],
'ARMEL': [
SimCCARM,
],
'ARMHF': [
SimCCARM,
],
'ARMCortexM': [
SimCCARM,
],
'MIPS32': [
SimCCO32,
],
'MIPS64': [
SimCCO64,
],
'PPC32': [
SimCCPowerPC,
],
'PPC64': [
SimCCPowerPC64,
],
'AARCH64': [
SimCCAArch64,
],
'S390X': [
SimCCS390X,
],
}
DEFAULT_CC = {
'AMD64': SimCCSystemVAMD64,
'X86': SimCCCdecl,
'ARMEL': SimCCARM,
'ARMHF': SimCCARM,
'ARMCortexM': SimCCARM,
'MIPS32': SimCCO32,
'MIPS64': SimCCO64,
'PPC32': SimCCPowerPC,
'PPC64': SimCCPowerPC64,
'AARCH64': SimCCAArch64,
'Soot': SimCCSoot,
'AVR': SimCCUnknown,
'MSP': SimCCUnknown,
'S390X': SimCCS390X,
}
def register_default_cc(arch, cc):
DEFAULT_CC[arch] = cc
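# Illustrative sketch (not part of the original module): picking the default
# CC for an architecture and letting a C prototype drive the argument layout.
# The prototype string (including its function name) is purely illustrative
# and is parsed by the same machinery SimCC.__init__ uses.
def _example_default_cc_with_prototype():
    arch = archinfo.ArchAMD64()
    cc = DEFAULT_CC[arch.name](arch, func_ty="int example(int, char*)")
    locs = cc.arg_locs()                      # -> [<rdi>, <rsi>] under SysV AMD64
    # Alternatively, describe the argument kinds directly:
    cc_fp = SimCCSystemVAMD64.from_arg_kinds(arch, fp_args=[False, True])
    return locs, cc_fp.args                   # the second argument lands in <xmm0>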
SYSCALL_CC = {
'X86': {
'default': SimCCX86LinuxSyscall,
'Linux': SimCCX86LinuxSyscall,
'Windows': SimCCX86WindowsSyscall,
'CGC': SimCCX86LinuxSyscall,
},
'AMD64': {
'default': SimCCAMD64LinuxSyscall,
'Linux': SimCCAMD64LinuxSyscall,
'Windows': SimCCAMD64WindowsSyscall,
},
'ARMEL': {
'default': SimCCARMLinuxSyscall,
'Linux': SimCCARMLinuxSyscall,
},
'ARMCortexM': {
# FIXME: TODO: This is wrong. Fill in with a real CC when we support CM syscalls
'default': SimCCARMLinuxSyscall,
},
'ARMHF': {
'default': SimCCARMLinuxSyscall,
'Linux': SimCCARMLinuxSyscall,
},
'AARCH64': {
'default': SimCCAArch64LinuxSyscall,
'Linux': SimCCAArch64LinuxSyscall,
},
'MIPS32': {
'default': SimCCO32LinuxSyscall,
'Linux': SimCCO32LinuxSyscall,
},
'MIPS64': {
'default': SimCCO64LinuxSyscall,
'Linux': SimCCO64LinuxSyscall,
},
'PPC32': {
'default': SimCCPowerPCLinuxSyscall,
'Linux': SimCCPowerPCLinuxSyscall,
},
'PPC64': {
'default': SimCCPowerPC64LinuxSyscall,
'Linux': SimCCPowerPC64LinuxSyscall,
},
'S390X': {
'default': SimCCS390XLinuxSyscall,
'Linux': SimCCS390XLinuxSyscall,
},
}
def register_syscall_cc(arch, os, cc):
if arch not in SYSCALL_CC:
SYSCALL_CC[arch] = {}
SYSCALL_CC[arch][os] = cc
SyscallCC = SYSCALL_CC
DefaultCC = DEFAULT_CC
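# Illustrative sketch (not part of the original module): the caller-side flow
# described in SimCC.setup_callsite. It assumes `state` is an angr SimState
# whose architecture has an entry in DEFAULT_CC; `callee_addr` and `ret_addr`
# are placeholder addresses, and the bytes argument is dumped into memory and
# passed by pointer automatically.
def _example_setup_callsite(state, callee_addr, ret_addr):
    cc = DEFAULT_CC[state.arch.name](state.arch)
    cc.setup_callsite(state, ret_addr, [0x100, b"hello"])
    state.regs.ip = callee_addr    # ready to step into the callee
    return state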
| 40.024708
| 156
| 0.617138
|
e07725830b72468e881c6a291346bef2ccc8e4a3
| 39
|
py
|
Python
|
tests/__init__.py
|
BitKnitting/FHmonitor
|
317bf9bacadba48c437f5478cbd8227bcc644456
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
BitKnitting/FHmonitor
|
317bf9bacadba48c437f5478cbd8227bcc644456
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
BitKnitting/FHmonitor
|
317bf9bacadba48c437f5478cbd8227bcc644456
|
[
"MIT"
] | null | null | null |
"""Unit test package for FHmonitor."""
| 19.5
| 38
| 0.692308
|
289d73273919563d91698b5d38a093f11592a5a4
| 17,586
|
py
|
Python
|
keras_transformer/attention.py
|
ExpectationMax/keras-transformer
|
80094d869148bdd857036e7aee9175b82f48221b
|
[
"MIT"
] | 3
|
2020-05-23T14:19:34.000Z
|
2021-04-27T01:20:07.000Z
|
keras_transformer/attention.py
|
ExpectationMax/keras-transformer
|
80094d869148bdd857036e7aee9175b82f48221b
|
[
"MIT"
] | null | null | null |
keras_transformer/attention.py
|
ExpectationMax/keras-transformer
|
80094d869148bdd857036e7aee9175b82f48221b
|
[
"MIT"
] | 1
|
2020-12-08T18:17:46.000Z
|
2020-12-08T18:17:46.000Z
|
import numpy as np
# noinspection PyPep8Naming
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.utils import get_custom_objects
class _BaseMultiHeadAttention(Layer):
"""
Base class for two types of Multi-head attention layers:
Self-attention and its more general form used in decoders (the one which
takes values and keys from the encoder).
"""
def __init__(self, num_heads: int, use_masking: bool,
dropout: float = 0.0,
compression_window_size: int = None,
**kwargs):
"""
:param num_heads: number of attention heads
:param use_masking: when True, forbids the attention to see the further
elements in the sequence (particularly important in language
modelling).
:param dropout: dropout that should be applied to the attention
(after the softmax).
:param compression_window_size: an integer value >= 1 controlling
how much we should compress the attention. For more details,
read about memory-compressed self-attention in
"Generating Wikipedia by summarizing long sequences"
(https://arxiv.org/pdf/1801.10198.pdf).
:param kwargs: any extra arguments typical for a Keras layer,
such as name, etc.
"""
self.num_heads = num_heads
self.use_masking = use_masking
self.dropout = dropout
if (compression_window_size is not None
and compression_window_size <= 0):
            raise ValueError(
f"Too small compression window ({compression_window_size})")
self.compression_window_size = compression_window_size
super().__init__(**kwargs)
def get_config(self):
config = super().get_config()
config['num_heads'] = self.num_heads
config['use_masking'] = self.use_masking
config['dropout'] = self.dropout
config['compression_window_size'] = self.compression_window_size
return config
# noinspection PyAttributeOutsideInit
def build_output_params(self, d_model):
self.output_weights = self.add_weight(
name='output_weights',
shape=(d_model, d_model),
initializer='glorot_uniform',
trainable=True)
if self.compression_window_size is not None:
self.k_conv_kernel = self.add_weight(
name='k_conv_kernel',
shape=(self.compression_window_size,
d_model // self.num_heads,
d_model // self.num_heads),
initializer='glorot_uniform',
trainable=True)
self.k_conv_bias = self.add_weight(
name='k_conv_bias',
shape=(d_model // self.num_heads,),
initializer='zeros',
trainable=True)
self.v_conv_kernel = self.add_weight(
name='v_conv_kernel',
shape=(self.compression_window_size,
d_model // self.num_heads,
d_model // self.num_heads),
initializer='glorot_uniform',
trainable=True)
self.v_conv_bias = self.add_weight(
name='v_conv_bias',
shape=(d_model // self.num_heads,),
initializer='zeros',
trainable=True)
def validate_model_dimensionality(self, d_model: int):
if d_model % self.num_heads != 0:
raise ValueError(
f'The size of the last dimension of the input '
                f'({d_model}) must be evenly divisible by the number '
                f'of the attention heads {self.num_heads}')
def attention(self, pre_q, pre_v, pre_k, out_seq_len: int, d_model: int,
mask=None, training=None):
"""
Calculates the output of the attention once the affine transformations
of the inputs are done. Here's the shapes of the arguments:
:param pre_q: (batch_size, q_seq_len, num_heads, d_model // num_heads)
:param pre_v: (batch_size, v_seq_len, num_heads, d_model // num_heads)
:param pre_k: (batch_size, k_seq_len, num_heads, d_model // num_heads)
:param out_seq_len: the length of the output sequence
:param d_model: dimensionality of the model (by the paper)
:param training: Passed by Keras. Should not be defined manually.
Optional scalar tensor indicating if we're in training
or inference phase.
"""
# shaping Q and V into (batch_size, num_heads, seq_len, d_model//heads)
q = K.permute_dimensions(pre_q, [0, 2, 1, 3])
v = K.permute_dimensions(pre_v, [0, 2, 1, 3])
if self.compression_window_size is None:
k_transposed = K.permute_dimensions(pre_k, [0, 2, 3, 1])
else:
# Memory-compressed attention described in paper
# "Generating Wikipedia by Summarizing Long Sequences"
# (https://arxiv.org/pdf/1801.10198.pdf)
# It compresses keys and values using 1D-convolution which reduces
# the size of Q * K_transposed from roughly seq_len^2
# to convoluted_seq_len^2. If we use strided convolution with
# window size = 3 and stride = 3, memory requirements of such
# memory-compressed attention will be 9 times smaller than
# that of the original version.
if self.use_masking:
raise NotImplementedError(
"Masked memory-compressed attention has not "
"been implemented yet")
k = K.permute_dimensions(pre_k, [0, 2, 1, 3])
k, v = [
K.reshape(
# Step 3: Return the result to its original dimensions
# (batch_size, num_heads, seq_len, d_model//heads)
K.bias_add(
# Step 3: ... and add bias
K.conv1d(
# Step 2: we "compress" K and V using strided conv
K.reshape(
# Step 1: we reshape K and V to
# (batch + num_heads, seq_len, d_model//heads)
item,
(-1,
K.int_shape(item)[-2],
d_model // self.num_heads)),
kernel,
strides=self.compression_window_size,
padding='valid', data_format='channels_last'),
bias,
data_format='channels_last'),
# new shape
K.concatenate([
K.shape(item)[:2],
[-1, d_model // self.num_heads]]))
for item, kernel, bias in (
(k, self.k_conv_kernel, self.k_conv_bias),
(v, self.v_conv_kernel, self.v_conv_bias))]
k_transposed = K.permute_dimensions(k, [0, 1, 3, 2])
# shaping K into (batch_size, num_heads, d_model//heads, seq_len)
# for further matrix multiplication
sqrt_d = K.constant(np.sqrt(d_model // self.num_heads),
dtype=K.floatx())
q_shape = tf.shape(q)
k_t_shape = tf.shape(k_transposed)
v_shape = tf.shape(v)
# before performing batch_dot all tensors are being converted to 3D
# shape (batch_size * num_heads, rows, cols) to make sure batch_dot
# performs identically on all backends
attention_heads = K.reshape(
K.batch_dot(
self.apply_dropout_if_needed(
K.softmax(
self.mask_attention_if_needed(
K.batch_dot(
K.reshape(
q,
tf.stack((-1, q_shape[-2], q_shape[-1]))
),
K.reshape(
k_transposed,
tf.stack(
(-1, k_t_shape[-2], k_t_shape[-1]))
)
) / sqrt_d,
mask=mask
)),
training=training),
K.reshape(
v,
tf.stack((-1, v_shape[-2], v_shape[-1]))
)),
tf.stack((-1, self.num_heads, q_shape[-2], v_shape[-1])))
attention_heads_merged = K.reshape(
K.permute_dimensions(attention_heads, [0, 2, 1, 3]),
(-1, d_model))
if out_seq_len is None:
output_shape = tf.stack([-1, tf.shape(pre_k)[1], d_model])
else:
output_shape = (-1, out_seq_len, d_model)
attention_out = K.reshape(
K.dot(attention_heads_merged, self.output_weights),
output_shape)
return attention_out
def apply_dropout_if_needed(self, attention_softmax, training=None):
if 0.0 < self.dropout < 1.0:
def dropped_softmax():
return K.dropout(attention_softmax, self.dropout)
return K.in_train_phase(dropped_softmax, attention_softmax,
training=training)
return attention_softmax
def mask_attention_if_needed(self, dot_product, mask=None):
"""
Makes sure that (when enabled) each position
(of a decoder's self-attention) cannot attend to subsequent positions.
This is achieved by assigning -inf (or some large negative number)
to all invalid connections. Later softmax will turn them into zeros.
We need this to guarantee that decoder's predictions are based
on what has happened before the position, not after.
The method does nothing if masking is turned off.
:param dot_product: scaled dot-product of Q and K after reshaping them
to 3D tensors (batch * num_heads, rows, cols)
:param mask: Sequence mask to mask padding of shape (batch, max_seqlen)
should contain true for values which should be retained (similar to
output of tf.sequence_mask)
"""
        input_shape = tf.shape(dot_product)
        if mask is None:
            # No padding mask provided (e.g. by the two-input MultiHeadAttention
            # below), so treat every position as valid.
            attention_mask = tf.ones(
                tf.stack([1, 1, input_shape[-2], input_shape[-1]]), dtype=tf.float32)
        else:
            # Compute a block matrix from the outer product of the padding mask
            # so attention cannot flow to or from padded positions.
            expanded_mask = tf.cast(tf.expand_dims(mask, -1), tf.float32)
            attention_mask = tf.expand_dims(
                tf.matmul(expanded_mask, expanded_mask, transpose_b=True), 1)
# In order to mask all heads of an instance, reshape first so we can
# use broadcasting, then reshape back
shape_with_attn_heads = tf.stack(
[-1, self.num_heads, input_shape[-2], input_shape[-1]])
input_reshaped = tf.reshape(dot_product, shape_with_attn_heads)
close_to_negative_inf = -1e9
if self.use_masking:
# If use_masking=True additionally mask future values from
# attention.
last_dims = tf.shape(dot_product)[-2:]
# to ensure proper broadcasting
low_triangle_ones = tf.linalg.band_part(
tf.ones(last_dims, dtype=tf.float32), -1, 0)
low_triangle_ones = tf.expand_dims(low_triangle_ones, 0)
attention_mask = attention_mask * low_triangle_ones
# Use elementary operations as tf.where cannot broadcast
result = (
attention_mask * input_reshaped
+ (attention_mask - 1) * close_to_negative_inf
)
return tf.reshape(result, input_shape)
class MultiHeadAttention(_BaseMultiHeadAttention):
"""
Multi-head attention which can use two inputs:
First: from the encoder - it's used to project the keys and the values
Second: from the decoder - used to project the queries.
"""
# noinspection PyAttributeOutsideInit
def build(self, input_shape):
if not (isinstance(input_shape, list) and len(input_shape) == 2):
raise ValueError(
'You must call this layer passing a list of two tensors'
'(for keys/values and queries)')
values_dim, query_dim = input_shape[0][-1], input_shape[1][-1]
if query_dim != values_dim:
raise ValueError(
f'Both keys/value and query inputs must be '
f'of the same dimensionality, instead of '
f'{values_dim} and {query_dim}.')
d_model = query_dim
self.validate_model_dimensionality(d_model)
# These weights are concatenated matrices W_k and W_v which
# are, in turn, concatenated W matrices of keys, and values
# for each of the heads. So, essentially it's a concatenation of
# W_k1, W_k2,..., W_kh, W_v1, W_v2,..., W_vh
# for all h heads.
self.kv_weights = self.add_weight(
name='kv_weights', shape=(d_model, d_model * 2),
initializer='glorot_uniform', trainable=True)
self.q_weights = self.add_weight(
name='q_weights', shape=(d_model, d_model),
initializer='glorot_uniform', trainable=True)
self.build_output_params(d_model)
return super().build(input_shape)
def call(self, inputs, **kwargs):
if not (isinstance(inputs, list) and len(inputs) == 2):
raise ValueError(
'You can call this layer only with a list of two tensors '
'(for keys/values and queries)')
key_values_input, query_input = inputs
_, value_seq_len, d_model = K.int_shape(key_values_input)
query_seq_len = K.int_shape(inputs[1])[-2]
# The first thing we need to do is to perform affine transformations
# of the inputs to get the Queries, the Keys and the Values.
kv = K.dot(K.reshape(key_values_input, [-1, d_model]), self.kv_weights)
# splitting the keys, the values and the queries before further
# processing
pre_k, pre_v = [
K.reshape(
# K.slice(kv, (0, i * d_model), (-1, d_model)),
kv[:, i * d_model: (i + 1) * d_model],
(-1, value_seq_len,
self.num_heads, d_model // self.num_heads))
for i in range(2)]
pre_q = K.reshape(
K.dot(K.reshape(query_input, [-1, d_model]), self.q_weights),
(-1, query_seq_len, self.num_heads, d_model // self.num_heads))
return self.attention(pre_q, pre_v, pre_k, query_seq_len, d_model,
training=kwargs.get('training'))
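# Illustrative sketch (not part of the original module): wiring the two-input
# attention into a Keras functional model, e.g. a decoder attending over
# encoder outputs. Sequence length and model size are arbitrary; no padding
# mask is forwarded by this layer, so every position is treated as valid.
def _example_cross_attention_model(seq_len=16, d_model=64):
    encoder_out = tf.keras.Input(shape=(seq_len, d_model))
    decoder_state = tf.keras.Input(shape=(seq_len, d_model))
    attended = MultiHeadAttention(num_heads=8, use_masking=False)(
        [encoder_out, decoder_state])
    return tf.keras.Model([encoder_out, decoder_state], attended)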
class MultiHeadSelfAttention(_BaseMultiHeadAttention):
"""
Multi-head self-attention for both encoders and decoders.
    Uses only one input and has an implementation which is better suited for
    this use case than the more general MultiHeadAttention class.
"""
# noinspection PyAttributeOutsideInit
def build(self, input_shape):
if not isinstance(input_shape, tuple):
input_shape = tuple(input_shape)
# raise ValueError('Invalid input')
d_model = input_shape[-1]
self.validate_model_dimensionality(d_model)
# These weights are concatenated matrices W_q, W_k and W_v which
# are, in turn, concatenated W matrices of keys, queries and values
# for each of the heads. So, essentially it's a concatenation of
# W_q1, W_q2,..., W_qh, W_k1, W_k2,..., W_kh, W_v1, W_v2,..., W_vh
# for all h heads.
self.qkv_weights = self.add_weight(
name='qkv_weights',
shape=(d_model, d_model * 3), # * 3 for q, k and v
initializer='glorot_uniform',
trainable=True)
self.build_output_params(d_model)
return super().build(input_shape)
def call(self, inputs, mask=None, **kwargs):
if not tf.is_tensor(inputs):
raise ValueError(
'The layer can be called only with one tensor as an argument')
d_model = K.int_shape(inputs)[-1]
input_shape = tf.shape(inputs)
# The first thing we need to do is to perform affine transformations
# of the inputs to get the Queries, the Keys and the Values.
qkv = K.dot(K.reshape(inputs, [-1, d_model]), self.qkv_weights)
qkv_shape = tf.stack(
[-1, input_shape[1], self.num_heads, d_model // self.num_heads])
# splitting the keys, the values and the queries before further
# processing
pre_q, pre_k, pre_v = [
tf.reshape(el, qkv_shape) for el in tf.split(qkv, 3, axis=-1)]
# pre_q, pre_k, pre_v = [
# K.reshape(
# # K.slice(qkv, (0, i * d_model), (-1, d_model)),
# qkv[:, i * d_model:(i + 1) * d_model],
# (-1, seq_len, self.num_heads, d_model // self.num_heads))
# for i in range(3)]
attention_out = self.attention(pre_q, pre_v, pre_k, None, d_model,
training=kwargs.get('training'),
mask=mask)
return attention_out
def compute_output_shape(self, input_shape):
return input_shape
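# Illustrative sketch (not part of the original module): memory-compressed
# self-attention as described in the base class docstring. Keys and values
# are compressed with a strided 1D convolution, so the (arbitrary) sequence
# length chosen here is divisible by the window size; no padding mask is
# passed, and use_masking must stay False for the compressed variant.
def _example_compressed_self_attention(seq_len=32, d_model=64):
    tokens = tf.keras.Input(shape=(seq_len, d_model))
    out = MultiHeadSelfAttention(
        num_heads=4, use_masking=False, compression_window_size=4)(tokens)
    return tf.keras.Model(tokens, out)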
get_custom_objects().update({
'MultiHeadSelfAttention': MultiHeadSelfAttention,
'MultiHeadAttention': MultiHeadAttention,
})
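# Illustrative sketch (not part of the original module): masked self-attention
# over a padded batch. The boolean mask marks real (non-padding) positions,
# matching the shape documented in mask_attention_if_needed, and
# use_masking=True additionally hides future positions from each query.
def _example_masked_self_attention(seq_len=32, d_model=128):
    tokens = tf.keras.Input(shape=(seq_len, d_model))
    padding_mask = tf.keras.Input(shape=(seq_len,), dtype='bool')
    attended = MultiHeadSelfAttention(num_heads=4, use_masking=True, dropout=0.1)(
        tokens, mask=padding_mask)
    return tf.keras.Model([tokens, padding_mask], attended)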
| 46.278947
| 79
| 0.576254
|