import logging
import os
from processFolder import loadTrajectories
from processXML import loadTrajectory
from processXML import loadTrajectoryFromString
from predict import predictDiversion
classifyLogger = logging.getLogger(__name__)
def classifyXml(clf, scaler, interval, xmlstring, threshold):
try:
trajectory = loadTrajectoryFromString(xmlstring)
diversionDetected = classifyTrajectory(clf, scaler, trajectory, interval, threshold)
return diversionDetected
except Exception as e:
classifyLogger.error("Error classifying XML: %s" %format(e))
def classifyFile(clf, scaler, interval, filepath, threshold):
try:
trajectory = loadTrajectory(filepath)
diversionDetected = classifyTrajectory(clf, scaler, trajectory, interval, threshold)
return diversionDetected
except Exception as e:
classifyLogger.error("Error classifying file %s: %s" %(filepath, format(e)))
def classifyFolder(clf, scaler, interval, folder, threshold):
    filepathsClassifiedAsDiverted = []
    try:
        (filepaths, trajectories) = loadTrajectories(folder)
        i = 0
        for trajectory in trajectories:
            diversionDetected = classifyTrajectory(clf, scaler, trajectory, interval, threshold)
            if diversionDetected:
                filepathsClassifiedAsDiverted.append(filepaths[i])
            i += 1
        return len(trajectories), len(filepathsClassifiedAsDiverted), len(trajectories) - len(filepathsClassifiedAsDiverted), filepathsClassifiedAsDiverted
    except Exception as e:
        classifyLogger.error("Error classifying folder '%s': %s" %(folder, format(e)))
def classifyFsNodes(clf, scaler, interval, fsNodesPaths, threshold):
    filepathsClassifiedAsDiverted = []
    trajectories = []
    for fsNodePath in fsNodesPaths:
        if (os.path.isfile(fsNodePath)):
            try:
                nutrajectory = loadTrajectory(fsNodePath)
                diversionDetected = classifyTrajectory(clf, scaler, nutrajectory, interval, threshold)
                if diversionDetected:
                    filepathsClassifiedAsDiverted.append(fsNodePath)
                trajectories.append(nutrajectory)  # keep every classified trajectory for the totals below
            except Exception as e:
                classifyLogger.error("Error classifying file '%s': %s" %(fsNodePath, format(e)))
        elif (os.path.isdir(fsNodePath)):
            try:
                (filepaths, nutrajectories) = loadTrajectories(fsNodePath, interval)
                i = 0
                for nutrajectory in nutrajectories:
                    diversionDetected = classifyTrajectory(clf, scaler, nutrajectory, interval, threshold)
                    if diversionDetected:
                        filepathsClassifiedAsDiverted.append(filepaths[i])
                    trajectories.append(nutrajectory)
                    i += 1
            except Exception as e:
                classifyLogger.error("Error classifying folder '%s': %s" %(fsNodePath, format(e)))
    return len(trajectories), len(filepathsClassifiedAsDiverted), len(trajectories) - len(filepathsClassifiedAsDiverted), filepathsClassifiedAsDiverted
def classifyTrajectory(clf, scaler, trajectory, interval, threshold):
try:
trajectoryUnderAnalysis = trajectory.createTrajectoryForAnalysis(interval)
if trajectoryUnderAnalysis.totaldist == 0:
            classifyLogger.warning("Trajectory total distance = 0 for flight at '%s'" %(trajectoryUnderAnalysis.filename))
return True
data = trajectoryUnderAnalysis.getVectors()
if not data:
classifyLogger.debug("Insufficient data for diversion prediction of flight at '%s'" %(trajectoryUnderAnalysis.filename))
return False # insufficient data
try:
datat = scaler.transform(data)
except Exception as e:
# print "Error in classification of: ", trajectory.filename, format(e)
classifyLogger.error("Error while classifying trajectory of flight at '%s': %s", str(trajectoryUnderAnalysis.filename) + ":" + format(e))
return False
classification = clf.predict(datat)
decfunout = clf.decision_function(datat)
decfunout = [item for sublist in decfunout for item in sublist] # flatten the list of lists in a "simple" list
(diversionPredicted, severities, firstAlertPosition) = predictDiversion(trajectoryUnderAnalysis, classification, decfunout, threshold)
return diversionPredicted
except Exception as e:
classifyLogger.error("Error while classifying flight %s: %s" %(trajectory.flightId, format(e)))
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JingdongItem(scrapy.Item):
# define the fields for your item here like:
filename = scrapy.Field()
intruduce = scrapy.Field()
img_urls = scrapy.Field()
good_id = scrapy.Field()
img_name = scrapy.Field()
class JingdongItem2(scrapy.Item):
# define the fields for your item here like:
filename = scrapy.Field()
page = scrapy.Field()
contents_Urls = scrapy.Field()
contents = scrapy.Field()
productColors = scrapy.Field()
|
def cnd(ip,username,password):
net_dev = {}
net_dev['ip'] = ip
net_dev['username'] = username
net_dev['password'] = password
return net_dev
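# Illustrative call of cnd() with made-up credentials (not from the original
# source); it simply packs the three values into a device dict.
if __name__ == '__main__':
    device = cnd('192.0.2.10', 'admin', 'secret')
    print(device)  # {'ip': '192.0.2.10', 'username': 'admin', 'password': 'secret'}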
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: filter
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's data filter ability.
.. moduleauthor:: Cem Baybars GÜÇLÜ <cem.baybars@gmail.com>
"""
import numpy as np
from scipy.signal import butter, lfilter, freqz
class LowPassFilter:
"""Class to define an kind of filter.
This class provides necessary initiations and a function named :func:`t_system.motion.Motor.move`
for the provide move of servo motor.
"""
def __init__(self):
"""Initialization method of :class:`t_system.motor.Motor` class.
"""
pass
@staticmethod
def butter_low_pass(cutoff, fs, order=5):
"""Method to start of the motor initially.
Args:
init_angel (float): Initialization angle value for servo motor in radian unit.
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
c = butter(order, normal_cutoff, btype='low', analog=False)
b = c[0]
        a = c[1]  # these lines could be written as "b, a = butter(...)", but that raised a warning.
return b, a
def butter_low_pass_filter(self, data, cutoff, fs, order=5):
"""Method to start of the motor initially.
Args:
init_angel (float): Initialization angle value for servo motor in radian unit.
"""
        b, a = self.butter_low_pass(cutoff, fs, order=order)
y = lfilter(b, a, data)
return y
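# A minimal usage sketch with made-up sample data (not part of the original
# module): filter a noisy 5 Hz sine sampled at 100 Hz with a 10 Hz cutoff.
if __name__ == "__main__":
    fs = 100.0                                   # sampling frequency in Hz
    t = np.linspace(0, 1.0, int(fs), endpoint=False)
    noisy = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(t.size)
    smoothed = LowPassFilter().butter_low_pass_filter(noisy, cutoff=10.0, fs=fs, order=5)
    print(smoothed[:5])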
|
import Tkinter as tk
import tkMessageBox
import ttk
import tkFont as tkFont
import tkColorChooser
from Tkinter import StringVar, IntVar
class ClockForegroundOptions(tk.Frame):
def __init__(self, master, parent):
tk.Frame.__init__(self, master)
self.parent = parent
self.grid()
self.clock = master.clock
clock = self.clock
self.valXLabel = StringVar()
self.valYLabel = StringVar()
self.valFontLabel = StringVar()
self.valFontSizeLabel = StringVar()
self.valFontColourLabel = StringVar()
self.xVar = StringVar()
self.yVar = StringVar()
self.fontVar = StringVar()
self.fontSizeVar = StringVar()
self._12HourFormatVar = IntVar()
self.hourLeadingZeroesVar = IntVar()
self.longMonthVar = IntVar()
self.onVar = IntVar()
self.symbolVar = StringVar()
self.lastSymbolId = 0
self.setSwitch = {"Hour": clock.setHour,
"Minute": clock.setMinute,
"Second": clock.setSecond,
"Year": clock.setYear,
"Month": clock.setMonth,
"Day Number": clock.setDayNumber,
"Day of Week": clock.setDayOfWeek,
"AM/PM": clock.setAMPM,
"Symbols": clock.setSymbol}
self.getSwitch = {"Hour": clock.getHour,
"Minute": clock.getMinute,
"Second": clock.getSecond,
"Year": clock.getYear,
"Month": clock.getMonth,
"Day Number": clock.getDayNumber,
"Day of Week": clock.getDayOfWeek,
"AM/PM" : clock.getAMPM,
"Symbols" : clock.getSymbols}
self.createWidgets()
self.getDefaults("Hour")
# Gets defaults for widgets based on label given and sets labels as necessary
def getDefaults(self, label):
defaults = self.getSwitch[label]()
self.setDefaults(defaults, label)
def setDefaults(self, defaults, label):
if label == "Symbols":
self.symbols = defaults
self.symbolTextbox.delete('1.0', 'end')
else:
self.xVar.set(defaults[0])
self.yVar.set(defaults[1])
self.fontVar.set(defaults[2])
self.valFont.select_clear(0, "end")
i = 0
while (i < self.valFont.size()):
if self.valFont.get(i).strip() == self.fontVar.get().strip():
t = self.valFont.get(i)
self.valFont.delete(i)
self.valFont.insert(0, t)
self.valFont.select_set(0)
i = 10000
i+=1
self.fontSizeVar.set(defaults[3])
self.fontColourDefault = defaults[4]
self.onVar.set(defaults[5])
if (label == "Hour"):
self._12HourFormatVar.set(defaults[5])
self.hourLeadingZeroesVar.set(defaults[6])
self.onVar.set(defaults[7])
elif (label == "Month"):
self.longMonthVar.set(defaults[5])
self.onVar.set(defaults[6])
self.setAllLabels(label)
def setAllLabels(self, label):
self.valXLabel.set(label + " X Coordinate")
self.valYLabel.set(label + " Y Coordinate")
self.valFontLabel.set(label + " Font")
self.valFontSizeLabel.set(label + " Font Size")
self.valFontColourLabel.set(label + " Font Colour")
# Handle edge case items
if (label != "Hour"):
self.hourCheckButton.grid_forget()
self.hourLeadingZeroes.grid_forget()
else:
self.hourCheckButton.grid(row = 0, column = 1, pady = 10, sticky = "NW")
self.hourLeadingZeroes.grid(row = 0, column = 2, pady = 10, sticky = "NW")
if (label != "Month"):
self.monthCheckButton.grid_forget()
else:
self.monthCheckButton.grid(row = 0, column = 1, pady = 10, sticky = "NW")
if (label != "Symbols"):
self.symbolSelect.grid_forget()
self.onCheckButton.grid(row = 1, column = 1, pady= 10, sticky="nw")
try:
self.symbolLabel.grid_forget()
self.symbolTextbox.grid_forget()
self.symbolDeleteButton.grid_forget()
except:
print("issues forgetting")
else:
symbols = list()
for item in self.symbols:
curSym = ("Symbol " + str(self.symbols[item]["idNum"]) + ' : "'
+ self.symbols[item]["sym"] + '"')
symbols.append(curSym)
symbols.append("New Symbol")
self.symbolSelect.config(values = symbols)
self.symbolVar.set(symbols[self.lastSymbolId])
self.lastSymbolId = 0
self.selectSymbol(None)
self.onCheckButton.grid_forget()
self.symbolSelect.grid(row = 0, column = 1, pady = 10, sticky = "NW")
# Fix apply to all
if (label == "Month" or label == "Year" or label == "Day of Week" or label == "Day Number"):
self.applyToAll.config(text="Apply font, font size, and\ncolour to all date options",
command = self.applyToAllDate)
elif (label == "Symbols"):
self.applyToAll.config(text="Apply font, font size, and\ncolour to all symbols",
command = self.applyToAllSymbols)
else:
self.applyToAll.config(text="Apply font, font size, and\ncolour to all time options",
command = self.applyToAllTime)
def createWidgets(self):
fonts = tkFont.families(self.master)
fonts = set(fonts)
fonts = sorted(fonts)
options = ["Hour", "Minute", "Second", "AM/PM",
"Day of Week", "Month", "Day Number", "Year",
"Symbols"]
self.currentOption = tk.Listbox(self, selectmode="SINGLE", exportselection=False)
for option in options:
self.currentOption.insert("end", option)
self.currentOption.select_set(0)
self.currentOption.bind("<ButtonRelease-1>", self.showOptions)
self.currentOption.grid(row = 0, column = 0, rowspan = 10, pady = 10, sticky = "NW")
# Hour checkbuttons
self.hourCheckButton = tk.Checkbutton(self, text = "12 hour clock", variable = self._12HourFormatVar)
self.hourCheckButton.grid(row = 0, column = 1, pady= 10, sticky = "NW")
self.hourLeadingZeroes = tk.Checkbutton(self, text = "Leading zeroes", variable = self.hourLeadingZeroesVar)
self.hourLeadingZeroes.grid(row = 0, column = 2, pady = 10, sticky = "NW")
# Month checkbutton
self.monthCheckButton = tk.Checkbutton(self, text = "Long month form",
variable = self.longMonthVar)
# Symbol stuff
self.symbolSelect = ttk.Combobox(self, textvariable=self.symbolVar,
state = "readonly")
self.symbolSelect.bind("<<ComboboxSelected>>", self.selectSymbol)
self.symbolLabel = tk.Label(self, text="Symbol: ")
self.symbolTextbox = tk.Text(self, width = 24, height=1)
self.symbolDeleteButton = tk.Button(self, text="Delete Symbol",
command = self.removeSymbol)
# On checkbutton
self.onCheckButton = tk.Checkbutton(self, text = "Visible", variable = self.onVar)
self.onCheckButton.grid(row = 1, column = 1, pady= 10, sticky = "NW")
# val X
tk.Label(self, text="Hour X", textvariable = self.valXLabel).grid(row=2, column = 1, pady = 5, sticky = "NW")
maxX = self.master.winfo_screenwidth()
maxY = self.master.winfo_screenheight()
self.valX = tk.Spinbox(self, from_=0, to=maxX, textvariable = self.xVar)
self.valX.grid(row=2, column = 2)
# val Y
tk.Label(self, textvariable = self.valYLabel).grid(row=3, column = 1, pady = 5, sticky = "NW")
self.valY = tk.Spinbox(self, from_=0, to=maxY, textvariable = self.yVar)
self.valY.grid(row=3, column = 2)
# val font listbox
scrollbar = tk.Scrollbar(self)
tk.Label(self, textvariable = self.valFontLabel).grid(row=4, column = 1, pady = 5, sticky = "NW")
self.valFont = tk.Listbox(self, selectmode = "SINGLE", exportselection = False, yscrollcommand = scrollbar.set)
scrollbar.config(command = self.valFont.yview)
scrollbar.grid(row=4, column = 2)
self.valFont.grid(row=4, column = 2, pady = 5)
for font in fonts:
self.valFont.insert("end", font)
# val font size
tk.Label(self, textvariable = self.valFontSizeLabel).grid(row=5, column = 1, pady = 5, sticky = "NW")
self.valFontSize = tk.Spinbox(self, from_=0, to=1000, textvariable = self.fontSizeVar)
self.valFontSize.grid(row = 5, column = 2)
# val font colour
tk.Button(self, textvariable = self.valFontColourLabel, command = self.showColourPicker).grid(row = 6, column = 1, sticky = "NW")
# Buttons
tk.Button(self, text="Apply", command=self.apply).grid(row = 8, column = 2, sticky = "NE", pady=30)
self.applyToAll = tk.Button(self, text="Apply font, font size, and\ncolour to all time options",
command = self.applyToAllTime)
        self.applyToAll.grid(row = 8, column = 1, sticky = "NW", pady=30)
tk.Button(self, text="Drag and drop mode", command=self.beginDragMode).grid(row=8, column=0, sticky="NW", pady=30)
    # Applies current options
def apply(self):
f = self.valFont.get(self.valFont.curselection())
o = self.currentOption.get(self.currentOption.curselection())
try:
int(self.valX.get())
int(self.valY.get())
int(self.valFontSize.get())
if (o.strip() == "Month"):
self.setSwitch[o](self.valX.get(), self.valY.get(), f,
self.valFontSize.get(), self.fontColourDefault,
self.longMonthVar.get(), self.onVar.get())
elif o.strip() == "Hour":
self.setSwitch[o](self.valX.get(), self.valY.get(), f,
self.valFontSize.get(), self.fontColourDefault,
self._12HourFormatVar.get(),
self.hourLeadingZeroesVar.get(),
self.onVar.get())
elif o.strip() == "Symbols":
if self.symbolSelect.get() == "New Symbol":
idNum = len(self.symbols)
sym = self.symbolTextbox.get('1.0', 'end')
sym = sym[:-1]
if len(sym.strip()) == 0:
raise Exception
else:
t = self.symbolSelect.get()
idNum = int(t.split(" ")[1])
sym = self.symbols[idNum]["sym"]
self.setSwitch[o](self.valX.get(), self.valY.get(), f,
self.valFontSize.get(), self.fontColourDefault,
idNum, sym, self.onVar.get())
self.lastSymbolId = idNum
else :
self.setSwitch[o](self.valX.get(), self.valY.get(), f,
self.valFontSize.get(), self.fontColourDefault, self.onVar.get())
self.clock.saveClockOptions()
except:
tkMessageBox.showerror("Invalid input", "A value entered was not valid",
parent = self)
self.getDefaults(o.strip())
# Applies current settings to all time options
def applyToAllTime(self):
f = self.valFont.get(self.valFont.curselection())
self.clock.setHour(self.clock.hourX, self.clock.hourY, f,
self.valFontSize.get(), self.fontColourDefault,
self._12HourFormatVar.get(), self.hourLeadingZeroesVar.get(),
self.onVar.get())
self.clock.setMinute(self.clock.minuteX, self.clock.minuteY, f,
self.valFontSize.get(), self.fontColourDefault,
self.onVar.get())
self.clock.setSecond(self.clock.secondX, self.clock.secondY, f,
self.valFontSize.get(), self.fontColourDefault,
self.onVar.get())
self.clock.setAMPM(self.clock.ampmX, self.clock.ampmY, f,
self.valFontSize.get(), self.fontColourDefault,
self.onVar.get())
self.clock.saveClockOptions()
# Applies current settings to all date options
def applyToAllDate(self):
f = self.valFont.get(self.valFont.curselection())
self.clock.setYear(self.clock.yearX, self.clock.yearY, f,
self.valFontSize.get(), self.fontColourDefault, self.onVar.get())
self.clock.setMonth(self.clock.monthX, self.clock.monthY, f,
self.valFontSize.get(), self.fontColourDefault,
self.longMonthVar.get(), self.onVar.get())
self.clock.setDayNumber(self.clock.dayNumberX, self.clock.dayNumberY, f,
self.valFontSize.get(), self.fontColourDefault, self.onVar.get())
self.clock.setDayOfWeek(self.clock.dayOfWeekX, self.clock.dayOfWeekY, f,
self.valFontSize.get(), self.fontColourDefault, self.onVar.get())
self.clock.saveClockOptions()
# Applies current settings to all symbols
def applyToAllSymbols(self):
f = self.valFont.get(self.valFont.curselection())
for item in self.symbols:
cur = self.symbols[item]
self.setSwitch["Symbols"](cur['x'], cur['y'], f,
self.valFontSize.get(), self.fontColourDefault,
cur["idNum"], cur["sym"], 1)
# Handler for drag and drop placement mode button
def beginDragMode(self):
self.parent.beginDragMode()
    # Handler to close window
def close(self):
self.parent.close()
# Event handler for click on select list of time options
def showOptions(self, position):
o = self.currentOption.get(self.currentOption.curselection())
self.getDefaults(o)
# Event handler for click on select list of all symbols
def selectSymbol(self, event):
s = self.symbolSelect.get()
if (s == "New Symbol"):
self.prepareForNewSymbol()
return
try:
self.symbolLabel.grid_forget()
self.symbolTextbox.grid_forget()
except:
            pass
self.clock.saveClockOptions()
self.symbolDeleteButton.grid(row = 0, column = 2)
idNum = s.split(" ")
idNum = int(idNum[1])
curSym = self.symbols[idNum]
self.xVar.set(curSym["x"])
self.yVar.set(curSym["y"])
self.fontVar.set(curSym["font"])
self.fontSizeVar.set(curSym["size"])
self.fontColourDefault = curSym["colour"]
self.onVar.set(curSym["on"])
self.valFont.select_clear(0, "end")
i = 0
while (i < self.valFont.size()):
if self.valFont.get(i).strip() == self.fontVar.get().strip():
t = self.valFont.get(i)
self.valFont.delete(i)
self.valFont.insert(0, t)
self.valFont.select_set(0)
i = 10000
i+=1
# Prepares window to show options for new symbol
def prepareForNewSymbol(self):
self.xVar.set(0)
self.yVar.set(0)
self.fontVar.set("")
self.fontSizeVar.set(12)
self.fontColourDefault = "#000000"
self.onVar.set(1)
self.valFont.select_clear(0, "end")
self.valFont.select_set(0)
self.symbolLabel.grid(row=1, column =1, sticky = "nw")
self.symbolTextbox.grid(row = 1, column = 2, sticky="nw")
print("Grid forget at 388")
self.symbolDeleteButton.grid_forget()
# Handler for delete symbol button
def removeSymbol(self):
idNum = int(self.symbolSelect.get().split(" ")[1])
self.clock.removeSymbol(idNum)
self.clock.saveClockOptions()
self.getDefaults("Symbols")
# Handler for font colour button
def showColourPicker(self):
colours = tkColorChooser.askcolor(parent = self, color = self.fontColourDefault)
if colours[1] != None:
self.fontColourDefault = colours[1]
|
import Qt.QtWidgets as QtWidgets
import Qt.QtCore as QtCore
import Qt.QtGui as QtGui
import kitsupublisher.utils.data as utils_data
from kitsupublisher.utils.colors import combine_colors
from kitsupublisher.views.TasksTabItem import TasksTabItem
from kitsupublisher.ui_data.color import (
main_color,
table_alternate_color,
text_color,
)
from kitsupublisher.ui_data.ui_values import (
height_table,
row_height,
max_width_table,
)
class StyleDelegateForQTableWidget(QtWidgets.QStyledItemDelegate):
"""
Class overriding QTableWidgetItem color policy, to obtain
transparency when a row is selected
"""
def __init__(self, parent):
QtWidgets.QStyledItemDelegate.__init__(self, parent)
self.parent = parent
self.color_default = QtGui.QColor("#5e60ba")
def paint(self, painter, option, index):
if option.state & QtWidgets.QStyle.State_Selected:
option.palette.setColor(
QtGui.QPalette.HighlightedText,
QtGui.QColor(self.parent.text_color),
)
color = combine_colors(
self.color_default, self.background(option, index)
)
option.palette.setColor(QtGui.QPalette.Highlight, color)
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
def background(self, option, index):
item = self.parent.itemFromIndex(index)
if item:
if item.background() != QtGui.QBrush():
return item.background().color()
if self.parent.alternatingRowColors():
if index.row() % 2 == 1:
return option.palette.color(QtGui.QPalette.AlternateBase)
return option.palette.color(QtGui.QPalette.Base)
class TasksTab(QtWidgets.QTableWidget):
"""
The table containing all the tasks to do for the current user.
The columns of the array are set manually at instantiation.
"""
def __init__(self, window, dict_cols):
QtWidgets.QTableWidget.__init__(self)
self.window = window
self.tab_columns = dict_cols
self.list_ids = list(dict_cols.keys())
self.setColumnCount(len(dict_cols))
self.text_color = text_color
self.create_header(dict_cols)
self.tasks_to_do = utils_data.get_all_tasks_to_do()
self.fill_tasks_tab(self.tasks_to_do)
self.resize_to_content()
self.activate_sort()
self.item_delegate = StyleDelegateForQTableWidget(self)
self.setItemDelegate(self.item_delegate)
self.color_tab()
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.manage_size()
self.selected_row = None
self.clicked.connect(self.on_click)
def manage_size(self):
data_width = self.horizontalHeader().length() + self.verticalHeader().width() + 2
if data_width > max_width_table:
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.setFixedWidth(min(max_width_table, data_width))
self.setSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed
)
def sizeHint(self):
"""
        Overridden size hint.
"""
return QtCore.QSize(
self.horizontalHeader().length() + self.verticalHeader().width(),
height_table,
)
def create_header(self, dict_cols):
"""
Create the header and set its visual aspect.
"""
self.setHorizontalHeaderLabels(dict_cols.values())
self.horizontalHeader().setHighlightSections(False)
self.horizontalHeader().setSectionsClickable(False)
self.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignLeft)
stylesheet = (
"QHeaderView::section{color:"
+ self.text_color
+ "; font-weight: bold; font-size: 18px}"
)
self.horizontalHeader().setStyleSheet(stylesheet)
font = self.horizontalHeader().font()
font.setBold(True)
font.setPointSize(14)
self.horizontalHeader().setFont(font)
height = QtGui.QFontMetrics(
self.horizontalHeader().fontMetrics()
).height()
        self.horizontalHeader().setFixedHeight(int(1.3 * height))
def fill_tasks_tab(self, tasks):
"""
Fill the table with the given tasks.
"""
for nb_row, task in enumerate(tasks):
current_row_nb = self.rowCount() + 1
self.setRowCount(current_row_nb)
for nb_col, task_attribute in enumerate(self.list_ids):
assert task_attribute in task, (
"The attribute "
+ task_attribute
+ " doesn't belong to the attributes of a "
"gazu task object "
)
item = TasksTabItem(self, nb_row, nb_col, task, task_attribute)
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.setItem(nb_row, nb_col, item)
self.setRowHeight(nb_row, row_height)
def reload(self):
"""
        Delete the data of the table, then fill it with the new ones.
"""
self.clicked.disconnect()
self.empty()
self.deactivate_sort()
self.create_header(self.tab_columns)
self.tasks_to_do = utils_data.get_all_tasks_to_do()
self.fill_tasks_tab(self.tasks_to_do)
self.resize_to_content()
self.activate_sort()
self.color_tab()
if self.selected_row:
try:
for col in range(self.columnCount()):
self.item(self.selected_row, col).setSelected(True)
except:
try:
for col in range(self.columnCount()):
self.item(0, col).setSelected(True)
except:
pass
self.clicked.connect(self.on_click)
def empty(self):
"""
Empty the table.
"""
self.clear()
self.setRowCount(0)
def resize_to_content(self):
"""
Resize the table to its contents.
"""
self.resizeColumnsToContents()
self.manage_size()
def activate_sort(self):
"""
Activate the sorting of the table.
"""
self.setSortingEnabled(True)
self.sortItems(0, QtCore.Qt.AscendingOrder)
def deactivate_sort(self):
"""
Deactivate the sorting of the table.
"""
self.setSortingEnabled(False)
def on_click(self):
"""
On table item click, call the initialization/update of the right panel.
Does nothing if the row is the same.
"""
if (
not self.selected_row
or self.currentItem().row() != self.selected_row
):
self.selected_row = self.currentItem().row()
self.window.setup_task_panel(self.currentItem().task)
def color_tab(self):
"""
Paint the items of the table with alternate nuances of grey.
"""
for nb_row in range(self.rowCount()):
row_color = (
QtGui.QColor(main_color)
if nb_row % 2 == 0
else QtGui.QColor(table_alternate_color)
)
for nb_col in range(self.columnCount() - 1):
item = self.item(nb_row, nb_col)
item_color = row_color
if item.is_bg_colored:
item_color = combine_colors(
row_color, item.background().color()
)
brush = QtGui.QBrush(item_color)
item.setBackground(brush)
|
# Generated by Django 3.1.12 on 2021-06-28 20:03
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("extras", "0005_configcontext_device_types"),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.CreateModel(
name="Sync",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("source", models.CharField(max_length=64)),
("target", models.CharField(max_length=64)),
("start_time", models.DateTimeField(blank=True, null=True)),
("dry_run", models.BooleanField(default=False)),
("diff", models.JSONField(blank=True)),
(
"job_result",
models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to="extras.jobresult"
),
),
],
options={
"ordering": ["start_time"],
},
),
migrations.CreateModel(
name="SyncLogEntry",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("timestamp", models.DateTimeField(auto_now_add=True)),
("action", models.CharField(max_length=32)),
("status", models.CharField(max_length=32)),
("diff", models.JSONField(blank=True, null=True)),
("synced_object_id", models.UUIDField(blank=True, null=True)),
("object_repr", models.CharField(blank=True, default="", editable=False, max_length=200)),
("message", models.CharField(blank=True, max_length=511)),
(
"sync",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="logs",
related_query_name="log",
to="nautobot_ssot.sync",
),
),
(
"synced_object_type",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="contenttypes.contenttype",
),
),
],
options={
"verbose_name_plural": "sync log entries",
"ordering": ["sync", "timestamp"],
},
),
]
|
class CombineAction(object):
@staticmethod
def combine(action, action_to_combine):
method_name = '_' + action.type
if hasattr(CombineAction, method_name):
return getattr(CombineAction, method_name)(action, action_to_combine)
raise ValueError('Action type {0} is not supported'.format(action.type))
@staticmethod
def _setVlan(action, action_to_combine):
action.connectionParams.vlanIds += action_to_combine.connectionParams.vlanIds
return action
@staticmethod
def _removeVlan(action, action_to_combine):
action.connectorAttributes += action_to_combine.connectorAttributes
return action
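# A minimal usage sketch with hypothetical action objects (the real action
# classes live elsewhere in the project); it only assumes the attributes that
# CombineAction reads above.
if __name__ == '__main__':
    from types import SimpleNamespace
    first = SimpleNamespace(type='setVlan',
                            connectionParams=SimpleNamespace(vlanIds=['10']))
    second = SimpleNamespace(type='setVlan',
                             connectionParams=SimpleNamespace(vlanIds=['20']))
    combined = CombineAction.combine(first, second)
    print(combined.connectionParams.vlanIds)  # ['10', '20']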
|
from __future__ import print_function
import boto3
import json
# needs to be generated
required_steps = {}
required_steps['sum'] = {'sq'}
required_steps['count'] = {'sq'}
required_steps['avg'] = {'sum', 'count'}
required_steps['out'] = {'avg', 'sum', 'count'}
# needs to be generated
work_flow = {
'inputs': {
},
'outputs': {
},
'steps': {
'sq': {
'inputs': {'': ''},
'outputs': {'out'},
'sqs_arn': ''
},
'sum': {
'inputs': {'inp': '#sq/out'},
'outputs': {'out'},
'sqs_arn': ''
},
'count': {
'inputs': {'inp': '#sq/out'},
'outputs': {'out'},
'sqs_arn': ''
},
'avg': {
'inputs': {'sum': '#sum/out', 'count': '#count/out'},
'outputs': {'out'},
'sqs_arn': ''
},
'out': {
'inputs': {},
'sqs_arn': ''
}
}
}
def lambda_handler(event, context):
    '''
    Entry point: for every MODIFY record in the stream event, work out which
    new outputs appeared and start the steps whose inputs are now complete.
    '''
    for record in event['Records']:
        if record['eventName'] == 'MODIFY':
            newRecords = find_new_records(record['OldImage'], record['NewImage'])
            startList = find_steps_to_start(record['NewImage'], newRecords)
            start_docker_task(startList, record['NewImage'])
def find_new_records(oldImage, newImage):
    '''
    Find the records that exist in the modified data entry but not in the old one.
    oldImage: the item image before the update
    newImage: the item image after the update
    '''
res = {}
for key in newImage:
if key not in oldImage:
res[key] = newImage[key]
return res
def find_steps_to_start(newImage, newRecords):
'''
find new steps that could start based on newest update.
'''
res = []
for key in newRecords:
for step in required_steps:
if key in required_steps[step]:
has_every_steps = True
for x in required_steps[step]:
has_every_steps = has_every_steps and x in newImage
if has_every_steps is True:
res.append(step)
return res
def start_docker_task(startList, newImage):
for step in startList:
info = {}
for para in work_flow['steps'][step]['inputs']:
tmp = work_flow['steps'][step]['inputs'][para].split('/')
info[para] = newImage[tmp[0][1:]][tmp[1]]
# start ecs container
        # adapt from the previous lambda_run_task_template.py
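# A quick, self-contained check of the dependency resolution above, using
# made-up stream images (keys and values are illustrative only).
if __name__ == '__main__':
    old_image = {}
    new_image = {'sq': {'out': 's3://bucket/sq-out'}}
    added = find_new_records(old_image, new_image)
    # 'sum' and 'count' both depend only on 'sq', so they can start now
    print(sorted(find_steps_to_start(new_image, added)))  # ['count', 'sum']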
|
from sqlalchemy import Column, Integer, String, ForeignKey, Text, or_, desc
from app.models.base import Base, db
from app.models.contest import Contest
from app.models.problem import Problem
from app.models.user import User
from app.models.relationship.problem_contest import ProblemContestRel
class Clarification(Base):
__tablename__ = 'clarification'
fields = ['id', 'who', 'to', 'problem_id_in_contest', 'content']
id = Column(Integer, primary_key=True, autoincrement=True)
_who = Column('who', String(100), ForeignKey(User.username), nullable=False)
_to = Column('to', String(100), ForeignKey(User.username))
contest_id = Column(Integer, ForeignKey(Contest.id))
problem_id = Column(Integer, ForeignKey(Problem.id))
content = Column(Text, default='')
@property
def who(self):
return User.get_by_id(self._who)
@who.setter
def who(self, user):
self._who = user.username
@property
def to(self):
return User.get_by_id(self._to)
@to.setter
def to(self, user):
self._to = user.username
@property
def problem_id_in_contest(self):
if self.problem_id is None:
return None
return ProblemContestRel. \
get_by_problem_id_and_contest_id(self.contest_id, self.problem_id).problem_id_in_contest
@classmethod
def search_by_contest_id(cls, contest_id, page=1, page_size=20):
res = db.session.query(Clarification).filter(Clarification.contest_id == contest_id)
from flask_login import current_user
if current_user.is_anonymous:
res = res.filter(Clarification._to.is_(None))
elif not Contest.get_by_id(contest_id).is_admin(current_user):
res = res.filter(
or_(
Clarification._to == current_user.username,
Clarification._who == current_user.username,
Clarification._to.is_(None)
)
)
res = res.order_by(desc(Clarification.id))
data = {
'meta': {
'count': res.count(),
'page': page,
'page_size': page_size
}
}
if page_size != -1:
res = res.offset((page - 1) * page_size).limit(page_size)
res = res.all()
data['data'] = res
return data
|
import click
import logging.config
import datetime
from config import LOGGING, STUDOMATIC_URL
from session import ScrapeSession
from lxml import html
from ics import Calendar, Event
from pytz import timezone
logging.config.dictConfig(LOGGING)
logger = logging.getLogger('studomatic-scrapper')
class Scrapper(object):
schedule = []
viewState = None
eventValidation = None
viewStateGenerator = None
def __init__(self, username, password, weeks, wait=None):
logger.info('Started scrapper for user {}'.format(username))
self.schedule = []
self.today = datetime.date.today()
self.last_monday = self.today - datetime.timedelta(days=self.today.weekday())
self.weeks = weeks
self.html_parser = html.HTMLParser(encoding='windows-1250')
self.session = ScrapeSession(wait=wait)
self.init_session()
self.username = username
self.login(username, password)
self.fetchSchedule()
logger.info('Scraping finished')
self.generateCalendar()
logger.info('Generated {}.ics'.format(username))
def init_session(self):
self.session.get(STUDOMATIC_URL)
def fetchSchedule(self):
monday = self.last_monday
weekCnt = 1
weekScheduleFirstPage = self.session.get(STUDOMATIC_URL + '/Raspored.aspx')
self.extractViewState(weekScheduleFirstPage)
while weekCnt <= self.weeks:
weekSchedulePage = self.session.post(STUDOMATIC_URL + '/Raspored.aspx',
{
'__EVENTTARGET': 'puiDatum',
'__EVENTARGUMENT': '',
'__LASTFOCUS': '',
'__VIEWSTATE': self.viewState,
'__VIEWSTATEGENERATOR': self.viewStateGenerator,
'__EVENTVALIDATION': self.eventValidation,
'puiDatum': 'ponedjeljak, ' + monday.strftime('%d. %m. %Y.'),
}
)
weekSchedulePageHtml = html.fromstring(weekSchedulePage.content, parser=self.html_parser)
classes = weekSchedulePageHtml.xpath('//table[@class="raspored"]//table/tr')
for cls in classes:
classInfo = cls.xpath('./td')[0].xpath('.//text()')
classDate = str(classInfo[0]).strip()
classTime = str(classInfo[1]).strip()
classLocation = str(classInfo[2]).strip()
classDescription = cls.xpath('./td')[1].xpath('.//text()')
classProfessor = str(classDescription[0]).strip()
className = str(classDescription[2]).strip()
classType = str(classDescription[4]).strip()
classAdditionalInfo = str(classDescription[5]).strip()
classDuration = int(classAdditionalInfo[0])
self.schedule.append({
'date': classDate,
'time': classTime,
'date_time': timezone('CET').localize(
datetime.datetime.strptime('{} {}'.format(classDate, classTime),
'%d.%m.%Y. %H:%M')),
'location': classLocation,
'professor': classProfessor,
'name': className,
'type': classType,
'additional_info': classAdditionalInfo,
'duration': classDuration
})
self.extractViewState(weekSchedulePage)
weekCnt += 1
monday += datetime.timedelta(days=7)
def extractViewState(self, response):
responseHtml = html.fromstring(response.content)
if len(responseHtml.xpath('//input[@id="__VIEWSTATE"]/@value')) > 0:
self.viewState = str(responseHtml.xpath('//input[@id="__VIEWSTATE"]/@value')[0])
else:
self.viewState = None
if len(responseHtml.xpath('//input[@id="__EVENTVALIDATION"]/@value')) > 0:
self.eventValidation = str(responseHtml.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0])
else:
self.eventValidation = None
if len(responseHtml.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')) > 0:
self.viewStateGenerator = str(responseHtml.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')[0])
else:
self.viewStateGenerator = None
def login(self, username, password):
loginPage = self.session.get(STUDOMATIC_URL)
self.extractViewState(loginPage)
startPage = self.session.post(STUDOMATIC_URL + 'Login.aspx?ReturnUrl=%2fvern-student%2fdefault.aspx',
{
'login': username,
'password': password,
'butSubmit.x':'37',
'butSubmit.y':'22',
'butSubmit':'Prijava',
'__EVENTVALIDATION': self.eventValidation,
'__VIEWSTATEGENERATOR': self.viewStateGenerator,
'__VIEWSTATE': self.viewState
})
self.extractViewState(startPage)
def generateCalendar(self):
calendar = Calendar(creator=self.username)
for event in self.schedule:
evt = Event()
evt.name = event['name']
evt.location = event['location']
evt.begin = event['date_time']
evt.duration = datetime.timedelta(minutes=45 * event['duration'])
evt.description = '{}, {}'.format(event['professor'], event['type'])
calendar.events.add(evt)
self.calendar = calendar
with open('{}.ics'.format(self.username), 'w') as f:
f.writelines(calendar)
@click.command()
@click.option( '--username', prompt=True, help='Studomatic username')
@click.option('--password', prompt=True, hide_input=True, help='Studomatic password')
@click.option('--weeks', default=20, help='Number of weeks ahead to scrape. Default is 20.')
@click.option('--wait', default=1, type=float, help='Minimum time in seconds to wait in between requests'
' for the session. Default is 1 second.')
def cli(*args, **kwargs):
Scrapper(*args, **kwargs)
if __name__ == '__main__':
cli()
|
import os
import shutil
import time
from collections import OrderedDict
from copy import deepcopy
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from skimage import io
from FfmpegWrapper.ffmpeg_wrapper import FfmpegWrapper
from Tracker.tracks_data import VideoMotAssignment
from Utils import multiprocess_worker as mp
from Animator.utils import eprint
class DemoCompiler:
def __init__(self, sampled_fps, out_fps, video_root, output_dir):
"""
demo video generator
:param sampled_fps: the frame sample rate
:param out_fps: the output video frame rate
        :param video_root: path to the original input video
        :param output_dir: directory into which the demo video will be written
"""
self.sampled_fps = sampled_fps
self.out_fps = out_fps
self.num_distinct_colors = 16
self.colors = np.random.rand(self.num_distinct_colors, 3) # RGB
self.frame_name_format = '%06d.jpg'
self.visualization_video_name = 'demo.mp4'
containing_folder, vid_name = os.path.split(video_root)
self.original_video_name = vid_name.split(".")[0]
demo_file_name = f'{self.original_video_name}_demo.mp4'
self.demo_path = os.path.join(output_dir, demo_file_name)
def generate_demo(self, shot_to_frames_to_detections: VideoMotAssignment, shot_to_offline_mot: OrderedDict,
working_dir: str) -> None:
"""
compile a demo video based on the tracking output
:param working_dir: the visualization directory in which all frames will be rendered and saved
:param shot_to_frames_to_detections: mapping a shot to its frames and a frame to its detections
:param shot_to_offline_mot: shot name to tracks
:return: None (void)
"""
start_time = time.time()
plt.ioff()
# clear the visualization folder
if os.path.isdir(working_dir):
shutil.rmtree(working_dir)
os.mkdir(working_dir)
video_level_frame_number = 0
frame_index_to_kwargs = []
for shot_name, frame_to_detections in shot_to_frames_to_detections.shot_mot.items():
for frame_name, frame_path_and_detections in frame_to_detections.frame_mot.items():
# making sure no resources are shared between processes!
curr_frame_detections = deepcopy(frame_path_and_detections.detections)
current_frame_path = frame_path_and_detections.frame_path
current_shot_mot = OrderedDict({shot_name: deepcopy(shot_to_offline_mot[shot_name])})
curr_frame_args = dict(frame_str=str(video_level_frame_number),
frame_path=current_frame_path, shot_name=shot_name,
detections=curr_frame_detections,
shot_to_offline_mot=current_shot_mot, visualization_dir=working_dir)
frame_index_to_kwargs.append(curr_frame_args)
video_level_frame_number += 1
# process visualization in parallel
semaphore = mp.Semaphore(n_processes=10)
semaphore.parallelize(frame_index_to_kwargs, self.demo_single_frame)
plt.close('all')
# stats
duration = int(time.time()-start_time)
print(f'done with demo frames at {video_level_frame_number/duration:.3f} FPS in total time of {duration}')
# compile frames to video with ffmpeg
try:
FfmpegWrapper().compile_video(working_dir, self.sampled_fps, self.out_fps, self.demo_path)
print(f'Saved the demo video into: {self.demo_path}')
time.sleep(2)
shutil.rmtree(working_dir)
except PermissionError as e:
eprint(f'Generating a demo video raised a PermissionError exception: {e}', e)
eprint(f'Continue execution without deleting the visualization dir: {working_dir}')
except Exception as e:
eprint('Generating a demo video raised an exception.', e)
raise e
def demo_single_frame(self, frame_str: str, frame_path: str, shot_name: str, detections: list,
shot_to_offline_mot: OrderedDict, visualization_dir: str):
"""
draw a single frame as part of the demo video
:param frame_str: the string representation of the frame's index
:param frame_path: the path to the frame
:param shot_name: the shot str
:param detections: the list of CharacterDetections from the json
:param shot_to_offline_mot: the shot's MOT track ids
:param visualization_dir: the output visualization repo
:return:
"""
fig = plt.figure(frame_str)
ax = fig.add_subplot(111, aspect='equal')
im = io.imread(frame_path)
ax.imshow(im)
ax.set_title(f"{shot_name}, {frame_str}")
for box in detections:
if shot_name not in shot_to_offline_mot or box.Id not in shot_to_offline_mot[shot_name]:
continue
track_id = shot_to_offline_mot[shot_name][box.Id]
# draw a rectangular bbox
rect = patches.Rectangle((box.Rect.X, box.Rect.Y), box.Rect.Width, box.Rect.Height, fill=False,
lw=2.5, ec=self.colors[track_id % self.num_distinct_colors, :])
ax.add_patch(rect)
box_title = f'{track_id}, C:{box.Confidence:.2f}'
ax.text(box.Rect.X, box.Rect.Y, box_title, horizontalalignment='left', verticalalignment='top', fontsize=12)
fig.tight_layout()
fig.savefig(os.path.join(visualization_dir, self.frame_name_format % int(frame_str)))
fig.canvas.flush_events()
ax.cla()
plt.close(frame_str)
|
# python function def
print '************** Function def Test Programs **************'
def my_abs(x):
if x>=0:
return x
else:
        return -x
print my_abs(-1)
def empty_func(x):
pass
print empty_func(None)
raw_input()
|
from temboo.Library.CloudMine.ObjectStorage.ObjectDelete import ObjectDelete, ObjectDeleteInputSet, ObjectDeleteResultSet, ObjectDeleteChoreographyExecution
from temboo.Library.CloudMine.ObjectStorage.ObjectGet import ObjectGet, ObjectGetInputSet, ObjectGetResultSet, ObjectGetChoreographyExecution
from temboo.Library.CloudMine.ObjectStorage.ObjectSearch import ObjectSearch, ObjectSearchInputSet, ObjectSearchResultSet, ObjectSearchChoreographyExecution
from temboo.Library.CloudMine.ObjectStorage.ObjectSet import ObjectSet, ObjectSetInputSet, ObjectSetResultSet, ObjectSetChoreographyExecution
from temboo.Library.CloudMine.ObjectStorage.ObjectUpdate import ObjectUpdate, ObjectUpdateInputSet, ObjectUpdateResultSet, ObjectUpdateChoreographyExecution
|
from django.conf.urls import patterns, include, url
from .views import MapView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url('^$', 'mobiletrans.views.index', { 'template_name':'index.html'},
name="index"),
url('^about/$', 'mobiletrans.views.about', { 'template_name':'about.html'},
name="about"),
url('^routemap/$', MapView.as_view( template_name='routemap.html'),
name="routemap"),
url('^transitheat/$', MapView.as_view( template_name='transitheat.html'),
name="transitheat"),
url('^kml/$', 'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml"),
url('^kml/longlat/(?P<long>[-\d.]+),(?P<lat>[-\d.]+)/$',
'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml_longlat"),
url('^kml/latlong/(?P<lat>[-\d.]+),(?P<long>[-\d.]+)/$',
'mobiletrans.mtlocation.views.renderkml', { },
name="mtlocation_renderkml_latlong"),
url('^api/', include('mobiletrans.mtapi.urls')),
)
|
from unittest.mock import patch
import pytest
from hydep.settings import SerpentSettings
from hydep.serpent import SerpentRunner
MAGIC_OMP_THREADS = 1234
@pytest.mark.serpent
@patch.dict("os.environ", {"OMP_NUM_THREADS": str(MAGIC_OMP_THREADS)})
def test_runner():
r = SerpentRunner()
assert r.executable is None
assert r.omp == MAGIC_OMP_THREADS
assert r.mpi == 1
r.executable = "sss2"
assert r.executable == "sss2"
r.omp = 20
assert r.omp == 20
r.mpi = 20
assert r.mpi == 20
with pytest.raises(TypeError):
r.omp = 1.5
with pytest.raises(ValueError):
r.omp = 0
with pytest.raises(ValueError):
r.omp = -1
with pytest.raises(TypeError):
r.mpi = 1.5
with pytest.raises(ValueError):
r.mpi = 0
with pytest.raises(ValueError):
r.mpi = -1
r = SerpentRunner(executable="sss2", omp=20, mpi=4)
cmd = r.makeCommand()
assert int(cmd[cmd.index("-omp") + 1]) == r.omp == 20
assert cmd[0].startswith("mpi")
assert r.mpi == 4
for sub in cmd[1:cmd.index(r.executable)]:
if str(r.mpi) == sub:
break
else:
raise ValueError(f"Number of MPI tasks not found in {cmd}")
@pytest.mark.serpent
def test_config():
settings = SerpentSettings(executable="sss2", omp=10, mpi=4)
runner = SerpentRunner()
runner.configure(settings)
assert runner.executable == "sss2"
assert runner.omp == 10
assert runner.mpi == 4
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import time, inspect, re
from datetime import datetime
class PerfBenchmark(object):
PerfNameResults = {} ## <string, List<long>>
ExecutionTimeList = [] # List<long>
@staticmethod
def RunPerfSuite(perfClass, args, sparkContext, sqlContext):
for name, fun in inspect.getmembers(perfClass, lambda fun : inspect.isfunction(fun) and fun.__name__.startswith("Run")) :
PerfBenchmark.ExecutionTimeList = []
runCount = int(args[1])
for k in range(runCount):
print(str(datetime.now()) + " Starting perf suite : " + str(perfClass.__name__) + "." + str(name) + " times[" + str(k + 1) + "]-" + str(runCount))
fun(args, sparkContext, sqlContext)
executionTimeListRef = []
for v in PerfBenchmark.ExecutionTimeList :
executionTimeListRef.append(v)
PerfBenchmark.PerfNameResults[name] = executionTimeListRef
@staticmethod
def ReportResult() :
print(str(datetime.now()) + " ** Printing results of the perf run (python) **")
allMedianCosts = {}
for name in PerfBenchmark.PerfNameResults :
perfResult = PerfBenchmark.PerfNameResults[name]
# print(str(datetime.now()) + " " + str(result) + " time costs : " + ", ".join(("%.3f" % e) for e in perfResult))
# multiple enumeration happening - ignoring that for now
precision = "%.0f"
minimum = precision % min(perfResult)
maximum = precision % max(perfResult)
runCount = len(perfResult)
avg = precision % (sum(perfResult) / runCount)
median = precision % PerfBenchmark.GetMedian(perfResult)
values = ", ".join((precision % e) for e in perfResult)
print(str(datetime.now()) + " ** Execution time for " + str(name) + " in seconds: " + \
"Min=" + str(minimum) + ", Max=" + str(maximum) + ", Average=" + str(avg) + ", Median=" + str(median) + \
". Run count=" + str(runCount) + ", Individual execution duration=[" + values + "]")
allMedianCosts[name] = median
print(str(datetime.now()) + " ** *** **")
print(time.strftime('%Y-%m-%d %H:%M:%S ') + re.sub(r'(\w)\S*\s*', r'\1', time.strftime('%Z')) + " Python version: Run count = " + str(runCount) + ", all median time costs[" + str(len(allMedianCosts)) + "] : " + \
"; ".join((e + "=" + allMedianCosts[e]) for e in sorted(allMedianCosts)))
@staticmethod
def GetMedian(values) :
itemCount = len(values)
values.sort()
if itemCount == 1:
return values[0]
if itemCount % 2 == 0:
return (values[int(itemCount / 2)] + values[int(itemCount / 2 - 1)]) / 2
return values[int((itemCount - 1) / 2)]
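# A small sanity check of the median helper (illustrative values only, not
# part of the original benchmark suite).
if __name__ == "__main__":
    assert PerfBenchmark.GetMedian([5.0]) == 5.0
    assert PerfBenchmark.GetMedian([3.0, 1.0, 2.0]) == 2.0
    assert PerfBenchmark.GetMedian([4.0, 1.0, 2.0, 3.0]) == 2.5
    print("GetMedian sanity checks passed")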
|
from Collection import *
def topNTime(collection):
    # collect the "start - end" string for every element
    time = []
    for i in collection:
        time.append(i["StartTime"] + " - " + i["EndTime"])
    # count occurrences of each distinct time slot, preserving first-seen order
    unique_times = []
    counts = []
    for slot in time:
        if slot in unique_times:
            counts[unique_times.index(slot)] += 1
        else:
            unique_times.append(slot)
            counts.append(1)
    return unique_times, counts
def topNType(collection):
    # collect the type of every element
    types = []
    for i in collection:
        types.append(i["Type"])
    # count occurrences of each distinct type, preserving first-seen order
    unique_types = []
    counts = []
    for t in types:
        if t in unique_types:
            counts[unique_types.index(t)] += 1
        else:
            unique_types.append(t)
            counts.append(1)
    return unique_types, counts
if __name__ == "__main__":
collection = readCollection("Input")
print(collection)
time, counts = topNTime(collection)
for i in range(len(time)):
print(time[i] + " " + str(counts[i]))
with open("Out", "w") as file:
for i in range(0, len(time)):
file.write(time[i] + " " + str(counts[i]) + "\n")
ell = inputElement()
collection.append(ell)
print("After append:")
print(collection)
|
import autofit as af
from autofit.mock.mock import MockSearch, MockSamples
class MockResult(af.MockResult):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
max_log_likelihood_tracer=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
positions=None,
updated_positions=None,
updated_positions_threshold=None,
stochastic_log_evidences=None,
use_as_hyper_dataset=False,
):
super().__init__(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
)
self.previous_model = model
self.gaussian_tuples = None
        self.mask = mask
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
self.hyper_model_image = hyper_model_image
self.hyper_galaxy_visibilities_path_dict = hyper_galaxy_visibilities_path_dict
self.hyper_model_visibilities = hyper_model_visibilities
self.model_image = model_image
self.unmasked_model_image = model_image
self.max_log_likelihood_tracer = max_log_likelihood_tracer
self.pixelization = pixelization
self.use_as_hyper_dataset = use_as_hyper_dataset
self.positions = positions
self.updated_positions = (
updated_positions if updated_positions is not None else []
)
self.updated_positions_threshold = updated_positions_threshold
self._stochastic_log_evidences = stochastic_log_evidences
def stochastic_log_evidences(self):
return self._stochastic_log_evidences
@property
def image_plane_multiple_image_positions_of_source_plane_centres(self):
return self.updated_positions
class MockResults(af.ResultsCollection):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
max_log_likelihood_tracer=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
positions=None,
updated_positions=None,
updated_positions_threshold=None,
stochastic_log_evidences=None,
use_as_hyper_dataset=False,
):
"""
A collection of results from previous phases. Results can be obtained using an index or the name of the phase
from whence they came.
"""
super().__init__()
result = MockResult(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
mask=mask,
model_image=model_image,
max_log_likelihood_tracer=max_log_likelihood_tracer,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
hyper_galaxy_visibilities_path_dict=hyper_galaxy_visibilities_path_dict,
hyper_model_visibilities=hyper_model_visibilities,
pixelization=pixelization,
positions=positions,
updated_positions=updated_positions,
updated_positions_threshold=updated_positions_threshold,
stochastic_log_evidences=stochastic_log_evidences,
use_as_hyper_dataset=use_as_hyper_dataset,
)
self.__result_list = [result]
@property
def last(self):
"""
The result of the last phase
"""
if len(self.__result_list) > 0:
return self.__result_list[-1]
return None
def __getitem__(self, item):
"""
Get the result of a previous phase by index
Parameters
----------
item: int
The index of the result
Returns
-------
result: Result
The result of a previous phase
"""
return self.__result_list[item]
def __len__(self):
return len(self.__result_list)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File to produce Gutzwiller coefficients
Needs some installation from
https://github.com/tcompa/BoseHubbardGutzwiller
Credits:
- https://github.com/tcompa/BoseHubbardGutzwiller
Author: Patrick Huembeli
"""
import math
import numpy
# import matplotlib
# matplotlib.use('Agg')
import pylab as plt
from lib_gutzwiller_simulated_annealing import Gutzwiller
from lib_gutzwiller_simulated_annealing import SA_for_gutzwiller
# Physical parameters
z = 4 # number of neighbors
nmax = 20 # cutoff on the occupation number per site
U = 1.0 # on-site interaction coefficient
mu = 0.5 # chemical potential
V = 0.00 # nearest-neighbor interaction coefficient
P = 0.0 # induced-tunneling coefficient
make_2D_plot = False
# Simulated-annealing parameters
beta_min = 0.1
beta_max = 1e4
cooling_rate = 0.025 # 0.05
n_steps_per_T = 1000
quench_to_T_equal_to_0 = True
n_steps_at_T_equal_to_0 = 10000
def produce_states(run, start, end, steps, J):
mu_list = numpy.linspace(start, end, steps)
coeff_out = []
labels = []
for mu in mu_list:
G = Gutzwiller(nmax=nmax, U=U, zJ=J, mu=mu, zV=z*V, zP=z*P)
G = SA_for_gutzwiller(G, beta_min=beta_min, beta_max=beta_max,
cooling_rate=cooling_rate, n_steps_per_T=n_steps_per_T,
quench_to_T_equal_to_0=quench_to_T_equal_to_0,
n_steps_at_T_equal_to_0=n_steps_at_T_equal_to_0)
coeff = [i for i in G.f_new]
coeff_out.append(coeff)
dens = G.compute_density()
print('mu:', mu, 'density:', dens)
if mu % 1 == 0:
if abs(dens - mu) > 10**(-3):
labels.append([1., 0.])
print('SF-Phase')
else:
labels.append([0., 1.])
print('Mott-Phase')
else:
if abs(dens - math.ceil(mu)) > 10**(-2):
labels.append([1., 0.])
print('SF-Phase')
else:
labels.append([0., 1.])
print('Mott-Phase')
numpy.save('J_'+str(J)+'labels_source'+str(run), labels)
numpy.save('J_'+str(J)+'coeff_source'+str(run), coeff_out)
J_list = numpy.linspace(0.003, 0.3, 100)
start = 0.03
end = 3.0
steps = 100
run_list = numpy.linspace(1, 100, 100)
for run in run_list:
for J in J_list:
produce_states(int(run), start, end, steps, J)
if make_2D_plot: # make 2D plot for a 50x50 grid
J_list = numpy.linspace(0.0, 0.3, 51)
mu_list = numpy.linspace(0.0, 3.00, 51)
mu_list = mu_list.tolist()
meas_J = []
coeff_J = []
dens_der_J = []
for J in J_list:
meas_mu = []
coeff_mu = []
density_list = []
dens_der_mu = []
for mu in mu_list:
# Initialize Gutzwiller-class instance
G = Gutzwiller(nmax=nmax, U=U, zJ=J, mu=mu, zV=z*V, zP=z*P)
# Perform simulated-annealing optimization
G = SA_for_gutzwiller(G, beta_min=beta_min, beta_max=beta_max,
cooling_rate=cooling_rate,
n_steps_per_T=n_steps_per_T,
quench_to_T_equal_to_0=quench_to_T_equal_to_0,
n_steps_at_T_equal_to_0=n_steps_at_T_equal_to_0)
density = G.compute_density()
density_list.append(density)
if mu_list.index(mu) == 0:
dens_derivative = 1.0
else:
dens_derivative = density-density_list[-2]
coeff = [i for i in G.f_new]
coeff_mu.append(coeff)
dens_der_mu.append(dens_derivative)
meas_mu.append([J, mu, G.energy, density, dens_derivative])
meas_J.append(meas_mu)
coeff_J.append(coeff_mu)
dens_der_J.append(dens_der_mu)
numpy.save('Gutzwiller_find_boundaries_params_2D_plot', meas_J)
numpy.save('Gutzwiller_find_boundaries_coeffs_2D_plot', coeff_J)
print('This is the screen with 2D plot')
plt.clf()
plt.pcolormesh(numpy.array(dens_der_J))
plt.savefig('states_plot')
|
#!/usr/bin/python
# TODO:
# * Give a useful error if the serial port disappears or has an error.
# * Save and restore state.
import itertools
import logging
import optparse
import subprocess
import sys
import time
import trollius
import trollius as asyncio
sys.path.append('../../legtool/')
from legtool.async import trollius_trace
from trollius import From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from legtool.async import asyncio_qt
from legtool.async import asyncio_serial
import ui_manager_main_window
import ui_kiosk_window
def _critical_task_done(task):
if task.exception() is None:
return
if task._tb_logger:
tb_text = task._tb_logger.tb
else:
tb_text = ['No traceback info']
e = task.exception()
print '%s %s' % (e.__class__.__name__, e)
print '\n'.join(tb_text)
sys.exit(1)
def CriticalTask(coro):
task = asyncio.Task(coro)
task.add_done_callback(_critical_task_done)
return task
class Mech(object):
def __init__(self, item):
self.ident = 0
self.name = 'unknown'
self.hp = 0
self.item = item
self.sound = ''
def update(self):
self.item.setText('%02X: %s: HP %d' % (self.ident, self.name, self.hp))
class HistoryItem(object):
def __init__(self, ident, value):
self.stamp = time.time()
self.ident = ident
self.value = value
class State(object):
def __init__(self):
self.mechs = []
self.history = []
def add_history(self, item):
assert isinstance(item, HistoryItem)
self.history.append(item)
def find(self, ident):
for x in self.mechs:
if x.ident == ident:
return x
return
class Panel(object):
parent = None
layout = None
header = None
line = None
history = None
class KioskWindow(QtGui.QDialog):
def __init__(self, state, parent=None):
super(KioskWindow, self).__init__(parent)
self.state = state
self.ui = ui_kiosk_window.Ui_KioskWindow()
self.ui.setupUi(self)
self.layout = QtGui.QHBoxLayout(self.ui.widget)
palette = QtGui.QPalette(self.palette())
palette.setColor(QtGui.QPalette.Background, QtCore.Qt.white)
self.ui.widget.setAutoFillBackground(True)
self.ui.widget.setPalette(palette)
self.panels = []
def update(self):
while len(self.panels) > len(self.state.mechs):
self.layout.removeWidget(self.panels[-1].parent)
del self.panels[-1]
while self.layout.count() < len(self.state.mechs):
panel = Panel()
panel.parent = QtGui.QWidget(self.ui.widget)
panel.layout = QtGui.QVBoxLayout(panel.parent)
panel.header = QtGui.QLabel(panel.parent)
panel.header.setFont(QtGui.QFont("Helvetica", 40, 3))
panel.layout.addWidget(panel.header)
panel.line = QtGui.QFrame(panel.parent)
panel.line.setFrameShape(QtGui.QFrame.HLine)
panel.line.setFrameShadow(QtGui.QFrame.Sunken)
panel.layout.addWidget(panel.line)
panel.history = QtGui.QLabel(panel.parent)
panel.history.setAlignment(QtCore.Qt.AlignLeft |
QtCore.Qt.AlignTop)
panel.layout.addWidget(panel.history)
panel.layout.setStretch(2, 1)
self.layout.addWidget(panel.parent)
self.panels.append(panel)
assert len(self.panels) == len(self.state.mechs)
for panel, mech in zip(self.panels, self.state.mechs):
panel.header.setText(
'%s (%02X): %d' % (mech.name, mech.ident, mech.hp))
text = ''
for item in itertools.islice(
[x for x in reversed(self.state.history)
if x.ident == mech.ident], 10):
text += '%s: %02X: %s\n' % (
time.asctime(time.localtime(item.stamp)),
item.ident,
item.value)
panel.history.setText(text)
class ManagerMainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(ManagerMainWindow, self).__init__(parent)
self.ui = ui_manager_main_window.Ui_ManagerMainWindow()
self.ui.setupUi(self)
self.ui.mechAddButton.clicked.connect(
self.handle_mech_add_button)
self.ui.mechRemoveButton.clicked.connect(
self.handle_mech_remove_button)
self.ui.mechListWidget.currentRowChanged.connect(
self.handle_mech_current_row)
self.ui.propertiesIdEdit.editingFinished.connect(
self.handle_mech_properties)
self.ui.propertiesNameEdit.editingFinished.connect(
self.handle_mech_properties)
self.ui.propertiesHpEdit.editingFinished.connect(
self.handle_mech_properties)
self.ui.propertiesAddHpButton.clicked.connect(
self.handle_add_hp_button)
self.ui.propertiesRemoveHpButton.clicked.connect(
self.handle_remove_hp_button)
self.ui.propertiesSetHpButton.clicked.connect(
self.handle_set_hp_button)
self.ui.propertiesSoundButton.clicked.connect(
self.handle_sound_button)
self.ui.openKioskButton.clicked.connect(
self.handle_open_kiosk_button)
self.state = State()
self.kiosk = KioskWindow(self.state)
def open_serial(self, serial):
self.serial = asyncio_serial.AsyncioSerial(serial, baudrate=38400)
CriticalTask(self._read_serial())
@asyncio.coroutine
def _read(self):
value = yield From(self.serial.read(1))
raise Return(ord(value))
@asyncio.coroutine
def _read_serial(self):
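        # Frame layout (inferred from the parsing below): sync byte 0x55,
        # transponder ident, ident checksum (0xFF - ident), then the panel number.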
with (yield From(self.serial.read_lock)):
while True:
data = yield From(self._read())
if data != 0x55:
continue
ident = yield From(self._read())
identcsum = yield From(self._read())
panel = yield From(self._read())
if identcsum != 0xff - ident:
print 'malformed packet %02X %02X %02X %02X' % (
0x55, ident, identcsum, panel)
continue
self._transponder_hit(ident, panel)
def _transponder_hit(self, ident, panel):
mech = self.state.find(ident)
if mech is None:
self.handle_mech_add_button()
mech = self.state.mechs[-1]
mech.ident = ident
newhp = mech.hp - 1
self._add_history(HistoryItem(
mech.ident,
'(%s) panel %d HP %d -> %d' % (
mech.name, panel, mech.hp, newhp)))
mech.hp = newhp
if mech.sound != '':
subprocess.check_call(
'mplayer %s </dev/null >/dev/null &' % mech.sound,
shell=True)
QtGui.QSound.play(mech.sound)
mech.update()
self.handle_mech_current_row()
self.kiosk.update()
def handle_mech_add_button(self):
widget = self.ui.mechListWidget
widget.addItem('')
item = widget.item(widget.count() - 1)
self.state.mechs.append(Mech(item))
self.state.mechs[-1].update()
widget.setCurrentRow(widget.count() - 1)
def handle_mech_remove_button(self):
mech = self._current_mech()
if mech is None:
return
result = QtGui.QMessageBox.question(
self, 'Remove Mech',
'Confirm removing mech %02X (%s)' % (mech.ident, mech.name),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if result == QtGui.QMessageBox.No:
return
index = self.state.mechs.index(mech)
del self.state.mechs[index]
self.ui.mechListWidget.takeItem(index)
self.handle_mech_current_row()
self.kiosk.update()
def handle_mech_current_row(self):
row = self.ui.mechListWidget.currentRow()
if row < 0 or row >= len(self.state.mechs):
return
mech = self.state.mechs[row]
self.ui.propertiesIdEdit.setText('%02X' % mech.ident)
self.ui.propertiesNameEdit.setText(mech.name)
self.ui.propertiesHpEdit.setText('%d' % mech.hp)
self.ui.propertiesSoundEdit.setText(mech.sound)
def _current_mech(self):
row = self.ui.mechListWidget.currentRow()
if row < 0:
return
if row >= len(self.state.mechs):
return
return self.state.mechs[row]
def _add_history(self, item):
self.state.add_history(item)
self.update_history()
def handle_mech_properties(self):
mech = self._current_mech()
if mech is None:
return
mech.ident = int(self.ui.propertiesIdEdit.text(), 16)
mech.name = self.ui.propertiesNameEdit.text()
if not self.ui.propertiesHpEdit.isReadOnly():
newhp = int(self.ui.propertiesHpEdit.text())
self._add_history(HistoryItem(
mech.ident,
'(%s) manual HP %d -> %d' % (
mech.name, mech.hp, newhp)))
mech.hp = newhp
self.ui.propertiesHpEdit.setReadOnly(True)
mech.update()
self.kiosk.update()
def handle_add_hp_button(self):
self._change_hp(1)
def handle_remove_hp_button(self):
self._change_hp(-1)
def _change_hp(self, delta):
mech = self._current_mech()
if mech is None:
return
newhp = mech.hp + delta
self._add_history(
HistoryItem(mech.ident,
'(%s) manual HP %d -> %d' % (
mech.name, mech.hp, newhp)))
mech.hp = newhp
mech.update()
self.handle_mech_current_row()
self.kiosk.update()
def handle_set_hp_button(self):
mech = self._current_mech()
if mech is None:
return
self.ui.propertiesHpEdit.setReadOnly(False)
def handle_sound_button(self):
mech = self._current_mech()
if mech is None:
return
result = QtGui.QFileDialog.getOpenFileName(
self, 'Select sound', '', 'Sounds (*.wav *.mp3 *.ogg)')
if result != None:
result = result[0]
else:
return
mech.sound = result
self.handle_mech_current_row()
def update_history(self):
text = ''
for item in itertools.islice(reversed(self.state.history), 20):
text += '%s: %02X: %s\n' % (
time.asctime(time.localtime(item.stamp)),
item.ident,
item.value)
self.ui.historyEdit.setPlainText(text)
def handle_open_kiosk_button(self):
self.kiosk.show()
def main():
logging.basicConfig(level=logging.WARN, stream=sys.stdout)
asyncio.set_event_loop_policy(asyncio_qt.QtEventLoopPolicy())
app = QtGui.QApplication(sys.argv)
app.setApplicationName('mjscore_manager')
parser = optparse.OptionParser()
parser.add_option('-s', '--serial',
help='serial port to use')
options, args = parser.parse_args()
assert len(args) == 0
manager = ManagerMainWindow()
if options.serial:
manager.open_serial(options.serial)
manager.show()
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main()
|
import numpy as np
import pandas as pd
from orion.primitives.timeseries_anomalies import _find_sequences
from orion.evaluation.utils import from_list_points_timestamps
def format_anomalies(y_hat, index, interval=21600, anomaly_padding=50):
"""Format binary predictions into anomalous sequences.
Args:
y_hat (ndarray):
Array of predictions.
index (ndarray):
Array of indices of the windows.
        interval (int):
            Space between indices. If not given, 21600 is used.
anomaly_padding (int):
Optional. Number of errors before and after a found anomaly that are added to the
anomalous sequence. If not given, 50 is used.
Returns:
ndarray:
Array containing start-index, end-index for each anomalous sequence that
was found.
"""
gap = interval + 2 * anomaly_padding
anomalies = from_list_points_timestamps(index[y_hat.astype(bool)], gap=gap)
return np.asarray(anomalies)
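# A minimal usage sketch with hypothetical values: flagged window timestamps are
# grouped into [start, end] sequences, with grouping controlled by
# gap = interval + 2 * anomaly_padding.
#     y_hat = np.array([0, 1, 1, 0, 0, 1])
#     index = np.arange(6) * 21600
#     sequences = format_anomalies(y_hat, index)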
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import sys
import getopt
import array
import xml.etree.cElementTree as ET
import subprocess
# from abc import ABCMeta
# class CartelTemplate:
# __metaclass__ = ABCMeta
# @abstractmethod
# def width: raise NotImplementedError
# @abstractmethod
# def height: raise NotImplementedError
# class BaseCartelDescription:
# __metaclass__ = ABCMeta
# @abstractmethod
# def render(self, template): raise NotImplementedError
avenirStyle = "font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;line-height:125%;font-family:Avenir;-inkscape-font-specification:'Avenir, Medium';writing-mode:lr-tb;"
avenirStyleLeft = avenirStyle+"text-anchor:start;text-align:start;"
avenirStyleRight = avenirStyle+"text-anchor:end;text-align:start;"
avenirStyleJustified = avenirStyle+"text-anchor:end;text-align:justify;"
format_config = {
"1": {"w":20, "h":15},
"2": {"w":10, "h":6}
}
unit = "cm"
maxLengthBottomLine = 55
class CartelContent:
def __init__(self, author, title, date, description, technique, dimensions, collection, template):
self._template = template
self._author = author
self._title = title
self._date = date
self._description = description
self._technique = technique
self._dimensions = dimensions
self._collection = collection
def size(self):
return format_config[self._template]
def widthWithUnit(self):
return str(format_config[self._template]["w"]) + unit
def heightWithUnit(self):
return str(format_config[self._template]["h"]) + unit
def render(self, rootXML, x, y):
cartel = ET.SubElement(rootXML, "svg", {"height":self.heightWithUnit(), "width":self.widthWithUnit(), "x":str(x)+unit, "y":str(y)+unit})
if self._template == "1":
image = ET.SubElement(cartel, "image", {"height":self.heightWithUnit(), "width":self.widthWithUnit(), "x":"0cm", "y":"0cm", "xlink:href":"img/vermont_big_hq.jpg"})
author = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"2cm"}).text = self._author.decode('utf-8')
titleBox = ET.SubElement(cartel, "flowRoot")
titleBoxRegion = ET.SubElement(titleBox, "flowRegion")
titleBoxRegionShape = ET.SubElement(titleBoxRegion, "rect", {"width":"17cm", "height":"4cm", "x":"2cm", "y":"2.8cm"})
title = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:40px" }).text = self._title.decode('utf-8')
date = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:23px" }).text = self._date.decode('utf-8')
descBox = ET.SubElement(cartel, "flowRoot")
descBoxRegion = ET.SubElement(descBox, "flowRegion")
descBoxRegionShape = ET.SubElement(descBoxRegion, "rect", {"width":"17cm", "height":"5.5cm", "x":"2cm", "y":"7.5cm"})
text = ET.SubElement(descBox, "flowPara", { "style":avenirStyleJustified + "font-size:21px" }).text = self._description.decode('utf-8')
if len(self._technique) + len(self._dimensions) + len(self._collection) > maxLengthBottomLine:
technique = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"12.7cm"}).text = self._technique.decode('utf-8')
dimensions = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"13.5cm"}).text = self._dimensions.decode('utf-8')
else:
media = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"13.5cm"}).text = str.join(", ", filter(None, [self._technique, self._dimensions])).decode('utf-8')
collection = ET.SubElement(cartel, "text", {"style":avenirStyleRight + "font-size:22px", "x":"19cm", "y":"13.5cm"}).text = self._collection.decode('utf-8')
elif self._template == "2":
cartel = ET.SubElement(rootXML, "svg", {"height":self.heightWithUnit(), "width":self.widthWithUnit(), "x":str(x)+unit, "y":str(y)+unit})
image = ET.SubElement(cartel, "image", {"height":self.heightWithUnit(), "width":self.widthWithUnit(), "x":"0cm", "y":"0cm", "xlink:href":"img/vermont_small_hq.jpg"})
author = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:15px", "x":"1cm", "y":"1.2cm"}).text = self._author.decode('utf-8')
titleBox = ET.SubElement(cartel, "flowRoot")
titleBoxRegion = ET.SubElement(titleBox, "flowRegion")
titleBoxRegionShape = ET.SubElement(titleBoxRegion, "rect", {"width":"8.5cm", "height":"2.5cm", "x":"1cm", "y":"1.8cm"})
title = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:24px" }).text = self._title.decode('utf-8')
date = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:17px" }).text = self._date.decode('utf-8')
media = ET.SubElement(cartel, "text", {"style":avenirStyleRight + "font-size:12px", "x":"9.6cm", "y":"5cm"}).text = self._technique.decode('utf-8')
collection = ET.SubElement(cartel, "text", {"style":avenirStyleRight + "font-size:12px", "x":"9.6cm", "y":"5.5cm"}).text = self._collection.decode('utf-8')
cornerTopLeft = ET.SubElement(cartel, "path", {"style":"fill:none;fill-rule:evenodd;stroke:#696866;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1", "d":"M 20 0 L 0 0 L 0 20"})
class FinalDocument:
def __init__(self, documents, width = 100, unit = "cm"):
self._documents = documents
self._width = width
self._x = 0
self._y = 0
self.initSVG()
# compute document limits
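    # (initSVG is a dry run of the same row-wrapping used in render(): cartels are
    # packed left to right and wrapped onto new rows, and the final y becomes the page height.)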
def initSVG(self):
x = 0
y = 0
for document in filter(lambda x : x._template == "1", self._documents):
if(x + document.size()["w"] > self._width):
# goto new line
y += document.size()["h"]
# reset x origin
x = 0
x += document.size()["w"]
else:
x += document.size()["w"]
# go to next line
if(x != 0):
y += format_config["1"]["h"]
x = 0
for document in filter(lambda x : x._template == "2", self._documents):
if(x + document.size()["w"] > self._width):
# goto new line
y += document.size()["h"]
# reset x origin
x = 0
x += document.size()["w"]
else:
x += document.size()["w"]
# go to next line
if(x != 0):
y += format_config["2"]["h"]
x = 0
self._svgContainer = ET.Element("svg", {"version":"1.2", "xmlns:xlink":"http://www.w3.org/1999/xlink", "xmlns":"http://www.w3.org/2000/svg", "height":str(y)+unit, "width":str(self._width)+unit})
colorProfileDef = ET.SubElement(self._svgContainer, "defs")
colorProfile = ET.SubElement(colorProfileDef, "color-profile", { "name":"FOGRA39L-Coated", "xlink:href":"/usr/share/color/icc/colord/FOGRA39L_coated.icc" })
# render global document
def render(self, filePath = "document.svg"):
# insert first template
for document in filter(lambda x : x._template == "1", self._documents):
if(self._x + document.size()["w"] > self._width):
# goto new line
self._y += document.size()["h"]
# reset x origin
self._x = 0
document.render(self._svgContainer, self._x, self._y)
#
self._x += document.size()["w"]
else:
document.render(self._svgContainer, self._x, self._y)
self._x += document.size()["w"]
        # go to next line
if(self._x != 0):
self._y += format_config["1"]["h"]
self._x = 0
# insert other template
for document in filter(lambda x : x._template == "2", self._documents):
if(self._x + document.size()["w"] > self._width):
# goto new line
self._y += document.size()["h"]
# reset x origin
self._x = 0
document.render(self._svgContainer, self._x, self._y)
#
self._x += document.size()["w"]
else:
document.render(self._svgContainer, self._x, self._y)
self._x += document.size()["w"]
tree = ET.ElementTree(self._svgContainer)
tree.write(filePath)
def loadCartelDescriptions(csvfile):
result = []
with open(csvfile, 'rb') as csvcontent:
spamreader = csv.reader(csvcontent, delimiter='|', quoting=csv.QUOTE_NONE)
for row in spamreader:
if row and len(row) == 8:
auteur, titre, date, technique, dimensions, collection, template, description = row
result.append(CartelContent(auteur, titre, date, description, technique, dimensions, collection, template))
else:
print "Ignore line" + str(row)
return result
def help():
print 'Command line:'
print 'cartel-generation.py -i <input-csv-file> [-s <intermediate-svg-file> -o <output-pdf-file>]'
print "Generate a svg (then a pdf if required) from a csv description."
print " "
print "Options"
print " -i, --icsv=INPUTFILE input CSV file. Separator: \"|\", no quotes arround each cell."
print " -s, --svg=INPUTFILE output SVG file (default: document.svg)"
print " -o, --opdf=INPUTFILE output PDF file"
def main(argv):
inputcsvfile = ''
outputsvgfile = 'document.svg'
outputpdffile = ''
try:
opts, args = getopt.getopt(argv,"hi:s:o:",["icsv=","svg=", "opdf="])
except getopt.GetoptError:
print "Error while reading parameters. Abort."
help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
help()
sys.exit()
elif opt in ("-i", "--icsv"):
inputcsvfile = arg
elif opt in ("-s", "--svg"):
outputsvgfile = arg
elif opt in ("-o", "--odf"):
outputpdffile = arg
if inputcsvfile == "":
print "You have to give an input csv file."
print " "
help()
sys.exit(3)
print 'Loading input csv file "'+inputcsvfile+'"'
try:
cartelDescriptions = loadCartelDescriptions(inputcsvfile)
except ValueError as e:
print "Error while reading csv file:", e
print "Abort"
sys.exit(4)
except:
print "Error while reading csv file. Abort."
sys.exit(4)
print "Initialize final document"
finalDocument = FinalDocument(cartelDescriptions)
print 'Save final document as "'+outputsvgfile+'"'
finalDocument.render(outputsvgfile)
if outputpdffile:
# export in PDF the generated SVG, using the inkscape command line
# choose 300dpi and convert text to paths
print 'Save final document as "'+outputpdffile+'"'
subprocess.call(["inkscape", "-A", outputpdffile, outputsvgfile, "-d", "300", "-T"])
if __name__ == "__main__":
main(sys.argv[1:])
|
import numpy as np
import random
def blobs(
width=8,
height=8,
k=3,
min_extend=0,
max_extend=1,
random_seed=None):
"""Generates a 1 to k number of blobs on a 2D grid
Args:
width: width of grid generated
height: height of grid generated
k: number of random points
min_extend: min cells to recursively extend for each neighbor of k
max_extend: max cells to recursively extend for each neighbor of k
random_seed: not used
Returns: grid, count: count of blobs
"""
grid = np.zeros((width, height), dtype=np.uint8)
def getNeighbours(coordinates, visited):
"""Return the neighbors of coordinates, that are not in visited (taboo set)
The neighbors are represented with the pairs of their (row, column) coordinates.
Args:
coordinates: set of (row, column) coordinate pairs
visited: a taboo set
Returns:
neighbors: list of (r, c) pairs that are neighbors to coordinates
"""
neighbors = []
for r, c in coordinates:
for dr, dc in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
rn, cn = r + dr, c + dc
if ((0 <= rn < width) and (0 <= cn < height) and grid[rn, cn] != 1 and (rn, cn) not in visited):
neighbors.append((rn, cn))
return neighbors
filled = set()
# STEP 1: take k random points
for i in range(k):
rc = random.randint(0, width - 1)
rr = random.randint(0, height - 1)
cell = (rr, rc)
grid[cell] = 1
filled.add(cell)
neighbours = getNeighbours(filled, filled)
# STEP 2: extend recursively in min to max distance randomly
# from every k point
l = random.randint(min_extend, max_extend)
while l > 0:
l -= 1
for i in range(len(neighbours)):
r = random.randint(0, len(neighbours) - 1)
neighbor = neighbours[r]
if neighbor not in filled:
grid[neighbor] += 1
filled.add(neighbor)
neighbours = getNeighbours(filled, filled)
visited = set()
def get_blob_size(i, j):
"""Add 1 for every neighbor that is in filled, else return 0"""
        if not (0 <= i < width) or not (0 <= j < height) or (i, j) not in filled or (i, j) in visited:
return 0
visited.add((i, j))
size = 1
size += get_blob_size(i + 1, j)
size += get_blob_size(i - 1, j)
size += get_blob_size(i, j + 1)
size += get_blob_size(i, j - 1)
return size
# STEP 3: count blobs
count = 0
for r, c in filled:
if (get_blob_size(r, c) > 0):
count += 1
return grid, count
def blob_generator(*args, **kwargs):
while True:
yield blobs(*args, **kwargs)
if __name__ == "__main__":
b = blob_generator(14, 14, k=4, max_extend=2)
grid, count = next(b)
print (grid, count)
|
# -*- coding: utf-8 -*-
"""
A class for FFT filtering General-mode spectroscopic imaging data as reported in:
[Rapid mapping of polarization switching through complete information acquisition](http://www.nature.com/articles/ncomms13290)
Created on Tue Nov 07 11:48:53 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import h5py
import numpy as np
from collections import Iterable
from pyUSID.processing.process import Process
from pyUSID.processing.comp_utils import parallel_compute
from pyUSID.io.hdf_utils import create_results_group, write_main_dataset, write_simple_attrs, create_empty_dataset, \
write_ind_val_dsets
from pyUSID.io.write_utils import Dimension
from .fft import get_noise_floor, are_compatible_filters, build_composite_freq_filter
from .gmode_utils import test_filter
# TODO: correct implementation of num_pix
class SignalFilter(Process):
def __init__(self, h5_main, frequency_filters=None, noise_threshold=None, write_filtered=True,
write_condensed=False, num_pix=1, phase_rad=0, **kwargs):
"""
Filters the entire h5 dataset with the given filtering parameters.
Parameters
----------
h5_main : h5py.Dataset object
Dataset to process
frequency_filters : (Optional) single or list of pycroscopy.fft.FrequencyFilter objects
Frequency (vertical) filters to apply to signal
noise_threshold : (Optional) float. Default - None
Noise tolerance to apply to data. Value must be within (0, 1)
write_filtered : (Optional) bool. Default - True
Whether or not to write the filtered data to file
        write_condensed : (Optional) bool. Default - False
Whether or not to write the condensed data in frequency space to file. Use this for datasets that are very
large but sparse in frequency space.
num_pix : (Optional) uint. Default - 1
Number of pixels to use for filtering. More pixels means a lower noise floor and the ability to pick up
weaker signals. Use only if absolutely necessary. This value must be a divisor of the number of pixels in
the dataset
phase_rad : (Optional). float
Degrees by which the output is rotated with respect to the input to compensate for phase lag.
This feature has NOT yet been implemented.
kwargs : (Optional). dictionary
Please see Process class for additional inputs
"""
super(SignalFilter, self).__init__(h5_main, 'FFT_Filtering', **kwargs)
if frequency_filters is None and noise_threshold is None:
raise ValueError('Need to specify at least some noise thresholding / frequency filter')
if noise_threshold is not None:
if noise_threshold >= 1 or noise_threshold <= 0:
                raise ValueError('Noise threshold must be within (0, 1)')
self.composite_filter = 1
if frequency_filters is not None:
if not isinstance(frequency_filters, Iterable):
frequency_filters = [frequency_filters]
if not are_compatible_filters(frequency_filters):
raise ValueError('frequency filters must be a single or list of FrequencyFilter objects')
self.composite_filter = build_composite_freq_filter(frequency_filters)
else:
write_condensed = False
if write_filtered is False and write_condensed is False:
raise ValueError('You need to write the filtered and/or the condensed dataset to the file')
num_effective_pix = h5_main.shape[0] * 1.0 / num_pix
if num_effective_pix % 1 > 0:
raise ValueError('Number of pixels not divisible by the number of pixels to use for FFT filter')
self.num_effective_pix = int(num_effective_pix)
self.phase_rad = phase_rad
self.noise_threshold = noise_threshold
self.frequency_filters = frequency_filters
self.write_filtered = write_filtered
self.write_condensed = write_condensed
"""
Remember that the default number of pixels corresponds to only the raw data that can be held in memory
In the case of signal filtering, the datasets that will occupy space are:
        1. Raw, 2. filtered (real + freq space copies), 3. Condensed (substantially less space)
The actual scaling of memory depends on options:
"""
scaling_factor = 1 + 2 * self.write_filtered + 0.25 * self.write_condensed
self._max_pos_per_read = int(self._max_pos_per_read / scaling_factor)
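        # Worked example: write_filtered=True and write_condensed=False gives
        # scaling_factor = 1 + 2*1 + 0.25*0 = 3, so a third as many positions are read per chunk.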
if self.verbose and self.mpi_rank == 0:
print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))
self.parms_dict = dict()
if self.frequency_filters is not None:
for filter in self.frequency_filters:
self.parms_dict.update(filter.get_parms())
if self.noise_threshold is not None:
self.parms_dict['noise_threshold'] = self.noise_threshold
self.parms_dict['num_pix'] = self.num_effective_pix
self.duplicate_h5_groups, self.partial_h5_groups = self._check_for_duplicates()
self.data = None
self.filtered_data = None
self.condensed_data = None
self.noise_floors = None
self.h5_filtered = None
self.h5_condensed = None
self.h5_noise_floors = None
def test(self, pix_ind=None, excit_wfm=None, **kwargs):
"""
Tests the signal filter on a single pixel (randomly chosen unless manually specified) worth of data.
Parameters
----------
pix_ind : int, optional. default = random
Index of the pixel whose data will be used for inference
excit_wfm : array-like, optional. default = None
Waveform against which the raw and filtered signals will be plotted. This waveform can be a fraction of the
length of a single pixel's data. For example, in the case of G-mode, where a single scan line is yet to be
            broken down into pixels, the excitation waveform for a single pixel can be provided to automatically
break the raw and filtered responses also into chunks of the same size.
Returns
-------
fig, axes
"""
if self.mpi_rank > 0:
return
if pix_ind is None:
pix_ind = np.random.randint(0, high=self.h5_main.shape[0])
return test_filter(self.h5_main[pix_ind], frequency_filters=self.frequency_filters, excit_wfm=excit_wfm,
noise_threshold=self.noise_threshold, plot_title='Pos #' + str(pix_ind), show_plots=True,
**kwargs)
def _create_results_datasets(self):
"""
Creates all the datasets necessary for holding all parameters + data.
"""
self.h5_results_grp = create_results_group(self.h5_main, self.process_name)
self.parms_dict.update({'last_pixel': 0, 'algorithm': 'pycroscopy_SignalFilter'})
write_simple_attrs(self.h5_results_grp, self.parms_dict)
assert isinstance(self.h5_results_grp, h5py.Group)
if isinstance(self.composite_filter, np.ndarray):
h5_comp_filt = self.h5_results_grp.create_dataset('Composite_Filter',
data=np.float32(self.composite_filter))
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Composite_Filter dataset'.format(self.mpi_rank))
        # First create the position datasets if the new indices are smaller...
if self.num_effective_pix != self.h5_main.shape[0]:
# TODO: Do this part correctly. See past solution:
"""
# need to make new position datasets by taking every n'th index / value:
new_pos_vals = np.atleast_2d(h5_pos_vals[slice(0, None, self.num_effective_pix), :])
pos_descriptor = []
for name, units, leng in zip(h5_pos_inds.attrs['labels'], h5_pos_inds.attrs['units'],
[int(np.unique(h5_pos_inds[:, dim_ind]).size / self.num_effective_pix)
for dim_ind in range(h5_pos_inds.shape[1])]):
pos_descriptor.append(Dimension(name, units, np.arange(leng)))
ds_pos_inds, ds_pos_vals = build_ind_val_dsets(pos_descriptor, is_spectral=False, verbose=self.verbose)
h5_pos_vals.data = np.atleast_2d(new_pos_vals) # The data generated above varies linearly. Override.
"""
h5_pos_inds_new, h5_pos_vals_new = write_ind_val_dsets(self.h5_results_grp,
Dimension('pixel', 'a.u.', self.num_effective_pix),
is_spectral=False, verbose=self.verbose and self.mpi_rank==0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Created the new position ancillary dataset'.format(self.mpi_rank))
else:
h5_pos_inds_new = self.h5_main.h5_pos_inds
h5_pos_vals_new = self.h5_main.h5_pos_vals
if self.verbose and self.mpi_rank == 0:
                print('Rank {} - Reusing position datasets from the source dataset'.format(self.mpi_rank))
if self.noise_threshold is not None:
self.h5_noise_floors = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, 1), 'Noise_Floors',
'Noise', 'a.u.', None, Dimension('arb', '', [1]),
dtype=np.float32, aux_spec_prefix='Noise_Spec_',
h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
verbose=self.verbose and self.mpi_rank == 0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Noise_Floors dataset'.format(self.mpi_rank))
if self.write_filtered:
# Filtered data is identical to Main_Data in every way - just a duplicate
self.h5_filtered = create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
h5_group=self.h5_results_grp)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Filtered dataset'.format(self.mpi_rank))
self.hot_inds = None
if self.write_condensed:
self.hot_inds = np.where(self.composite_filter > 0)[0]
self.hot_inds = np.uint(self.hot_inds[int(0.5 * len(self.hot_inds)):]) # only need to keep half the data
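            # (Presumably only half is needed because the FFT of a real signal is
            # conjugate-symmetric, so the upper half of the shifted spectrum carries the full information.)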
condensed_spec = Dimension('hot_frequencies', '', int(0.5 * len(self.hot_inds)))
self.h5_condensed = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, len(self.hot_inds)),
'Condensed_Data', 'Complex', 'a. u.', None, condensed_spec,
h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
dtype=np.complex, verbose=self.verbose and self.mpi_rank == 0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Condensed dataset'.format(self.mpi_rank))
if self.mpi_size > 1:
self.mpi_comm.Barrier()
self.h5_main.file.flush()
def _get_existing_datasets(self):
"""
Extracts references to the existing datasets that hold the results
"""
if self.write_filtered:
self.h5_filtered = self.h5_results_grp['Filtered_Data']
if self.write_condensed:
self.h5_condensed = self.h5_results_grp['Condensed_Data']
if self.noise_threshold is not None:
self.h5_noise_floors = self.h5_results_grp['Noise_Floors']
def _write_results_chunk(self):
"""
Writes data chunks back to the file
"""
# Get access to the private variable:
pos_in_batch = self._get_pixels_in_current_batch()
if self.write_condensed:
self.h5_condensed[pos_in_batch, :] = self.condensed_data
if self.noise_threshold is not None:
self.h5_noise_floors[pos_in_batch, :] = np.atleast_2d(self.noise_floors)
if self.write_filtered:
self.h5_filtered[pos_in_batch, :] = self.filtered_data
# Not responsible for checkpointing anymore. Process class handles this.
def _unit_computation(self, *args, **kwargs):
"""
Processing per chunk of the dataset
Parameters
----------
args : list
Not used
kwargs : dictionary
Not used
"""
# get FFT of the entire data chunk
self.data = np.fft.fftshift(np.fft.fft(self.data, axis=1), axes=1)
if self.noise_threshold is not None:
self.noise_floors = parallel_compute(self.data, get_noise_floor, cores=self._cores,
func_args=[self.noise_threshold],
verbose=self.verbose)
if isinstance(self.composite_filter, np.ndarray):
            # multiply the FFT of the data by the composite filter
self.data *= self.composite_filter
if self.noise_threshold is not None:
# apply thresholding
self.data[np.abs(self.data) < np.tile(np.atleast_2d(self.noise_floors), self.data.shape[1])] = 1E-16
if self.write_condensed:
# set self.condensed_data here
self.condensed_data = self.data[:, self.hot_inds]
if self.write_filtered:
# take inverse FFT
self.filtered_data = np.real(np.fft.ifft(np.fft.ifftshift(self.data, axes=1), axis=1))
if self.phase_rad > 0:
# TODO: implement phase compensation
# do np.roll on data
# self.data = np.roll(self.data, 0, axis=1)
pass
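# A minimal usage sketch, assuming the usual pyUSID Process workflow and a
# hypothetical already-open USID main dataset `h5_main` with filters `my_filters`:
#     filt = SignalFilter(h5_main, frequency_filters=my_filters,
#                         noise_threshold=1E-4, write_filtered=True)
#     filt.test()                   # preview the filter on a single pixel
#     h5_results = filt.compute()   # filter the full dataset and write results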
|
"""
Assuming that we have some email addresses in the "username@companyname.com" format, please write program to print the user name of a given email address. Both user names and company names are composed of letters only.
"""
"""Question:
Assuming that we have some email addresses in the "username@companyname.com" format, please write program to print the user name of a given email address. Both user names and company names are composed of letters only.
Example:
If the following email address is given as input to the program:
john@google.com
Then, the output of the program should be:
john
In case of input data being supplied to the question, it should be assumed to be a console input.
Hints:
Use \w to match letters.
"""
import re
emailAddress = raw_input()
pat2 = "(\w+)@((\w+\.)+(com))"
r2 = re.match(pat2,emailAddress)
print r2.group(1)
|
from pathlib import Path
import os
import logging
# Build paths inside the project like this: BASE_DIR / 'subdir'.
PROD_STATUS = os.environ.get("PRODUCTION", False)
# Using the production status of the server to set the DEBUG value
# (doing it this way because of a quirk of the django-celery module).
if PROD_STATUS:
DEBUG=False
else:
DEBUG=True
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY:
SECRET_KEY = os.environ.get("SECRET_KEY", "test_django_secret_key")
ALLOWED_HOSTS = ["*"]
# Application definition:
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd Party Applications:
"rest_framework",
"rest_framework.authtoken",
"django_celery_beat",
"djoser",
"drf_yasg",
"django_filters",
"tinymce",
"crispy_forms",
"django_plotly_dash.apps.DjangoPlotlyDashConfig",
# Frontend Application:
"application_frontend",
# Core API Logic:
"api_core",
# Project specific API Applications:
"data_APIs.reddit_api",
"data_APIs.twitter_api",
"data_APIs.articles_api"
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'private_rest_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [(os.path.join(BASE_DIR, "templates"))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'private_rest_api.wsgi.application'
# REST FRAMEWORK Configuration:
REST_FRAMEWORK = {
# Authentication/Permission:
'DEFAULT_PERMISSION_CLASSES': ['rest_framework.permissions.IsAuthenticated'],
'DEFAULT_AUTHENTICATION_CLASSES': ['rest_framework.authentication.TokenAuthentication'],
"DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
}
# Database
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ["POSTGRES_DB"],
'USER': os.environ["POSTGRES_USER"],
'PASSWORD': os.environ["POSTGRES_PASSWORD"],
'HOST': os.environ["POSTGRES_HOST"],
'PORT': os.environ["POSTGRES_PORT"]
}
}
"""
# Celery Settings:
CELERY_BEAT_SCHEDULER = 'django_celery_beat.schedulers:DatabaseScheduler'
CELERY_BEAT_SCHEDULE_FILENAME = "celerybeat-schedule"
"""
# Don't use pickle as serializer, json is much safer
CELERY_BROKER_URL = os.environ["CELERY_BROKER_URL"]
CELERY_RESULT_BACKEND = os.environ["CELERY_RESULT_BACKEND"]
CELERY_TASK_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = "UTC"
"""
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Sentry Error Catching Configuration (currently disabled):
"""
# Importing SDK:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn="https://fe8a2428b4984abd83604d5c26a9c051@o1148429.ingest.sentry.io/6219915",
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
send_default_pii=True
)
"""
# Configuration for Swagger UI:
SWAGGER_SETTINGS = {
# Token Authorization:
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Media Files:
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Frontend styling for TinyMCE:
TINYMCE_JS_URL = 'https://cdn.tiny.cloud/1/no-api-key/tinymce/5/tinymce.min.js'
TINYMCE_COMPRESSOR = False
# Pointing to the Custom User Model:
AUTH_USER_MODEL = "api_core.CustomUser"
# Login route configs:
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
# Crispy Forms Configurations:
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Django-Dash Settings:
X_FRAME_OPTIONS = 'SAMEORIGIN'
|
import os, sys, re, os.path, copy, pickle, gc, string, weakref, math, new
try:
import numpy # Request, but do not require
except:
pass
import paraview
import paraview.annotation
import paraview.benchmark
import paraview.calculator
import paraview.collaboration
import paraview.compile_all_pv
import paraview.coprocessing
import paraview.cpexport
import paraview.cpstate
import paraview.extract_selection
import paraview.lookuptable
import paraview.numeric
import paraview.pvfilters
import paraview.pvvtkextensions
import paraview.python_view
import paraview.servermanager
import paraview.simple
import paraview.smstate
import paraview.smtesting
import paraview.smtrace
import paraview.spatiotemporalparallelism
import paraview.util
import paraview.variant
import paraview.vtk
import paraview.vtk.algorithms
import paraview.vtk.dataset_adapter
|
'''tzinfo timezone information for America/Port_of_Spain.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Port_of_Spain(DstTzInfo):
'''America/Port_of_Spain timezone definition. See datetime.tzinfo for details'''
zone = 'America/Port_of_Spain'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1912,3,2,4,6,4),
]
_transition_info = [
i(-14760,0,'LMT'),
i(-14400,0,'AST'),
]
Port_of_Spain = Port_of_Spain()
|
# from data import models
# from algo import pde
# %%
from scipy.linalg import cholesky
import numpy as np
import datetime
from opricer.model import models
from opricer.algo import pde, analytics, mc
import matplotlib.pyplot as plt
from matplotlib.widgets import Cursor
from scipy.sparse import diags
from scipy.linalg import lu_solve, lu_factor
# from opricer.teststaff import simulate
import pandas as pd
np.random.seed(123)
a = models.Underlying(datetime.datetime(2010, 1, 1), 100)
a1 = models.Underlying(datetime.datetime(2010, 1, 1), 200)
b = models.EurOption(datetime.datetime(2011, 1, 1), 'call')
b1 = models.AmeOption(datetime.datetime(2011, 1, 1), 'call')
c = models.BasketOption(datetime.datetime(2011, 1, 1), 'call')
d = models.BarOption(datetime.datetime(2011, 1, 1), 'put')
b._attach_asset(100, a)
b1._attach_asset(100, a1)
c._attach_asset(100, a, a1)
# d._attach_asset([30, np.inf], 100, a)
solver = analytics.AnalyticSolver(high_val=2, low_val=0)
price = solver(b)
solver1 = pde.EurSolver()
solver2 = pde.AmeSolver(high_val=2, low_val=0)
# AMeprice = solver2()
Msolver = mc.EurMCSolver(path_no=60000, asset_no=10,
time_no=100, high_val=2, low_val=0)
print(str(solver.__class__.__name__))
solver4 = pde.BarSolver(high_val=2, low_val=0, asset_no=solver.asset_no)
Msolver2 = mc.BarMCSolver(high_val=2, low_val=0, asset_no=solver.asset_no)
Msolver3 = mc.BasketMCSolver(high_val=2, low_val=0, asset_no=solver.asset_no)
ABSolver = mc.BasketAmeSolver(high_val=2, low_val=0, asset_no=solver.asset_no)
ASolver = mc.AmeMCSolver(high_val=2, low_val=0, asset_no=solver.asset_no)
def plot(options, solvers, Msolvers, with_cursor=False):
fig = plt.figure(figsize=(15, 8))
ax = plt.axes()
price = solver(b)
MCprice = Msolver(b)
ax.plot(solver.asset_samples, price, label='AnalyticSol')
# for opt, sol in zip(options, solvers):
# ax.plot(solver.asset_samples, sol(opt)[0], label=type(
# sol).__name__ + type(opt).__name__)
for opt, sol in zip(options, Msolvers):
ax.plot(solver.asset_samples, sol(opt), label=type(
sol).__name__ + type(opt).__name__)
ax.legend(loc='best')
if with_cursor:
cursor = Cursor(ax, useblit=True, linewidth=2)
plt.show()
plt.gcf()
# plot([b1, c], [], [ASolver, ABSolver])
# print(solver.asset_samples.flatten(), price[:, 0])
# Msolver3(c)
# print(Msolver3.asset_samples.shape,
# Msolver3.time_samples.shape, Msolver3(c).shape)
# plt.show()
# plt.gcf()
# print(ABSolver(c), ABSolver.asset_samples.squeeze(
# 1).sum(axis=1).shape, ABSolver.time_samples.shape)
# print(Msolver3.asset_samples.squeeze(1))
# corr_mat = cholesky(np.array([[1, 0.2], [0.2, 1]]))
# asset = np.dot(Msolver3.asset_samples.squeeze(1), corr_mat)
# print(np.power(asset, 2).sum(axis=1).shape)
# a = np.arange(30)
# b = np.arange(40).reshape(20, 1, 2).squeeze(1)
# print([str(np.tile(b, (1, 30, 1)[x, y]))
# for x, y in zip(range(20), range(30))])
# bStr = [', '.join(row) for row in b.astype(str)]
# print(f('a', 'b', 'c'))
# print(np.tile(bStr, (10, 1)))
|
# PLOTS THE FIELD GENERATED BY THE PF COILS
# Andre Torres
#18.01.19
from field import getPFFlux2
from coilDefinitions import PF0, PF1, PF2, v, ht, hb
import matplotlib.pyplot as plt
import numpy as np
#%matplotlib qt4
def plotPF(PF, title ="", probes=True):
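    # Plots the field magnitude of one PF coil set (colour bar in mT/A) as filled
    # contours with field-line streamlines; draws the limiter and, if probes=True, the probe coils.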
nr=50
nz=50
if probes:
rlim=[0.5,0.75]
zlim=[-0.1,0.1]
else:
rlim=[0.25,0.65]
zlim=[-0.2,0.2]
R=np.linspace(rlim[0],rlim[1], nr)
Z=np.linspace(zlim[0],zlim[1], nz)
Br=np.zeros(shape=(nz,nr))
Bz=np.zeros(shape=(nz,nr))
for i in range(len(R)):
for j in range(len(Z)):
Br[j][i], Bz[j][i] = getPFFlux2(R[i],Z[j],PF, biotSavart=False)
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
plt.xlabel("R [m]")
plt.ylabel("Z [m]")
plt.xlim(rlim)
plt.ylim(zlim)
plt.title(title)
limiter = plt.Circle((0.46, 0), 0.0935, color='k', linestyle="-", fill=False, linewidth=3)
ax.add_artist(limiter)
if probes:
vcoil=plt.plot([v.r-v.w/2., v.r+v.w/2.], [v.z,v.z], "r", linewidth=2)
htcoil=plt.plot([ht.r, ht.r], [ht.z-ht.w/2.,ht.z+ht.w/2.], "g",linewidth=2)
hbcoil=plt.plot([hb.r, hb.r], [hb.z-hb.w/2.,hb.z+hb.w/2.], "b",linewidth=2)
ctr=plt.contourf(R,Z,np.sqrt(Bz**2+Br**2)*1e3, cmap="GnBu")#, levels=np.linspace(-1e-2,0,200)
plt.streamplot(R,Z,Br,Bz, color="k")
cbar=plt.colorbar(ctr)
cbar.set_label("[mT/A]")
ax.set_aspect('equal')
plotPF(PF0[0],"Primary, PF0")
plotPF(PF0[1],"Vertical, PF0")
plotPF(PF0[2],"Horizontal, PF0")
plotPF(PF0[0],"Primary", False)
plotPF(PF0[1],"Vertical PF", False)
plotPF(PF0[2],"Horizontal PF", False)
if __name__ == '__main__':
    plt.show()
|
import json
import uuid
from enum import Enum, IntEnum
from typing import Optional
from fastapi import HTTPException
from pydantic import BaseModel
from sqlalchemy import text
from sqlalchemy.exc import NoResultFound
from sqlalchemy.sql.expression import select
from .db import engine
class InvalidToken(Exception):
"""指定されたtokenが不正だったときに投げる"""
class SafeUser(BaseModel):
"""token を含まないUser"""
id: int
name: str
leader_card_id: int
class Config:
orm_mode = True
def create_user(name: str, leader_card_id: int) -> str:
"""Create new user and returns their token"""
token = str(uuid.uuid4())
    # NOTE: if the generated token collides, this needs to be retried.
with engine.begin() as conn:
result = conn.execute(
text(
"INSERT INTO `user` (name, token, leader_card_id) VALUES (:name, :token, :leader_card_id)"
),
{"name": name, "token": token, "leader_card_id": leader_card_id},
)
# print(result)
return token
def _get_user_by_token(conn, token: str) -> Optional[SafeUser]:
    # TODO: implement
result = conn.execute(
text("SELECT `id`, `name`, `leader_card_id` FROM `user` WHERE `token`=:token"),
dict(token=token),
)
try:
row = result.one()
except NoResultFound:
return None
return SafeUser.from_orm(row)
def get_user_by_token(token: str) -> Optional[SafeUser]:
with engine.begin() as conn:
return _get_user_by_token(conn, token)
def update_user(token: str, name: str, leader_card_id: int) -> None:
    # This code is to be implemented (exercise).
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
if user is None:
raise InvalidToken
conn.execute(
text(
"UPDATE `user` SET `name` = :name, `leader_card_id` = :leader_card_id WHERE `id` = :id"
),
dict(name=name, leader_card_id=leader_card_id, id=user.id),
)
# Room
RoomMaxUserCount = 4
class LiveDifficulty(IntEnum):
normal = 1
hard = 2
class JoinRoomResult(IntEnum):
Ok = 1
RoomFull = 2
Disbanded = 3
OtherError = 4
class WaitRoomStatus(IntEnum):
Waiting = 1
LiveStart = 2
Dissoution = 3
class RoomInfo(BaseModel):
room_id: int
live_id: int
joined_user_count: int
max_user_count: int
class RoomUser(BaseModel):
user_id: int
name: str
leader_card_id: int
select_difficulty: LiveDifficulty
is_me: bool
is_host: bool
class ResultUser(BaseModel):
user_id: int
judge_count_list: list[int]
score: int
def _add_user_in_room(
conn, room_id: int, user_id: int, select_difficulty: LiveDifficulty
):
result = conn.execute(
text(
"INSERT INTO `room_user` (room_id, user_id, select_difficulty) VALUES (:room_id, :user_id, :select_difficulty)"
),
{
"room_id": room_id,
"user_id": user_id,
"select_difficulty": int(select_difficulty),
},
)
def create_room_with_host(
token: str, live_id: int, select_difficulty: LiveDifficulty
) -> int:
"""Create new user and returns their token"""
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
if user is None:
raise InvalidToken
result = conn.execute(
text(
"INSERT INTO `room` (live_id, status, owner) VALUES (:live_id, :status, :owner)"
),
{
"live_id": live_id,
"status": int(WaitRoomStatus.Waiting),
"owner": user.id,
},
)
room_id = result.lastrowid
_add_user_in_room(conn, room_id, user.id, select_difficulty)
return room_id
def list_room(live_id: int) -> list[RoomInfo]:
with engine.begin() as conn:
if live_id == 0:
result = conn.execute(
text(
"""
SELECT r.`id`, r.`live_id`, COUNT(*)
FROM `room` r JOIN `room_user` ru
ON r.`id` = ru.`room_id`
WHERE r.`status` = :status
GROUP BY r.`id`
"""
),
{"status": int(WaitRoomStatus.Waiting), "live_id": live_id},
)
else:
result = conn.execute(
text(
"""
SELECT r.`id`, r.`live_id`, COUNT(*)
FROM `room` r JOIN `room_user` ru
ON r.`id` = ru.`room_id`
WHERE r.`status` = :status AND r.`live_id` = :live_id
GROUP BY r.`id`
"""
),
{"status": int(WaitRoomStatus.Waiting), "live_id": live_id},
)
ret = []
for row in result:
ret.append(
RoomInfo(
room_id=row.id,
live_id=row.live_id,
joined_user_count=row["COUNT(*)"],
max_user_count=4,
)
)
return ret
def join_room(
token: str, room_id: int, select_difficulty: LiveDifficulty
) -> JoinRoomResult:
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
if user is None:
raise InvalidToken
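        # Lock the room row and its membership count with SELECT ... FOR UPDATE so
        # that concurrent joins cannot push the room past RoomMaxUserCount.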
room_status = conn.execute(
text(
"""SELECT status
FROM `room`
WHERE id=:room_id
FOR UPDATE"""
),
{"room_id": room_id},
).one()
room_user_count = conn.execute(
text(
"""SELECT COUNT(*)
FROM `room_user`
WHERE `room_id` = :room_id
GROUP BY `room_id`
FOR UPDATE"""
),
{"room_id": room_id},
).one()
if room_user_count["COUNT(*)"] >= RoomMaxUserCount:
return JoinRoomResult.RoomFull
elif room_status.status != WaitRoomStatus.Waiting:
return JoinRoomResult.Disbanded
_add_user_in_room(conn, room_id, user.id, select_difficulty)
conn.execute(text("COMMIT"))
return JoinRoomResult.Ok
def wait_room(room_id: int, token: str) -> tuple[WaitRoomStatus, list[RoomUser]]:
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
if user is None:
raise InvalidToken
room = conn.execute(
text("SELECT `status`, `owner` FROM `room` WHERE id = :room_id"),
{"room_id": room_id},
).one()
result = conn.execute(
text(
"""SELECT u.`id`, u.`name`, u.`leader_card_id`, ru.`select_difficulty`
FROM `user` u JOIN `room_user` ru
ON u.`id` = ru.`user_id`
WHERE ru.`room_id` = :room_id"""
),
{"room_id": room_id},
)
ret = []
for row in result:
ret.append(
RoomUser(
user_id=row.id,
name=row.name,
leader_card_id=row.leader_card_id,
select_difficulty=row.select_difficulty,
is_me=True if user.id == row.id else False,
is_host=True if room.owner == row.id else False,
)
)
return room.status, ret
def _update_room_status(conn, room_id: int, status: WaitRoomStatus):
result = conn.execute(
text(
"""UPDATE `room`
SET `status` = :status
WHERE `id` = :room_id"""
),
{
"status": int(status),
"room_id": room_id,
},
)
def start_room(token: str, room_id: int):
with engine.begin() as conn:
# user = _get_user_by_token(conn, token)
# if user is None:
# raise InvalidToken
# result = conn.execute(
# text(
# """
# SELECT `owner`
# FROM `room`
# WHERE `id` = :room_id
# """
# ),
# {"room_id": room_id},
# )
# if result.one().owner != user.id:
# raise InvalidToken
conn.execute(
text(
"""SELECT `status`
FROM `room`
WHERE `id`=:room_id
FOR UPDATE"""
),
{"room_id": room_id},
)
_update_room_status(conn, room_id, WaitRoomStatus.LiveStart)
conn.execute(text("COMMIT"))
def end_room(token: str, room_id: int, judge: list[int], score: int):
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
_update_room_status(conn, room_id, WaitRoomStatus.Dissoution)
result = conn.execute(
text(
"""UPDATE `room_user`
SET `score` = :score, `judge_count_list` = :judge
WHERE `room_id`=:room_id and `user_id` = :user_id"""
),
{
"room_id": room_id,
"user_id": user.id,
"score": score,
"judge": ",".join(map(str, judge)),
# decoding judge: list(map(int, judge.split(",")))
},
)
def result_room(room_id: int) -> list[ResultUser]:
with engine.begin() as conn:
result = conn.execute(
text(
"""SELECT `user_id`, `judge_count_list`, `score`
FROM `room_user`
WHERE `room_id` = :room_id"""
),
{"room_id": room_id},
)
try:
rows = result.all()
except NoResultFound:
return []
ret = []
for row in rows:
if row.score is None:
return []
ret.append(
ResultUser(
user_id=row.user_id,
judge_count_list=list(map(int, row.judge_count_list.split(","))),
score=row.score,
)
)
return ret
def leave_room(token: str, room_id: int):
with engine.begin() as conn:
user = _get_user_by_token(conn, token)
if user is None:
raise InvalidToken
conn.execute(
text(
"""SELECT status
FROM `room`
WHERE `id`=:room_id
FOR UPDATE"""
),
{"room_id": room_id},
)
count_result = conn.execute(
text(
"""SELECT COUNT(*)
FROM `room_user`
WHERE `room_id` = :room_id
GROUP BY `room_id`
FOR UPDATE"""
),
{"room_id": room_id},
).one()
count = count_result["COUNT(*)"]
conn.execute(
text(
"""DELETE FROM `room_user`
WHERE `room_id`=:room_id AND `user_id`=:user_id"""
),
{
"room_id": room_id,
"user_id": user.id,
},
)
if count == 1:
_update_room_status(conn, room_id, WaitRoomStatus.Dissoution)
conn.execute(text("COMMIT"))
|
import os
import subprocess
prefix = 'raw/'
for id in range(1,105):
filetoread = prefix + 'id_'+str(id)+'.txt'
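    # Each raw file starts with header lines: line 1 ends in the year, line 3 in the
    # number of stages, line 4 in the total km, line 5 in one more numeric field; the
    # following result lines (up to line 15) are reformatted into comma-separated rows.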
with open(filetoread, "r") as ins:
linecounter = 0
for line in ins:
linecounter += 1
if linecounter == 1:
year = int(line[-5:])
print(year,end=''),
print(',',end=''),
if linecounter == 3:
nstages = int(line[-3:])
print(nstages,end=''),
print(',',end=''),
if linecounter == 4:
nkm = line[-6:]
nkm = nkm.replace(" ","")
nkm = int(nkm)
print(nkm,end=''),
print(',',end=''),
if linecounter == 5:
nkm = line[-7:]
nkm = float(nkm)
print(nkm)
if linecounter > 15: # drivers
continue
if not(len(line)==1 or len(line)==14): # skip last line
wasname = False
wasriderno = False
wasteam = False
wastime = False
wasgap = False
wasfirst = False
lastisachar = False
lastchar = ''
last2char = ''
isnumberlast3 = False
isnumberlast2 = False
isnumberlast1 = False
isnumber = False
print(str(year)+", ",end=''),
flag = False
col = 0
skipnext = 0
haveskipped = False
for char in line:
if skipnext>0:
skipnext -= 1
continue
if haveskipped and char.isalpha():
haveskipped = False
print(char,end= "")
continue
col += 1
if char=="\'" and col< 50:
continue
if char=="," and col< 50:
continue
if char.isspace():
print(" ",end=''),
continue
if char=="*":
lastisanumber = False
lastisachar = True
print(',',end='')
skipnext = 5
haveskipped = True
continue
isachar = char.isalpha() or char=="*" or char==")" or char=="."
isanumber = char.isdigit()
isnumberlast3 = isnumberlast2
isnumberlast2 = isnumberlast1
isnumberlast1 = isanumber
if lastchar=='\'' and last2char =='\'':
break
if isachar and lastisanumber:
if not(char=="h"):
print(",",end=''),
if char=="\'":
if lastchar=="\'":
print('s',end=''),
else:
print('m',end=''),
if lastisachar and isanumber:
if not(lastchar=="h"):
if not(lastchar=="F" and char=="1" and last2char=="D"):
print(",",end=''),
lastisachar = isachar
lastisanumber = isanumber
last2char = lastchar
lastchar = char;
if not(char=="*"):
print(char,end=''),
if lastchar=='\'' and last2char =='\'':
if id==16:
if not(flag):
flag = True
else:
print(",",end=''),
print(",",end=''),
#if char=="+":
# print(",",end=''),
# if linecounter==16:
# print(" ,+ 00h 00\' 00\'\'",end=''),
print("")
|
# Generated by Django 2.1.7 on 2019-03-03 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topic', '0008_auto_20190216_1712'),
]
operations = [
migrations.AlterField(
model_name='topic',
name='pin',
field=models.BooleanField(default=False, help_text='push pin topic will sort at first', verbose_name='Pushpin topic'),
),
]
|
from django.conf import settings
from django.contrib.auth.models import User
from theJekyllProject.models import (
Page, Post, PostCategory, SiteData, SiteSocialProfile, SiteTheme, Repo
)
from github import Github
from markdown2 import Markdown
import html2markdown
import os
import re
import shutil
import subprocess
def assign_boolean_to_comments(comments):
if(comments == 'on'):
return True
else:
return False
def save_post_database(repo, author, comments, date, time, layout, title,
content, pk=None):
if pk is not None:
post = Post.objects.get(pk=pk)
post.author = author
post.comments = comments
post.date = date
post.time = time
post.layout = layout
post.title = title
post.content = content
post.save()
else:
post = Post(
repo=repo,
author=author,
comments=comments,
date=date,
time=time,
layout=layout,
title=title,
content=content,
)
post.save()
return post
def save_page_database(repo, title, permalink, content, pk=None):
if pk is not None:
page = Page.objects.get(pk=pk)
page.title = title
page.permalink = permalink
page.content = content
page.save()
else:
page = Page(
repo=repo,
title=title,
permalink=permalink,
content=content,
)
page.save()
return page
def save_post_category_database(post, category, pk=None):
if pk is not None:
# FIXME use filter instead of get
post = Post.objects.get(pk=pk)
try:
post_category = PostCategory.objects.get(post=post)
except PostCategory.DoesNotExist:
post_category = ''
else:
post_category.category = category
post_category.save()
else:
post_category = PostCategory(
post=post,
category=category
)
post_category.save()
def create_file_name(date, title):
title = title.lower().replace(' ', '-')
file_name = str(date) + '-' + title + '.markdown'
return file_name
def header_content(author=None, comments=None, date=None, time=None,
layout=None, title=None):
string = '---\n'
if(author is not None):
string += 'author: ' + author + '\n'
if(comments is not None):
comments = str(comments).lower()
string += 'comments: ' + comments + '\n'
if(date is not None):
string += 'date: ' + date
if(time is not None):
string += ' ' + time + '\n'
if(layout is not None):
string += 'layout: ' + layout + '\n'
if(title is not None):
string += 'title: ' + title + '\n'
title = title.lower()
slug = title.replace(' ', '-')
string += 'slug: ' + slug + '\n'
string += '---\n'
return string
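# Example with hypothetical values: header_content(author='jane', comments=True,
# date='2019-03-03', time='14:17:00', layout='post', title='Hello World') yields
# front matter of the form:
# ---
# author: jane
# comments: true
# date: 2019-03-03 14:17:00
# layout: post
# title: Hello World
# slug: hello-world
# ---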
def page_header_content(title=None, permalink=None):
string = '---\n'
layout = 'page'
string += 'layout: ' + layout + '\n'
if(title is not None):
string += 'title: ' + title + '\n'
if(permalink is not None):
string += 'permalink: ' + permalink + '\n'
string += '---\n'
return string
def convert_content(content):
return html2markdown.convert(content)
def write_file(user, repo, file_name, head_content, body_content):
base_dir = settings.BASE_DIR
file = open(base_dir + '/../JekLog/' + user.username + '/' + repo.repo +
'/_posts/' + file_name, 'w+')
file.write(head_content + body_content)
file.close()
def write_page_file(file_name, user, repo, head_content, body_content):
base_dir = settings.BASE_DIR
file = open(base_dir + '/../JekLog/' + user.username + '/' +
repo.repo + '/' + file_name + '.md', 'w+')
file.write(head_content + body_content)
file.close()
def push_online(user, repo):
base_dir = settings.BASE_DIR
subprocess.Popen(['/bin/bash', base_dir + '/../' + 'gitsendupstream.sh',
user.username, repo.repo, base_dir])
def save_site_data(repo, title=None, description=None, avatar=None):
site_data = SiteData(
repo=repo,
title=title,
description=description,
avatar=avatar
)
site_data.save()
def save_site_theme_data(repo, theme=None):
site_theme = SiteTheme(
repo=repo,
theme=theme
)
site_theme.save()
def create_config_file(user, repo):
user = User.objects.get(username=user.username)
try:
site_data = SiteData.objects.get(repo=repo)
title = site_data.title
description = site_data.description
# FIXME Check avatar properly
avatar = site_data.avatar
except:
title = 'your name'
description = 'Web Developer from Somewhere'
# FIXME Create avatar properly
#avatar = ''
try:
site_social_profile = SiteSocialProfile.objects.get(user=user)
dribbble = site_social_profile.dribbble
email = site_social_profile.email
facebook = site_social_profile.facebook
flickr = site_social_profile.flickr
github = site_social_profile.github
instagram = site_social_profile.instagram
linkedin = site_social_profile.linkedin
pinterest = site_social_profile.pinterest
rss = site_social_profile.rss
twitter = site_social_profile.twitter
stackoverflow = site_social_profile.stackoverflow
youtube = site_social_profile.youtube
googleplus = site_social_profile.googleplus
disqus = site_social_profile.disqus
google_analytics = site_social_profile.google_analytics
except:
dribbble = ''
email = ''
facebook = ''
flickr = ''
github = ''
instagram = ''
linkedin = ''
pinterest = ''
rss = ''
twitter = ''
stackoverflow = ''
youtube = ''
googleplus = ''
disqus = ''
google_analytics = ''
try:
        site_theme = SiteTheme.objects.get(repo=repo)
theme = site_theme.theme
except:
theme = 'jekyll-theme-cayman'
base_dir = settings.BASE_DIR
with open(base_dir + '/../' + 'JekLog/' + user.username + '/' + repo.repo + '/' + '_config.yml', 'r') as conf_file:
file_data = conf_file.read()
title_data = re.findall(r'name:.+', file_data)
description_data = re.findall(r'description:.+', file_data)
avatar_data = re.findall(r'avatar:.+', file_data)
dribbble_data = re.findall(r'dribbble:.+|dribbble:', file_data)
email_data = re.findall(r'email:.+|email:', file_data)
facebook_data = re.findall(r'facebook:.+|facebook:', file_data)
flickr_data = re.findall(r'flickr:.+|flickr:', file_data)
github_data = re.findall(r'github:.+|github:', file_data)
instagram_data = re.findall(r'instagram:.+|instagram:', file_data)
linkedin_data = re.findall(r'linkedin:.+|linkedin:', file_data)
pinterest_data = re.findall(r'pinterest:.+|pinterest:', file_data)
rss_data = re.findall(r'rss:.+|rss:', file_data)
twitter_data = re.findall(r'twitter:.+|twitter:', file_data)
stackoverflow_data = re.findall(r'stackoverflow:.+|stackoverflow:', file_data)
youtube_data = re.findall(r'youtube:.+|youtube:', file_data)
googleplus_data = re.findall(r'googleplus:.+|googleplus:', file_data)
disqus_data = re.findall(r'disqus:.+|disqus:', file_data)
google_analytics_data = re.findall(r'google_analytics:.+|google_analytics:', file_data)
theme_data = re.findall(r'theme:.+|theme:', file_data)
file_data = file_data.replace(title_data[0], 'name: ' + title)
file_data = file_data.replace(description_data[0], 'description: ' + description)
#file_data = file_data.replace(avatar_data[0], 'avatar: ' + avatar)
file_data = file_data.replace(dribbble_data[0], 'dribbble: ' + dribbble)
file_data = file_data.replace(email_data[0], 'email: ' + email)
file_data = file_data.replace(facebook_data[0], 'facebook: ' + facebook)
file_data = file_data.replace(flickr_data[0], 'flickr: ' + flickr)
file_data = file_data.replace(github_data[0], 'github: ' + github)
file_data = file_data.replace(instagram_data[0], 'instagram: ' + instagram)
file_data = file_data.replace(linkedin_data[0], 'linkedin: ' + linkedin)
file_data = file_data.replace(pinterest_data[0], 'pinterest: ' + pinterest)
file_data = file_data.replace(rss_data[0], 'rss: ' + rss)
file_data = file_data.replace(twitter_data[0], 'twitter: ' + twitter)
file_data = file_data.replace(stackoverflow_data[0], 'stackoverflow: ' + stackoverflow)
file_data = file_data.replace(youtube_data[0], 'youtube: ' + youtube)
file_data = file_data.replace(googleplus_data[0], 'googleplus: ' + googleplus)
file_data = file_data.replace(disqus_data[0], 'disqus: ' + disqus)
file_data = file_data.replace(google_analytics_data[0], 'google_analytics: ' + google_analytics)
file_data = file_data.replace(theme_data[0], 'theme: ' + theme)
with open(base_dir + '/../' + 'JekLog/' + user.username + '/' + repo.repo + '/' + '_config.yml', 'w') as conf_file:
conf_file.write(file_data)
def get_repo_list(token):
g = Github(token)
repositories_name = []
for repo in g.get_user().get_repos():
repositories_name.append(repo.name)
return repositories_name
def save_repo_data(user, repo):
repo = Repo(
user=user,
repo=repo,
main=True
)
repo.save()
# Now set all other repo `main` to False for the given user
all_repos = Repo.objects.filter(user=user)
current_repo = Repo.objects.get(id=repo.id)
for repo in all_repos:
        if repo.id != current_repo.id:
repo.main = False
repo.save()
def create_repo(user, repo):
user = User.objects.get(username=user.username)
social = user.social_auth.get(provider='github')
user_token = social.extra_data['access_token']
g = Github(user_token)
user = g.get_user()
repo = user.create_repo(repo)
def copy_jekyll_files(user, repo_name):
base_dir = settings.BASE_DIR
dest_path = '/'.join(['JekLog', user.username, repo_name])
dest_path = base_dir + '/../' + dest_path
source_path = '/'.join(['JekyllNow', 'jekyll-now'])
source_path = base_dir + '/../' + source_path
shutil.copytree(source_path, dest_path)
def add_theme_name(user, repo_name):
base_dir = settings.BASE_DIR
with open(base_dir + '/../' + 'JekLog/' + user.username + '/' + repo_name + '/' + '_config.yml', 'a') as conf_file:
conf_file.write('theme: jekyll-theme-cayman')
def read_all_pages(user, repo_name):
"""read_all_pages will put all the pages information into the database
Example:
No need to click any button this is the default behaviour.
TODO:
* Read all .md files in the root directory of the blog code.
* Leave the README and 404 file.
* Process all other files and put things into Page model.
"""
base_dir = settings.BASE_DIR
for file in os.listdir(base_dir + "/../JekLog/" + user.username + "/" + repo_name):
if file.endswith(".md"):
if(str(file) != 'README.md' and str(file) != '404.md'):
with open(base_dir+ '/../JekLog/' + user.username + '/' + repo_name + '/' + str(file)) as page_file:
file_data = page_file.read()
title = re.findall(r'title:.+', file_data)
permalink = re.findall(r'permalink:.+', file_data)
page_text = ''
temp = 0
list_file_data = file_data.split('\n')
for line in list_file_data:
if(temp==2):
page_text += line + '\n'
if(temp == 1):
if(line == '---'):
temp=2
if(line == '---' and temp==0):
temp=1
title = title[0].replace('title: ', '')
permalink = permalink[0].replace('permalink: ', '')
repo = Repo.objects.get(main=True, user=user)
markdowner = Markdown()
page_text = markdowner.convert(page_text)
page = Page(repo=repo, title=title, permalink=permalink, content=page_text)
page.save()
def change_site_baseurl(user, repo_name):
"""
Deprecated: new eg: jekyllnow.handlers.jekyllnow_handlers.update_baseurl
"""
base_dir = settings.BASE_DIR
with open(base_dir + '/../' +'JekLog/' + user.username + '/' + repo_name + '/' + '_config.yml', 'r') as conf_file:
filedata = conf_file.read()
filedata = filedata.replace('baseurl: ""', 'baseurl: "/' + repo_name + '"')
with open(base_dir + '/../' + 'JekLog/' + user.username + '/' + repo_name + '/' + '_config.yml', 'w') as conf_file:
conf_file.write(filedata)
def run_git_script(user, repo_name):
base_dir = settings.BASE_DIR
user = User.objects.get(username=user.username)
social = user.social_auth.get(provider='github')
user_token = social.extra_data['access_token']
subprocess.Popen(['/bin/bash', base_dir + '/../' + 'gitscript.sh', user.username, repo_name, user_token, base_dir])
def select_main_site(user, pk):
"""select_main_site to select the following repo as the main site.
Example:
Triggers when:
User chooses another repo to be considered as the main repo
Tasks:
* Find all repos of the user.
* Get the current repo using the primary key
* Change the attribute main to True
* Set all the other repo's main attribute to False
"""
all_repos = Repo.objects.filter(user=user)
current_repo = Repo.objects.get(pk=pk)
current_repo.main = True
current_repo.save()
for repo in all_repos:
        if repo.id != current_repo.id:
repo.main = False
repo.save()
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs operating on explorations that can be used for production tests.
To use these jobs, you first need to register them in jobs_registry (at
the moment they are not listed there, to avoid accidental use).
"""
from core import jobs
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import user_services
from core.platform import models
import feconf
(base_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration])
class ExpCopiesRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
"""Realtime model class for ExpCopiesAggregator."""
pass
class ExpCopiesAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job creating 10 published copies of every
existing exploration, with the eid being '[old_eid]copy[copy_number]',
title 'Copy' and category 'Copies'.
"""
@classmethod
def get_event_types_listened_to(cls):
"""Returns the list of events that this class subscribes to.
Returns:
list. An empty list of events.
"""
return []
@classmethod
def _get_realtime_datastore_class(cls):
return ExpCopiesRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return ExpCopiesMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class ExpCopiesMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""A continuous-computation job creating 10 published copies of every
existing exploration, with the eid being '[old_eid]copy[copy_number]',
title 'Copy' and category 'Copies'.
"""
@classmethod
def _get_continuous_computation_class(cls):
"""Returns the ExpCopiesAggregator class associated with this MapReduce
job.
"""
return ExpCopiesAggregator
@classmethod
def entity_classes_to_map_over(cls):
"""Returns a list of datastore class references to map over."""
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
"""Yields the id of each newly-created exploration copy, together with
its YAML representation.
Args:
item: ExplorationModel. An exploration storage model.
Yields:
str. The string containing exploration id of each newly-created
exploration. It is of the format:
<exp_id>copy<copy_number>
"""
if ExpCopiesMRJobManager._entity_created_before_job_queued(item):
for count in range(10):
yield ('%scopy%d' % (item.id, count),
exp_services.get_exploration_from_model(item).to_yaml())
@staticmethod
def reduce(exp_id, list_of_exps):
"""Saves and publishes the newly created copy of the existing
exploration.
Args:
exp_id: str. The exploration id.
list_of_exps: list(str). The list containing newly-created
exploration copies in the YAML representation.
"""
for stringified_exp in list_of_exps:
exploration = exp_domain.Exploration.from_untitled_yaml(
exp_id, 'Copy', 'Copies', stringified_exp)
exp_services.save_new_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration)
system_user = user_services.get_system_user()
rights_manager.publish_exploration(
system_user, exp_id)
# Job to delete all copied explorations.
class DeleteExpCopiesRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
"""Realtime model class for DeleteExpCopiesAggregator."""
pass
class DeleteExpCopiesAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job deleting all explorations in category
'Copies'.
"""
@classmethod
def get_event_types_listened_to(cls):
"""Returns the list of events that this class subscribes to.
Returns:
list. An empty list of events.
"""
return []
@classmethod
def _get_realtime_datastore_class(cls):
return DeleteExpCopiesRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return DeleteExpCopiesMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class DeleteExpCopiesMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Job that deletes all explorations in category 'Copies'."""
@classmethod
def _get_continuous_computation_class(cls):
"""Returns the DeleteExpCopiesAggregator class associated with this
MapReduce job.
"""
return DeleteExpCopiesAggregator
@classmethod
def entity_classes_to_map_over(cls):
"""Returns a list of datastore class references to map over."""
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
"""Deletes the exploration with the given id if it is of the 'Copies'
category.
Args:
item: ExplorationModel. An exploration storage model.
"""
if item.category == 'Copies':
exp_services.delete_exploration(
feconf.SYSTEM_COMMITTER_ID, item.id, force_deletion=True)
@staticmethod
def reduce(exp_id, list_of_exps):
"""Null reduce method (not used)."""
pass
|
import logging
import argparse
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import random
import numpy as np
import paddle
from visualdl import LogWriter
from tqdm import tqdm
import paddle.nn.functional as F
from models.modeling import VisionTransformer
from utils.data_utils import get_loader
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
# layer_state_dict = emb.state_dict()
model_to_save = model.state_dict() #.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.pdparams" % args.name)
paddle.save(model_to_save, model_checkpoint)
def setup(args):
# Prepare model
# config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
if args.dataset == "imagenet":
num_classes=1000
model = VisionTransformer()
model_PATH = "data/data105204/cifar100-224_checkpoint.pdparams"
model_state_dict = paddle.load(model_PATH)
model.set_dict(model_state_dict)
return args, model
class kl_loss(paddle.nn.Layer):
def __init__(self):
super(kl_loss, self).__init__()
self.cross_entropy_loss = paddle.nn.CrossEntropyLoss()
def forward(self, p, q, label):
ce_loss = 0.5 * (self.cross_entropy_loss(p, label) + self.cross_entropy_loss(q, label))
kl_loss = self.compute_kl_loss(p, q)
# carefully choose hyper-parameters
loss = ce_loss + 0.3 * kl_loss
return loss
def compute_kl_loss(self, p, q):
p_loss = F.kl_div(F.log_softmax(p, axis=-1), F.softmax(q, axis=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, axis=-1), F.softmax(p, axis=-1), reduction='none')
# You can choose whether to use function "sum" and "mean" depending on your task
p_loss = p_loss.sum()
q_loss = q_loss.sum()
loss = (p_loss + q_loss) / 2
return loss
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
def valid(args, model, test_loader):
# Validation!
eval_losses = AverageMeter()
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
loss_fct = paddle.nn.CrossEntropyLoss()
for step, batch in enumerate(epoch_iterator):
x, y = batch
with paddle.no_grad():
logits = model(x)#[0]
eval_loss = loss_fct(logits, y)
eval_losses.update(eval_loss.item())
preds = paddle.argmax(logits, axis=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0
)
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0
)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
print("accuracy: {}".format(accuracy))
return accuracy
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring.", default="cifar100-test")
parser.add_argument("--dataset", choices=["cifar10", "cifar100","imagenet"], default="cifar100",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="data/data104692/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be written.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--train_batch_size", default=512, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=100, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--learning_rate", default=1e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0., type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--num_steps", default=100000, type=int,
                        help="Total number of training steps to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--alpha", default=0.3, type=float,
help="alpha for kl loss")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
# Set seed
set_seed(args)
# Model & Tokenizer Setup
args, model = setup(args)
train_loader, test_loader = get_loader(args)
accuracy = valid(args, model, test_loader)
# Training
if __name__ == "__main__":
main()
|
from TextGameModule import TextGame
# Game start
# To get the user's name, we need to build a welcome scene.
TextGame.clearScreen()
# print('Welcome to the text game world')
# name = input('Please enter your name: ')
# TextGame.clearScreen()
# print("Hello", name)
# print("The game is about to start...")
# TextGame.screenWait(500)
# TextGame.showString('abcdefg')
# input(':')
# # TextGame.showString('hijklmnop', 200)
options = 'test1 test2 test3 test4'.split()
TextGame.userInputOptions(options)
#Game Setting
# tg = TextGame(name)
# Opening scene
startScene = ""
# Scenes during the game
# tg.setScene('front',
# 'test',
# ['test1','test2','test3'],
# ['1','2'])
|
names = input().split(", ")
dict_names = {name: {} for name in names}
while True:
command = input()
if command == "End":
break
d_name, d_item, d_value = command.split("-")
if d_item not in dict_names[d_name]:
dict_names[d_name][d_item] = int(d_value)
# [print(f"{k} -> Items: {len(dict_names[k])}, Cost: {sum(dict_names[k].values())}") for k, v in dict_names.items()]
[print(f"{k} -> Items: {len(dict_names[k])}, Cost: {sum(dict_names[k].values())}") for k in dict_names]
# for k, v in dict_names.items():
# print(f"{k} -> Items: {len(dict_names[k])}, Cost: {sum(dict_names[k].values())}")
# # Variant with a dictionary and lists as values:
# names = input().split(", ")
# dict_names = {name: {} for name in names}
#
# while True:
# command = input()
# if command == "End":
# break
# d_name, d_item, d_value = command.split("-")
# if d_name in dict_names:
# if d_item not in dict_names[d_name]:
# dict_names[d_name][d_item] = int(d_value)
#
# key_sum = 0
# for key, value in dict_names.items():
# for k, v in value.items():
# key_sum += v
#
# print(f"{key} -> Items: {len(value)}, Cost: {key_sum}")
|
#!/usr/bin/env python3
from yateto import *
def add(generator, dim, nbf, Nbf, nq, Nq, petsc_alignment):
J_Q = Tensor('J_Q', (Nq,))
Jinv_Q = Tensor('Jinv_Q', (Nq,))
G_Q = Tensor('G_Q', (dim, dim, Nq))
K = Tensor('K', (Nbf,))
K_Q = Tensor('K_Q', (Nq,))
W = Tensor('W', (Nq,))
E_Q = Tensor('E_Q', (Nbf, Nq))
matE_Q_T = Tensor('matE_Q_T', (Nq, Nbf))
Dxi_Q = Tensor('Dxi_Q', (Nbf, dim, Nq))
Dx_Q = Tensor('Dx_Q', Dxi_Q.shape())
A = Tensor('A', (Nbf, Nbf))
M = Tensor('M', (Nbf, Nbf))
MinvRef = Tensor('MinvRef', (Nbf, Nbf))
MinvWA = Tensor('MinvWA', (Nbf, Nbf))
matM = Tensor('matM', (Nbf, Nbf))
generator.add('massMatrix', M['kl'] <= E_Q['kq'] * W['q'] * J_Q['q'] * E_Q['lq'])
generator.add('MinvWA', MinvWA['kl'] <=
MinvRef['kr'] * W['q'] * Jinv_Q['q'] * E_Q['rq'] * E_Q['sq'] * MinvRef['sl'])
generator.add('project_K_lhs', matM['kl'] <= matE_Q_T['qk'] * W['q'] * J_Q['q'] * matE_Q_T['ql'])
generator.add('project_K_rhs', K['k'] <= K_Q['q'] * matE_Q_T['qk'] * W['q'] * J_Q['q'])
generator.add('Dx_Q', Dx_Q['kiq'] <= G_Q['eiq'] * Dxi_Q['keq'])
generator.add('assembleVolume',
A['kl'] <= J_Q['q'] * W['q'] * K['m'] * matE_Q_T['qm'] * Dx_Q['kiq'] * Dx_Q['liq']
)
G_q = Tensor('G_q', (dim, dim, nq))
n_q = Tensor('n_q', (dim, nq))
n_unit_q = Tensor('n_unit_q', (dim, nq))
nl_q = Tensor('nl_q', (nq,))
w = Tensor('w', (nq,))
E_q = [Tensor('E_q({})'.format(x), (Nbf, nq)) for x in range(2)]
matE_q_T = Tensor('matE_q_T', (nq, Nbf))
Dxi_q = [Tensor('Dxi_q({})'.format(x), (Nbf, dim, nq)) for x in range(2)]
K_Dx_q = [Tensor('K_Dx_q({})'.format(x), (Nbf, dim, nq)) for x in range(2)]
a = [[Tensor('a({},{})'.format(x, y), (Nbf, Nbf))
for y in range(2)] for x in range(2)]
c0 = [Scalar('c0{}'.format(x)) for x in range(2)]
c1 = [Scalar('c1{}'.format(x)) for x in range(2)]
c2 = [Scalar('c2{}'.format(x)) for x in range(2)]
Lift = [Tensor('Lift({})'.format(x), (Nbf, dim, Nbf)) for x in range(2)]
L_q = [Tensor('L_q({})'.format(x), (Nbf, nq)) for x in range(2)]
Minv = [Tensor('Minv({})'.format(x), (Nbf, Nbf)) for x in range(2)]
K_q = [Tensor('K_q({})'.format(x), (nq,)) for x in range(2)]
generator.add('K_Dx_q', K_Dx_q[0]['kiq'] <= K['m'] * matE_q_T['qm'] * G_q['eiq'] * Dxi_q[0]['keq'])
generator.add('K_q', K_q[0]['q'] <= K['m'] * matE_q_T['qm'])
generator.addFamily('lift_ip', simpleParameterSpace(2), \
lambda x: L_q[x]['lq'] <= E_q[x]['lq'] * nl_q['q'])
generator.addFamily('lift_skeleton', simpleParameterSpace(2), lambda x: [
Lift[0]['liu'] <= 0.5 * Minv[0]['us'] * E_q[0]['sq'] * E_q[x]['lq'] * n_q['iq'] * w['q'],
Lift[1]['liv'] <= 0.5 * Minv[1]['vs'] * E_q[1]['sq'] * E_q[x]['lq'] * n_q['iq'] * w['q'],
L_q[x]['lq'] <= 0.5 * n_q['iq'] * ( \
K_q[0]['q'] * E_q[0]['uq'] * Lift[0]['liu'] + \
K_q[1]['q'] * E_q[1]['vq'] * Lift[1]['liv'])
])
generator.add('lift_boundary', [
Lift[0]['liu'] <= Minv[0]['us'] * E_q[0]['sq'] * E_q[0]['lq'] * n_q['iq'] * w['q'],
L_q[0]['lq'] <= n_q['iq'] * K_q[0]['q'] * E_q[0]['uq'] * Lift[0]['liu']
])
def surface(x, y):
return a[x][y]['kl'] <= c0[y] * w['q'] * K_Dx_q[x]['kiq'] * n_q['iq'] * E_q[y]['lq'] + \
c1[x] * w['q'] * K_Dx_q[y]['liq'] * n_q['iq'] * E_q[x]['kq'] + \
c2[abs(y-x)] * w['q'] * E_q[x]['kq'] * L_q[y]['lq']
generator.addFamily('assembleSurface', simpleParameterSpace(2, 2), surface)
b = Tensor('b', (Nbf,), alignStride=petsc_alignment)
F_Q = Tensor('F_Q', (Nq,))
generator.add('rhsVolume', b['k'] <= b['k'] + J_Q['q'] * W['q'] * E_Q['kq'] * F_Q['q'])
f_q = Tensor('f_q', (nq,))
f_lifted = [Tensor('f_lifted({})'.format(x), (Nbf, dim)) for x in range(2)]
f_lifted_q = Tensor('f_lifted_q', (nq,))
generator.add('rhs_lift_ip', f_lifted_q['q'] <= nl_q['q'] * f_q['q'])
generator.add('rhs_lift_boundary', [
f_lifted[0]['li'] <= Minv[0]['lm'] * E_q[0]['mq'] * f_q['q'] * w['q'] * n_q['iq'],
f_lifted_q['q'] <= n_q['iq'] * K_q[0]['q'] * E_q[0]['lq'] * f_lifted[0]['li']
])
generator.add('rhs_lift_skeleton', [
f_lifted[0]['li'] <= 0.5 * Minv[0]['lm'] * E_q[0]['mq'] * f_q['q'] * w['q'] * n_q['iq'],
f_lifted[1]['li'] <= 0.5 * Minv[1]['lm'] * E_q[1]['mq'] * f_q['q'] * w['q'] * n_q['iq'],
f_lifted_q['q'] <= 0.5 * n_q['iq'] * ( \
K_q[0]['q'] * E_q[0]['lq'] * f_lifted[0]['li'] + \
K_q[1]['q'] * E_q[1]['lq'] * f_lifted[1]['li'] \
)
])
generator.add('rhsFacet', b['k'] <= b['k'] + \
c1[0] * w['q'] * K_Dx_q[0]['kiq'] * n_q['iq'] * f_q['q'] + \
c2[0] * w['q'] * E_q[0]['kq'] * f_lifted_q['q'])
# matrix-free
U = Tensor('U', (Nbf,), alignStride=petsc_alignment)
U_ext = Tensor('U_ext', (Nbf,), alignStride=petsc_alignment)
U_new = Tensor('U_new', (Nbf,), alignStride=petsc_alignment)
u_hat_q = Tensor('u_hat_q', (nq,))
sigma_hat_q = Tensor('sigma_hat_q', (dim, nq))
E_Q_T = Tensor('E_Q_T', (Nq, Nbf))
negative_E_Q_T = Tensor('negative_E_Q_T', (Nq, Nbf))
E_q_T = [Tensor('E_q_T({})'.format(x), (nq, Nbf)) for x in range(2)]
negative_E_q_T = [Tensor('negative_E_q_T({})'.format(x), (nq, Nbf)) for x in range(2)]
Dxi_q_120 = [Tensor('Dxi_q_120({})'.format(x), (dim, nq, Nbf)) for x in range(2)]
J_W_K_Q = Tensor('J_W_K_Q', (Nq,))
K_G_q = [Tensor('K_G_q({})'.format(x), (dim, dim, nq)) for x in range(2)]
generator.add('J_W_K_Q', J_W_K_Q['q'] <= J_Q['q'] * W['q'] * K['m'] * matE_Q_T['qm'])
generator.add('K_G_q', K_G_q[0]['eiq'] <= K['m'] * matE_q_T['qm'] * G_q['eiq'])
generator.add('flux_u_skeleton',
u_hat_q['q'] <= 0.5 * (negative_E_q_T[0]['ql'] * U['l'] + E_q_T[1]['ql'] * U_ext['l']))
generator.add('flux_u_boundary', u_hat_q['q'] <= negative_E_q_T[0]['ql'] * U['l'])
generator.add('flux_sigma_skeleton', sigma_hat_q['pq'] <= 0.5 *
(K_G_q[0]['epq'] * Dxi_q_120[0]['eql'] * U['l']
+ K_G_q[1]['epq'] * Dxi_q_120[1]['eql'] * U_ext['l']) +
c0[0] * (E_q_T[0]['ql'] * U['l'] + negative_E_q_T[1]['ql'] * U_ext['l']) * n_unit_q['pq'])
generator.add('flux_sigma_boundary', sigma_hat_q['pq'] <=
K_G_q[0]['epq'] * Dxi_q_120[0]['eql'] * U['l']
+ c0[0] * E_q_T[0]['ql'] * U['l'] * n_unit_q['pq'])
generator.add('apply_volume', [
Dx_Q['krq'] <= Dxi_Q['keq'] * G_Q['erq'],
U_new['k'] <= J_W_K_Q['q'] * Dx_Q['krq'] * Dx_Q['lrq'] * U['l']
])
generator.add('apply_facet', U_new['k'] <= U_new['k'] + w['q'] * n_q['rq'] * (
u_hat_q['q'] * K_G_q[0]['erq'] * Dxi_q[0]['keq'] -
E_q[0]['kq'] * sigma_hat_q['rq']
))
# traction
u = [Tensor('u({})'.format(x), (Nbf,), alignStride=petsc_alignment) for x in range(2)]
grad_u = Tensor('grad_u', (dim, nq))
generator.add('grad_u',
grad_u['pq'] <= 0.5 * (K_Dx_q[0]['lpq'] * u[0]['l'] + K_Dx_q[1]['lpq'] * u[1]['l']) +
c0[0] * (E_q[0]['lq'] * u[0]['l'] - E_q[1]['lq'] * u[1]['l'] - f_q['q']) * n_unit_q['pq'])
generator.add('grad_u_bnd',
grad_u['pq'] <= K_Dx_q[0]['lpq'] * u[0]['l'] +
c0[0] * (E_q[0]['lq'] * u[0]['l'] - f_q['q']) * n_unit_q['pq'])
|
import numpy as anp
def metadata(A):
return anp.shape(A), anp.ndim(A), anp.result_type(A), anp.iscomplexobj(A)
def unbroadcast(x, target_meta, broadcast_idx=0):
target_shape, target_ndim, dtype, target_iscomplex = target_meta
while anp.ndim(x) > target_ndim:
x = anp.sum(x, axis=broadcast_idx)
for axis, size in enumerate(target_shape):
if size == 1:
x = anp.sum(x, axis=axis, keepdims=True)
if anp.iscomplexobj(x) and not target_iscomplex:
x = anp.real(x)
return x
def unbroadcast_f(target, f):
target_meta = metadata(target)
return lambda g: unbroadcast(f(g), target_meta)
def balanced_eq(x, z, y):
return (x == z) / (1.0 + (x == y))
fn = lambda ans, x, y : unbroadcast_f(x, lambda g: g * balanced_eq(x, ans, y))
|
from codecs import open
from os import path, listdir
import re
from setuptools import setup, find_packages
from setuptools.command.install import install
here = path.abspath(path.dirname(__file__))
NAME = "mockquitto"
class InstallWithBabel(install):
def run(self):
self.babel_compile()
super().run()
def babel_compile(self):
from babel.messages.frontend import compile_catalog
compiler = compile_catalog(self.distribution)
option_dict = self.distribution.get_option_dict('compile_catalog')
compiler.domain = [option_dict['domain'][1]]
compiler.directory = option_dict['directory'][1]
compiler.run()
# super().run()
def get_release() -> str:
version = get_version()
return ".".join(version.split('.')[:2])
def get_version() -> str:
filehash = {}
with open("{}/version.py".format(get_name())) as fp:
exec(fp.read(), filehash)
return filehash['__version__']
def get_name() -> str:
return NAME
def read(fname):
with open(path.join(here, fname), encoding='utf-8', mode='r') as f:
return f.read()
setup(
name=get_name(),
version=get_version(),
description='A sample Python project',
long_description=read("README.rst"),
url='https://github.com/Samsung-IoT-Academy/mockquitto',
author='Georgiy Odisharia',
author_email='math.kraut.cat@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Communications',
'Topic :: Internet',
],
keywords='mqtt',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
cmdclass={
'install': InstallWithBabel,
},
command_options={
'build_sphinx': {
'project': ('setup.py', get_name()),
'release': ('setup.py', get_release()),
'version': ('setup.py', get_version()),
},
},
install_requires=[
'hbmqtt>=0.9.1'
],
python_requires="~=3.4",
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
include_package_data=True,
entry_points={
'console_scripts': [
'mockquitto-server = mockquitto.scripts.broker:main',
'mockquitto-async-generator = mockquitto.scripts.mqtt_generator_asyncio:main',
],
},
)
|
import scandir
import sys
import os
import re
DEP_RE = re.compile(r"(\s*\.*)(\w+\()")
'''
run from root dir of project
'''
EXCLUDE_DIR = ["venv", ".ipynb_checkpoints","ipynb"]
EXCLUDE_FILE = ["__init__", "auto_doc"]
EXCLUDE_DEPS = ["print","list","enumerate","Exception"]
EXCLUDE_ARG = ["self"]
def get_annotation_list(line_list, idx):
annotation_list = []
if line_list[idx-1].strip().startswith("@"):
annotation_list.append(line_list[idx-1].strip())
annotation_list = annotation_list + get_annotation_list(line_list, idx-1)
return annotation_list
def get_f_args(line):
try:
arg_csv = line.split("(")[1].split(")")[0]
except IndexError:
return []
arg_list = [a.strip() for a in arg_csv.split(",") if a != ""]
arg_list = [a for a in arg_list if a not in EXCLUDE_ARG]
return arg_list
def get_thingy(type_of_thingy, line_list, idx, pause_characters=False):
# print(f"{idx}|{pause_characters}|{line_list[idx]}")
return_list = []
if pause_characters == True and ("'''" in line_list[idx] or '"""' in line_list[idx]):
if idx+1 < len(line_list):
return_list = get_thingy(type_of_thingy, line_list, idx+1, pause_characters=False)
elif pause_characters == True and not ("'''" in line_list[idx] or '"""' in line_list[idx]):
if idx+1 < len(line_list):
return_list = get_thingy(type_of_thingy, line_list, idx+1, pause_characters=pause_characters)
elif pause_characters == False and ("'''" in line_list[idx] or '"""' in line_list[idx]):
if idx+1 < len(line_list):
return_list = get_thingy(type_of_thingy, line_list, idx+1, pause_characters=True)
else:
if line_list[idx].startswith(" ") and line_list[idx].strip().split(" ")[0] == str(type_of_thingy):
try:
return_list.append(line_list[idx].strip().split("{} ".format(str(type_of_thingy)))[1])
except IndexError:
pass
if idx + 1 < len(line_list)\
and not line_list[idx+1].strip().startswith("class") \
and not line_list[idx+1].strip().startswith("def"):
return_list = return_list + get_thingy(type_of_thingy, line_list, idx+1)
return return_list
def get_attributes(line_list, idx):
attr_list = []
if " self." in line_list[idx] or ",self." in line_list[idx]:
attr_list = [element for element in line_list[idx].split(" ") if element.strip().startswith("self.")]
attr_list = [element.split("=")[0].strip() for element in attr_list]
mstr_attr_list = []
for attr_l in attr_list:
mstr_attr_list += [element.strip() for element in attr_l.split(",")]
attr_list = [element for element in attr_list if not element.strip().endswith(")")]
if idx + 1 < len(line_list) \
and not line_list[idx+1].strip().startswith("class"):
attr_list = attr_list + get_attributes(line_list, idx+1)
attr_list = [x for x in attr_list if "(" not in x]
return attr_list
def get_deps(line_list, idx):
    # Dependency extraction is currently disabled (see the commented-out
    # dep_list calls in document_module below); always return None for now.
    return None
return_list = []
if not line_list[idx].strip().startswith("def") and not line_list[idx].strip().startswith("class") \
and ")" in line_list[idx].split("#")[0].strip() and "(" in line_list[idx].split("#")[0].strip():
re_matches = re.findall(string=line_list[idx].split("#")[0].strip(), pattern=DEP_RE)
return_list = [thing[1].strip("(").strip(".").strip() for thing in re_matches]
if idx + 1 < len(line_list) and line_list[idx+1].startswith(" ") \
and not line_list[idx+1].strip().startswith("class") \
and not line_list[idx+1].strip().startswith("def"):
return_list = return_list + get_deps(line_list, idx+1)
return_list = [x for x in return_list if x not in EXCLUDE_DEPS]
return_list = list(set(return_list))
return return_list
def get_import_list(line_list):
imports = []
for line in line_list:
if line.startswith('import'):
import_list = line.replace("import","").strip().split(",")
import_list = [x.strip() for x in import_list]
imports += import_list
elif (line.startswith('from') and 'import' in line):
import_list = line.replace("from","").strip().split("import")[-1].split(",")
first_part = line.replace("from","").strip().split("import")[0]
import_list = [first_part + x.strip() for x in import_list]
import_list = [x.replace(" ",".").replace(":","") for x in import_list]
imports += import_list
return imports
def write_lines(file_obj, header, iter_list):
if iter_list:
sys.stdout.write("\n\n##### {}".format(str(header)))
file_obj.write("\n\n##### {}".format(str(header)))
for i in iter_list:
this_line = "\n- {}".format(str(i))
sys.stdout.write(this_line)
file_obj.write(this_line)
def write_data(file_obj, current_module=None, dep_list=None, annotation_list=None, current_class=None,\
inheritance_list=None, attribute_list=None, current_function=None,
args_list=None, return_list=None, exception_list=None):
full_module_parts_list = list(filter(lambda x: x, [current_module,current_class,current_function]))
full_module_str = ".".join(full_module_parts_list)
full_module_str = "\n\n#### {}".format(str(full_module_str))
sys.stdout.write(full_module_str)
file_obj.write(full_module_str)
write_lines(file_obj, header="Inherits from", iter_list=inheritance_list)
write_lines(file_obj, header="Attributes", iter_list=attribute_list)
write_lines(file_obj, header="Dependencies", iter_list=dep_list)
write_lines(file_obj, header="Arguments", iter_list=args_list)
write_lines(file_obj, header="Returns", iter_list=return_list)
write_lines(file_obj, header="Exceptions", iter_list=exception_list)
write_lines(file_obj, header="Annotations", iter_list=annotation_list)
sys.stdout.flush()
def write_import_list(file_obj, import_list):
write_lines(file_obj, header="Imports", iter_list=import_list)
def document_module(module_path, file_obj):
    # str.rstrip(".py") strips characters, not the suffix, so remove ".py" explicitly
    stripped_path = module_path[:-3] if module_path.endswith(".py") else module_path
    module_name = stripped_path.replace("./","").replace(".","").replace("\\",".").replace("/",".").lstrip(".")
file_obj.write("\n\n# {}\n".format(str(module_name)))
sys.stdout.write("\n\n# {}\n".format(str(module_name)))
with open(module_path, "r") as f:
current_class = None
current_function = None
current_module = module_name
line_list = f.readlines()
line_list = [element for element in line_list if not element.strip().startswith("#")]
import_list = get_import_list(line_list)
write_import_list(file_obj, import_list)
for idx, line in enumerate(line_list):
line = line.replace("\n","")
if line.strip() == "":
continue
#determine current class:
if line.startswith("class"):
current_class = line.split("class ")[1].split("(")[0]
inheritance_list = get_f_args(line)
attribute_list = get_attributes(line_list, idx)
write_data(file_obj, current_module=current_module, dep_list=None, annotation_list=None, current_class=current_class,\
inheritance_list=inheritance_list, attribute_list=attribute_list, current_function=None,
args_list=None, return_list=None, exception_list=None)
else:
inheritance_list = None
attribute_list = None
#determine class inheritance
#determine current function (and if function is part of a class)
if line.startswith("def"):
current_class = None
current_function = line.split("def ")[1].split("(")[0]
args_list = get_f_args(line)
return_list = get_thingy("return", line_list, idx)
exception_list = get_thingy("raise", line_list, idx)
annotation_list = get_annotation_list(line_list, idx)
#dep_list = get_deps(line_list, idx)
dep_list = None
write_data(file_obj, current_module, dep_list, annotation_list, current_class, inheritance_list, attribute_list, current_function, args_list, return_list, exception_list)
elif line.startswith(" ") and "def " in line:
current_function = line.split("def ")[1].split("(")[0]
args_list = get_f_args(line)
return_list = get_thingy("return", line_list, idx)
exception_list = get_thingy("raise", line_list, idx)
annotation_list = get_annotation_list(line_list, idx)
#dep_list = get_deps(line_list, idx)
dep_list = None
write_data(file_obj, current_module, dep_list, annotation_list, current_class, inheritance_list, attribute_list, current_function, args_list, return_list, exception_list)
def scany_stuff(scan_path, file_obj):
for element in scandir.scandir(scan_path):
if element.is_dir():
matches = list(filter(lambda x: x in element.path, EXCLUDE_DIR))
if not matches:
scany_stuff(element.path, file_obj)
elif element.is_file():
if element.name.endswith(".py") and element.name.split(".")[0] not in EXCLUDE_FILE:
document_module(element.path, file_obj)
if __name__=="__main__":
with open("docs.md","w") as file_obj:
scany_stuff(os.curdir,file_obj=file_obj)
|
from django.urls import path
from . import views
app_name = "news"
urlpatterns = [
path("", views.IndexView.as_view(), name="index"),
path(
"<int:year>/<int:month>/<int:day>/<slug:slug>/",
views.ArticleView.as_view(),
name="article",
),
]
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# ThemeSpecimens.py
#
#   This ThemeSpecimens.py shows stacked specimens of word sets (sports, cities,
#   birds, fruits, tools and coffees), each pair of sets shown in its own font.
#
from random import shuffle
from pagebotnano.constants import (A4, LEFT, RIGHT, SPORTS, BIRDS, TOOLS,
CITIES, FRUITS, COFFEES)
from pagebotnano.document import Document
from pagebotnano.elements import Rect, Text, Stacked
from pagebotnano.toolbox.color import Color
from pagebotnano.babelstring import BabelString
S = 4 # Scaling for Instagram
W, H = 1000*S, 1000*S
FONT = 'Georgia'
GUTTER_Y = 10*S
GUTTER_X = 40*S
PAD = 50*S
labelSize=15*S
labelColor = Color(0.8)
labelFontName = 'Upgrade-Medium'
SETS = (
# Choose other fonts that exist on your computer here.
(SPORTS, 'Sports', CITIES, 'Cities', 'Upgrade-Medium', 'Upgrade-MediumItalic', True),
(BIRDS, 'Birds', FRUITS, 'Fruits', 'Upgrade-UltraBlack', 'Upgrade-UltraBlackItalic', True),
(TOOLS, 'Tools', COFFEES, 'Coffees', 'Upgrade-Bold', 'Upgrade-BoldItalic', True),
)
for words1, title1, words2, title2, fontName1, fontName2, capsOnly in SETS:
doc = Document(w=W, h=H)
words1 = list(words1)
words2 = list(words2)
shuffle(words1)
shuffle(words2)
page = doc.newPage()
page.padding = PAD
cw = (page.pw-GUTTER_X)/2
e = Stacked([title1] + words1, font=fontName1, x=page.pl, y=page.pt,
w=cw, h=page.ph, gh=GUTTER_Y, capsOnly=capsOnly, fill=0.9)
page.addElement(e)
e = Stacked([title2] + words2, font=fontName2, x=page.pl+cw+GUTTER_X, y=page.pt,
w=cw, h=page.ph, gh=GUTTER_Y, capsOnly=capsOnly, fill=0.9)
page.addElement(e)
footNoteStyle = dict(font=labelFontName, tracking=0.5,
fontSize=labelSize, fill=labelColor, align=LEFT)
bs = BabelString('TYPETR '+fontName1, footNoteStyle)
e = Text(bs, x=page.pl, y=page.pb/2, w=page.pw)
page.addElement(e)
footNoteStyle['align'] = RIGHT
bs = BabelString('Generated by PageBotNano', footNoteStyle)
e = Text(bs, x=page.pl+page.pw, y=page.pb/2, w=page.pw)
page.addElement(e)
doc.export('_export/Stacked-%s-%s.jpg' % (title1, title2), multipage=True) # Instagram
print('Done')
|
from sqlalchemy import create_engine, Column, Integer, String, Numeric
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
import settings
DeclarativeBase = declarative_base()
def create_competitor_prices_table(engine):
DeclarativeBase.metadata.create_all(engine)
def db_connect():
"""
Performs database connection using settings from settings.py.
Returns sqlalchemy engine instance.
"""
return create_engine(URL(**settings.DATABASE))
class CompetitorPrices(DeclarativeBase):
"""
sqlalchemy competitor_prices model
"""
__tablename__ = "competitor_prices"
product_id = Column(Integer, primary_key = True)
product_name = Column('product_name', String, nullable = True)
brand = Column('brand', String, nullable = True)
price_high = Column('price_high', Numeric(10, 2), nullable = True)
price_low = Column('price_low', Numeric(10, 2), nullable = True)
|
# Generated by Django 3.1.5 on 2021-01-31 17:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('network', '0004_auto_20210128_2324'),
]
operations = [
migrations.AlterField(
model_name='follower',
name='followers',
field=models.ManyToManyField(blank=True, related_name='following', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='follower',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followed_user', to=settings.AUTH_USER_MODEL),
),
]
|
import numpy as np
def vertical_grid(h,dz):
"""
Description:
Computes vertical grid for column model.
Input:
h : depth of ocean [ m ]
dz : size of grid box [ m ]
Output:
zt : vertical grid for temperature/salinity [ m ]
zw : vertical grid for velocity [ m ]
nzt : size of temperature/salinity vertical grid
nzw : size of velocity vertical grid
"""
nzt = int(h/dz)
nzw = nzt + 1
zt = np.arange(-h, 0, dz) + dz/2
zw = np.arange(-h, dz, dz)
return zt, zw, nzt, nzw
|
from .util import find_keywords
# putting the old parser back in here for now until there's a solution
# making Automat faster
from .spaghetti import FSM, State, Transition
class MicrodescriptorParser(object):
"""
    Parses microdescriptors line by line. New relays are emitted via
the 'create_relay' callback.
"""
def __init__(self, create_relay):
self._create_relay = create_relay
self._relay_attrs = None
class die(object):
            __name__ = 'die'  # FIXME? just to ease spaghetti.py:82's pain
def __init__(self, msg):
self.msg = msg
def __call__(self, *args):
raise RuntimeError(self.msg % tuple(args))
waiting_r = State("waiting_r")
waiting_w = State("waiting_w")
waiting_p = State("waiting_p")
waiting_s = State("waiting_s")
def ignorable_line(x):
x = x.strip()
return x in ['.', 'OK', ''] or x.startswith('ns/')
waiting_r.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_r.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin))
# FIXME use better method/func than die!!
waiting_r.add_transition(Transition(waiting_r, lambda x: not x.startswith('r '), die('Expected "r " while parsing routers not "%s"')))
waiting_s.add_transition(Transition(waiting_w, lambda x: x.startswith('s '), self._router_flags))
waiting_s.add_transition(Transition(waiting_s, lambda x: x.startswith('a '), self._router_address))
waiting_s.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_s.add_transition(Transition(waiting_r, lambda x: not x.startswith('s ') and not x.startswith('a '), die('Expected "s " while parsing routers not "%s"')))
waiting_s.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
waiting_w.add_transition(Transition(waiting_p, lambda x: x.startswith('w '), self._router_bandwidth))
waiting_w.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_w.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin)) # "w" lines are optional
waiting_w.add_transition(Transition(waiting_r, lambda x: not x.startswith('w '), die('Expected "w " while parsing routers not "%s"')))
waiting_w.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
waiting_p.add_transition(Transition(waiting_r, lambda x: x.startswith('p '), self._router_policy))
waiting_p.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_p.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin)) # "p" lines are optional
waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] != 'p ', die('Expected "p " while parsing routers not "%s"')))
waiting_p.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
self._machine = FSM([waiting_r, waiting_s, waiting_w, waiting_p])
self._relay_attrs = None
def feed_line(self, line):
"""
A line has been received.
"""
self._machine.process(line)
def done(self, *args):
"""
All lines have been fed.
"""
self._maybe_callback_router()
def _maybe_callback_router(self):
if self._relay_attrs is not None:
self._create_relay(**self._relay_attrs)
self._relay_attrs = None
def _router_begin(self, data):
self._maybe_callback_router()
args = data.split()[1:]
self._relay_attrs = dict(
nickname=args[0],
idhash=args[1],
orhash=args[2],
modified=args[3] + ' ' + args[4],
ip=args[5],
orport=args[6],
dirport=args[7],
)
def _router_flags(self, data):
args = data.split()[1:]
self._relay_attrs['flags'] = args
def _router_address(self, data):
"""only for IPv6 addresses"""
args = data.split()[1:]
try:
self._relay_attrs['ip_v6'].extend(args)
except KeyError:
self._relay_attrs['ip_v6'] = list(args)
def _router_bandwidth(self, data):
args = data.split()[1:]
kw = find_keywords(args)
self._relay_attrs['bandwidth'] = kw['Bandwidth']
def _router_policy(self, data):
pass
|
# -*- coding: utf-8 -*-
#from datetime import datetime
# import time
# DateTime
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, \
create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker, relationship, joinedload, lazyload
engine = create_engine('mysql://nrs_app:HydrogenAlpha522#@DEVSFRPSQL1.mcap.com/newrecoverydb', echo=True,\
convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
#from orm.database import Base
class Task(Base):
"""
select t.name_ from jbpm_processdefinition d left join jbpm_task t on d.id_=t.PROCESSDEFINITION_ where d.id_=3655;
"""
__tablename__ = 'jbpm_task'
id = Column('id_', Integer, Sequence('id_'), primary_key=True)
name = Column('NAME_',String(255))
flow_definition_id = Column('PROCESSDEFINITION_', Integer, ForeignKey('jbpm_processdefinition.id_'))
def __repr__(self):
return "<Task('%d','%s')>" % (self.id, self.name)
class FlowDefinition(Base):
__tablename__ = 'jbpm_processdefinition'
id = Column('id_', Integer, Sequence('id_'), primary_key=True)
name = Column('NAME_',String(255))
version = Column('VERSION_', Integer)
def __repr__(self):
return "<FlowDefinition('%d','%s')>" % (self.id, self.name)
class Node(Base):
"""
select n.name_ from jbpm_processdefinition d left join jbpm_node n on d.id_=n.PROCESSDEFINITION_ where d.id_=3655;
"""
__tablename__ = 'jbpm_node'
id = Column('id_', Integer, Sequence('id_'), primary_key=True)
name = Column('NAME_',String(255))
flow_definition_id = Column('PROCESSDEFINITION_', Integer, ForeignKey('jbpm_processdefinition.id_'))
def __repr__(self):
return "<Node('%d','%s')>" % (self.id, self.name)
class Transition(Base):
"""
select t.id_, t.name_, n1.id_ as start_id, n1.name_ as start,n2.id_ as end_id, n2.name_ as end from jbpm_processdefinition d left join jbpm_transition t on d.id_=t.PROCESSDEFINITION_ left join jbpm_node n1 on t.from_=n1.id_ left join jbpm_node n2 on t.to_=n2.id_ where d.id_=3655;
"""
__tablename__ = 'jbpm_transition'
id = Column('id_', Integer, Sequence('id_'), primary_key=True)
name = Column('NAME_',String(255))
flow_definition_id = Column('PROCESSDEFINITION_', Integer, ForeignKey('jbpm_processdefinition.id_'))
start_node_id = Column('FROM_', Integer, ForeignKey('jbpm_node.id_'))
end_node_id = Column('TO_', Integer, ForeignKey('jbpm_node.id_'))
flow_definition = relationship("FlowDefinition")
start = relationship("Node", foreign_keys=[start_node_id],lazy='joined')
end = relationship("Node", foreign_keys=[end_node_id], lazy='joined')
def __repr__(self):
return "<Transition('%d','%s','%s','%s')>" % (self.id, self.name, self.start.name, self.end.name)
tasks = Task.query.filter(Task.flow_definition_id == 3655)
# for task in tasks:
# print task
#print task.id, task.name
transitions = Transition.query.filter(Transition.flow_definition_id == 3655)
for t in transitions:
if t.start.name == 'biddingMemo' or t.end.name=='mortgageSale':
        print(t)
|
"""
Field-like classes that aren't really fields. It's easier to use objects that
have the same attributes as fields sometimes (avoids a lot of special casing).
"""
from django.db.models import fields
class OrderWrt(fields.IntegerField):
"""
A proxy for the _order database field that is used when
Meta.order_with_respect_to is specified.
"""
def __init__(self, *args, **kwargs):
kwargs['name'] = '_order'
kwargs['editable'] = False
super().__init__(*args, **kwargs)
|
from converter.qiskit.transpiler._basepasses import TransformationPass
class CXCancellation(TransformationPass):
    def run(self, dag):
        pass
|
import vsketch
class PenElectrophoresisSketch(vsketch.SketchClass):
rows = vsketch.Param(250)
cols = vsketch.Param(25)
def draw(self, vsk: vsketch.Vsketch) -> None:
vsk.size("a4", landscape=False)
vsk.scale("cm")
vsk.penWidth('.3mm') # Sharpie Ultra Fine
for y in range(1, self.rows):
with vsk.pushMatrix():
for x in range(1, self.cols):
with vsk.pushMatrix():
random = vsk.random(.0001 * (x * y))
vsk.line(.5,random,0,random)
vsk.translate(.75, 0)
vsk.translate(0, .1)
def finalize(self, vsk: vsketch.Vsketch) -> None:
vsk.vpype("linemerge linesimplify reloop linesort")
if __name__ == "__main__":
PenElectrophoresisSketch.display()
|
import unittest
from conans.test.utils.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
class QbsGeneratorTest(unittest.TestCase):
def test(self):
client = TestClient()
client.run("new dep/0.1 -b")
client.run("create . user/testing")
pkg = GenConanfile("pkg", "0.1").with_requires("dep/0.1@user/testing")
client.save({"conanfile.py": pkg}, clean_first=True)
client.run("create . user/testing")
client.run("install pkg/0.1@user/testing -g=qbs")
qbs = client.load("conanbuildinfo.qbs")
self.assertIn('Depends { name: "dep" }', qbs)
|
from __future__ import print_function
import sys
from setuptools import setup
from Cython.Build import cythonize
with open('requirements.txt') as f:
INSTALL_REQUIRES = [l.strip() for l in f.readlines() if l]
try:
import numpy
except ImportError:
print('numpy is required during installation')
sys.exit(1)
try:
import scipy
except ImportError:
print('scipy is required during installation')
sys.exit(1)
setup(name='pyarff',
version='0.0.1',
description='A cythonized ARFF reader/writer for python',
author='Raghav R V',
packages=['pyarff'],
ext_modules=cythonize('pyarff/*.pyx', language='c++'),
include_dirs=[numpy.get_include()],
install_requires=['numpy', 'scipy'],
author_email='rvraghav93@gmail.com',
)
|
# Generated by Django 3.1.7 on 2021-04-24 13:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20210424_1632'),
]
operations = [
migrations.AlterModelOptions(
name='auto_brands',
options={'managed': True},
),
migrations.AlterModelTable(
name='auto_brands',
table='Auto_brands',
),
]
|
import sys
import os
import signal
from getopt import getopt
from .helper import TujianHelper
from .tools import printSort, getToday, getArchive, getAll, printByPID, getByPID
from .upload import upoladPics
from . import print2
pars = sys.argv[1:]
try:
opt, par = getopt(pars, 'hp:', ['help', 'path='])
except:
TujianHelper(pars)
dir = './Tujian/'
path = os.path.abspath(dir)
def exitTujian(signum, frame):
    raise KeyboardInterrupt('Operation terminated by the user')
signal.signal(signal.SIGINT, exitTujian)
signal.signal(signal.SIGTERM, exitTujian)
for o, a in opt:
if o in ('-h', '--help'):
par2 = ['help'] + par
TujianHelper(par2)
sys.exit()
elif o in ('-p', '--path'):
path = os.path.join(a, 'Tujian')
if not os.path.isdir(path):
os.makedirs(path)
try:
key = par[0]
except IndexError:
TujianHelper(par)
sys.exit()
if key == 'help':
TujianHelper(par)
elif key == 'path':
print(path)
elif key == 'today':
getToday(path)
elif key == 'archive':
getArchive(par, path)
elif key == 'sort':
printSort()
elif key == 'all':
getAll(path)
elif key == 'info':
printByPID(par)
elif key == 'upload':
upoladPics(par)
elif key == 'get':
getByPID(par,path)
else:
    print2.error('Command not found')
    print('Use help for usage information')
sys.exit(1)
sys.exit()
|
# -*- coding: utf-8 -*-
"""
module containing the logic for the chart abstract base class
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from abc import abstractmethod
from itertools import chain
import numpy as np
from pyqtgraph import PlotWidget
from PyQt5.QtCore import QObject
from source.util import Assertor
class Chart(QObject):
"""
Chart abstract base class
"""
@abstractmethod
def __init__(self):
"""
Constructor / Instantiation of class
"""
super().__init__(parent=None)
self.name = self.__class__.__name__
@staticmethod
def clear_graphics(graphics_view: PlotWidget):
"""
static method for clearing content in all graphics
Parameters
----------
graphics_view : PlotWidget
graphics view to place chart
"""
Assertor.assert_data_types([graphics_view], [PlotWidget])
graphics_view.clear()
@staticmethod
def create_bins(x: list, y: list, bins: list):
"""
        method for creating bins for bar chart
Parameters
----------
x : list
x-values
y : list
y-value
bins : list
bins
Returns
-------
out : np.ndarray
bins for bar chart
"""
Assertor.assert_data_types([x, y, bins], [list, list, list])
bin_array = []
for i, value in enumerate(x):
if y[i] != 0:
bin_array.append([value] * y[i])
else:
bin_array.append([0])
return np.histogram(list(chain(*bin_array)), bins=bins)
|
import os
import glob
import shutil
import pathspec
import click
@click.command()
@click.argument('src',
metavar='<source>',
type=click.Path(exists=True,
dir_okay=True,
readable=True),
required=True,
nargs=1)
@click.argument('dst',
metavar='<destination>',
type=click.Path(exists=False),
required=True,
nargs=1)
@click.option('--overwrite',
'-ow',
help='Overwrite dst folder if exists',
is_flag=True)
@click.option('--git',
'-g',
help='Copy .git folder if exists',
is_flag=True)
def main(src, dst, overwrite, git):
"""This script copies folder <source> which must be a git repo
(i.e. containing a .gitignore file) to folder <destination>.
    Only those files not mentioned in .gitignore are copied.
If <overwrite> is set the <destination> folder contents
are overwritten.
If <git> is set the .git folder in <source> if any is copied to
<destination>.
"""
click.echo('source = {}'.format(os.path.abspath(src)))
click.echo('destination = {}'.format(os.path.abspath(dst)))
click.echo('src is a git repo ? {}'.format(is_git_repo(src)))
if os.path.exists(dst):
click.echo('destination folder exists')
if not overwrite or (overwrite and not click.confirm('overwrite ?')):
click.echo('abort')
return
else:
delete_folder(dst)
else:
click.echo('destination folder will be created')
click.echo('make copy')
ig_func = build_ignore_function(src, incl_git=git)
make_copy(src, dst, ignore_func=ig_func)
click.echo('done')
def is_git_repo(src):
"""
"""
path = os.path.join(src, '.git')
return os.path.exists(path)
def remove_empty_folders(path):
"""
remove empty directories
as shutil.copytree cannot do it
"""
for (_path, _dirs, _files) in os.walk(path, topdown=False):
# skip remove
if _files:
continue
if '.git' in _path:
continue
try:
delete_folder(_path)
except OSError as e:
print('error :', e)
def make_copy(src, dst, ignore_func=None):
"""
shutil copytree call
"""
shutil.copytree(src, dst, ignore=ignore_func)
remove_empty_folders(dst)
def delete_folder(drc):
"""
shutil rmtree
"""
shutil.rmtree(drc)
def build_ignore_function(src, incl_git=False):
path = os.path.join(src, '.gitignore')
spec_src = []
if os.path.isfile(path):
with open(path, 'r') as f:
# .gitignore lines
spec_src = [e.rstrip() for e in f.readlines()]
# .git folder
if not incl_git:
spec_src.append('.git/*')
spec = pathspec.PathSpec.from_lines('gitwildmatch', spec_src)
# data to recompose path relative to source
abs_src = os.path.abspath(src)
len_abs_src = len(abs_src) + 1
def ig_f(curr_dir, files):
"""
ignore function to be used in shutil.copytree
"""
def path_rel_src(f):
"""
build path relative to source
necessary to use pathspec
"""
abs_f = os.path.abspath(os.path.join(curr_dir, f))
path = abs_f[len_abs_src:]
return path
if os.path.basename(curr_dir) == 'node_modules':
ignored_files = files
else:
ignored_files = [f for f in files
if spec.match_file(path_rel_src(f))]
return ignored_files
return ig_f
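# Hedged usage sketch: the click command above is normally run from a shell,
# e.g. `python copy_repo.py ./myrepo ./mycopy --overwrite` (script name and
# paths are hypothetical). The helpers can also be called directly:
def _example_direct_copy(src='./myrepo', dst='./mycopy'):
    """Copy src to dst, skipping .git/ and anything matched by src/.gitignore."""
    ig_func = build_ignore_function(src, incl_git=False)
    make_copy(src, dst, ignore_func=ig_func)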
|
import cupy
def test_bytes():
out = cupy.random.bytes(10)
assert isinstance(out, bytes)
|
from django.apps import AppConfig
from django.core import checks
from .checks import storage_check
class S3FileConfig(AppConfig):
name = "s3file"
verbose_name = "S3File"
def ready(self):
from django import forms
from django.core.files.storage import FileSystemStorage, default_storage
from storages.backends.s3boto3 import S3Boto3Storage
from .forms import S3FileInputMixin
if (
isinstance(default_storage, (S3Boto3Storage, FileSystemStorage))
and S3FileInputMixin not in forms.ClearableFileInput.__bases__
):
forms.ClearableFileInput.__bases__ = (
S3FileInputMixin,
) + forms.ClearableFileInput.__bases__
elif S3FileInputMixin in forms.ClearableFileInput.__bases__:
forms.ClearableFileInput.__bases__ = tuple(
cls
for cls in forms.ClearableFileInput.__bases__
if cls is not S3FileInputMixin
)
checks.register(storage_check, checks.Tags.security, deploy=True)
|
# -*- coding: utf-8 -*-
from django.http import JsonResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# from django.template import RequestContext
from django import forms
from django.forms import ModelForm
from django.db.models import Q
from django.template.loader import render_to_string
from registro.models import Registro, Fichero
from gauss.rutas import *
# from django.views.decorators.csrf import csrf_exempt
# import simplejson as json
from django.http import HttpResponse
# from django.forms.formsets import formset_factory
from datetime import datetime
# from django.core.mail import EmailMessage
from autenticar.control_acceso import permiso_required
import mimetypes
mimetypes.add_type('application/vnd.openxmlformats-officedocument.wordprocessingml.document', '.docx')
class RegistroForm(ModelForm):
class Meta:
model = Registro
fields = ('asunto', 'texto', 'receptor', 'tipo', 'emisor', 'fecha')
widgets = {
'asunto': forms.TextInput(attrs={'size': '100', 'class': 'detectar'}),
'receptor': forms.TextInput(attrs={'size': '50', 'class': 'detectar'}),
'emisor': forms.TextInput(attrs={'size': '50', 'class': 'detectar'}),
'tipo': forms.Select(attrs={'class': 'detectar'}),
'texto': forms.Textarea(attrs={'cols': 80, 'rows': 8}),
'fecha': forms.TextInput(attrs={'size': '10', 'value': datetime.today().strftime("%d/%m/%Y")}),
}
# @permiso_required('acceso_absentismo')
def registro(request):
g_e = request.session['gauser_extra']
registros = Registro.objects.filter(entidad=g_e.ronda.entidad).order_by('num_id').reverse()[:8]
if request.method == 'POST':
if request.POST['action'] == 'save_registro' and g_e.has_permiso('crea_registros'):
try:
num_id = Registro.objects.filter(entidad=g_e.ronda.entidad).latest('num_id').num_id + 1
except:
num_id = 1000
            # Pre-fill a new record with the data that is not requested in the form
nuevo_registro = Registro(entidad=g_e.ronda.entidad, num_id=num_id, registrador=g_e.gauser)
            # Bind the form to the newly created record instance
form1 = RegistroForm(request.POST, prefix="texto", instance=nuevo_registro)
if form1.is_valid():
registro = form1.save()
for input_file, object_file in request.FILES.items():
for fichero in request.FILES.getlist(input_file):
archivo = Fichero.objects.create(fichero=fichero, content_type=fichero.content_type,
entidad=g_e.ronda.entidad)
registro.ficheros.add(archivo)
if request.POST['action'] == 'pdf_registro':
id_fich = request.POST['id_registro']
fichero = Fichero.objects.get(id=id_fich)
response = HttpResponse(open(RUTA_BASE + fichero.fichero.url, 'rb'), content_type=fichero.content_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % (fichero.fich_name)
return response
return render(request, "registro.html",
{
'iconos':
({'tipo': 'button', 'nombre': 'plus', 'texto': 'Nuevo',
'title': 'Crear un nuevo registro', 'permiso': 'crea_registros'},
{'tipo': 'button', 'nombre': 'check', 'texto': 'Aceptar',
'title': 'Grabar el nuevo registro',
'permiso': 'crea_registros'}),
'formname': 'registro',
'registros': registros,
})
@login_required()
def ajax_registros(request):
g_e = request.session['gauser_extra']
if request.is_ajax():
action = request.POST['action']
if action == 'busca_registros':
texto = request.POST['texto']
tipo = request.POST['tipo_busqueda']
try:
inicio = datetime.strptime(request.POST['id_fecha_inicio'], '%d-%m-%Y')
except:
inicio = datetime.strptime(request.POST['id_fecha_inicio'], '%d/%m/%Y')
try:
fin = datetime.strptime(request.POST['id_fecha_fin'], '%d-%m-%Y')
except:
fin = datetime.strptime(request.POST['id_fecha_fin'], '%d/%m/%Y')
registros = Registro.objects.filter(entidad=g_e.ronda.entidad, fecha__gte=inicio, fecha__lte=fin)
registros_contain_texto = registros.filter(
Q(asunto__icontains=texto) | Q(texto__icontains=texto) | Q(emisor__icontains=texto) | Q(
receptor__icontains=texto), ~Q(tipo=tipo))
num_registros = registros_contain_texto.count()
max_num_registros = 100
if num_registros > max_num_registros:
                html = '<b style="color:red;">%s matches were found.</b><b><br>' \
                       'The system is configured not to show results when the number ' \
                       'of matches exceeds</b> <b style="color:red;">%s</b>.<b><br>Type more text to' \
                       ' refine the search.</b>' % (
                    num_registros, max_num_registros)
return JsonResponse({'html': html, 'ok': True})
else:
html = render_to_string('registro_append.html', {'registros': registros_contain_texto})
return JsonResponse({'html': html, 'ok': True})
elif action == 'registro_append':
registro = Registro.objects.get(id=request.POST['id_registro'])
accordion = render_to_string('registro_append.html', {'registros': [registro]})
return HttpResponse(accordion)
elif action == 'delete_registro' and g_e.has_permiso('borra_registros'):
registro = Registro.objects.get(id=request.POST['id'])
            crear_aviso(request, True, u'Delete record executed: %s' % (registro.asunto))
registro.delete()
return HttpResponse(True)
elif action == 'add_registro' and g_e.has_permiso('crea_registros'):
form1 = RegistroForm(prefix="texto")
formulario = render_to_string('registro_add.html', {'form1': form1})
return HttpResponse(formulario)
|
from .graph import Graph
from .vertex import Vertex
import numpy as np
def generate_K(x):
spacing = 2*np.pi/(x)
g = Graph(monitor=False, figsize=(x, x))
for i in range(x):
g.add_vertex(Vertex(name=i, position=((x+3)*np.cos(spacing*i), (x+3)*np.sin(spacing*i)), radius=1))
for j in range(i):
g.add_edge(a=i, b=j)
return g
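# Hedged usage sketch: K_5, the complete graph on 5 vertices, laid out on a
# circle; it should end up with 5 * 4 / 2 = 10 edges.
def _example_complete_graph():
    g = generate_K(5)
    return g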
|
"""
Functions specific to the Vallei en Veluwe water board
"""
import math
import numpy as np
import geopandas as gpd
import pandas as pd
import fiona
from shapely.geometry import LineString
from tohydamogml.domeinen_damo_1_4 import *
from tohydamogml.read_database import read_filegdb
# Columns in DAMO to search for id of related object
DM_COL_CODEGERELATEERD_OBJECT = [
# 'COUPUREID',
'DUIKERSIFONHEVELID',
# 'FLEXIBELEWATERKERINGID',
'GEMAALID',
# 'SLUISID',
'STUWID',
# 'REGENWATERBUFFERCOMPARTIMENTID',
# 'TUNNELID',
# 'VISPASSAGEID'
]
DM_LAYERS = {
"hydroobject": "HydroObject",
"stuw": "Stuw",
"afsluitmiddel": "Afsluitmiddel",
"doorstroomopening": "Doorstroomopening",
"duikersifonhevel": "DuikerSifonHevel",
"gemaal": "Gemaal",
"brug": "Brug",
"bodemval": "Bodemval",
"aquaduct": "Aquaduct",
"afvoergebied": "AfvoergebiedAanvoergebied",
}
COL_OBJECTID = "OBJECTID"
def stuw_kruinbreedte(damo_gdf=None, obj=None, damo_kruinbreedte="KRUINBREEDTE",
damo_doorstroombreedte="DOORSTROOMBREEDTE",
damo_kruinvorm="WS_KRUINVORM"):
"""
    if KRUINBREEDTE is NULL and WS_KRUINVORM = 3 (rectangle) then KRUINBREEDTE = DOORSTROOMBREEDTE
"""
return damo_gdf.apply(
lambda x: _stuw_get_kruinbreedte_rechthoek(x[damo_kruinbreedte], x[damo_kruinvorm], x[damo_doorstroombreedte]),
axis=1)
def stuw_laagstedoorstroombreedte(damo_gdf=None, obj=None, damo_doorstroombreedte="DOORSTROOMBREEDTE",
damo_kruinvorm="WS_KRUINVORM"):
"""
    if LAAGSTEDOORSTROOMHOOGTE is NULL and WS_KRUINVORM = 3 (rectangle) then LAAGSTEDOORSTROOMBREEDTE = DOORSTROOMBREEDTE
"""
return damo_gdf.apply(
lambda x: _stuw_get_laagstedoorstroombreedte_rechthoek(x[damo_kruinvorm], x[damo_doorstroombreedte]), axis=1)
def _stuw_get_kruinbreedte_rechthoek(kruinbreedte: float, kruinvorm: float, doorstroombreedte: float,
kruinvorm_rechthoek=[3.0]):
"""
    if KRUINBREEDTE is NULL and WS_KRUINVORM = 3 (rectangle) then KRUINBREEDTE = DOORSTROOMBREEDTE
"""
if np.isnan(kruinbreedte):
if kruinvorm in kruinvorm_rechthoek:
return doorstroombreedte
else:
return kruinbreedte
def _stuw_get_laagstedoorstroombreedte_rechthoek(kruinvorm: float, doorstroombreedte: float, kruinvorm_rechthoek=[3.0]):
"""
    if LAAGSTEDOORSTROOMHOOGTE is NULL and WS_KRUINVORM = 3 (rectangle) then LAAGSTEDOORSTROOMBREEDTE = DOORSTROOMBREEDTE
"""
if kruinvorm in kruinvorm_rechthoek:
return doorstroombreedte
else:
return np.nan
def duikerhevelsifon_soortkokervormigeconstructiecode(damo_gdf=None, obj=None, damo_typekruising="TYPEKRUISING"):
return damo_gdf.apply(lambda x: _duikerhevelsifon_get_skvccode(x[damo_typekruising]), axis=1)
def _duikerhevelsifon_get_skvccode(damo_typekruising):
"""
Convert DAMO Typekruising to soortkokervormigeconstructiecode
"""
if TYPEKRUISING[damo_typekruising] == "duiker":
return 1
elif TYPEKRUISING[damo_typekruising] == "hevel":
return 2
elif TYPEKRUISING[damo_typekruising] == "sifon":
return 3
else:
return 999
def obj_soortmateriaal(damo_gdf=None, obj=None, damo_soortmateriaal="SOORTMATERIAAL"):
return damo_gdf.apply(lambda x: _obj_get_soortmateriaal(x[damo_soortmateriaal]), axis=1)
def _obj_get_soortmateriaal(materiaalcode):
"""Return Strickler Ks waarde
Bron: Ven te Chow - Open channel hydraulics tbl 5-6
TODO: omschrijven naar dictionary in config"""
if materiaalcode in MATERIAALKUNSTWERK.keys():
if MATERIAALKUNSTWERK[materiaalcode] == "beton":
return 75
if MATERIAALKUNSTWERK[materiaalcode] == "gewapend beton":
return 75
if MATERIAALKUNSTWERK[materiaalcode] == "metselwerk":
return 65
if MATERIAALKUNSTWERK[materiaalcode] == "metaal":
return 80
if MATERIAALKUNSTWERK[materiaalcode] == "aluminium":
return 80
if MATERIAALKUNSTWERK[materiaalcode] == "ijzer":
return 80
if MATERIAALKUNSTWERK[materiaalcode] == "gietijzer":
return 75
if MATERIAALKUNSTWERK[materiaalcode] == "PVC":
return 80
if MATERIAALKUNSTWERK[materiaalcode] == "gegolfd plaatstaal":
return 65
if MATERIAALKUNSTWERK[materiaalcode] == "asbestcement":
return 110
return 999
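# Hedged sketch of the TODO above: the material-to-Strickler mapping could be
# moved into a config dictionary (values copied from the if-chain above;
# STRICKLER_KS and STRICKLER_KS_DEFAULT are names invented for illustration).
STRICKLER_KS_DEFAULT = 999
STRICKLER_KS = {
    "beton": 75, "gewapend beton": 75, "metselwerk": 65, "metaal": 80,
    "aluminium": 80, "ijzer": 80, "gietijzer": 75, "PVC": 80,
    "gegolfd plaatstaal": 65, "asbestcement": 110,
}
# _obj_get_soortmateriaal could then reduce to:
#     return STRICKLER_KS.get(MATERIAALKUNSTWERK.get(materiaalcode), STRICKLER_KS_DEFAULT)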
def afsluitmiddel_codegerelateerdobject(damo_gdf=None, obj=None, col_relaties=DM_COL_CODEGERELATEERD_OBJECT):
"""Get code of related object. Is more the one code is defined, None value is returned
TODO: improve code
"""
code_src = []
code_rel = []
for objectname in col_relaties:
if DM_LAYERS[objectname[0:-2].lower()] in fiona.listlayers(obj["source"]['path']):
gdf_tmp = _create_gdf(DM_LAYERS[objectname[0:-2].lower()], obj["source"]['path'],
index_name=obj["index"]['name'], index_col_src=obj["index"]["src_col"])
            for index, value in damo_gdf[objectname].dropna().items():
code_src.append(index)
code_rel.append(gdf_tmp[gdf_tmp['OBJECTID'].astype(int) == int(value)].index.values[0])
damo_gdf["relaties"] = pd.DataFrame(data=code_rel, index=code_src)
return damo_gdf["relaties"]
def brug_pt_to_line(damo_gdf=None):
return damo_gdf.apply(lambda x: _brug_profile_geometry(x, lijn_lengte="WS_LENGTEBRUG", rotate_degree=0), axis=1)
def brug_profile_geometry(damo_gdf=None):
return damo_gdf.apply(lambda x: _brug_profile_geometry(x, lijn_lengte="WS_BREEDTEBRUG", rotate_degree=90), axis=1)
def _brug_profile_geometry(row, direction="RICHTING", lijn_lengte="WS_LENGTEBRUG", rotate_degree=0, default_width=1,
default_dir=0):
"""
Convert bridge point to line
direction in degrees"""
dir = math.radians(row[direction] + rotate_degree) if not pd.isnull(row[direction]) else math.radians(
default_dir + rotate_degree)
length = row[lijn_lengte] if not pd.isnull(row[lijn_lengte]) else default_width
dx = math.cos(dir) * 0.5 * length
dy = math.sin(dir) * 0.5 * length
return LineString([(row.geometry.x - float(dx), row.geometry.y - float(dy)),
(row.geometry.x + float(dx), row.geometry.y + float(dy))])
def _create_gdf(layer, filegdb, index_name: str, index_col_src: str = None):
"""
    Create a geodataframe from a file geodatabase layer, set its index to
    `index_col_src` (renamed to `index_name`) and drop rows without a unique code.
    """
gdf = read_filegdb(filegdb, layer=layer)
gdf.set_index(index_col_src, inplace=True)
gdf.index.names = [index_name]
# remove objects without a unique code
gdf = gdf[gdf.index.notnull()]
return gdf
|
import os
import json
import pathlib
import eland as ed
import pandas as pd
from elasticsearch import Elasticsearch
from slugify import slugify
from typer import Typer
app = Typer()
column_values = {
"INSTNM": str,
"INSTURL": str,
"CITY": str,
"ST_FIPS": int,
"PBI": float,
"ANNHI": float,
"TRIBAL": float,
"AANAPII": float,
"HSI": float,
"NANTI": float,
"HBCU": float,
"CURROPER": float,
"LATITUDE": float,
"LONGITUDE": float,
"MENONLY": float,
"WOMENONLY": float,
"CONTROL": int,
"RELAFFIL": str,
"HIGHDEG": str,
"MAIN": float,
"HCM2": float,
}
def _translate_code(json_file, df_column, if_None: str="Unknown"):
j_file = json.loads(pathlib.Path(json_file).read_text())
return lambda x:j_file.get(str(x), if_None)
converter_list = [
("RELAFFIL", _translate_code("translations/relaffil.json", "RELAFFIL", if_None="None")),
("CONTROL", _translate_code("translations/control.json", "CONTROL")),
("ST_FIPS", _translate_code("translations/st_fips.json", "ST_FIPS")),
("HIGHDEG", _translate_code("translations/high_deg.json", "HIGHDEG")),
]
converters = {x:y for x,y in converter_list} # To return "ST_FIPS": lambda x:st_fips_json.get(str(x), "Unknown"), etc
base_df = pd.read_csv(
"base_data/Most-Recent-Cohorts-All-Data-Elements.csv",
usecols=list(column_values.keys()),
converters=converters,
dtype=column_values,
)
base_df.fillna(0, inplace=True)
base_df['location'] = base_df.apply(lambda x:f"{x.LATITUDE}, {x.LONGITUDE}", axis=1)
# Create DATAFRAME FOR ACTIVE HBCUs and PBIs
hbcus = base_df.loc[(base_df.HBCU == 1) & (base_df.CURROPER == 1)]
pbis = base_df.loc[(base_df.PBI == 1) & (base_df.CURROPER == 1)]
def _gen_slug_link(school):
"""create markdown link to associated pages object"""
slug = slugify(school.INSTNM)
return f"[{school.INSTNM}](/pages/{slug}.md) - {school.INSTURL}"
@app.command()
def gen_list(
    filename: pathlib.Path = "readme.md", title="HBCUs in the United States"
):
"""build readme with readme_templates"""
states = []
for df in (hbcus, pbis):
df["readme"] = df.apply(_gen_slug_link, axis=1)
for name in sorted(df["ST_FIPS"].unique()):
state_sections = {}
for df_name, df in (("hbcus", hbcus), ("pbis", pbis)):
schools = df[df["ST_FIPS"] == name]["readme"].values.tolist()
if schools:
schools = "\n\n".join(schools)
else:
schools = "None"
state_sections[df_name] = schools
state_section = f"""## {name}
### HBCUs
{state_sections['hbcus']}
### PBIs
{state_sections['pbis']}"""
states.append(state_section)
states = "\n".join(states)
filename.write_text(
f"""# {title}
{states}
---
#### source:
[College Scorecard US Dept. of Education](https://data.ed.gov/dataset/college-scorecard-all-data-files-through-6-2020/resources?resource=823ac095-bdfc-41b0-b508-4e8fc3110082)
#### license: [MIT License](/LICENSE)"""
)
# Elastic Load Data
client = Elasticsearch(
cloud_id=os.environ.get("ES_CLOUD_ID"),
http_auth=(
os.environ.get("ES_USERNAME"),
os.environ.get("ES_PASSWORD"),
),
)
es_type_overrides = {
"location": "geo_point",
}
@app.command()
def load_to_es():
ed.pandas_to_eland(
pd_df=base_df,
es_client=client,
        es_dest_index="us_colleges",
es_if_exists="replace",
es_refresh=True,
use_pandas_index_for_es_ids=False,
es_type_overrides = es_type_overrides,
)
@app.command()
def build_pages(): # TODO REMOVE DEPENDENCY ON WIKIPEDIA
for df in (hbcus, pbis):
df.drop(columns=['CURROPER', 'LATITUDE', 'LONGITUDE'], inplace=True)
dfj = df.to_dict(orient="records")
for row in dfj:
f_meta = []
f_name = row.pop('INSTNM')
filepath = pathlib.Path(slugify(f_name)).with_suffix(".md")
f_url = row.pop('INSTURL').rstrip('/')
for name, val in row.items():
if val in (1.0, 0.0):
if val:
f_name += f" - {name}"
else:
f_meta.append(f"**{name}**: {val}")
f_text = "\n\n".join(f_meta)
page = pathlib.Path("pages").joinpath(filepath)
page.write_text(
f"""# {f_name}
### {f_url}
---
{f_text}"""
)
if __name__ == "__main__":
app()
|
import os
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
STATIONS_PATH = os.path.join(ROOT_DIR, "stations.json")
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
from django.conf.urls import url
app_name = "departments"
urlpatterns = [
path('create/new/', views.CreateDepartment , name='create_department'),
path('UIC/all/departments/', views.AllDepartments.as_view(), name='all_departments'),
path('edit/<department_slug>/', views.UpdateDepartment.as_view(), name='update_department'),
path('<department_slug>/', views.DepartmentDetail.as_view(), name='department_detail'),
path('department/<department_slug>/delete', views.DepartmentDelete.as_view(), name='delete_department'),
path('<department_slug>/students/', views.DepartmentStudents.as_view(), name='department_students' ),
path('<department_slug>/teachers/', views.DepartmentTeachers.as_view(), name='department_teachers' ),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###################################################
#
# pitch2svg.py
#
# Make an SVG of a pitch trace.
#
# Based loosely off code from Martijn Millecamp
# (martijn.millecamp@student.kuleuven.be) and
# Miroslav Masat (miro.masat@gmail.com):
# https://github.com/miromasat/pitch-detection-librosa-python
#
###################################################
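# Example invocation (hypothetical file names):
#   python pitch2svg.py recording.wav pitch_trace.svg --nbuckets 512 --width 512 --height 100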
from __future__ import print_function, unicode_literals, division, absolute_import
from io import open
import logging
import argparse
import os
import numpy as np
from math import floor
import pystache
import librosa
from audio_util import *
FMIN = 80
FMAX = 1000
THRESHOLD = 0.75
SVG_TEMPLATE = '''<svg id='pitch' preserveAspectRatio='none' viewBox="0 0 {{width}} {{height}}" xmlns="http://www.w3.org/2000/svg" height="{{height}}" width="{{width}}">
<polygon points="{{#points}}{{x}},{{y}} {{/points}}"></polygon>
</svg>
'''
def render_svg(pitches, width=512, height=100, zero_height=5):
data = { "height": height, "width": width, "points": [] }
data["points"].append({"x":0.0, "y": float(height)})
data["points"].append({"x":0.0, "y": float(height - zero_height)})
for i, pitch in enumerate(pitches):
x = i + 0.5
y = (1.0 - pitch) * (height - zero_height)
y = "%.2f" % y
data["points"].append({"x": x, "y": y})
data["points"].append({"x":float(width), "y": float(height - zero_height)})
data["points"].append({"x":float(width), "y": float(height)})
return pystache.render(SVG_TEMPLATE, data)
def extract_pitches(waveform, nbuckets=512):
nsamples = waveform.shape[0]
hop_length = int(floor(nsamples / nbuckets))
pitches, magnitudes = librosa.core.piptrack(y=waveform, sr=SAMPLE_RATE,
fmin=FMIN, fmax=FMAX, hop_length=hop_length, threshold=THRESHOLD)
pitches = pitches[:,:nbuckets]
pitches = pitches.max(axis=0)
pitches /= pitches.max()
return smooth(pitches, window_size=int(floor(nbuckets/40)))
def make_pitch_svg(input_path, nbuckets=512, height=100, width=512, zero_height=5):
waveform = load_wav_or_smil(input_path)
pitches = extract_pitches(waveform, nbuckets)
return render_svg(pitches, width, height, zero_height)
def main(input_path, output_path, nbuckets=512, width=512, height=100, zero_height=5):
    # pass by keyword: make_pitch_svg declares height before width
    svg = make_pitch_svg(input_path, nbuckets=nbuckets, width=width, height=height,
                         zero_height=zero_height)
    save_txt(output_path, svg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Convert a WAV or SMIL file to a SVG file of its pitch trace')
parser.add_argument('input', type=str, help='Input WAV or SMIL file')
parser.add_argument('output', type=str, help='Output SVG file')
    parser.add_argument('--nbuckets', type=int, default=512,
                        help='Number of sample buckets (default: 512)')
parser.add_argument('--width', type=int, default=512, help='Width of output SVG (default: 512)')
parser.add_argument('--height', type=int, default=100, help='Height of output SVG (default: 100)')
parser.add_argument('--zero_height', type=int, default=5, help='Padding around zero (default: 5)')
args = parser.parse_args()
main(args.input,
args.output,
args.nbuckets,
args.width,
args.height,
args.zero_height)
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: time_based_correction
:platform: Unix
:synopsis: A time-based dark and flat field correction using linear\
interpolation
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import numpy as np
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.corrections.base_correction import BaseCorrection
from savu.plugins.utils import register_plugin
@register_plugin
class TimeBasedCorrection(BaseCorrection, CpuPlugin):
"""
Apply a time-based dark and flat field correction to data.
:param in_range: Set to True if you require values in the \
range [0, 1]. Default: False.
"""
def __init__(self, name="TimeBasedCorrection"):
super(TimeBasedCorrection, self).__init__(name)
def pre_process(self):
self.count = 0
inData = self.get_in_datasets()[0]
pData = self.get_plugin_in_datasets()[0]
self.mfp = inData._get_plugin_data()._get_max_frames_process()
self.proj_dim = \
inData.get_data_dimension_by_axis_label('rotation_angle')
self.slice_dir = pData.get_slice_dimension()
nDims = len(pData.get_shape())
self.sslice = [slice(None)]*nDims
self.image_key = inData.data.get_image_key()
changes = np.where(np.diff(self.image_key) != 0)[0] + 1
self.split_key = np.split(self.image_key, changes)
self.split_idx = np.split(np.arange(len(self.image_key)), changes)
self.data_key = inData.data.get_index(0)
self.dark, self.dark_idx = self.calc_average(inData.data.dark(), 2)
self.flat, self.flat_idx = self.calc_average(inData.data.flat(), 1)
inData.meta_data.set('multiple_dark', self.dark)
inData.meta_data.set('multiple_flat', self.flat)
def calc_average(self, data, key):
im_key = np.where(self.image_key == key)[0]
splits = np.where(np.diff(im_key) > 1)[0]+1
local_idx = np.split(np.arange(len(im_key)), splits)
mean_data = [np.mean(data[np.array(local_idx[i])], axis=0)
for i in range(len(local_idx))]
list_idx = list(np.where([key in i for i in self.split_key])[0])
return mean_data, list_idx
def process_frames(self, data):
proj = data[0]
frame = self.get_global_frame_index()[self.count]
flat = self.calculate_flat_field(
*self.find_nearest_frames(self.flat_idx, frame))
dark = self.calculate_dark_field(
*self.find_nearest_frames(self.dark_idx, frame))
if self.parameters['in_range']:
proj = self.in_range(proj, flat)
self.count += 1
return np.nan_to_num((proj-dark)/(flat-dark))
def in_range(self, data, flat):
data[data > flat] = flat[data > flat]
return data
def find_nearest_frames(self, idx_list, value):
""" Find the index of the two entries that 'value' lies between in \
'idx_list' and calculate the distance between each of them.
"""
global_val = self.data_key[value]
# find which list (index) global_val belongs to
list_idx = [global_val in i for i in self.split_idx].index(True)
val_list = self.split_idx[list_idx]
# find length of list
length_list = len(val_list)
# find position of global_val in list and distance from each end
pos = np.where(val_list == global_val)[0][0]
dist = [(length_list-pos)/float(length_list), pos/float(length_list)]
# find closest before and after idx_list entries
new_list = list(np.sort(np.append(idx_list, list_idx)))
new_idx = new_list.index(list_idx)
entry1 = new_idx-1 if new_idx != 0 else new_idx+1
entry2 = new_idx+1 if new_idx != len(new_list)-1 else new_idx-1
before = idx_list.index(new_list[entry1])
after = idx_list.index(new_list[entry2])
return [before, after], dist
def calculate_flat_field(self, frames, distance):
return self.flat[frames[0]]*distance[0] + \
self.flat[frames[1]]*distance[1]
def calculate_dark_field(self, frames, distance):
return self.dark[frames[0]]*distance[0] + \
self.dark[frames[1]]*distance[1]
def get_max_frames(self):
return 'single'
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the UIAutomatorOptions named tuple."""
import collections
UIAutomatorOptions = collections.namedtuple('UIAutomatorOptions', [
'tool',
'cleanup_test_files',
'push_deps',
'annotations',
'exclude_annotations',
'test_filter',
'test_data',
'save_perf_json',
'screenshot_failures',
'uiautomator_jar',
'uiautomator_info_jar',
'package_name'])
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/760/A
m,d = list(map(int,input().split()))
l = [None] + [31,28] + [31,30]*2 + [31,31] + [30,31]*2
print((l[m]+d+5)//7)
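# A hedged reading of the one-liner above: m is the month (1-12), d is the
# weekday of the 1st (1 = Monday), and l[m] is that month's length in a
# non-leap year. A calendar column holds one week, so the answer is
# ceil((l[m] + d - 1) / 7), written with integer arithmetic as (l[m] + d + 5) // 7.
# For example, m=1, d=7 (January starting on Sunday): (31 + 7 + 5) // 7 == 6.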
|
"""
a person object: fields + behavior
change: the tax method is now a computed attribute
"""
class Person:
def __init__(self, name, job, pay=0):
self.name = name
self.job = job
self.pay = pay # real instance data
def __getattr__(self, attr): # on person.attr
if attr == 'tax':
return self.pay * 0.30 # computed on access
else:
raise AttributeError() # other unknown names
def info(self):
return self.name, self.job, self.pay, self.tax
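# Minimal usage sketch (values are made up): 'tax' is not stored, it is
# computed by __getattr__ each time it is accessed.
if __name__ == '__main__':
    bob = Person('Bob Smith', 'dev', pay=100000)
    print(bob.tax)     # 30000.0
    print(bob.info())  # ('Bob Smith', 'dev', 100000, 30000.0)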
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Schedule(models.Model):
DAY_CHOICES = [
('A','A'),
('B','B'),
('C','C'),
('D','D'),
('E','E'),
]
day = models.CharField(max_length=1,choices=DAY_CHOICES)
date = models.DateField(auto_now_add=True)
def __str__(self):
return self.day
class Routine(models.Model):
DAY_CHOICES = [
('A','A'),
('B','B'),
('C','C'),
('D','D'),
('E','E'),
]
day = models.CharField(max_length=1,choices=DAY_CHOICES)
    morning_lab = models.BooleanField(default=False)
    first_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    second_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    third_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    after_morning_lab = models.BooleanField(default=False)
    fourth_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    fifth_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    sixth_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    afternoon_lab = models.BooleanField(default=False)
    seventh_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    eigth_period = models.CharField(max_length=30, null=True, blank=True, default="-")
    ninth_period = models.CharField(max_length=30, null=True, blank=True, default="-")
def __str__(self):
return self.day + " Day Routine"
|
'''
Functions for the Korenberg photography file list.
For each block, the entire file list is split into 3 groups:
filelist = bad + moderate + great
good files = moderate + great
'''
import matplotlib.pyplot as plt
import PyCACalebExtras.Common as cc
import PyCACalebExtras.Display as cd
import PyCA.Core as ca
import time
plt.ion() # tell it to use interactive mode -- see results immediately
import glob
from subprocess import Popen, PIPE
plt.close('all')
def get_file_dist(fname):
return int(fname[fname.rfind('_')+1: fname.rfind('.')])
def get_imset(hd=False, col='bw'):
if hd is False:
if col == 'bw':
imdir = '/home/sci/crottman/korenberg/data/photo/seg_low_res_crop8/bw/'
elif col == 'rgb':
imdir = '/home/sci/crottman/korenberg/data/photo/seg_low_res_crop8/color/'
else:
if col == 'bw':
imdir = '/home/sci/crottman/korenberg/data/photo/seg_high_res_crop/bw/'
elif col == 'rgb':
imdir = '/home/sci/crottman/korenberg/data/photo/seg_high_res_crop/color/'
files = glob.glob(imdir + 'block1/' + '*.png')
newfiles1 = sorted(files, key=lambda x: get_file_dist(x))
files = glob.glob(imdir + 'block2/' + '*.png')
newfiles2 = sorted(files, key=lambda x: get_file_dist(x))
files = glob.glob(imdir + 'block3/' + '*.png')
newfiles3 = sorted(files, key=lambda x: get_file_dist(x))
files = glob.glob(imdir + 'block4/' + '*.png')
newfiles4 = sorted(files, key=lambda x: get_file_dist(x))
# Very Bad Files
b1 = [
'/DDP_Seg_660.png',
'/DDP_Seg_12180.png',
'/DDP_Seg_12210.png',
'/DDP_Seg_12240.png',
'/DDP_Seg_12270.png'
]
b2 = [
'/DDP_Seg_3030.png',
]
b3 = [
'/DDP_Seg_3300.png',
]
b4 = [
'/DDP_Seg_3030.png',
'/DDP_Seg_3060.png',
'/DDP_Seg_4560.png',
'/DDP_Seg_15450.png',
'/DDP_Seg_18420.png',
'/DDP_Seg_22920.png',
'/DDP_Seg_25530.png', # wrong size
'/DDP_Seg_5070.png',
'/DDP_Seg_10200.png', # out of order
]
# Moderately bad files
m1 = []
m2 = [
'/DDP_Seg_12570.png',
'/DDP_Seg_12660.png',
'/DDP_Seg_12690.png',
'/DDP_Seg_12720.png',
'/DDP_Seg_12750.png',
'/DDP_Seg_12780.png',
'/DDP_Seg_12810.png',
'/DDP_Seg_12840.png',
'/DDP_Seg_12870.png',
'/DDP_Seg_12900.png',
'/DDP_Seg_12930.png',
'/DDP_Seg_12990.png',
'/DDP_Seg_13020.png',
'/DDP_Seg_13140.png',
'/DDP_Seg_13200.png',
'/DDP_Seg_13230.png',
'/DDP_Seg_13260.png',
'/DDP_Seg_13350.png',
'/DDP_Seg_13380.png',
'/DDP_Seg_13410.png',
'/DDP_Seg_13440.png',
'/DDP_Seg_13470.png',
'/DDP_Seg_13500.png',
'/DDP_Seg_13530.png',
'/DDP_Seg_13590.png',
'/DDP_Seg_13620.png',
'/DDP_Seg_13680.png',
'/DDP_Seg_13710.png',
'/DDP_Seg_13740.png',
'/DDP_Seg_13770.png'
]
m3 = [
# Ice damage:
'/DDP_Seg_0.png',
'/DDP_Seg_30.png',
'/DDP_Seg_60.png',
'/DDP_Seg_90.png',
'/DDP_Seg_120.png',
'/DDP_Seg_150.png',
'/DDP_Seg_180.png',
'/DDP_Seg_210.png',
'/DDP_Seg_390.png',
'/DDP_Seg_420.png',
'/DDP_Seg_450.png',
'/DDP_Seg_480.png',
'/DDP_Seg_510.png',
'/DDP_Seg_540.png',
'/DDP_Seg_570.png',
'/DDP_Seg_600.png',
'/DDP_Seg_630.png',
'/DDP_Seg_660.png',
'/DDP_Seg_690.png',
'/DDP_Seg_1440.png',
'/DDP_Seg_1470.png',
'/DDP_Seg_1500.png',
'/DDP_Seg_1530.png',
'/DDP_Seg_1560.png',
'/DDP_Seg_1590.png',
'/DDP_Seg_1620.png',
'/DDP_Seg_240.png',
'/DDP_Seg_270.png',
'/DDP_Seg_300.png',
'/DDP_Seg_330.png',
'/DDP_Seg_360.png',
'/DDP_Seg_720.png',
'/DDP_Seg_750.png',
'/DDP_Seg_780.png',
'/DDP_Seg_810.png',
'/DDP_Seg_840.png',
'/DDP_Seg_870.png',
'/DDP_Seg_900.png',
'/DDP_Seg_930.png',
'/DDP_Seg_960.png',
'/DDP_Seg_990.png',
'/DDP_Seg_1020.png',
'/DDP_Seg_1050.png',
'/DDP_Seg_1080.png',
'/DDP_Seg_1110.png',
'/DDP_Seg_1140.png',
'/DDP_Seg_1170.png',
'/DDP_Seg_1200.png',
'/DDP_Seg_1230.png',
'/DDP_Seg_1260.png',
'/DDP_Seg_1290.png',
'/DDP_Seg_1320.png',
'/DDP_Seg_1350.png',
'/DDP_Seg_1380.png',
'/DDP_Seg_1410.png',
'/DDP_Seg_1440.png',
'/DDP_Seg_4950.png',
'/DDP_Seg_4980.png',
'/DDP_Seg_5010.png',
'/DDP_Seg_5040.png',
'/DDP_Seg_5070.png',
'/DDP_Seg_5100.png',
'/DDP_Seg_5130.png',
'/DDP_Seg_5190.png',
'/DDP_Seg_5220.png',
'/DDP_Seg_5250.png',
'/DDP_Seg_5280.png',
'/DDP_Seg_5310.png',
'/DDP_Seg_5340.png',
'/DDP_Seg_5370.png',
'/DDP_Seg_5400.png',
'/DDP_Seg_5430.png',
]
m4 = []
imset = [[newfiles1, b1, m1],
[newfiles2, b2, m2],
[newfiles3, b3, m3],
[newfiles4, b4, m4]]
return imset
def get_all_good_files(hd=False, col='bw'):
'''returns a list of all of the good (moderate + great) files'''
return get_good_files(0, hd, col) + \
get_good_files(1, hd, col) + \
get_good_files(2, hd, col) + \
get_good_files(3, hd, col)
def get_all_great_files(hd=False, col='bw'):
    '''returns a list of all of the great (neither moderate nor poor)
files'''
return get_great_files(0, hd, col) + \
get_great_files(1, hd, col) + \
get_great_files(2, hd, col) + \
get_great_files(3, hd, col)
def get_all_files(hd=False, col='bw'):
'''returns a list of all (poor + moderate + great) files'''
[[newfiles1, l1, m1],
[newfiles2, l2, m2],
[newfiles3, l3, m3],
[newfiles4, l4, m4]] = get_imset(hd, col)
return newfiles1+newfiles2+newfiles3+newfiles4
def get_bad_files(idx=0, hd=False, col='bw'):
    '''returns a list of all of the bad (neither moderate nor great)
files for a given set'''
imset = get_imset(hd, col)
filelist = imset[idx][0]
badfilelist = imset[idx][1]
badlist = []
for badstr in badfilelist:
badlist += [f for f in filelist if badstr in f]
return badlist
def get_mod_files(idx=0, hd=False, col='bw'):
    '''returns a list of all of the moderate (neither poor nor great)
files for a given set'''
imset = get_imset(hd, col)
filelist = imset[idx][0]
modfilelist = imset[idx][2]
modlist = []
for modstr in modfilelist:
modlist += [f for f in filelist if modstr in f]
return modlist
def get_good_files(idx=0, hd=False, col='bw'):
'''returns a list of all of the good (moderate and great)
files for a given set'''
imset = get_imset(hd, col)
filelist = imset[idx][0]
badfilelist = imset[idx][1]
goodlist = filelist[:]
for badfile in badfilelist:
goodlist = [f for f in goodlist if badfile not in f]
return goodlist
def get_great_files(idx=0, hd=False, col='bw'):
    '''returns a list of all of the great (neither moderate nor poor)
files for a given set'''
imset = get_imset(hd, col)
filelist = imset[idx][0]
badfilelist = imset[idx][1] + imset[idx][2]
goodlist = filelist[:]
for badfile in badfilelist:
goodlist = [f for f in goodlist if badfile not in f]
return goodlist
def show_files(filelist, sleeptime=.6):
    '''given a list of files, show all those images (doesn't yet
    work for color images)'''
plt.figure()
loadtimelist = []
if type(filelist) is not list:
filelist = [filelist]
for i in xrange(len(filelist)):
fname = filelist[i]
t = time.time()
name = fname[fname.rfind('/') : fname.rfind('.')]
Im = cc.LoadTIFF(fname, ca.MEM_HOST)
print name, i
cd.DispImage(Im, newFig=False, title=name)
p = Popen(['xsel', '-pi'], stdin=PIPE)
p.communicate(input="'" + name + ".png'" + ',\n')
loadtime = time.time() - t
loadtimelist.append(loadtime)
time.sleep(sleeptime)
# plt.figure()
# plt.plot(loadtimelist)
if __name__ == '__main__':
# show_files(get_bad_files(3), .5)
# show_files(get_good_files(3)[310:], 1)
# print 'block1:', len(get_great_files(0)), ',', len(get_good_files(0)), 'out of', len(newfiles1)
# print 'block2:', len(get_great_files(1)), ',', len(get_good_files(1)), 'out of', len(newfiles2)
# print 'block3:', len(get_great_files(2)), ',', len(get_good_files(2)), 'out of', len(newfiles3)
# print 'block4:', len(get_great_files(3)), ',', len(get_good_files(3)), 'out of', len(newfiles4)
# show_files(get_great_files(3)[:], .1)
# show_files(get_pretty_bad_files(1)[:], .5)
# show_files(get_mod_files(2))
# print get_great_files(0)
show_files(get_bad_files(3))
|
""" Example of using the API class """
from filmweb_api import FilmwebApi
if __name__ == '__main__':
filmweb_api = FilmwebApi('USERNAME', 'PASSWORD')
filmweb_api.login()
films = filmweb_api.get_user_films_want_to_see()
print(films)
films = filmweb_api.get_film_info_full(824633)
print(films)
|
import time
import logging
import threading
class Log(object):
_loggers = {}
CRITICAL=logging.CRITICAL
ERROR=logging.ERROR
WARNING=logging.WARNING
INFO=logging.INFO
DEBUG=logging.DEBUG
NOTSET=logging.NOTSET
chosen_level=logging.INFO
    COLOURS = {
        "critical": "\033[1;31;40m",   # red
        "error": "\033[1;33;40m",      # yellow
        "warning": "\033[1;35;40m",    # purple
        "info": "\033[1;37;40m",       # white
        "debug": "\033[1;37;40m",      # grey
        "log": "\033[1;37;40m",        # white (generic log level)
        "exception": "\033[1;31;40m"   # red (logged with traceback)
    }
@staticmethod
def debug(tag, message=None):
"""
Post a debug-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("debug", tag, message)
@staticmethod
def info(tag, message=None):
"""
Post an info-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("info", tag, message)
@staticmethod
def warning(tag, message=None):
"""
Post a warning-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("warning", tag, message)
@staticmethod
def error(tag, message=None):
"""
Post an error-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("error", tag, message)
@staticmethod
def critical(tag, message=None):
"""
Post a critical-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("critical", tag, message)
@staticmethod
def log(tag, message=None):
"""
Post a log-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("log", tag, message)
@staticmethod
def exception(tag, message=None):
"""
Post an exception-level message.
:param String tag: tag for the log message.
:param String message: message to post, if one is not provided, the tag
is used as the message instead.
:return: None
"""
Log._post("exception", tag, message)
@staticmethod
def init(level):
"""
Initiate the logging system.
:param int level: level to set for the logger (only applies on first
call, can't be changed once logger is created).
:return: Logger
"""
Log.chosen_level = level
logging.basicConfig(
format="%(levelname)s\t%(name)s\t%(asctime)s\t%(message)s",
level=level)
@staticmethod
def _post(level, tag, message=None):
"""
Post a message to a logger of a given tag at the given level.
:param String tag: tag for the log message.
:param String level: level of the log message, as a lowercase String,
:param String message: message to post, the message is posted as-is, but
in the right colour.
:return: None
"""
        if message is None:
            message = tag
            tag = "hotword"
        message = "%s%s\033[0;37;40m" % (Log.COLOURS[level], message)
        logger = Log._get_logger(level, tag)
        if level == "log":
            # Logger.log() needs an explicit level, so reuse the configured one
            logger.log(Log.chosen_level, Log._message(message))
        else:
            method = getattr(logger, level)
            method(Log._message(message))
@staticmethod
def _get_logger(level, tag):
"""
Retrieve a Logger for a given tag.
:param int level: level to set for the logger (only applies on first
call, can't be changed once logger is created).
:param String tag: tag for the log message.
:return: Logger
"""
try:
return Log._loggers[tag]
except KeyError:
Log._loggers[tag] = logging.getLogger(tag)
Log._loggers[tag].setLevel(Log.chosen_level)
return Log._loggers[tag]
@staticmethod
def _message(message):
"""
Augment a message by including Thread information.
:param String message: message to post, adds time and Thread information
to the String.
:return: String
"""
str_thread = "Thread-%d" % threading.current_thread().ident
return "%s\t%s" % (str_thread, message)
|
import numpy as np
import numpy.linalg as la
class GROUSE(object):
def __init__(self, n, d, U = None, eta = None):
self.n = n
self.d = d
if U is not None:
self.U = U
else:
self.U = np.eye(N=self.n, M=self.d)
if eta is not None:
self.eta0 = eta
else:
self.eta0 = 10.0
self.it = 1.0
def add_data(self, v):
mask_c = np.isnan(v)
mask = 1 - mask_c
U = self.U
n = self.n
d = self.d
Ov = v[mask==1]
OU = U[mask==1,:]
w, _, __, ___ = la.lstsq(OU, Ov)
p = U.dot(w)
r = np.zeros((n,))
r[mask==1] = Ov - p[mask==1]
sigma = la.norm(r) * la.norm(p)
eta = self.eta0 / self.it
pw = la.norm(p) * la.norm(w)
rw = la.norm(r) * la.norm(w)
if pw == 0 or rw == 0: return
U = U + (np.cos(sigma * eta) - 1.0) * np.outer(p, w) / pw \
+ np.sin(sigma * eta) * np.outer(r, w) / rw
self.U = U
self.it += 1.0
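# Hedged usage sketch: track a d-dimensional subspace of R^n from a stream of
# partially observed vectors, where np.nan marks the missing entries.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    tracker = GROUSE(n=50, d=3)
    for _ in range(200):
        v = rng.randn(50)
        v[rng.rand(50) < 0.3] = np.nan  # hide roughly 30% of each vector
        tracker.add_data(v)
    print(tracker.U.shape)  # (50, 3) current subspace basis estimate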
|
# I was going to do numbers and strings together but nope, we ain't doing it that way
# write a string using " " or ' '
"this is a string"
'this is also a string'
""" this
is
also
a
string
"""
'''
this
as
well
'''
"i'll stop being annoying now"
a_string_variable = "assign a string to a variable"
print("i know this seems stupid but yeah, this is how you print strings")
# okay enough playing around
example_string = "hello world"
# access elements of a string using []
# string indexes start from 0
x = example_string[4]
print(x)
print(example_string[6])
print(example_string[-2]) #access elements from the right
# get a range of elements
y = example_string[3:7]
print(y)
print(example_string[-5:-2])  # negative indices still slice left to right
# some string methods now
another_example = " Hello world "
uppercase = another_example.upper()
lowercase = another_example.lower()
length_of_string = len(another_example)
remove_white_spaces = another_example.strip()
split_space_separated_string_into_list = another_example.split(" ")
a_string = "Hi, Hello"
split_string_into_list_by_comma = a_string.split(",")
print("orignal string:", another_example)
print("uppercase: ", uppercase)
print("lowercase: ", lowercase)
print("remove white spaces: ", remove_white_spaces)
print("split with space: ", uppercase)
print("\nother example string:", a_string)
print("split string with comma:", split_string_into_list_by_comma)
# you can use different operators to split the string with
# there are lots of string methods, i'll keep on adding more in the future,
# but till then please check out the python documentation for more methods
# concat string
string_one = "first string"
string_two = " second string"
concat_string = string_one + string_two
print("\nstring one: ", string_one)
print("string two: ", string_two)
print("concat string: ", concat_string)
# concat string and numbers using the format method
an_integer_num = 775
a_floating_num = 44.5
string_for_formatting = "\nThe integer number is {} and the floating number is {}."
print(string_for_formatting.format(an_integer_num, a_floating_num))
# find substrings in string
print("find:", string_for_formatting.find("he")) # this will return the first index where it found the substring
print("rfind:", string_for_formatting.rfind("he")) # this will return the last index where it found the substring
|
# yedict_script.py
"""Script for generating two files from the yedict dictionary. One for traditional
characters and one for simplified characters. One word per line.
"""
path = 'data/yedict.txt'
simp_path = 'data/yue-Hans.txt'
trad_path = 'data/yue-Hant.txt'
def main():
    with open(path, 'r') as f, \
            open(simp_path, 'w') as simp_f, \
            open(trad_path, 'w') as trad_f:
        for line in f:
            trad = line.split()[0]
            simp = line.split()[1]
            simp_f.write(simp + '\n')
            trad_f.write(trad + '\n')
if __name__ == '__main__':
main()
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import MonthLocator
# import matplotlib.ticker
import numpy as np
from sense.canopy import OneLayer
from sense.soil import Soil
from sense import model
import scipy.stats
from scipy.optimize import minimize
import pdb
# Helper functions for statistical parameters
#--------------------------------------------
def rmse_prediction(predictions, targets):
""" calculation of RMSE """
return np.sqrt(np.nanmean((predictions - targets) ** 2))
def linregress(predictions, targets):
""" Calculate a linear least-squares regression for two sets of measurements """
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(predictions, targets)
return slope, intercept, r_value, p_value, std_err
def read_mni_data(path, file_name, extension, field, sep=';'):
""" read MNI campaign data """
df = pd.io.parsers.read_csv(os.path.join(path, file_name + extension), header=[0, 1], sep=sep)
df = df.set_index(pd.to_datetime(df[field]['date']))
df = df.drop(df.filter(like='date'), axis=1)
return df
def read_agrometeo(path, file_name, extension, sep=';', decimal=','):
""" read agro-meteorological station (hourly data) """
df = pd.read_csv(os.path.join(path, file_name + extension), sep=sep, decimal=decimal)
df['SUM_NN050'] = df['SUM_NN050'].str.replace(',','.')
df['SUM_NN050'] = df['SUM_NN050'].str.replace('-','0').astype(float)
df['date'] = df['Tag'] + ' ' + df['Stunde']
df = df.set_index(pd.to_datetime(df['date'], format='%d.%m.%Y %H:%S'))
return df
def filter_relativorbit(data, field, orbit1, orbit2=None, orbit3=None, orbit4=None):
""" data filter for relativ orbits """
output = data[[(check == orbit1 or check == orbit2 or check == orbit3 or check == orbit4) for check in data[(field,'relativeorbit')]]]
return output
def smooth(x,window_len=11,window='hanning'):
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[2*x[0]-x[window_len-1::-1],x,2*x[-1]-x[-1:-window_len:-1]]
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='same')
return y[window_len:-window_len+1]
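# Hedged usage sketch of smooth(): mirror-pad the series at both ends, convolve
# with an 11-point Hanning window and return an array of the same length, e.g.
#   noisy = np.sin(np.linspace(0, 4 * np.pi, 200)) + np.random.normal(0, 0.2, 200)
#   smoothed = smooth(noisy, window_len=11, window='hanning')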
def read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro):
# Read MNI data
df = read_mni_data(path, file_name, extension, field)
# Read agro-meteorological station
df_agro = read_agrometeo(path_agro, file_name_agro, extension_agro)
# filter for field
field_data = df.filter(like=field)
# filter for relativorbit
field_data_orbit = filter_relativorbit(field_data, field, 95, 168)
# field_data = field_data_orbit
# get rid of NaN values
parameter_nan = 'LAI'
field_data = field_data[~np.isnan(field_data.filter(like=parameter_nan).values)]
# available auxiliary data
theta_field = np.deg2rad(field_data.filter(like='theta'))
# theta_field[:] = 45
sm_field = field_data.filter(like='SM')
height_field = field_data.filter(like='Height')/100
lai_field = field_data.filter(like='LAI')
vwc_field = field_data.filter(like='VWC')
pol_field = field_data.filter(like='sigma_sentinel_'+pol)
return df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field
### Optimization ###
#-----------------------------------------------------------------
def solve_fun(VALS):
for i in range(len(var_opt)):
dic[var_opt[i]] = VALS[i]
ke = dic['coef'] * np.sqrt(dic['lai'])
# ke = dic['coef'] * np.sqrt(dic['vwc'])
# ke=1
dic['ke'] = ke
# surface
soil = Soil(mv=dic['mv'], C_hh=dic['C_hh'], C_vv=dic['C_vv'], D_hh=dic['D_hh'], D_vv=dic['D_vv'], C_hv=dic['C_hv'], D_hv=dic['D_hv'], V2=dic['V2'], s=dic['s'], clay=dic['clay'], sand=dic['sand'], f=dic['f'], bulk=dic['bulk'], l=dic['l'])
# canopy
can = OneLayer(canopy=dic['canopy'], ke_h=dic['ke'], ke_v=dic['ke'], d=dic['d'], ks_h = dic['omega']*dic['ke'], ks_v = dic['omega']*dic['ke'], V1=dic['V1'], V2=dic['V2'], A_hh=dic['A_hh'], B_hh=dic['B_hh'], A_vv=dic['A_vv'], B_vv=dic['B_vv'], A_hv=dic['A_hv'], B_hv=dic['B_hv'])
S = model.RTModel(surface=soil, canopy=can, models=models, theta=dic['theta'], freq=dic['f'])
S.sigma0()
return S.__dict__['stot'][pol[::-1]]
def fun_opt(VALS):
# return(10.*np.log10(np.nansum(np.square(solve_fun(VALS)-dic['pol_value']))))
return(np.nansum(np.square(solve_fun(VALS)-dic['pol_value'])))
def data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol):
    n = int(np.floor(n / 2))
if n > 0:
field_data = field_data.drop(field_data.index[-n:])
field_data = field_data.drop(field_data.index[0:n])
theta_field = theta_field.drop(theta_field.index[-n:])
theta_field = theta_field.drop(theta_field.index[0:n])
sm_field = field_data.filter(like='SM')
height_field = field_data.filter(like='Height')/100
lai_field = field_data.filter(like='LAI')
vwc_field = field_data.filter(like='VWC')
vv_field = field_data.filter(like='sigma_sentinel_vv')
vh_field = field_data.filter(like='sigma_sentinel_vh')
pol_field = field_data.filter(like='sigma_sentinel_'+pol)
return field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field
#-----------------------------------------------------------------
### Data preparation ###
#-----------------------------------------------------------------
# storage information
path = '/media/tweiss/Daten/new_data'
file_name = 'multi10' # theta needs to be changed to for norm multi
extension = '.csv'
path_agro = '/media/nas_data/2017_MNI_campaign/field_data/meteodata/agrarmeteorological_station'
file_name_agro = 'Eichenried_01012017_31122017_hourly'
extension_agro = '.csv'
field = '508_high'
field_plot = ['508_high', '508_low', '508_med']
pol = 'vv'
# pol = 'vh'
# output path
plot_output_path = '/media/tweiss/Daten/plots/paper/'
df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)
#-----------------------------------------------------------------
### Run SenSe module
#-----------------------------------------------------------------
#### Choose models
#-----------------
surface_list = ['Oh92', 'Oh04', 'Dubois95', 'WaterCloud', 'I2EM']
# surface_list = ['Oh92', 'Oh04', 'WaterCloud']
# surface_list = ['WaterCloud']
canopy_list = ['turbid_isotropic', 'water_cloud']
# canopy_list = ['water_cloud']
# surface_list = ['Oh92']
# surface_list = ['Oh04']
# surface_list = ['Dubois95']
# surface_list = ['WaterCloud']
# surface_list = ['I2EM']
# canopy_list = ['turbid_isotropic']
# canopy_list = ['water_cloud']
### option for time invariant or variant calibration of parameter
#-------------------------------
opt_mod = 'time invariant'
# opt_mod = 'time variant'
#---------------------------
### plot option: "single" or "all" modelcombination
#------------------------------
# plot = 'single'
plot = 'all'
#------------------------------
### plot option scatterplot or not
#-------------------------------
# style = 'scatterplot'
style = ''
### plot option for scatterplot single ESU
#------------------------------------
# style_2 = 'scatterplot_single_ESU'
style_2 = ''
#-----------------------------------
# Initialize plot settings
#---------------------------
if style == 'scatterplot':
fig, ax = plt.subplots(figsize=(10, 10))
else:
fig, ax = plt.subplots(figsize=(17, 10))
# plt.title('Winter Wheat')
plt.ylabel('Backscatter [dB]', fontsize=15)
plt.xlabel('Date', fontsize=15)
plt.tick_params(labelsize=12)
if pol == 'vv':
ax.set_ylim([-25,-7.5])
elif pol == 'vh':
ax.set_ylim([-30,-15])
colormaps = ['Greens', 'Purples', 'Blues', 'Oranges', 'Reds', 'Greys', 'pink', 'bone', 'Blues', 'Blues', 'Blues']
j = 0
colormap = plt.get_cmap(colormaps[j])
colors = [colormap(jj) for jj in np.linspace(0.35, 1., 3)]
for k in surface_list:
for kk in canopy_list:
df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)
freq = 5.405
clay = 0.08
sand = 0.12
bulk = 1.5
s = 0.0105 # vv
s = 0.0115
# s = 0.009 # vh ?????
C_hh = 0
D_hh = 0
C_hv = -22.5
D_hv = 3.2
C_vv = -14.609339
D_vv = 12.884086
### Canopy
# Water Cloud (A, B, V1, V2, theta)
# SSRT (coef, omega, theta)
#-----------------------------------
A_hh = 0
B_hh = 0
A_hv = 0.029
B_hv = 0.0013
A_vv = 0.0029
B_vv = 0.13
V1 = lai_field.values.flatten()
V2 = V1 # initialize in surface model
coef = 1.
omega = 0.027 # vv
omega = 0.015 # vh
# IEM
l = 0.01
surface = k
canopy = kk
models = {'surface': surface, 'canopy': canopy}
#### Optimization
#-----------------
if opt_mod == 'time invariant':
dic = {"mv":sm_field.values.flatten(), "C_hh":C_hh, "C_vv":C_vv, "D_hh":D_hh, "D_vv":D_vv, "C_hv":C_hv, "D_hv":D_hv, "s":s, "clay":clay, "sand":sand, "f":freq, "bulk":bulk, "l":l, "canopy":canopy, "d":height_field.values.flatten(), "V1":V1, "V2":V2, "A_hh":A_hh, "B_hh":B_hh, "A_vv":A_vv, "B_vv":B_vv, "A_hv":A_hv, "B_hv":B_hv, "lai":lai_field.values.flatten(), "vwc":vwc_field.values.flatten(), "pol_value":pol_field.values.flatten(), "theta":theta_field.values.flatten(), "omega": omega, "coef": coef}
if canopy == 'turbid_isotropic':
var_opt = ['coef']
guess = [2.]
bounds = [(0.001,5.5)]
elif surface == 'WaterCloud' and canopy == 'water_cloud':
var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv', 'C_vv', 'D_vv', 'C_hv', 'D_hv']
guess = [A_vv, B_vv, A_hv, B_hv, C_vv, D_vv, C_hv, D_hv]
bounds = [(0.,1), (0.,1), (0.,1), (0.,1), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]
elif canopy == 'water_cloud':
var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv']
guess = [A_vv, B_vv, A_hv, B_hv]
bounds = [(0.,1), (0.,1), (0.,1), (0.,1)]
method = 'L-BFGS-B'
res = minimize(fun_opt,guess,bounds=bounds, method=method)
fun_opt(res.x)
aaa = res.x
if opt_mod == 'time variant':
aaa = [[],[],[],[],[],[],[],[],[],[],[],[]]
n=7
for i in range(len(pol_field.values.flatten())-n+1):
if type(coef) == float:
dic = {"mv":sm_field.values.flatten()[i:i+n], "C_hh":C_hh, "C_vv":C_vv, "D_hh":D_hh, "D_vv":D_vv, "C_hv":C_hv, "D_hv":D_hv, "V2":V2[i:i+n], "s":s, "clay":clay, "sand":sand, "f":freq, "bulk":bulk, "l":l, "canopy":canopy, "d":height_field.values.flatten()[i:i+n], "V1":V1[i:i+n], "A_hh":A_hh, "B_hh":B_hh, "A_vv":A_vv, "B_vv":B_vv, "A_hv":A_hv, "B_hv":B_hv, "lai":lai_field.values.flatten()[i:i+n], "vwc":vwc_field.values.flatten()[i:i+n], "pol_value":pol_field.values.flatten()[i:i+n], "theta":theta_field.values.flatten()[i:i+n], "omega": omega, "coef": coef}
else:
dic = {"mv":sm_field.values.flatten()[i:i+n], "C_hh":C_hh, "C_vv":C_vv, "D_hh":D_hh, "D_vv":D_vv, "C_hv":C_hv, "D_hv":D_hv, "V2":V2[i:i+n], "s":s, "clay":clay, "sand":sand, "f":freq, "bulk":bulk, "l":l, "canopy":canopy, "d":height_field.values.flatten()[i:i+n], "V1":V1[i:i+n], "A_hh":A_hh, "B_hh":B_hh, "A_vv":A_vv, "B_vv":B_vv, "A_hv":A_hv, "B_hv":B_hv, "lai":lai_field.values.flatten()[i:i+n], "vwc":vwc_field.values.flatten()[i:i+n], "pol_value":pol_field.values.flatten()[i:i+n], "theta":theta_field.values.flatten()[i:i+n], "omega": omega, "coef": coef[i:i+n]}
if canopy == 'turbid_isotropic' and surface == 'WaterCloud':
var_opt = ['coef', 'C_vv', 'D_vv', 'C_hv', 'D_hv']
guess = [0.01, C_vv, D_vv, C_hv, D_hv]
bounds = [(0.1,5.5), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]
elif canopy == 'turbid_isotropic':
var_opt = ['coef']
guess = [0.1]
bounds = [(0.,2)]
elif surface == 'WaterCloud' and canopy == 'water_cloud':
# var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv', 'C_vv', 'D_vv', 'C_hv', 'D_hv']
# guess = [A_vv, B_vv, A_hv, B_hv, C_vv, D_vv, C_hv, D_hv]
# bounds = [(0.,1), (guess[1]*0.55, guess[1]*1.55), (0.,1), (guess[3]*0.75, guess[3]*1.25), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]
var_opt = ['C_vv', 'D_vv', 'C_hv', 'D_hv']
guess = [C_vv, D_vv, C_hv, D_hv]
bounds = [(-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]
elif canopy == 'water_cloud':
var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv']
guess = [A_vv, B_vv, A_hv, B_hv]
bounds = [(0.,1), (0.,1), (0.00001,1), (0.00001,1)]
# var_opt = ['omega']
# guess = [0.1]
# bounds = [(0.,5.5)]
# var_opt = ['s', 'coef', 'omega']
# guess = [0.01, 0.1, 0.01]
# bounds = [(0.001,0.03),(0.,2.5),(0.001,0.1)]
# var_opt = ['C_hv', 'D_hv']
# guess = [-13, 14]
# bounds = [(-200.,100.),(-200.,400.)]
# var_opt = ['A_vv', 'B_vv']
# try:
# guess = [res.x[0], res.x[1]]
# except:
# guess = [0.005, 0.09]
# # bounds = [(0.000,5.),(0.001,5.)]
# bounds = [(guess[0]*0.75, guess[0]*1.25), (guess[1]*0.75, guess[1]*1.25)]
# bounds = [(guess[0]*0.9, guess[0]*1.1), (guess[1]*0.75, guess[1]*1.25)]
# var_opt = ['coef', 'omega']
# guess = [0.1, 0.22]
# bounds = [(0.,5.5),(0.00001,0.2)]
method = 'L-BFGS-B'
# method = 'trust-exact'
res = minimize(fun_opt,guess,bounds=bounds, method=method)
fun_opt(res.x)
for j in range(len(res.x)):
aaa[j].append(res.x[j])
field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field = data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol)
V1 = lai_field.values.flatten()
V2 = V1 # initialize in surface model
#-----------------------------------------------------------------
for i in range(len(res.x)):
exec('%s = %s' % (var_opt[i],aaa[i]))
ke = coef * np.sqrt(lai_field.values.flatten())
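# canopy extinction coefficient derived from LAI; used for both polarisations (ke_h, ke_v) below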
# ke = smooth(ke, 11)
soil = Soil(mv=sm_field.values.flatten(), C_hh=np.array(C_hh), C_vv=np.array(C_vv), D_hh=np.array(D_hh), D_vv=np.array(D_vv), C_hv=np.array(C_hv), D_hv=np.array(D_hv), s=s, clay=clay, sand=sand, f=freq, bulk=bulk, l=l)
can = OneLayer(canopy=canopy, ke_h=ke, ke_v=ke, d=height_field.values.flatten(), ks_h = omega*ke, ks_v = omega*ke, V1=np.array(V1), V2=np.array(V2), A_hh=np.array(A_hh), B_hh=np.array(B_hh), A_vv=np.array(A_vv), B_vv=np.array(B_vv), A_hv=np.array(A_hv), B_hv=np.array(B_hv))
S = model.RTModel(surface=soil, canopy=can, models=models, theta=theta_field.values.flatten(), freq=freq)
S.sigma0()
#-----------------------------------------------------------------
date = field_data.index
colormap = plt.get_cmap(colormaps[j])
colors = [colormap(jj) for jj in np.linspace(0.35, 1., 4)]
# ax.plot(10*np.log10(pol_field), 'ks-', label='Sentinel-1 Pol: ' + pol, linewidth=3)
# ax.plot(date, 10*np.log10(S.__dict__['s0g'][pol[::-1]]), color=colors[0], marker='s', linestyle='--', label=pol+' s0g')
# ax.plot(date, 10*np.log10(S.__dict__['s0c'][pol[::-1]]), color=colors[1], marker='s', linestyle='--', label=pol+' s0c')
# ax.plot(date, 10*np.log10(S.__dict__['s0cgt'][pol[::-1]]), 'ms-', label=pol+' s0cgt')
# ax.plot(date, 10*np.log10(S.__dict__['s0gcg'][pol[::-1]]), 'ys-', label=pol+' s0gcg')
mask = ~np.isnan(pol_field.values.flatten()) & ~np.isnan(S.__dict__['stot'][pol[::-1]])
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress((pol_field.values.flatten()[mask]), (S.__dict__['stot'][pol[::-1]][mask]))
slope1, intercept1, r_value1, p_value1, std_err1 = scipy.stats.linregress(10*np.log10(pol_field.values.flatten())[mask], 10*np.log10(S.__dict__['stot'][pol[::-1]])[mask])
rmse = rmse_prediction(10*np.log10(pol_field.values.flatten()), 10*np.log10(S.__dict__['stot'][pol[::-1]]))
if k == 'Oh92':
hm = 'Oh92'
colors = 'blue'
elif k == 'Oh04':
hm = 'Oh04'
colors = 'red'
elif k == 'Dubois95':
hm='Dubois95'
colors = 'orange'
elif k == 'WaterCloud':
hm = 'Water Cloud'
colors = 'purple'
elif k == 'I2EM':
hm = 'IEM'
colors = 'green'
if plot == 'all':
if kk == 'turbid_isotropic':
ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color=colors, marker='s', linestyle='dashed', label = hm+ ' + ' + 'SSRT' + '; Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])
else:
ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color=colors, marker='s', label = hm+ ' + ' + 'Water Cloud' + '; Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])
if plot == 'single':
if style == 'scatterplot':
if pol == 'vv':
ax.set_xlim([-22.5,-7.5])
elif pol == 'vh':
ax.set_xlim([-30,-15])
if style_2 == 'scatterplot_single_ESU':
ax.plot(10*np.log10(pol_field.values.flatten()),10*np.log10(S.__dict__['stot'][pol[::-1]]), 'rs', label=field)
x = 10*np.log10(pol_field.values.flatten())
y = 10*np.log10(S.__dict__['stot'][pol[::-1]])
lower_position = np.nanargmin(x)
upper_position = np.nanargmax(x)
ax.plot(np.array((x[lower_position],x[upper_position])),np.array((y[lower_position],y[upper_position])), '--r')
else:
aa = []
bb = []
# cc = []
# field_plot = ['508_high', '508_low', '508_med']
jj = 0
colors = ['ks', 'ys', 'ms', 'rs']
for field in field_plot:
df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)
field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field = data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol)
soil = Soil(mv=sm_field.values.flatten(), C_hh=np.array(C_hh), C_vv=np.array(C_vv), D_hh=np.array(D_hh), D_vv=np.array(D_vv), C_hv=np.array(C_hv), D_hv=np.array(D_hv), s=s, clay=clay, sand=sand, f=freq, bulk=bulk, l=l)
can = OneLayer(canopy=canopy, ke_h=ke, ke_v=ke, d=height_field.values.flatten(), ks_h = omega*ke, ks_v = omega*ke, V1=np.array(V1), V2=np.array(V2), A_hh=np.array(A_hh), B_hh=np.array(B_hh), A_vv=np.array(A_vv), B_vv=np.array(B_vv), A_hv=np.array(A_hv), B_hv=np.array(B_hv))
S = model.RTModel(surface=soil, canopy=can, models=models, theta=theta_field.values.flatten(), freq=freq)
S.sigma0()
ax.plot(10*np.log10(pol_field.values.flatten()),10*np.log10(S.__dict__['stot'][pol[::-1]]), colors[jj], label=field)
slope, intercept, r_value, p_value, std_err = linregress(10*np.log10(pol_field.values.flatten())[~np.isnan(10*np.log10(S.__dict__['stot'][pol[::-1]]))], 10*np.log10(S.__dict__['stot'][pol[::-1]])[~np.isnan(10*np.log10(S.__dict__['stot'][pol[::-1]]))])
line = slope * 10*np.log10(S.__dict__['stot'][pol[::-1]]) + intercept
# ax.plot(10*np.log10(S.__dict__['stot'][pol[::-1]]), line)
lower_position = np.nanargmin(line)
upper_position = np.nanargmax(line)
ax.plot(np.array((10*np.log10(S.__dict__['stot'][pol[::-1]])[lower_position],10*np.log10(S.__dict__['stot'][pol[::-1]])[upper_position])),np.array((line[lower_position],line[upper_position])), '--'+colors[jj][0])
aa = np.append(aa, 10*np.log10(pol_field.values.flatten()))
bb = np.append(bb, 10*np.log10(S.__dict__['stot'][pol[::-1]]))
jj = jj+1
else:
ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color='orange', marker='s', label=S.models['surface']+ ' + ' + S.models['canopy'] + ' Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])
ax.plot(date, 10*np.log10(S.__dict__['s0g'][pol[::-1]]), color='red', marker='s', label='Ground contribution')
ax.plot(date, 10*np.log10(S.__dict__['s0c'][pol[::-1]]), color='green', marker='s', label='Canopy contribution')
j = j+1
if style == 'scatterplot':
pass
else:
ax.plot(10*np.log10(pol_field), 'ks-', label='Sentinel-1 Pol: ' + pol, linewidth=3)
plt.legend()
plt.title(field)
if plot == 'all':
# plt.show()
plt.savefig(plot_output_path+pol+'_all_'+opt_mod)
if plot == 'single':
if style == 'scatterplot':
plt.ylabel(surface + ' ' + canopy + ' [dB]')
plt.xlabel('Sentinel-1 [dB]')
plt.legend()
x = np.linspace(np.min(10*np.log10(pol_field.values.flatten()))-2, np.max(10*np.log10(pol_field.values.flatten()))+2, 16)
ax.plot(x,x)
if style_2 == 'scatterplot_single_ESU':
www = rmse_prediction(10*np.log10(pol_field).values.flatten(), 10*np.log10(S.__dict__['stot'][pol[::-1]]))
plt.title(pol+' ' + field + ' ' + surface + ' ' + canopy + '$R^2$='+str(r_value)+' RMSE='+str(www))
plt.savefig(plot_output_path+'scatterplot_fertig_single_'+field+'_'+pol+'_'+file_name+'_'+S.models['surface']+'_'+S.models['canopy'])
else:
www = rmse_prediction(aa, bb)
# slope, intercept, r_value, p_value, std_err = linregress(aaa[~np.isnan(bbb)], bbb[~np.isnan(bbb)])
plt.title(pol+' ' + field + ' ' + surface + ' ' + canopy + '$R^2$='+str(r_value)+' RMSE='+str(www))
plt.savefig(plot_output_path+'scatterplot_fertig_'+field+'_'+pol+'_'+file_name+'_'+S.models['surface']+'_'+S.models['canopy'])
else:
plt.savefig(plot_output_path+pol+'_single_'+opt_mod+'_'+S.models['surface']+'_'+S.models['canopy'])
pdb.set_trace()
|
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Proof for adding 3 operand xmm/memory fp scalar ops."""
import proof_tools
from proof_tools_templates import XmmOrMemory3operand
# FP scalar (single precision/double precision) AVX1 instructions of the form:
# src1 is an xmm register.
# src2 is an xmm register or memory operand.
# dest is an xmm register.
#
# These essentially operate on the least significant bits of the vector.
MNEMONICS = [
'vaddsd', 'vaddss',
'vdivsd', 'vdivss',
'vmaxsd', 'vmaxss',
'vminsd', 'vminss',
'vmulsd', 'vmulss',
'vsubsd', 'vsubss'
]
def Validate(trie_diffs, bitness):
"""Validates that all allowed patterns of MNEMONICS are added."""
expected_adds = set()
for mnemonic in MNEMONICS:
expected_adds.update(XmmOrMemory3operand(mnemonic_name=mnemonic,
bitness=bitness))
proof_tools.AssertDiffSetEquals(
trie_diffs,
expected_adds=expected_adds,
expected_removes=set())
if __name__ == '__main__':
proof_tools.RunProof(proof_tools.ParseStandardOpts(), Validate)
|
from django.test import TestCase
from ..models import Puppy
class PuppyTest(TestCase):
""" Test module for Puppy model """
def setUp(self):
Puppy.objects.create(
name='Casper', age=3, breed='Bull Dog', color='Black')
Puppy.objects.create(
name='Muffin', age=1, breed='Gradane', color='Brown')
def test_puppy_breed(self):
puppy_casper = Puppy.objects.get(name='Casper')
puppy_muffin = Puppy.objects.get(name='Muffin')
self.assertEqual(
puppy_casper.get_breed(), "Casper belongs to Bull Dog breed.")
self.assertEqual(
puppy_muffin.get_breed(), "Muffin belongs to Gradane breed.")
|
"""
Implementation for AWS SecretsManager
@author Arttu Manninen <arttu@kaktus.cc>
"""
import re
from config.external.aws.boto3 import Boto3
from config.external.interface import ExternalInterface
boto3 = Boto3()
class SecretsManager(ExternalInterface):
def load(self):
""" Load AWS SecretManager secrets to the configuration """
prefix = self.config.get('aws.secretsmanager.prefix', default='')
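# Secrets may be namespaced as "<prefix>@<key>"; the prefix is stripped from the key before the value is stored.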
client = boto3.client('secretsmanager')
paginator = client.get_paginator('list_secrets')
secrets = []
for page in paginator.paginate():
for _i, secret_metadata in enumerate(page['SecretList']):
secrets.append(secret_metadata)
def sort_secrets(secret):
""" Sort secrets """
if '@' in secret['Name']:
return 1
return 0
secrets.sort(key=sort_secrets)
for secret_metadata in secrets:
name = secret_metadata['Name']
key = name
if prefix and re.search('@', key):
if name.find(prefix + '@') != 0:
continue
key = name[len(prefix) + 1:]
if self.config.get('aws.secretsmanager.skip_unprefixed') and \
(name.find(prefix + '@') != 0):
continue
stored_secret = client.get_secret_value(SecretId=name)
stored_value = self._parse_secret_value(stored_secret['SecretString'])
# Special case: when the name of the secret is "config" it is handled
# as a full set of configuration instead of a subset
if key == 'config':
self.config.set(None, stored_value)
break
self.config.set(key, stored_value)
|
ip_ranges = {
"Bank of Canada": [
"140.80.0.0/16"
],
"CRTC": [
"199.246.230.0/23",
"199.246.232.0/21",
"199.246.240.0/21",
"199.246.248.0/22",
"199.246.252.0/23"
],
"Canada Centre for Inland Waters": [
"192.75.68.0/24",
"198.73.135.0/24",
"198.73.136.0/24",
"205.189.5.0/24",
"205.189.6.0/23"
],
"Canadian Broadcasting Corporation": [
"159.33.160.0/19",
"159.33.0.0/19"
],
"Canadian Department of National Defence": [
"131.132.0.0/14",
"131.136.0.0/14",
"131.140.0.0/15"
],
"Canadian House of Commons": [
"192.197.82.0/24"
],
"Canadian Hydrographic Service": [
"204.187.48.0/24"
],
"Canadian Museum of Civilization": [
"204.19.14.0/24"
],
"Canadian Space Agency": [
"142.74.1.0/24",
"142.74.2.0/23",
"142.74.4.0/24"
],
"Communications Security Establishment Canada": [
"108.174.30.160/29"
],
"Correctional Services Canada": [
"142.191.8.0/21",
"142.236.0.0/17"
],
"Department of Fisheries and Oceans": [
"192.197.244.0/24",
"192.139.141.0/24",
"192.197.243.0/24",
"198.103.161.0/24"
],
"Department of Justice Canada": [
"199.212.200.0/24",
"199.212.215.0/23"
],
"Employment and Immigration Canada": [
"167.227.0.0/16"
],
"Environment Canada": [
"199.212.16.0/22",
"199.212.20.0/23",
"205.189.8.0/23",
"205.189.10.0/24",
"205.211.132.0/23",
"205.211.134.0/24"
],
"Federal Court of Canada": [
"198.103.145.0/24"
],
"Finance Canada": [
"198.103.32.0/19"
],
"Foreign Affairs Canada": [
"216.174.155.0/28"
],
"Government of Canada": [
"192.139.201.0/24",
"192.139.202.0/23",
"192.139.204.0/24",
"192.197.77.0/23",
"192.197.80.0/24",
"192.197.84.0/24",
"192.197.86.0/24"
],
"Health Canada": [
"204.187.49.0/24",
],
"Industry Canada": [
"192.197.183.0/24",
"161.187.0.0/16",
"142.53.0.0/16",
"192.197.185.0/24",
"192.197.178.0/23",
"192.197.180.0/23",
"192.197.182.0/24",
"192.197.184.0",
"142.92.0.0/16",
"192.75.72.0/16"
],
"Institute for Biodiagnostics": [
"192.70.172.0/24"
],
"Library and Archives Canada": [
"142.78.0.0/16"
],
"National Research Council": [
"132.246.0.0/16",
"192.75.14.0/24",
"192.139.21.0/24",
"192.139.198.0/24",
"204.174.103.0/24",
"132.246.3.0/24"
],
"National Research Council (SSC)": [
"167.37.244.0/24",
"167.37.246.0/23",
"167.37.249.0/24",
"192.139.116.0/23"
],
"National Research Council (UBC)": [
"206.12.28.0/24"
],
"National Research Council (UofT)": [
"198.164.40.0/22"
],
"Natural Resources Canada": [
"132.156.4.0/23",
"132.156.10.0/23",
"132.156.12.0/23",
"132.156.16.0/23",
"132.156.20.0/22",
"132.156.46.0/23",
"132.156.120.0/23",
"192.67.45.0/24",
"192.75.99.0/24",
"192.139.6.0/24",
"192.139.7.0/24",
"192.139.194.0/24",
"132.156.0.0/16",
"192.197.114.0/24",
"192.197.115.0/24"
],
"Service Canada": [
"216.174.155.0/28"
],
"Shared Services Canada": [
"167.55.240.0/24",
"205.194.0.0/17",
"205.193.0.0/16",
"167.32.0.0/17",
"192.197.69.0/24",
"192.197.76.0/23",
"192.197.79.0/24",
"192.197.83.0/24",
"198.103.0.0/16"
],
"Statistics Canada": [
"142.206.0.0/16"
],
"Transport Canada": [
"142.209.0.0/16",
"142.210.0.0/15",
"198.103.96.0/24"
]
}
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.get_loyalty_stores_corporation_id_offers_required_item import GetLoyaltyStoresCorporationIdOffersRequiredItem # noqa: F401,E501
class GetLoyaltyStoresCorporationIdOffers200Ok(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'offer_id': 'int',
'type_id': 'int',
'quantity': 'int',
'lp_cost': 'int',
'isk_cost': 'int',
'ak_cost': 'int',
'required_items': 'list[GetLoyaltyStoresCorporationIdOffersRequiredItem]'
}
attribute_map = {
'offer_id': 'offer_id',
'type_id': 'type_id',
'quantity': 'quantity',
'lp_cost': 'lp_cost',
'isk_cost': 'isk_cost',
'ak_cost': 'ak_cost',
'required_items': 'required_items'
}
def __init__(self, offer_id=None, type_id=None, quantity=None, lp_cost=None, isk_cost=None, ak_cost=None, required_items=None): # noqa: E501
"""GetLoyaltyStoresCorporationIdOffers200Ok - a model defined in Swagger""" # noqa: E501
self._offer_id = None
self._type_id = None
self._quantity = None
self._lp_cost = None
self._isk_cost = None
self._ak_cost = None
self._required_items = None
self.discriminator = None
self.offer_id = offer_id
self.type_id = type_id
self.quantity = quantity
self.lp_cost = lp_cost
self.isk_cost = isk_cost
if ak_cost is not None:
self.ak_cost = ak_cost
self.required_items = required_items
@property
def offer_id(self):
"""Gets the offer_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
offer_id integer # noqa: E501
:return: The offer_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._offer_id
@offer_id.setter
def offer_id(self, offer_id):
"""Sets the offer_id of this GetLoyaltyStoresCorporationIdOffers200Ok.
offer_id integer # noqa: E501
:param offer_id: The offer_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
if offer_id is None:
raise ValueError("Invalid value for `offer_id`, must not be `None`") # noqa: E501
self._offer_id = offer_id
@property
def type_id(self):
"""Gets the type_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
type_id integer # noqa: E501
:return: The type_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._type_id
@type_id.setter
def type_id(self, type_id):
"""Sets the type_id of this GetLoyaltyStoresCorporationIdOffers200Ok.
type_id integer # noqa: E501
:param type_id: The type_id of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
if type_id is None:
raise ValueError("Invalid value for `type_id`, must not be `None`") # noqa: E501
self._type_id = type_id
@property
def quantity(self):
"""Gets the quantity of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
quantity integer # noqa: E501
:return: The quantity of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this GetLoyaltyStoresCorporationIdOffers200Ok.
quantity integer # noqa: E501
:param quantity: The quantity of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
if quantity is None:
raise ValueError("Invalid value for `quantity`, must not be `None`") # noqa: E501
self._quantity = quantity
@property
def lp_cost(self):
"""Gets the lp_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
lp_cost integer # noqa: E501
:return: The lp_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._lp_cost
@lp_cost.setter
def lp_cost(self, lp_cost):
"""Sets the lp_cost of this GetLoyaltyStoresCorporationIdOffers200Ok.
lp_cost integer # noqa: E501
:param lp_cost: The lp_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
if lp_cost is None:
raise ValueError("Invalid value for `lp_cost`, must not be `None`") # noqa: E501
self._lp_cost = lp_cost
@property
def isk_cost(self):
"""Gets the isk_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
isk_cost integer # noqa: E501
:return: The isk_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._isk_cost
@isk_cost.setter
def isk_cost(self, isk_cost):
"""Sets the isk_cost of this GetLoyaltyStoresCorporationIdOffers200Ok.
isk_cost integer # noqa: E501
:param isk_cost: The isk_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
if isk_cost is None:
raise ValueError("Invalid value for `isk_cost`, must not be `None`") # noqa: E501
self._isk_cost = isk_cost
@property
def ak_cost(self):
"""Gets the ak_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
Analysis kredit cost # noqa: E501
:return: The ak_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: int
"""
return self._ak_cost
@ak_cost.setter
def ak_cost(self, ak_cost):
"""Sets the ak_cost of this GetLoyaltyStoresCorporationIdOffers200Ok.
Analysis kredit cost # noqa: E501
:param ak_cost: The ak_cost of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: int
"""
self._ak_cost = ak_cost
@property
def required_items(self):
"""Gets the required_items of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
required_items array # noqa: E501
:return: The required_items of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:rtype: list[GetLoyaltyStoresCorporationIdOffersRequiredItem]
"""
return self._required_items
@required_items.setter
def required_items(self, required_items):
"""Sets the required_items of this GetLoyaltyStoresCorporationIdOffers200Ok.
required_items array # noqa: E501
:param required_items: The required_items of this GetLoyaltyStoresCorporationIdOffers200Ok. # noqa: E501
:type: list[GetLoyaltyStoresCorporationIdOffersRequiredItem]
"""
if required_items is None:
raise ValueError("Invalid value for `required_items`, must not be `None`") # noqa: E501
self._required_items = required_items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetLoyaltyStoresCorporationIdOffers200Ok):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_ec2, mock_kms
from tests import EXAMPLE_AMI_ID
@mock_ec2
@mock_kms
def test_run_instance_with_encrypted_ebs():
kms = boto3.client("kms", region_name="us-east-1")
resp = kms.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT")
key_id = resp["KeyMetadata"]["Arn"]
ec2 = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda2",
"Ebs": {
"VolumeSize": 50,
"VolumeType": "gp2",
"Encrypted": True,
"KmsKeyId": key_id,
},
}
],
}
instance = ec2.run_instances(**kwargs)
instance_id = instance["Instances"][0]["InstanceId"]
instances = (
ec2.describe_instances(InstanceIds=[instance_id])
.get("Reservations")[0]
.get("Instances")
)
volume = instances[0]["BlockDeviceMappings"][0]["Ebs"]
volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
volumes["Volumes"][0]["Encrypted"].should.equal(True)
volumes["Volumes"][0]["KmsKeyId"].should.equal(key_id)
|
# 1046. Last Stone Weight - LeetCode Contest
# https://leetcode.com/contest/weekly-contest-137/problems/last-stone-weight/
class Solution:
def lastStoneWeight(self, stones) -> int:
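# Simulate the process on a list kept in descending order: smash the two heaviest stones
# and re-insert the remainder at its sorted position.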
stones = sorted(stones,reverse=True)
while len(stones) > 1:
first_stone = stones.pop(0)
second_stone = stones.pop(0)
if first_stone > second_stone:
remains = first_stone - second_stone
i = 0
while i < len(stones) and stones[i] > remains:
i += 1
stones.insert(i,remains)
if len(stones) == 1:
return stones[0]
else:
return 0
s = Solution()
print(s.lastStoneWeight([2,7,4,1,8,1]))
|
import eel
import random
from time import sleep
eel.init("front")
@eel.expose
def randGen(top):
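# Push `top` random values between 1 and `top` to the front-end diceShow callback, pausing so the roll animation stays visible.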
for num in range(top):
#print in front end
eel.diceShow(random.randint(1,top))
if top > 100:
sleep(.015)
else:
sleep(.15)
eel.start("front.html")
|
from datetime import datetime
from django.shortcuts import get_object_or_404
from django.views import generic
from django.conf import settings
from django.db.models import Sum, Count, ExpressionWrapper, Max, Min, DurationField
import json
from .models import Observations, Pulsars, Proposals, Ephemerides, Utcs, get_observations_summary
from .plots import pulsar_summary_plot
from .logic import get_meertime_filters
from sentry_sdk import last_event_id
from django.shortcuts import render
def handler500(request):
if settings.ENABLE_SENTRY_DSN:
return render(request, "500.html", {"sentry_event_id": last_event_id(), "sentry_dsn": settings.SENTRY_DSN,})
else:
return render(request, "500.html", {})
class WelcomeView(generic.TemplateView):
"""
This is the landing page which informs visitors what they can find on the portal.
It also provides a link to the public data accessible without logging in.
"""
template_name = "dataportal/welcome.html"
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
qs = Pulsars.get_observations(mode="observations")
context["totals"] = qs.aggregate(global_tint_h=Sum("total_tint_h"), global_nobs=Sum("nobs"))
context["totals"]["global_npsr"] = qs.count()
return context
class SessionView(generic.ListView):
"""
Display observations in an observing session
"""
context_object_name = "obs_list"
template_name = "dataportal/session.html"
page_title = "last meertime session"
detail_url_name = "pulsar_detail"
get_proposal_filters = get_meertime_filters
def get_queryset(cls):
return Observations.get_last_session_by_gap(get_proposal_filters=cls.get_proposal_filters)
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
context["session_meta"] = get_observations_summary(context["obs_list"])
context["detail_url_name"] = cls.detail_url_name
context["title"] = cls.page_title
return context
class IndexBaseView(generic.ListView):
"""
Base view for main table views.
"""
context_object_name = "per_pulsar_list"
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
context["project_id"] = cls.request.GET.get("project_id")
context["band"] = cls.request.GET.get("band")
qs = context["per_pulsar_list"]
context["totals"] = qs.aggregate(global_tint_h=Sum("total_tint_h"), global_nobs=Sum("nobs"))
context["totals"]["global_npsr"] = qs.count()
return context
class FoldView(IndexBaseView):
"""
Display pulsars and the latest meertime observation data.
"""
template_name = "dataportal/index.html"
page_title = "folded observations"
detail_url_name = "pulsar_detail"
get_proposal_filters = get_meertime_filters
def get_queryset(cls):
qs = Pulsars.get_observations(
mode="observations",
proposal=cls.request.GET.get("project_id"),
band=cls.request.GET.get("band"),
get_proposal_filters=cls.get_proposal_filters,
)
if not cls.request.user.is_authenticated:
return qs.order_by("jname")
else:
return qs
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
proposal_filter = cls.get_proposal_filters()
context["projects"] = Proposals.objects.filter(**proposal_filter)
# page title
context["title"] = cls.page_title
if cls.request.user.is_authenticated:
context["detail_url_name"] = cls.detail_url_name
else:
context["detail_url_name"] = f"public_{cls.detail_url_name}"
return context
class SearchmodeView(IndexBaseView):
"""
Display pulsars and the latest observation data.
"""
template_name = "dataportal/searchmode.html"
get_proposal_filters = get_meertime_filters
detail_url_name = "pulsar_detail_search"
page_title = "searchmode observations"
def get_queryset(cls):
return Pulsars.get_observations(
mode="searchmode",
proposal=cls.request.GET.get("project_id"),
get_proposal_filters=cls.get_proposal_filters,
)
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
# page title
context["title"] = cls.page_title
context["detail_url_name"] = cls.detail_url_name
return context
class DetailView(generic.ListView):
context_object_name = "obs_list"
parent_url_name = "fold"
def setup(cls, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
cls.pulsar = get_object_or_404(Pulsars, jname=cls.kwargs["psr"])
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
# Add ephemeris to the context
context["psr"] = cls.kwargs["psr"]
try:
ephemeris = Ephemerides.objects.get(pulsar=cls.pulsar)
except Ephemerides.DoesNotExist:
ephemeris = None
updated = None
if ephemeris:
updated = ephemeris.updated_at
ephemeris = json.loads(ephemeris.ephemeris)
context["ephemeris"] = ephemeris
context["updated"] = updated
context["parent_url_name"] = cls.parent_url_name
# Add a payload for kronos/meerwatch links
context["kronos"] = settings.KRONOS_PAYLOAD
return context
class PulsarDetailView(DetailView):
"""
Display detail list of meertime observations for a single pulsar.
"""
template_name = "dataportal/show_single_psr.html"
get_proposal_filters = get_meertime_filters
def get_queryset(cls):
return cls.pulsar.observations_detail_data(get_proposal_filters=cls.get_proposal_filters)
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
# Add a summary plot to the context
plot_list = [(obs.utc.utc_ts, obs.snr_spip, obs.length, obs.band) for obs in context["obs_list"]]
# If no observations exist, the unpacking below will throw a value error
try:
[UTCs, snrs, lengths, bands] = list(zip(*plot_list))
except ValueError:
[UTCs, snrs, lengths, bands] = [
(),
(),
(),
(),
]
bokeh_js, bokeh_div = pulsar_summary_plot(UTCs, snrs, lengths, bands)
context["bokeh_js"] = bokeh_js
context["bokeh_div"] = bokeh_div
# get total size
qs = context["obs_list"]
context["total_size_estimate"] = qs.aggregate(total_size_estimate=Sum("estimated_size"))
# get other aggregates
annotations = {}
context["totals"] = qs.annotate(**annotations).aggregate(
tint=Sum("length"),
nobs=Count("id"),
project_count=Count("proposal", distinct=True),
timespan=ExpressionWrapper(Max("utc__utc_ts") - Min("utc__utc_ts"), output_field=DurationField()),
)
context["title"] = context["psr"]
return context
class SearchDetailView(DetailView):
"""
Display detail list of search mode observations for a single pulsar
"""
template_name = "dataportal/show_single_psr_search.html"
page_title_prefix = ""
get_proposal_filters = get_meertime_filters
def get_queryset(cls):
return cls.pulsar.searchmode_detail_data(get_proposal_filters=cls.get_proposal_filters)
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
# page title
context["title"] = f'{cls.page_title_prefix} {context["psr"]} searchmode'
return context
class ObservationDetailView(generic.TemplateView):
"""
Display details of a single observation
"""
template_name = "dataportal/observation.html"
def setup(cls, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
cls.beam = cls.kwargs["beam"]
cls.pulsar = get_object_or_404(Pulsars, jname=cls.kwargs["psr"])
cls.utc_str = cls.kwargs["utc"]
utc_ts = datetime.strptime(f"{cls.utc_str} +0000", "%Y-%m-%d-%H:%M:%S %z")
utc = get_object_or_404(Utcs, utc_ts=utc_ts)
cls.observation = get_object_or_404(Observations, pulsar=cls.pulsar, utc=utc, beam=cls.beam)
def get_context_data(cls, **kwargs):
context = super().get_context_data(**kwargs)
context["obs"] = cls.observation
# Add a payload for kronos/meerwatch links
context["kronos"] = settings.KRONOS_PAYLOAD
context["title"] = f"{cls.pulsar}/{cls.utc_str}/{cls.beam}"
return context
|
import sys
from ai_interface import *
from utils import *
class TestAI:
def __init__(self):
pass
def action(self, state):
res = {}
for ship in state.my_ships:
if state.my_side == Side.DEFENSE:
if ship.params.soul > 1 and ship.temp > 0:
res[ship.id] = [{'command': 'split', 'p1': 1, 'p2': 0, 'p3': 0, 'p4': 1}]
else:
gx, gy = calc_gravity(ship.x, ship.y)
res[ship.id] = [{'command': 'accel', 'x': gx, 'y': gy}]
else:
res[ship.id] = [
{'command': 'lazer', 'x': state.enemy_ships[0].x, 'y': state.enemy_ships[0].y,
'power': ship.params.p2}]
print(f'id={ship.id}, x={ship.x}, y={ship.y}, vx={ship.vx}, vy={ship.vy}, ')
return res
def set_specs(self, limit, side):
if side == Side.ATTACK:
return ShipParameter(0, (limit - 2) // 12, 0, 1)
else:
return ShipParameter(limit - (4 * 0 + 12 * 0 + 2 * 2), 0, 0, 2)
|
def count_subsets(nums, sum):
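# Bottom-up DP over (number index, target sum); O(n * sum) time and space.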
n = len(nums)
dp = [[-1 for x in range(sum + 1)] for y in range(n)]
# populate the sum = 0 columns, as we will always have an empty set for zero sum
for i in range(0, n):
dp[i][0] = 1
# with only one number, we can form a subset only when the required sum is equal to its value
for s in range(1, sum + 1):
dp[0][s] = 1 if nums[0] == s else 0  # the single number either matches s (one subset) or it does not (zero)
# process all subsets for all sums
for i in range(1, n):
for s in range(1, sum + 1):
count_by_excluding = dp[i - 1][s] # exclude the number
count_by_including = 0
if s >= nums[i]: # include the number, if it does not exceed the sum
count_by_including = dp[i - 1][s - nums[i]]
dp[i][s] = count_by_including + count_by_excluding
return dp[n - 1][sum] # the bottom-right corner will have our answer.
def main():
print("Total subsets : ", count_subsets([1, 1, 2, 3], 4))
print("Total subsets : ", count_subsets([1, 2, 7, 1, 5], 9))
main()
|
import numpy as np
import matplotlib.pyplot as plt
import reltest
from reltest.mctest import MCTestPSI, MCTestCorr
import reltest.mmd as mmd
import reltest.ksd as ksd
from reltest import kernel
from kmod.mctest import SC_MMD
from freqopttest.util import meddistance
import logging
import sys
import os
from ex_models import generatelRBM
## Setting
n_samples = 1000
ydim = 20
xdim = 5
n_trials = 300
to_perturb = float(sys.argv[1])
perturbs = [0.2, 0.25, 0.35, 0.4, 0.45, 0.5]
n_models = len(perturbs)
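# Override the second candidate model's perturbation with the value passed on the command line.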
perturbs[1] = to_perturb
print("perturb " + str(to_perturb))
print(perturbs)
def independent_test(perturbs, n_samples, n_trials, setting):
src = generatelRBM(perturbs, ydim, xdim)
res_psi= {'mmd_u':[],
'mmd_lin':[],
'ksd_u':[],
'ksd_lin':[],
}
res_cor= {
#'ksd_u_bh':[],
'ksd_u_by':[],
# 'ksd_u_bn':[],
#'mmd_u_bh':[],
'mmd_u_by':[],
#'mmd_u_bn':[],
}
model_dens = src.get_densities()
for j in range(n_trials):
models, Q = src.sample(n_samples, seed=j)
psiTest = MCTestPSI(Q.data())
corrtest = MCTestCorr(Q.data())
mmd_med = mmd.med_heuristic([i.data() for i in models], Q.data(),
subsample=1000)
ksd_med = ksd.med_heuristic(Q.data(),
subsample=1000)
mmd_kernel, ksd_kernel = kernel.KGauss(mmd_med), kernel.KGauss(ksd_med)
mmd_u = mmd.MMD_U(mmd_kernel)
mmd_lin = mmd.MMD_Linear(mmd_kernel)
ksd_u = ksd.KSD_U(ksd_kernel)
ksd_lin = ksd.KSD_Linear(ksd_kernel)
model_samples = [i.data() for i in models]
## PSI Based Test
res_psi['ksd_u'].append(psiTest.perform_tests(model_dens, ksd_u))
res_psi['ksd_lin'].append(psiTest.perform_tests(model_dens, ksd_lin))
res_psi['mmd_u'].append(psiTest.perform_tests(model_samples, mmd_u))
res_psi['mmd_lin'].append(psiTest.perform_tests(model_samples, mmd_lin))
## Correction Based Test
#res_cor['mmd_u_bh'].append(corrtest.perform_tests(model_samples, Q.data(), n_samples, mmd_u, split=0.5, density=False, correction=0))
res_cor['mmd_u_by'].append(corrtest.perform_tests(model_samples, mmd_u, split=0.5, density=False, correction=1))
# res_cor['mmd_u_bn'].append(corrtest.perform_tests(model_samples, Q.data(), n_samples, mmd_u, split=True, density=False, correction=2))
# res_cor['ksd_u_bh'].append(corrtest.perform_tests(model_dens, Q.data(), n_samples, ksd_u, split=0.5, density=True, correction=0))
res_cor['ksd_u_by'].append(corrtest.perform_tests(model_dens, ksd_u, split=0.5, density=True, correction=1))
# res_cor['ksd_u_bn'].append(corrtest.perform_tests(model_dens, Q.data(), n_samples, ksd_u, split=True, density=True, correction=2))
return res_psi,res_cor
setting = {'n':n_models,
'dim':ydim}
SAVE_DIR = "./temp/rbm/"
if not os.path.isdir(SAVE_DIR):
os.makedirs(SAVE_DIR)
np.save(SAVE_DIR+"PSI"+str(to_perturb),0)
np.save(SAVE_DIR+"COR"+str(to_perturb),0)
res_psi,res_cor = independent_test(perturbs,n_samples,n_trials,setting)
np.save(SAVE_DIR+"PSI"+str(to_perturb),res_psi)
np.save(SAVE_DIR+"COR"+str(to_perturb),res_cor)
|
#
# This file is part of ravstack. Ravstack is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import sys
import json
from . import ravello, util, runtime, defaults
from .runtime import LOG
def main():
"""Check a password."""
# We support two ways of validating a password:
#
# - If the username is "admin", the password is checked against the
# per-instance password in /var/run/ravstack. Per-instance means that the
# password is only valid under the current CloudInit instance, and is
# regenerated if the instance changes. This closes a security hole where
# static passwords could become embedded in a Ravello blueprint.
#
# - If the username is not "admin", the password is checked against the
# Ravello API. This also prevents the issue where a static passwords gets
# embedded into a blueprint. However, if you are preparing a public
# appliance, do not use this technique as the password is cached on disk
# by the default mod_authnz_external configuration in share/.
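# mod_authnz_external pipes the username and the password on separate stdin lines, each terminated by a newline.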
username = sys.stdin.readline()
if not username.endswith('\n'):
sys.exit(2)
username = username[:-1]
password = sys.stdin.readline()
if not password.endswith('\n'):
sys.exit(2)
password = password[:-1]
if username == 'admin':
instance = util.get_cloudinit_instance()
if not instance:
LOG.error('no cloudinit instance, cannot use `admin` user.')
sys.exit(1)
with open(defaults.password_file) as fin:
pwdata = json.loads(fin.read())
if instance not in pwdata:
LOG.error('instance not configured in password file.')
sys.exit(1)
if util.constant_time_strcmp(password, pwdata[instance]):
LOG.error('unable to authenticate user `{}`.'.format(username))
sys.exit(1)
else:
meta = ravello.get_injected_metadata()
appid = meta.get('appId')
if appid is None:
LOG.error('metadata not injected, cannot check password.')
sys.exit(3)
client = ravello.RavelloClient()
try:
client.login(username, password)
client.call('GET', '/applications/{}'.format(appid))
except Exception as e:
LOG.error('unable to authenticate user `{}`: {}'.format(username, e))
sys.exit(1)
LOG.info('successfully authenticated user `{}`.'.format(username))
sys.exit(0)
if __name__ == '__main__':
runtime.run_main(main)
|
import os
from leapp.actors import Actor
from leapp.models import RpmTransactionTasks
from leapp.tags import IPUWorkflowTag, FactsPhaseTag
class TransactionWorkarounds(Actor):
"""
Provides additional RPM transaction tasks based on bundled RPM packages.
After collecting bundled RPM packages, a message with relevant data will be produced.
"""
name = 'transaction_workarounds'
consumes = ()
produces = (RpmTransactionTasks,)
tags = (IPUWorkflowTag, FactsPhaseTag)
def process(self):
location = self.get_folder_path('bundled-rpms')
local_rpms = []
for name in os.listdir(location):
if name.endswith('.rpm'):
local_rpms.append(os.path.join(location, name))
if local_rpms:
self.produce(RpmTransactionTasks(local_rpms=local_rpms))
|
# Copyright 2014 Florian Ludwig
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import subprocess
import atexit
import shutil
import argparse
import pkg_resources
def start(root, address='127.0.0.1', port=8000):
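# Render the bundled nginx.conf template into a throw-away temp directory, start nginx against it,
# and register atexit handlers that remove the directory and kill the process.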
conf_template = pkg_resources.resource_string('nginc', 'nginx.conf')
conf_template = conf_template.decode('utf-8')
tmp = tempfile.mkdtemp(prefix='nginc')
@atexit.register
def cleanup_tmp():
shutil.rmtree(tmp)
root = os.path.abspath(root)
root = root.replace('"', '\\"')
config = conf_template.format(tmp=tmp, root=root, port=port, address=address)
conf_path = tmp + '/nginx.conf'
conf_file = open(conf_path, 'w')
conf_file.write(config)
conf_file.close()
proc = subprocess.Popen(['nginx', '-c', conf_path])
@atexit.register
def cleanup_proc():
try:
proc.kill()
except OSError:
pass
return proc
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=8000,
help='port to bind to')
parser.add_argument('-r', '--root', type=str, default='.',
help='directory to serve, defaults to current working directory')
parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
help='address to bind to')
parser.add_argument('-A', action='store_true',
help='shortcut for --address 0.0.0.0')
args = parser.parse_args()
address = args.address
if args.A:
address = '0.0.0.0'
proc = start(args.root, address, args.port)
try:
proc.wait()
except KeyboardInterrupt:
proc.kill()
|
import math
import random
import numpy as np
from src.dataclass import Context
from src.model import LinearAttention, Trainer
from src.utils.formatting import pretty_print
import tensorflow as tf
def setup_torch(seed: int):
random.seed(seed)
np.random.seed(seed)
def get_model(ctx: Context, load_model: bool) -> Trainer:
mod = Trainer(LinearAttention(ctx))
if ctx.model.print_on_init:
pretty_print(str(mod))
#parameters = sum(np.prod(p.size()) for p in filter(lambda p: p.requires_grad, mod.parameters()))
#base = int(math.log10(parameters) / 3)
#pretty_print(f'Parameters: {parameters / (1000 ** base):.1f}{" kMBT"[base]}')
if load_model:
mod.load()
return mod
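# Byte-level codec: strings are encoded as tensors of raw UTF-8 byte values and decoded back by mapping each byte to a character.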
def encode(prompt: str) -> tf.Tensor:
return tf.convert_to_tensor(np.frombuffer(prompt.encode('UTF-8'), dtype=np.uint8))
def decode(output: tf.Tensor) -> str:
return ''.join(chr(int(c)) for c in tf.reshape(output, [-1]).numpy())
|
#!/usr/bin/env python
from __future__ import print_function
import optparse
import re
from bs4 import BeautifulSoup
def ftp_profile(publish_settings):
"""Takes PublishSettings, extracts ftp user, password, and host"""
soup = BeautifulSoup(publish_settings, 'html.parser')
profiles = soup.find_all('publishprofile')
ftp_profile = [profile for profile in profiles if profile['publishmethod'] == 'FTP'][0]
matches = re.search('ftp://(.+)/site/wwwroot', ftp_profile['publishurl'])
host = matches.group(1) if matches else ''
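# Azure FTP usernames contain a backslash-escaped '$'; percent-encode it so it survives inside the ftp:// URL.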
username = ftp_profile['username'].replace("\\$", "%5C%24")
password = ftp_profile['userpwd']
return host, username, password, ftp_profile['publishurl']
def default_db_connection(publish_settings):
""""Takes PublishSettings looks for Azure default db connection, returns default db connection string for local environment and SQL to add user to local db"""
username, password = '', ''
soup = BeautifulSoup(publish_settings, 'html.parser')
connections = soup.find_all('add')
regex = 'Database=(.+);Data Source=(.+);User Id=(.+);Password=(.+)'
db_connection = [conn for conn in connections if conn['name'] == 'defaultConnection'][0]
matches = re.search(regex, db_connection['connectionstring'])
if matches:
username = matches.group(3)
password = matches.group(4)
return username, password
def main():
"""Executes program and handles options"""
p = optparse.OptionParser(
description='Parses an MS Azure PublishSettings file and returns properly formatted CLI commands for use with the Unix ftp command',
prog='get_ftp_cli',
version='%prog 1.0.1',
usage='%prog [path/to/*.PublishSettings]')
options, arguments = p.parse_args()
if len(arguments) == 1:
infile = open(arguments[0], 'r')
contents = infile.read()
ftp_host, ftp_username, ftp_password, publish_url = ftp_profile(contents)
db_user, db_passwd = default_db_connection(contents)
print('')
print('Execute the following command to connect to the Azure FTP:')
print('----------------------------------------------------------')
print('ftp ftp://{}:{}@{}'.format(ftp_username, ftp_password, ftp_host))
print('')
print('Execute the following command to put the contents of a directory to the Azure website:')
print('----------------------------------------------------------')
print('wput $HOME/Sites/[appdir]/* ftp://{}:{}@{}/'.format(ftp_username, ftp_password, publish_url))
print('')
print('Append the following line to `./httpd.conf`. (Substitute correct [Local_DB_Name]:')
print('----------------------------------------------------------')
print(
'''SetEnv MYSQLCONNSTR_defaultConnection "Database=[Local_DB_Name];Data Source=localhost;User Id={};Password={}"'''.format(db_user, db_passwd))
print('')
print('Execute the following command to add the user to the local MySQL database:')
print('----------------------------------------------------------')
print(
'''mysql -u root -p -e "GRANT ALL PRIVILEGES ON *.* TO '{}'@'localhost' IDENTIFIED BY '{}';"'''.format(db_user, db_passwd))
print('')
else:
p.print_help()
if __name__ == '__main__':
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, certificate=None, created=None, domain_names=None, fingerprint=None, id=None, labels=None, name=None, not_valid_after=None, not_valid_before=None, type=None, with_selector=None):
if certificate and not isinstance(certificate, str):
raise TypeError("Expected argument 'certificate' to be a str")
pulumi.set(__self__, "certificate", certificate)
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if domain_names and not isinstance(domain_names, list):
raise TypeError("Expected argument 'domain_names' to be a list")
pulumi.set(__self__, "domain_names", domain_names)
if fingerprint and not isinstance(fingerprint, str):
raise TypeError("Expected argument 'fingerprint' to be a str")
pulumi.set(__self__, "fingerprint", fingerprint)
if id and not isinstance(id, int):
raise TypeError("Expected argument 'id' to be a int")
pulumi.set(__self__, "id", id)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if not_valid_after and not isinstance(not_valid_after, str):
raise TypeError("Expected argument 'not_valid_after' to be a str")
pulumi.set(__self__, "not_valid_after", not_valid_after)
if not_valid_before and not isinstance(not_valid_before, str):
raise TypeError("Expected argument 'not_valid_before' to be a str")
pulumi.set(__self__, "not_valid_before", not_valid_before)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if with_selector and not isinstance(with_selector, str):
raise TypeError("Expected argument 'with_selector' to be a str")
pulumi.set(__self__, "with_selector", with_selector)
@property
@pulumi.getter
def certificate(self) -> str:
"""
(string) PEM encoded TLS certificate.
"""
return pulumi.get(self, "certificate")
@property
@pulumi.getter
def created(self) -> str:
"""
(string) Point in time when the Certificate was created at Hetzner Cloud (in ISO-8601 format).
"""
return pulumi.get(self, "created")
@property
@pulumi.getter(name="domainNames")
def domain_names(self) -> Sequence[str]:
"""
(list) Domains and subdomains covered by the certificate.
"""
return pulumi.get(self, "domain_names")
@property
@pulumi.getter
def fingerprint(self) -> str:
"""
(string) Fingerprint of the certificate.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter
def id(self) -> int:
"""
(int) Unique ID of the certificate.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def labels(self) -> Mapping[str, Any]:
"""
(map) User-defined labels (key-value pairs) assigned to the certificate.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
(string) Name of the Certificate.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notValidAfter")
def not_valid_after(self) -> str:
"""
(string) Point in time when the Certificate stops being valid (in ISO-8601 format).
"""
return pulumi.get(self, "not_valid_after")
@property
@pulumi.getter(name="notValidBefore")
def not_valid_before(self) -> str:
"""
(string) Point in time when the Certificate becomes valid (in ISO-8601 format).
"""
return pulumi.get(self, "not_valid_before")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="withSelector")
def with_selector(self) -> Optional[str]:
return pulumi.get(self, "with_selector")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
certificate=self.certificate,
created=self.created,
domain_names=self.domain_names,
fingerprint=self.fingerprint,
id=self.id,
labels=self.labels,
name=self.name,
not_valid_after=self.not_valid_after,
not_valid_before=self.not_valid_before,
type=self.type,
with_selector=self.with_selector)
def get_certificate(id: Optional[int] = None,
name: Optional[str] = None,
with_selector: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Provides details about a specific Hetzner Cloud Certificate.
```python
import pulumi
import pulumi_hcloud as hcloud
sample_certificate1 = hcloud.get_certificate(name="sample-certificate-1")
sample_certificate2 = hcloud.get_certificate(id=4711)
```
:param int id: ID of the certificate.
:param str name: Name of the certificate.
:param str with_selector: [Label selector](https://docs.hetzner.cloud/#overview-label-selector)
"""
__args__ = dict()
__args__['id'] = id
__args__['name'] = name
__args__['withSelector'] = with_selector
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('hcloud:index/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
certificate=__ret__.certificate,
created=__ret__.created,
domain_names=__ret__.domain_names,
fingerprint=__ret__.fingerprint,
id=__ret__.id,
labels=__ret__.labels,
name=__ret__.name,
not_valid_after=__ret__.not_valid_after,
not_valid_before=__ret__.not_valid_before,
type=__ret__.type,
with_selector=__ret__.with_selector)
|
from jpype import *
import yaml
import numpy as np
import sys
sys.path.append("resources")
from python.TreeBuilder import *
import unittest
startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % "./RiverSolver.jar")
PokerSolver = JClass('icybee.solver.runtime.PokerSolver')
class TestSolver(unittest.TestCase):
@classmethod
def setUpClass(self):
with open('resources/yamls/general_rule.yaml') as fhdl:
self.conf = yaml.safe_load(fhdl)
self.ps_shortdeck = PokerSolver("Dic5Compairer",
"./resources/compairer/card5_dic_sorted_shortdeck.txt",
376993,
['A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6'],
['h', 's', 'd', 'c']
)
self.ps_holdem = PokerSolver("Dic5Compairer",
"./resources/compairer/card5_dic_sorted.txt",
2598961,
['A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'],
['h', 's', 'd', 'c']
)
@classmethod
def tearDownClass(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_shortdeck_flop(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 2,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["1_pot"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_shortdeck.build_game_tree("./.tree.km")
result_json = self.ps_shortdeck.train(
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"Kd,Jd,Td",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
def test_shortdeck_turn(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 3,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["0.5_pot","1_pot","2_pot","all-in"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_shortdeck.build_game_tree("./.tree.km")
result_json = self.ps_shortdeck.train(
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"Kd,Jd,Td,7s",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
def test_shortdeck_river(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 4,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["0.5_pot","1_pot","2_pot","all-in"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_shortdeck.build_game_tree("./.tree.km")
result_json = self.ps_shortdeck.train(
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76",
"Kd,Jd,Td,7s,8s",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
def test_holdem_turn(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 3,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["0.5_pot","1_pot","2_pot","all-in"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_holdem.build_game_tree("./.tree.km")
result_json = self.ps_holdem.train(
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76,83",
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76,83",
"Kd,Jd,Td,7s",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
def test_holdem_river(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 4,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["0.5_pot","1_pot","2_pot","all-in"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_holdem.build_game_tree("./.tree.km")
result_json = self.ps_holdem.train(
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76,83",
"AA,KK,QQ,JJ,TT,99,88,77,66,AK,AQ,AJ,AT,A9,A8,A7,A6,KQ,KJ,KT,K9,K8,K7,K6,QJ,QT,Q9,Q8,Q7,Q6,JT,J9,J8,J7,J6,T9,T8,T7,T6,98,97,96,87,86,76,83",
"Kd,Jd,Td,7s,8s",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
def test_holdem_river_range_with_float(self):
rule = RulesBuilder(
self.conf,
current_commit = [2,2],
current_round = 4,
raise_limit = 1,
check_limit = 2,
small_blind = 0.5,
big_blind = 1,
stack = 10,
bet_sizes = ["0.5_pot","1_pot","2_pot","all-in"],
)
gameTree = PartGameTreeBuilder(rule)
depth = np.inf
json = gameTree.gen_km_json("./.tree.km".format(depth),limit=depth,ret_json=True)
self.ps_holdem.build_game_tree("./.tree.km")
result_json = self.ps_holdem.train(
"AA:0.7,KK:0.6,QQ:0.5,76:0.4,83:0.9",
"AA:0.87,KK:0.9,QQ:0.2,76:0.5,83:0.4",
"Kd,Jd,Td,7s,8s",
50, # iterations
10, # print_interval
False, # debug
True, # parallel
None,
"log.txt",
"discounted_cfr",
"none",
-1, # threads
1, # action fork prob
1, # chance fork prob
1, # fork every tree depth
4, # fork minimal size
)
if __name__ == '__main__':
unittest.main(verbosity=1)
|
# Robert Li 18 March 2019 <robertwli@gmail.com>
# Predicting 2019 AFL results for fun
# Based on https://github.com/dashee87/blogScripts/blob/master/Jupyter/
# 2017-06-04-predicting-football-results-with-statistical-modelling.ipynb
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import poisson
import statsmodels.api as sm
import statsmodels.formula.api as smf
HISTORICAL_DATA = 'data_historical/afl.xlsx'
HISTORICAL_YEARS_TO_MODEL = 2016 # Code drops data older than this date
FIXTURE_DATA_2019 = 'data_current_season/afl-2019-AUSEasternStandardTime.xlsx'
RESULTS_FILE_MARKDOWN = 'outputs/results.md'
RESULTS_POISSON_GRAPH = 'outputs/results.png'
# Generate a basic distribution graph.
def graph_hist_poisson(historical_score_data):
    # construct a Poisson distribution from each side's mean score
    score_means = historical_score_data[['Home Score', 'Away Score']].mean()
    poisson_pred = np.column_stack([[poisson.pmf(i, score_means.iloc[j]) for i in range(201)] for j in range(2)])
    # plot histogram of actual scores
    plt.hist(historical_score_data[['Home Score', 'Away Score']].values, range(201),
             alpha=0.7, label=['Home', 'Away'], density=True, color=["#fc7e0f", "#BBBBBB"])
# add lines for the Poisson distributions
pois1, = plt.plot([i for i in range(0, 201)], poisson_pred[:, 0],
linestyle='-', marker='o', label="Home", color='#fc7e0f')
pois2, = plt.plot([i for i in range(0, 201)], poisson_pred[:, 1],
linestyle='-', marker='o', label="Away", color='#BBBBBB')
leg = plt.legend(loc='upper right', fontsize=13, ncol=2)
leg.set_title("Poisson Actual ", prop={'size': '14', 'weight': 'bold'})
plt.xticks([i for i in range(0, 201, 20)], [i for i in range(0, 201, 20)])
plt.xlabel("Points per Match", size=13)
plt.ylabel("Proportion of Matches", size=13)
plt.title("Number of Points per Match (AFL from 2009)", size=14, fontweight='bold')
plt.ylim([-0.004, 0.05])
plt.tight_layout()
plt.savefig(RESULTS_POISSON_GRAPH)
def simulate_match(stats_model, home_team, away_team, max_goals=200):
home_goals_avg = stats_model.predict(pd.DataFrame(data={'team': home_team,
'opponent': away_team,'home': 1},
index=[1])).values[0]
away_goals_avg = stats_model.predict(pd.DataFrame(data={'team': away_team,
'opponent': home_team,'home': 0},
index=[1])).values[0]
team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)] for team_avg in [home_goals_avg, away_goals_avg]]
return np.outer(np.array(team_pred[0]), np.array(team_pred[1]))
def main():
# Historical data
history = pd.read_excel(HISTORICAL_DATA, header=1)
history = history.drop(history.loc[history['Play Off Game?'] == 'Y'].index) # Get rid of finals games
graph_hist_poisson(history) # Generate Poisson distribution
history = history.drop(history.loc[history['Date'].dt.year < HISTORICAL_YEARS_TO_MODEL].index)
# 2019 Match fixtures data and cleansing
matches = pd.read_excel(FIXTURE_DATA_2019)
    matches = matches.drop(matches.loc[matches['Home Team'] == 'To be announced'].index)  # Drop finals fixtures (teams still to be announced)
    # Normalise fixture team names to match the historical data
    team_name_map = {
        r"^Adelaide Crows$": 'Adelaide',
        r"Brisbane Lions": 'Brisbane',
        r"Sydney Swans": 'Sydney',
        r"Geelong Cats": 'Geelong',
        r"West Coast Eagles": 'West Coast',
        r"Gold Coast Suns": 'Gold Coast',
    }
    for pattern, name in team_name_map.items():
        matches['Home Team'] = matches['Home Team'].str.replace(pattern, name, regex=True)
        matches['Away Team'] = matches['Away Team'].str.replace(pattern, name, regex=True)
# Clean up historical data for stats model
goal_model_data = pd.concat([
history[['Home Team', 'Away Team', 'Home Score']].assign(home=1).rename(
columns={'Home Team': 'team', 'Away Team': 'opponent', 'Home Score': 'goals'}),
history[['Away Team', 'Home Team', 'Away Score']].assign(home=0).rename(
columns={'Away Team': 'team', 'Home Team': 'opponent', 'Away Score': 'goals'})
])
# Create Poisson model
poisson_model = smf.glm(formula="goals ~ home + team + opponent", data=goal_model_data,
family=sm.families.Poisson()).fit()
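    # The fitted coefficients capture each team's attacking strength ('team'),
    # each opponent's defensive strength ('opponent') and home-ground advantage ('home').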
# poisson_model.summary()
# Use Poisson model to simulate every matchup for 2019 and write results to markdown file.
with open(RESULTS_FILE_MARKDOWN, 'w') as file:
print('# Stat model results', file=file)
print('## Predictions for 2019 AFL Season', file=file)
print('| Round | Predicted Winner | Home Team | Away Team '
'| Chance Home Team Wins | Chance of Draw | Chance Away Team Wins |', file=file)
print('| --- | --- | --- | --- | ---: | ---: | ---: |', file=file)
for index, row in matches.iterrows():
match_info = row
round_nb = match_info['Round Number']
home_team = match_info['Home Team']
away_team = match_info['Away Team']
home_away_sim = simulate_match(poisson_model, home_team, away_team, max_goals=200)
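            # home_away_sim[i, j] = P(home scores i, away scores j): the lower triangle
            # is a home win, the diagonal a draw, and the upper triangle an away win.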
home_win_perc = np.sum(np.tril(home_away_sim, -1))
draw_perc = np.sum(np.diag(home_away_sim))
away_win_perc = np.sum(np.triu(home_away_sim, 1))
predicted_winner = '???'
if home_win_perc > away_win_perc:
predicted_winner = home_team
else:
predicted_winner = away_team
print(f'| {round_nb} | {predicted_winner} | {home_team} | {away_team} | '
f'{home_win_perc:.2%} | {draw_perc:.2%} | {away_win_perc:.2%} |', file=file)
if __name__ == '__main__':
    main()
|