content
stringlengths 5
1.05M
|
|---|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated PyQt5 UI: a student table next to an editor form,
    with file/edit/chart menus, a toolbar and a status bar.

    Indentation restored; generated by pyuic5 from 'main.ui' — regenerate
    rather than hand-edit when the .ui file changes.
    """

    def setupUi(self, MainWindow):
        """Build all widgets, menus, toolbar entries and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(881, 600)
        font = QtGui.QFont()
        font.setFamily("微软雅黑")
        font.setPointSize(12)
        MainWindow.setFont(font)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Splitter: student table on the left, editor form on the right.
        self.splitter = QtWidgets.QSplitter(self.centralwidget)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setOpaqueResize(False)
        self.splitter.setChildrenCollapsible(False)
        self.splitter.setObjectName("splitter")
        self.tblStudents = QtWidgets.QTableWidget(self.splitter)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tblStudents.sizePolicy().hasHeightForWidth())
        self.tblStudents.setSizePolicy(sizePolicy)
        self.tblStudents.setMinimumSize(QtCore.QSize(200, 0))
        self.tblStudents.setObjectName("tblStudents")
        self.tblStudents.setColumnCount(0)
        self.tblStudents.setRowCount(0)
        # Editor panel (disabled until a row is being edited).
        self.editor = QtWidgets.QWidget(self.splitter)
        self.editor.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.editor.sizePolicy().hasHeightForWidth())
        self.editor.setSizePolicy(sizePolicy)
        self.editor.setMinimumSize(QtCore.QSize(220, 0))
        self.editor.setBaseSize(QtCore.QSize(220, 0))
        self.editor.setObjectName("editor")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.editor)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Form fields: id / name / class / score.
        self.widget_4 = QtWidgets.QWidget(self.editor)
        self.widget_4.setObjectName("widget_4")
        self.formLayout = QtWidgets.QFormLayout(self.widget_4)
        self.formLayout.setObjectName("formLayout")
        self.label = QtWidgets.QLabel(self.widget_4)
        self.label.setObjectName("label")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
        self.txtId = QtWidgets.QLineEdit(self.widget_4)
        self.txtId.setObjectName("txtId")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.txtId)
        self.label_2 = QtWidgets.QLabel(self.widget_4)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.txtName = QtWidgets.QLineEdit(self.widget_4)
        self.txtName.setObjectName("txtName")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.txtName)
        self.label_3 = QtWidgets.QLabel(self.widget_4)
        self.label_3.setObjectName("label_3")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
        self.txtClass = QtWidgets.QLineEdit(self.widget_4)
        self.txtClass.setObjectName("txtClass")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.txtClass)
        self.label_4 = QtWidgets.QLabel(self.widget_4)
        self.label_4.setObjectName("label_4")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)
        self.txtScore = QtWidgets.QLineEdit(self.widget_4)
        self.txtScore.setObjectName("txtScore")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.txtScore)
        self.verticalLayout_2.addWidget(self.widget_4)
        spacerItem = QtWidgets.QSpacerItem(20, 271, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        # OK / Cancel button row.
        self.widget_2 = QtWidgets.QWidget(self.editor)
        self.widget_2.setObjectName("widget_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_2)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.btnOK = QtWidgets.QPushButton(self.widget_2)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("resources/icons/dialog-ok-apply.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnOK.setIcon(icon)
        self.btnOK.setObjectName("btnOK")
        self.horizontalLayout_2.addWidget(self.btnOK)
        self.btnCancel = QtWidgets.QPushButton(self.widget_2)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("resources/icons/dialog-cancel.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnCancel.setIcon(icon1)
        self.btnCancel.setObjectName("btnCancel")
        self.horizontalLayout_2.addWidget(self.btnCancel)
        self.verticalLayout_2.addWidget(self.widget_2)
        self.verticalLayout.addWidget(self.splitter)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar, status bar, toolbar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 881, 26))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        self.menuEdit = QtWidgets.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        self.menu_2 = QtWidgets.QMenu(self.menubar)
        self.menu_2.setObjectName("menu_2")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions (file / edit / chart).
        self.actionNew = QtWidgets.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("resources/icons/document-new.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionNew.setIcon(icon2)
        self.actionNew.setObjectName("actionNew")
        self.actionOpen = QtWidgets.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("resources/icons/document-open.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen.setIcon(icon3)
        self.actionOpen.setObjectName("actionOpen")
        self.actionSave = QtWidgets.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("resources/icons/document-save.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave.setIcon(icon4)
        self.actionSave.setObjectName("actionSave")
        self.actionSave_As = QtWidgets.QAction(MainWindow)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("resources/icons/document-save-as.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave_As.setIcon(icon5)
        self.actionSave_As.setObjectName("actionSave_As")
        self.actionExit = QtWidgets.QAction(MainWindow)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("resources/icons/application-exit.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExit.setIcon(icon6)
        self.actionExit.setObjectName("actionExit")
        self.actionAdd = QtWidgets.QAction(MainWindow)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("resources/icons/add.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionAdd.setIcon(icon7)
        self.actionAdd.setObjectName("actionAdd")
        self.actionDelete = QtWidgets.QAction(MainWindow)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap("resources/icons/edit-delete.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionDelete.setIcon(icon8)
        self.actionDelete.setObjectName("actionDelete")
        self.actionPiechart = QtWidgets.QAction(MainWindow)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap("resources/icons/piechart.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPiechart.setIcon(icon9)
        self.actionPiechart.setObjectName("actionPiechart")
        self.actionHistogram = QtWidgets.QAction(MainWindow)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap("resources/icons/histogram.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionHistogram.setIcon(icon10)
        self.actionHistogram.setObjectName("actionHistogram")
        # Wire the actions into menus and toolbar.
        self.menu.addAction(self.actionNew)
        self.menu.addAction(self.actionOpen)
        self.menu.addAction(self.actionSave)
        self.menu.addAction(self.actionSave_As)
        self.menu.addSeparator()
        self.menu.addAction(self.actionExit)
        self.menuEdit.addAction(self.actionAdd)
        self.menuEdit.addAction(self.actionDelete)
        self.menu_2.addAction(self.actionPiechart)
        self.menu_2.addAction(self.actionHistogram)
        self.menubar.addAction(self.menu.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menu_2.menuAction())
        self.toolBar.addAction(self.actionNew)
        self.toolBar.addAction(self.actionOpen)
        self.toolBar.addAction(self.actionSave)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionAdd)
        self.toolBar.addAction(self.actionDelete)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionHistogram)
        self.toolBar.addAction(self.actionPiechart)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible (translated) strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "学号"))
        self.label_2.setText(_translate("MainWindow", "姓名"))
        self.label_3.setText(_translate("MainWindow", "班级"))
        self.label_4.setText(_translate("MainWindow", "成绩"))
        self.btnOK.setText(_translate("MainWindow", "OK"))
        self.btnCancel.setText(_translate("MainWindow", "Cancel"))
        self.menu.setTitle(_translate("MainWindow", "文件"))
        self.menuEdit.setTitle(_translate("MainWindow", "编辑"))
        self.menu_2.setTitle(_translate("MainWindow", "可视化分析"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionNew.setText(_translate("MainWindow", "新建"))
        self.actionOpen.setText(_translate("MainWindow", "打开"))
        self.actionSave.setText(_translate("MainWindow", "保存"))
        self.actionSave_As.setText(_translate("MainWindow", "另存为"))
        self.actionExit.setText(_translate("MainWindow", "退出"))
        self.actionAdd.setText(_translate("MainWindow", "添加学生"))
        self.actionAdd.setToolTip(_translate("MainWindow", "添加学生"))
        self.actionDelete.setText(_translate("MainWindow", "删除选中学生"))
        self.actionDelete.setToolTip(_translate("MainWindow", "删除选中学生"))
        self.actionPiechart.setText(_translate("MainWindow", "饼图"))
        self.actionHistogram.setText(_translate("MainWindow", "条形图"))
|
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect, render
from urllib3 import request
from tcgen.forms import DocumentForm
from tcgen.generator import generate
def create_tc(request):
    """Test Case Generator upload view.

    GET renders an empty upload form; POST validates and saves the uploaded
    document, redirecting to 'home' on success.  An invalid POST falls
    through and re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = DocumentForm()
    return render(request, 'tc_gen.html', {
        'form': form,
        'form_title': "Test Case Generator",
        'button': "Upload file",
    })
|
# Split the sentence into its words and print the resulting list.
WORD = "tHIS IS ANUDEEP"
lister = WORD.split()  # fix: original called the non-existent str.spit()
print(lister)
|
from typing import List
import numpy as np
import pandas as pd
import math
"""
lagrange is a mathematical interpolation methods that approximate
a polynomial of degree n given n-1 points
"""
def lagrange(X, Y) -> List:
    """Evaluate the Lagrange interpolating polynomial through (X[i], Y[i]).

    The polynomial is sampled on the fixed grid [-1, 2) with step 0.001.

    :param X: x coordinates of the interpolation nodes
    :param Y: y coordinates of the interpolation nodes (same length as X)
    :return: pandas DataFrame with columns 'x' and 'y'
    """
    my_columns = ['x', 'y']
    rows = []
    for pt in np.arange(-1, 2, 0.001):
        P = 0.0
        for i in range(len(X)):
            # L_i(pt) = prod over j != i of (pt - X[j]) / (X[i] - X[j])
            L = 1.0
            for j in range(len(X)):
                if i != j:  # fix: original compared indices with `is not`
                    L = L * (pt - X[j]) / (X[i] - X[j])
            P += Y[i] * L
        rows.append(pd.Series([pt, P], index=my_columns))
    # Build the frame once at the end: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and appending per-row is quadratic.
    return pd.DataFrame(rows, columns=my_columns)
if __name__ == "__main__":
    # Sample the target polynomial y = x**3 that we want the Lagrange
    # interpolant to approximate, on the same grid lagrange() uses.
    my_columns = ['x', 'y']
    rows = [pd.Series([i, pow(i, 3)], index=my_columns)
            for i in np.arange(-1, 2, 0.001)]
    dataframe = pd.DataFrame(rows, columns=my_columns)
    # Compare the interpolant through (-1,-1), (1,1), (2,8) with the target.
    res = lagrange(np.array([-1, 1, 2]), np.array([-1, 1, 8]))
    m = len(dataframe)
    diff = pow(dataframe['y'] - res['y'], 2).sum()
    # NOTE(review): this computes sqrt(sum of squares) / m, not the usual
    # RMSE sqrt(diff / m) — confirm which error metric was intended.
    RMSE = 1 / m * (math.sqrt(diff))
    print(RMSE)
|
from admin_honeypot.signals import honeypot
from django.conf import settings
from django.core.mail import mail_admins
from django.urls import reverse
from django.template.loader import render_to_string
def notify_admins(instance, request, **kwargs):
    """Email the site admins about a honeypot login attempt.

    :param instance: the LoginAttempt record that was just created
    :param request: the request that triggered the attempt
    """
    path = reverse('admin:admin_honeypot_loginattempt_change', args=(instance.pk,))
    admin_detail_url = 'http://{0}{1}'.format(request.get_host(), path)
    context = {
        'request': request,
        'instance': instance,
        'admin_detail_url': admin_detail_url,
    }
    subject = render_to_string('admin_honeypot/email_subject.txt', context).strip()
    message = render_to_string('admin_honeypot/email_message.txt', context).strip()
    mail_admins(subject=subject, message=message)


# Only wire the handler to the honeypot signal when admin emails are enabled.
if getattr(settings, 'ADMIN_HONEYPOT_EMAIL_ADMINS', True):
    honeypot.connect(notify_admins)
|
import re
from datetime import datetime
from src.consts import *
def avg_sentence_length(text):
    """
    Averages the number of words in a sentence of `text`
    :param text: The text to analyze (str)
    :return: average_words (float); 0.0 when the text has no sentences
    """
    sentences = [s.strip() for s in re.split(r'[\.\?!]', text) if s]
    if not sentences:  # avoid ZeroDivisionError on empty input
        return 0.0
    return sum(len(sentence.split()) for sentence in sentences) / len(sentences)
def avg_word_length(text):
    """
    Averages the word length in `text`
    :param text: The text to analyze (str)
    :return: average_word_len (float); 0.0 when the text has no words
    """
    words = re.sub(r'[\.\?!]', '', text).split()
    if not words:  # avoid ZeroDivisionError on empty input
        return 0.0
    return sum(len(word) for word in words) / len(words)
def character_count(text):
    """
    Returns the number of alpha numeric characters in `text`
    :param text: The text to analyze (str)
    :return: character_count (int)
    """
    # fix: the caret belongs INSIDE the class — strip everything that is
    # not [A-Za-z0-9_] and count what remains.  The original pattern
    # `^[A-Za-z0-9_]` only removed a single leading character.
    return len(re.sub(r'[^A-Za-z0-9_]', '', text))
def is_quoted_retweet(text):
    """
    Determines if the text begins with a quoted retweet
    :param text: The text to analyze (str)
    :return: 1 if the text starts with '"@', else 0
    """
    return int(text[:2] == '"@')
def count_all_caps(text):
    """
    Determines the number of all caps words in `text`
    :param text: The text to analyze (str)
    :return: num_all_caps (int)

    NOTE(review): the pattern requires a preceding whitespace, so an
    all-caps word at the very start of the text is not counted.
    """
    return len(re.findall(r'\s([A-Z][A-Z]+)', text))
def count_random_caps(text):
    """
    Determines the number of randomly capitalized words in `text`
    :param text: The text to analyze (str)
    :return: num_random_caps (int)

    The lookbehinds exclude capitalized words that legitimately start a new
    sentence (preceded by '. ', '! ' or '? '); the trailing [^'][^I] parts
    presumably exclude contractions and the pronoun 'I' — TODO confirm.
    """
    return len(re.findall(r"(?<!\.\s)(?<!\!\s)(?<!\?\s)\b[A-Z][a-z]*[^'][^I]\b", text))
def is_mention(text):
    """
    Determines whether the tweet is a mention i.e. `text` begins with <USER>
    :param text: The text to analyze (str)
    :return: 1 if the text starts with '@', else 0
    """
    # startswith is safe on the empty string (original text[0] raised IndexError)
    return int(text.startswith('@'))
def count_punctuation(text):
    """
    Determines the counts of different types of punctuation in `text`
    Punctuation can be one of comma, lparen, rparen, exclamation, period, semicolon, colon, question
    :param text: The text to analyze (str)
    :return: dict mapping each entry of `punctuation` to its occurrence count
    """
    counts = dict()
    # `punctuation` comes from src.consts (star import); each mark is used
    # directly as a regex pattern — presumably pre-escaped there, TODO confirm.
    for mark in punctuation:
        counts[mark] = len(re.findall(mark, text))
    return counts
def period_of_day(s):
    """
    Determines the period of the day given the string s, which represents a datetime object
    :param s: The datetime string to read (str), parsed with `datetime_fmt` from src.consts
    :return: period (int) - one of
        0 (early morning),
        1 (morning),
        2 (midday),
        3 (afternoon),
        4 (evening),
        5 (night),
        6 (midnight)
    """
    dt = datetime.strptime(s, datetime_fmt)
    # Bucket by hour; the checks are ordered, so each branch covers the
    # hours above the previous threshold.
    if dt.hour <= 3:
        return 6
    elif dt.hour <= 6:
        return 0
    elif dt.hour <= 11:
        return 1
    elif dt.hour <= 14:
        return 2
    elif dt.hour <= 17:
        return 3
    elif dt.hour <= 20:
        return 4
    else:
        return 5
def day_of_week(s):
    """
    Determines the day of the week given the string s, which represents a datetime object
    :param s: The datetime string to read, parsed with `datetime_fmt` from src.consts
    :return: day (int) - indexed at 0 for monday
    """
    dt = datetime.strptime(s, datetime_fmt)
    return dt.weekday()
def starts_with_I(text):
    """
    Determines whether the first letter in the text is 'I'
    :param text: The text to analyze
    :return: 1 if the first letter is an 'I' (or 'i'), 0 otherwise
    """
    # fix: the original compared group(0) — the WHOLE matched string — to
    # 'i', so it could only succeed for the literal one-letter text "I".
    # Find the first alphabetic character and compare that instead.
    match = re.search(r'[A-Za-z]', text)
    return int(match is not None and match.group(0).lower() == 'i')
def count_keywords(text):
    """
    Determines the counts for each nickname in text
    :param text: The text to analyze
    :return: counts of the keywords as a dict
    """
    t = text.lower()
    counts = {}
    # `keywords` comes from src.consts via the star import.
    for keyword in keywords:
        counts[keyword] = len(re.findall(keyword, t))
    return counts
def ends_with_link(text):
    """
    Determines whether the text ends with a link
    :param text: The text to analyze
    :return: 1 if the last word starts with 'http', else 0
    """
    return int(text.split(' ')[-1][:4] == 'http')
def ends_with_hashtag(text):
    """
    Determines whether the text ends with a hashtag
    :param text: The text to analyze
    :return: 1 if the last word starts with '#', else 0
    """
    # startswith is safe when the last "word" is empty (e.g. trailing space);
    # the original [0] index raised IndexError in that case.
    return int(text.split(' ')[-1].startswith('#'))
|
import click
from mattermostdriver import Driver
import json
from dynaconf import Dynaconf
from pathlib import Path
from os import getenv
# Layered Dynaconf configuration: the public settings file plus a private
# overrides file, both under ~/.oda/.
# NOTE(review): Path(getenv("HOME")) raises TypeError when HOME is unset —
# Path.home() would be safer; confirm target platforms before changing.
settings = Dynaconf(
settings_files=[
Path(getenv("HOME")) / ".oda/settings.toml",
Path(getenv("HOME")) / ".oda/private.toml",
],
environments=False,
load_dotenv=False,
envvar_prefix="ODA", # variables exported as `ODAKB_FOO=bar` becomes `settings.FOO == "bar"`
)
@click.group()
@click.pass_obj
def cli(obj):
    """Root command group: log in to Mattermost and stash the driver on the
    click context object for the subcommands."""
    mm = Driver({
        'url': 'mattermost.astro.unige.ch',
        "token": settings.mattermost.access_token,  ## THE TOKEN THAT YOU JUST CREATED
        'scheme': 'https',
        'port': 443
    })
    mm.login()
    obj['mm'] = mm
@cli.command()
@click.argument("message")
@click.pass_obj
def send(obj, message):
    """Post MESSAGE to the 'deployment' channel of the 'cdci' team."""
    channel_name = "deployment"
    team = "cdci"
    mm = obj['mm']
    channel = mm.channels.get_channel_by_name_and_team_name(team, channel_name)
    channel_id = channel['id']
    mm.posts.create_post(options={
        'channel_id': channel_id,
        'message': message
    })
@cli.command()
@click.pass_obj
def listen(obj):
    """Print every websocket event received from the Mattermost server."""
    async def my_event_handler(e):
        message = json.loads(e)
        print(message)
    obj['mm'].init_websocket(my_event_handler)
def main():
    """CLI entry point: run the click group with a fresh context object."""
    cli(obj={})


if __name__ == "__main__":
    main()
|
# coding=utf-8
from celery import Celery

# Celery application instance; all broker/task settings are loaded from the
# `clambda.options` module.
app = Celery()
app.config_from_object("clambda.options")
|
"""Extract all quiz environments from quiz.do.txt to a separate document."""
import re
# One quiz environment: from a line starting with !bquiz up to the next
# line starting with !equiz (non-greedy, MULTILINE + DOTALL).
pattern = r'^!bquiz.+?^!equiz\n'

# Read the source document and extract every quiz environment.
with open('quiz.do.txt') as f:
    text = f.read()
quizzes = re.findall(pattern, text, flags=re.MULTILINE | re.DOTALL)

# Write the extracted quizzes to a stand-alone DocOnce document.
with open('pure_quiz.do.txt', 'w') as f:
    f.write("""\
TITLE: Demo of quizzes
AUTHOR: HPL
DATE: today
This quiz collection is automatically extracted from the documentation
of the "quiz specification format":
"http://hplgit.github.io/doconce/doc/pub/quiz/quiz.html". All syntax
is explained in that document.
""")
    for i, quiz in enumerate(quizzes):
        f.write('\n# Quiz %d\n\n' % (i+1) + quiz + '\n\n')
|
# Definition for a binary tree node.
class TreeNode:
    """Plain binary tree node with a value and optional children."""

    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def bstToGst(self, root: 'TreeNode') -> 'list':
        """Return the node values of the tree rooted at `root` in pre-order.

        NOTE(review): despite the name, this does NOT build a Greater Sum
        Tree — it just collects values.  The original return annotation
        (TreeNode) was wrong; the method returns a plain list.
        """
        mappedTree = []

        def BFS(node):  # NOTE(review): actually a pre-order DFS, not BFS
            mappedTree.append(node.val)
            if node.left:
                BFS(node.left)
            if node.right:
                BFS(node.right)

        BFS(root)
        return mappedTree
# LeetCode 1038 example: level-order input and its expected Greater-Sum-Tree
# output, kept here for reference only.
Input = [4,1,6,0,2,5,7,None,None,None,3,None,None,None,8]
Output = [30,36,21,36,35,26,15,None,None,None,33,None,None,None,8]
# Build the example tree by hand.
root = TreeNode(4)
root.left = TreeNode(1)
root.left.left = TreeNode(0)
root.left.right = TreeNode(2)
root.left.right.right = TreeNode(3)
root.right = TreeNode(6)
root.right.left = TreeNode(5)
root.right.right = TreeNode(7)
root.right.right.right = TreeNode(8)
#print(Solution().bstToGst(root))
# Lazy filter keeping only the int entries (drops the None placeholders).
Inp = filter(lambda e: isinstance(e, int), Input)
for i in Input:
if i != None:
|
#coding: utf-8
from Tkinter import *
class Application(Frame):
    """Demo window placing three labels with the `place` geometry manager."""

    def __init__(self, master=None):
        Frame.__init__(self, master, height=100, width=250)
        self.master.title('Pack Three Labels')
        # First Label: sized relative to the frame.
        la = Label(self, text='Hello everybody. How are you?', bg='yellow', relief=RIDGE, bd=2)
        la.place(relx=0.02, rely=0.1, relheight=0.3, relwidth=0.95)
        # Second Label: natural size, relative position only.
        lb = Label(self, text='Oh My God!', bg='red', relief=RIDGE, bd=2)
        lb.place(relx=0.15, rely=0.45)
        # Third Label
        lc = Label(self, text='See you tomorrow.', bg='LightSkyBlue', relief=RIDGE, bd=2)
        lc.place(relx=0.5, rely=0.75)
if __name__ == '__main__':
    # Build the frame, pack it into the (implicit) root window and run.
    app = Application()
    app.pack()
    app.mainloop()
|
"""
Time: O(N)
Space: O(N)
`counter1` counts the char in `s1`
`counter2` counts the char is `s2[i:j+1]`
Create a sliding window with length `len(s1)`, l. From i to j.
Move the sliding window (`s2[i:j+1]`) from left to right while maintaining counter2.
Each iteration, add s2[j] to the sliding window.
At the end of iteration, remove s2[i] from the sliding window.
If two counter are the same, s1 and s2[i:j+1] are permutations of each other.
"""
import collections
class Solution(object):
    def checkInclusion(self, s1, s2):
        """Return True if some permutation of s1 occurs as a substring of s2.

        Slides a window of length len(s1) over s2, maintaining a character
        counter for the window and comparing it against the counter of s1.
        """
        l = len(s1)
        counter1 = collections.Counter(s1)
        # Seed the window counter with the first l-1 characters.
        counter2 = collections.Counter(s2[:l-1])
        for i in range(len(s2)):  # range works on Python 2 and 3 (was xrange)
            j = i + l - 1
            if j >= len(s2):
                break
            counter2[s2[j]] += 1          # extend the window on the right
            if counter1 == counter2:
                return True
            counter2[s2[i]] -= 1          # drop s2[i] before the window moves
            if counter2[s2[i]] == 0:
                counter2.pop(s2[i], None)  # keep counters comparable
        return False
|
from multiprocessing import Pool, cpu_count
try:
_pools = cpu_count()
except NotImplementedError:
_pools = 4
def chop(list_, n):
    """Chop list_ into n chunks. Returns a list.

    The last chunk absorbs any remainder; when the list is shorter than n,
    a single chunk containing the whole list is returned.
    """
    # could look into itertools also, might be implemented there
    size = len(list_)
    each = size // n
    if each == 0:
        return [list_]
    chopped = []
    for i in range(n):
        start = i * each
        end = (i + 1) * each
        if i == (n - 1):
            # make sure we get all items, let last worker do a litte more
            end = size
        chopped.append(list_[start:end])
    return chopped
|
# -*- coding: utf-8-*-
import time
import math
from pyicloud import PyiCloudService
from pyicloud.exceptions import PyiCloudFailedLoginException
from lessons.base.plugin import JenniferResponsePlugin
from lessons.base.exceptions import JenniferUserWantsToCancelException
class JenniferFindMyIphonePlugin(JenniferResponsePlugin):
    """Locate the user's iPhone via iCloud and send it a ring command."""

    PRIORITY = 50
    VERBOSE_NAME = "Find My Iphone"
    REQUIRES_NETWORK = True

    def can_respond(self, **kwargs):
        """Return True when the tagged input asks to find a/the (i)phone."""
        tags = kwargs.get('tags')
        one_verb = [
            ('find', 'VB'),
            ('finding', 'VBG'),
        ]
        one_of_these = [
            ('phone', 'NN'),
            ('iphone', 'NN')
        ]
        tags_dict = self.tags_dict_by_tag(tags)
        # There must only be one 'NN'
        can_respond = len(tags_dict.get('NN', [])) == 1
        # Need any of the verbs in `one_verb` with correct context
        can_respond = can_respond and any([word in tags for word in one_verb])
        # Need at least one of the words in `one_of_these` with correct context
        can_respond = can_respond and any([word in tags for word in one_of_these])
        return can_respond

    def respond(self, **kwargs):
        """Find the user's iPhone(s), pick one (confirming when there are
        several) and ring it; report last-known location if it is off."""
        client = kwargs.get('client')
        client.give_output_string(self, "Let's find that phone")
        try:
            api = PyiCloudService(self.settings['icloudEmail'], self.settings['icloudPassword'])
        except PyiCloudFailedLoginException:
            client.give_output_string(self, "Invalid iCloud Username & Password")
            return
        # All devices on the account; keep just the iPhones.
        devices = api.devices
        iphones = []
        # The one to ring
        phone_to_ring = None
        for device in devices:
            current = device.status()
            if "iPhone" in current['deviceDisplayName']:
                iphones.append(device)
        # No iphones
        if len(iphones) == 0:
            client.give_output_string(self, "No IPhones Found on your account")
            return
        # Many iphones: ask which one the user meant.
        elif len(iphones) > 1:
            client.give_output_string(self, "There are multiple iphones on your account.")
            try:
                for phone in iphones:
                    if client.confirm(self, "Did you mean the {type} named {name}?".format(type=phone.status()['deviceDisplayName'], name=phone.status()['name'])):
                        phone_to_ring = phone
                        break
            # fix: an except clause must name the exception CLASS — the
            # original called it (`...Exception()`), which would raise a
            # TypeError instead of catching the cancellation.
            except JenniferUserWantsToCancelException:
                return
        # Just one
        elif len(iphones) == 1:
            phone_to_ring = iphones[0]
        if not phone_to_ring:
            client.give_output_string(self, "You didn't select an iPhone")
            return
        # This will attempt to update the status
        phone_to_ring.status()
        if phone_to_ring.status()['batteryLevel'] == 0:
            # Phone appears to be off: report its last known location instead.
            client.give_output_string(self, "Oh no! The phone is off!")
            location = phone_to_ring.location()
            if location and location[u'locationFinished']:
                timestamp = location['timeStamp'] / 1000
                current_timestamp = time.time()
                seconds_ago = round(abs(current_timestamp - timestamp))
                # NOTE(review): `seconds_ago % 60` is the leftover SECONDS,
                # not minutes — confirm whether (seconds_ago // 60) % 60 was meant.
                minutes = int(math.floor(seconds_ago % 60))
                hours = int(math.floor(seconds_ago / 60 / 60))
                time_ago_natural_parts = []
                if hours > 0:
                    time_ago_natural_parts.append("{} hours".format(hours))
                if minutes > 0:
                    time_ago_natural_parts.append("{} minutes".format(minutes))
                time_ago_natural = " and ".join(time_ago_natural_parts) + " ago"
                client.give_output_string(self, "It was last seen {}.".format(time_ago_natural))
            else:
                client.give_output_string(self, "I can't find a location for it.")
            return
        client.give_output_string(self, "Sending ring command to {} now".format(phone_to_ring.status()['deviceDisplayName']))
        phone_to_ring.play_sound()
        return
|
# Standard Library Imports
# Third Party Imports
# Local Application Imports
from classes.block import Block
from classes.gameobject import Enemy
class EnemyBlock(Block):
    """A Block whose grid nodes are Enemy sprites."""

    # Sentinel stored in the grid where an enemy has been destroyed.
    DESTROYED_ENEMY_SLOT = -1

    def __init__(
        self,
        top_left,
        bottom_right,
        NODES_PER_COLUMN=Block.DEFAULT_PARENT_VALUE,
        NODES_PER_ROW=Block.DEFAULT_PARENT_VALUE,
    ):
        super().__init__(top_left, bottom_right)
        # Only override the parent's grid dimensions when explicitly given.
        if NODES_PER_COLUMN is not self.DEFAULT_PARENT_VALUE:
            self.NODES_PER_COLUMN = NODES_PER_COLUMN
        if NODES_PER_ROW is not self.DEFAULT_PARENT_VALUE:
            self.NODES_PER_ROW = NODES_PER_ROW
        self._create_block()

    def replace_enemy(self, enemy, replacement):
        """Replace every occurrence of `enemy` in the grid with `replacement`.

        fix: the original located slots with list.index(), which returns the
        FIRST equal element/row — with duplicate enemies (or rows) the wrong
        slot could be overwritten.  Enumerate the actual positions instead.
        """
        for row_index, row in enumerate(self._structure):
            for enemy_index, r_enemy in enumerate(row):
                if r_enemy == enemy:
                    self._structure[row_index][enemy_index] = replacement

    def _determine_total_width(self):
        """Width of the block, clamped to exactly fit NODES_PER_ROW enemies."""
        total_width = abs(
            self.bottom_right[self.X_CORD] - self.top_left[self.X_CORD]
        )
        width_needed = Enemy.IMG_WIDTH * self.NODES_PER_ROW
        if total_width < width_needed:
            total_width += abs(total_width - width_needed)
        elif total_width > width_needed:
            total_width -= abs(total_width - width_needed)
        return total_width

    def _determine_total_height(self):
        """Height of the block, clamped to exactly fit NODES_PER_COLUMN enemies."""
        total_height = abs(
            self.bottom_right[self.Y_CORD] - self.top_left[self.Y_CORD]
        )
        height_needed = (
            Enemy.IMG_WIDTH * self.NODES_PER_COLUMN
        )  # assuming icons have same width and height
        if total_height < height_needed:
            total_height += abs(total_height - height_needed)
        elif total_height > height_needed:
            total_height -= abs(total_height - height_needed)
        return total_height

    def _determine_node_height(self, total_height):
        # Fixed node size: the enemy sprite (square icons assumed).
        return Enemy.IMG_WIDTH

    def _determine_node_width(self, total_width):
        return Enemy.IMG_WIDTH

    def _return_node(self, left, top, node_width, node_height):
        # Grid nodes are Enemy instances positioned at the cell's corner.
        return Enemy.from_manual_cords(top, left)

    def blit(self, screen, COLOR):
        # The enemies draw themselves; the block itself is invisible.
        pass
|
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as init
import torchvision
from torch.autograd import Variable
import torchvision.transforms as transforms
import argparse
from torchvision import datasets
import visdom
import numpy as np
# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Command-line options (argparse gives the script a Linux-style CLI).
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints') # where results and checkpoints are saved
parser.add_argument('--net', default='./model/Resnet18.pth', help="path to net (to continue training)") # model path used when resuming training
args = parser.parse_args()
# Visdom connection for live accuracy curves.
viz = visdom.Visdom()
class ResidualBlock(nn.Module):
    """BasicBlock with a squeeze-and-excitation style gate on the residual path.

    outchannel must be >= 16 so that the bottleneck (outchannel // 16) is
    non-empty.
    """

    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        self.stride = stride
        self.inchannel = inchannel
        self.outchannel = outchannel
        # fix: the original created these Linear layers inside forward(), so
        # they were re-initialized with random weights on every call, never
        # trained, and lived on the CPU even when the model was on the GPU.
        # Registering them here makes them real, trainable sub-modules.
        self.se_fc1 = nn.Linear(outchannel, outchannel // 16)
        self.se_fc2 = nn.Linear(outchannel // 16, outchannel)
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out1 = self.left(x)
        # Squeeze: max-pool over the full (assumed square) feature map -> (N, C).
        w = F.max_pool2d(out1, kernel_size=out1.size(2))
        w = w.view(w.size(0), -1)
        # Excite: bottleneck MLP producing one gate value per channel
        # (no sigmoid here — kept as in the original).
        w = self.se_fc1(w)
        w = F.relu(w, inplace=True)
        w = self.se_fc2(w)
        w = w.view(out1.size(0), out1.size(1), 1, 1)
        out = w * out1
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet-18-style network for 32x32 inputs (CIFAR-100 by default).

    :param ResidualBlock: block factory called as block(in_ch, out_ch, stride)
    :param num_classes: size of the final classification layer
    """

    def __init__(self, ResidualBlock, num_classes=100):
        super(ResNet, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        """Stack num_blocks blocks; only the first one may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)  # e.g. [2, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)  # 4x4 pooling: global for 32x32 inputs
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
def ResNet18():
    """Build the 18-layer ResNet over `ResidualBlock` (100 CIFAR-100 classes)."""
    return ResNet(ResidualBlock)


# Global model instance shared by train()/test()/main().
net = ResNet18().to(device)
def train(model, data, target, lossfc, optimizer):
    """Run one optimization step on a single batch.

    :param model: the network (already on the right device)
    :param data: input batch
    :param target: class labels for the batch
    :param lossfc: loss function (e.g. CrossEntropyLoss)
    :param optimizer: optimizer over model.parameters()
    :return: (batch accuracy in [0, 1], loss tensor)
    """
    model.train()
    optimizer.zero_grad()
    output = model(data)
    loss = lossfc(output, target)
    loss.backward()
    optimizer.step()
    predictions = output.max(1, keepdim=True)[1]  # argmax class per sample
    correct = predictions.eq(target.view_as(predictions)).sum().item()
    Acc = correct / len(target)
    return Acc, loss
def test(model, testloader, lossfc, use_cuda):
    """Evaluate the model on the whole test loader.

    :return: (mean per-batch accuracy, mean per-batch loss)
    """
    model.eval()
    sum_loss = 0
    sum_Acc = 0
    number = 0  # batch counter, used to average the sums
    with torch.no_grad():
        for data, target in testloader:
            number += 1
            data, target = data.to(device), target.to(device)
            # NOTE(review): redundant with .to(device) when device is cuda.
            if use_cuda:
                data = data.cuda()
                target = target.cuda()
            output = model(data)
            loss = lossfc(output, target)
            predictions = output.max(1, keepdim=True)[1]
            correct = predictions.eq(target.view_as(predictions)).sum().item()
            Acc = correct / len(target)
            sum_loss += loss
            sum_Acc += Acc
    return sum_Acc / number, sum_loss / number
def main():
    """Train ResNet18 on CIFAR-100, plotting accuracy curves to visdom."""
    EPOCH = 40        # number of passes over the training set
    BATCH_SIZE = 8
    LR = 0.001
    use_cuda = torch.cuda.is_available()
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    # CIFAR-100 train/test sets, batched for mini-batch training.
    trainset = torchvision.datasets.CIFAR100(root='/home/wang/data100', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR100(root='/home/wang/data100', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=2, shuffle=False, num_workers=2)
    model = net
    if use_cuda:
        model = model.cuda()
    criterion = nn.CrossEntropyLoss()  # cross-entropy for multi-class classification
    optimizer = torch.optim.Adam(net.parameters(), lr=LR)
    step = 0
    test_number = 0
    x, train_Acc, test_Acc = 0, 0, 0
    # Create the visdom window that the loop below appends points to.
    win = viz.line(
        X=np.array([x]),
        Y=np.column_stack((np.array([train_Acc]), np.array([test_Acc]))),
        opts=dict(
            legend=["train_Acc", "test_Acc"]
        )
    )
    for i in range(EPOCH):
        for data, target in trainloader:
            step += 1
            x = step
            data, target = data.to(device), target.to(device)
            if use_cuda:
                data = data.cuda()
                target = target.cuda()
            Acc, loss = train(model, data, target, criterion, optimizer)
            train_Acc = Acc
            print('train : step = %d, loss = %.4f, Acc = %.2f' % (step, loss, 100 * Acc))
            if step % 6250 == 0:
                test_number += 1
                Acc, loss = test(model, testloader, criterion, use_cuda)
                test_Acc = Acc
                # fix: the original printed the undefined name `test_step`
                # (NameError on the first evaluation); use test_number.
                print('Test: test_number = %d, loss = %.4f, Acc = %.2f' % (test_number, loss, 100 * Acc))
            if step % 100 == 0:
                viz.line(
                    X=np.array([x]),
                    Y=np.column_stack((np.array([train_Acc]), np.array([test_Acc]))),
                    win=win,
                    update="append"
                )


if __name__ == '__main__':
    main()
|
import ipaddress
import pytest
from boardfarm.exceptions import BftIfaceNoIpV4Addr, BftIfaceNoIpV6Addr
from boardfarm.lib.bft_interface import bft_iface
class Dummy:
    """Minimal device stand-in: exposes check_output() for mocker to patch."""

    def check_output(self, cmd):
        """Placeholder; the tests replace this via mocker.patch.object."""
        return None
# `ip a` output: one global IPv4 address, only a link-local IPv6 address.
out_str1 = '''(venv3) testuser@sree-VirtualBox:~/Sreelekshmi/boardfarm/unittests/boardfarm/lib$ ip a show dev enp0s3
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 08:00:27:cb:9c:89 brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic noprefixroute enp0s3
valid_lft 76520sec preferred_lft 76520sec
inet6 fe80::b2f7:5059:2879:3dfc/64 scope link noprefixroute
valid_lft forever preferred_lft forever
'''
# `ip a` output for the loopback device (127.0.0.1 / ::1).
out_str2 = '''root@bft-node-data-106-0:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
'''
# `ip a` output: IPv4 only, no IPv6 addresses at all.
out_str3 = '''
204: eth0@if205: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:c0:a8:32:04 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 192.168.50.4/24 brd 192.168.50.255 scope global eth0
valid_lft forever preferred_lft forever
'''
# `ip a` output: IPv4 plus both a global and a link-local IPv6 address.
out_str4 = '''
475: eth1@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 3e:73:cb:6b:49:ba brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.64.38.106/23 scope global eth1
valid_lft forever preferred_lft forever
inet6 2001:730:1f:60a::cafe:106/64 scope global
valid_lft forever preferred_lft forever
inet6 fe80::3c73:cbff:fe6b:49ba/64 scope link
valid_lft forever preferred_lft forever
'''
# `ifconfig`-style output: IPv4 with netmask plus global/link-local IPv6.
out_str5 = '''
erouter0 Link encap:Ethernet HWaddr 68:02:B8:02:C5:04
inet addr:10.3.0.23 Bcast:10.3.0.255 Mask:255.255.255.0
bft_inet6 addr: 2002:0:c4:1::e:c0/128 Scope:Global
bft_inet6 addr: fe80::6a02:b8ff:fe02:c504/64 Scope:Link
UP BROADCAST RUNNING PROMISC MULTICAST MTU:1500 Metric:1
RX packets:6208 errors:0 dropped:0 overruns:0 frame:0
TX packets:81 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:432494 (422.3 KiB) TX bytes:10636 (10.3 KiB)
'''
# `ifconfig`-style output with NO IPv4 line — used for the IPv4 negative tests.
out_str6 = '''
erouter0 Link encap:Ethernet HWaddr 68:02:B8:02:C5:04
bft_inet6 2002:0:c4:1::e:c0/128 Scope:Global
bft_inet6 fe80::6a02 Scope:Link
UP BROADCAST RUNNING PROMISC MULTICAST MTU:1500 Metric:1
RX packets:6208 errors:0 dropped:0 overruns:0 frame:0
TX packets:81 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:432494 (422.3 KiB) TX bytes:10636 (10.3 KiB)
'''
# `ifconfig`-style output with IPv4 but a malformed IPv6 line — used for the
# IPv6 negative tests.
out_str7 = '''
erouter0 Link encap:Ethernet HWaddr 68:02:B8:02:C5:04
s inet addr:10.3.0.21 Bcast:10.3.0.255 Mask:255.255.255.0
bft_inet6 fe80:6a02 Scope:Link
UP BROADCAST RUNNING PROMISC MULTICAST MTU:1500 Metric:1
RX packets:6208 errors:0 dropped:0 overruns:0 frame:0
TX packets:81 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:432494 (422.3 KiB) TX bytes:10636 (10.3 KiB)
'''
@pytest.mark.parametrize(
    "command_output,expected",
    [(out_str5, "2002:0:c4:1::e:c0"), (out_str2, "::1")],
)
def test_get_ipv6(mocker, command_output, expected):
    """The parsed global IPv6 address must match the fixture output."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=command_output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.get_interface_ipv6addr(command_output)
    assert iface.ipv6 == ipaddress.IPv6Interface(expected).ip
@pytest.mark.parametrize(
    "command_output,expected",
    [(out_str4, "fe80::3c73:cbff:fe6b:49ba"),
     (out_str5, "fe80::6a02:b8ff:fe02:c504")],
)
def test_get_ip_link_local_ipv6(mocker, command_output, expected):
    """The parsed link-local IPv6 address must match the fixture output."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=command_output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.get_interface_ipv6addr(command_output)
    assert iface.ipv6_link_local == ipaddress.IPv6Interface(expected).ip
@pytest.mark.parametrize(
    "command_output,expected",
    [(out_str1, "10.0.2.15"), (out_str2, "127.0.0.1")],
)
def test_get_ipv4(mocker, command_output, expected):
    """The parsed IPv4 address must match the fixture output."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=command_output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.get_interface_ipv4addr(command_output)
    assert iface.ipv4 == ipaddress.IPv4Interface(expected).ip
@pytest.mark.parametrize("command_output,expected",
                         [(out_str4, "2001:730:1f:60a::cafe:106/64"),
                          (out_str5, "2002:0:c4:1::e:c0/128")])
def test_get_ipv6_prefixlen(mocker, command_output, expected):
    """The parsed IPv6 prefix length must match the CIDR suffix in the output."""
    dummy_dev = Dummy()
    mocker.patch.object(dummy_dev,
                        'check_output',
                        return_value=command_output,
                        autospec=True)
    obj = bft_iface(dummy_dev, "dummy_iface", "dummy_cmd")
    obj.get_interface_ipv6addr(command_output)
    # FIX: use the public `.network.prefixlen` accessor instead of the
    # private `_prefixlen` attribute of IPv6Interface (same value).
    assert obj.prefixlen == ipaddress.IPv6Interface(expected).network.prefixlen
@pytest.mark.parametrize(
    "command_output,expected",
    [(out_str1, "10.0.2.15/24"), (out_str2, "127.0.0.1/8")],
)
def test_get_ipv4_netmask(mocker, command_output, expected):
    """The parsed netmask must match the CIDR suffix of the fixture address."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=command_output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.get_interface_ipv4addr(command_output)
    assert iface.netmask == ipaddress.IPv4Interface(expected).netmask
@pytest.mark.parametrize(
    "command_output,expected",
    [(out_str1, "10.0.2.15/24"), (out_str2, "127.0.0.1/8")],
)
def test_get_ipv4_network(mocker, command_output, expected):
    """The parsed IPv4 network must match the fixture address/prefix."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=command_output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.get_interface_ipv4addr(command_output)
    assert iface.network == ipaddress.IPv4Interface(expected).network
@pytest.mark.parametrize(
    "output, expected",
    [(out_str4, "2001:730:1f:60a::cafe:106/64"),
     (out_str5, "2002:0:c4:1::e:c0/128")],
)
def test_get_ipv6_network(mocker, output, expected):
    """The network_v6 property must yield the fixture's IPv6 network."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    assert iface.network_v6 == ipaddress.IPv6Interface(expected).network
@pytest.mark.parametrize("output", [out_str6])
def test_get_ipv4_negative(mocker, output):
    """Output without any IPv4 address must raise BftIfaceNoIpV4Addr."""
    # Bypass __init__ so no parsing happens before the explicit call below.
    mocker.patch.object(
        bft_iface, '__init__', return_value=None, autospec=True)
    iface = bft_iface("dummy_dev", "dummy_iface", "dummy_cmd")
    with pytest.raises(BftIfaceNoIpV4Addr):
        assert iface.get_interface_ipv4addr(output)
@pytest.mark.parametrize("output", [out_str6])
def test_ipv4_negative(mocker, output):
    """Reading .ipv4 on an interface without IPv4 must raise."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    with pytest.raises(BftIfaceNoIpV4Addr):
        print(iface.ipv4)
@pytest.mark.parametrize(
    "output, exp_ip, exp_netmask",
    [(out_str7, "10.3.0.21", "10.3.0.21/24")],
)
def test_ipv4(mocker, output, exp_ip, exp_netmask):
    """Both .ipv4 and .netmask must be parsed from ifconfig-style output."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    assert iface.ipv4 == ipaddress.IPv4Interface(exp_ip).ip
    assert iface.netmask == ipaddress.IPv4Interface(exp_netmask).netmask
@pytest.mark.parametrize("output", [out_str7])
def test_get_interface_ipv6addr_negative(mocker, output):
    """Output with a malformed IPv6 line must raise BftIfaceNoIpV6Addr."""
    # Bypass __init__ so no parsing happens before the explicit call below.
    mocker.patch.object(
        bft_iface, '__init__', return_value=None, autospec=True)
    iface = bft_iface("dummy_dev", "dummy_iface", "dummy_cmd")
    with pytest.raises(BftIfaceNoIpV6Addr):
        assert iface.get_interface_ipv6addr(output)
@pytest.mark.parametrize("output", [out_str7])
def test_ipv6_negative(mocker, output):
    """Reading .ipv6 on an interface without a global IPv6 must raise."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    with pytest.raises(BftIfaceNoIpV6Addr):
        iface.ipv6
@pytest.mark.parametrize(
    "output, exp_ip, exp_net, exp_ip_link",
    [(out_str6, "2002:0:c4:1::e:c0", "2002:0:c4:1::e:c0/128", "fe80::6a02")],
)
def test_ipv6(mocker, output, exp_ip, exp_net, exp_ip_link):
    """Global IPv6, its network, and the link-local address must all parse."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    assert iface.ipv6 == ipaddress.IPv6Interface(exp_ip).ip
    assert iface.network_v6 == ipaddress.IPv6Interface(exp_net).network
    assert iface.ipv6_link_local == ipaddress.IPv6Interface(exp_ip_link).ip
@pytest.mark.parametrize("output", [out_str6, out_str7])
def test_refresh(mocker, output):
    """refresh() must re-parse interface data without raising."""
    dev = Dummy()
    mocker.patch.object(
        dev, 'check_output', return_value=output, autospec=True)
    # MAC parsing is out of scope here; stub it out.
    mocker.patch.object(
        bft_iface, 'get_interface_macaddr', return_value=None, autospec=True)
    iface = bft_iface(dev, "dummy_iface", "dummy_cmd")
    iface.refresh()
|
# Google Drive share links for the maths tutorials, keyed by tutorial id.
maths_data = {
    "tuto1": "https://drive.google.com/file/d/11gS82G5nsvjWtHGmO3rs9dzQFNTihR0a/view?usp=sharing",
    "tuto2": "https://drive.google.com/file/d/11x5FTbsEH2HA3qQFyX4K-q6aFkS4MDx-/view?usp=sharing",
    "tuto3": "https://drive.google.com/file/d/122IOVikDeO6rllWn7RfdL9KT1-vHMOWu/view?usp=sharing"
}
# NOTE(review): unlike maths_data above, this is a *set* literal with a single
# element, not a name->URL mapping. Possibly a dict was intended — confirm
# against the code that consumes cheat_data.
cheat_data = {
    "Maths Tuto 1"
}
|
from paypalrestsdk import Order
import logging
logging.basicConfig(level=logging.INFO)

# Look up an existing order by its ID (replace the placeholder before use).
order = Order.find("<ORDER_ID>")

# FIX: paypalrestsdk API-call methods return True on success and populate
# `order.error` on failure; the original assigned the return value to an
# unused `response` variable and queried order.success() instead.
if order.authorize({
    "amount": {
        "currency": "USD",
        "total": "0.08"
    }
}):
    print("Authorized[%s] successfully" % (order.id))
else:
    print(order.error)
|
import subprocess
import os
import operator
import re
import typing
from urllib.parse import urljoin
from qutebrowser.api import interceptor, message
from PyQt5.QtCore import QUrl
# Autogenerated config.py
#
# NOTE: config.py is intended for advanced users who are comfortable
# with manually migrating the config file on qutebrowser upgrades. If
# you prefer, you can also configure qutebrowser using the
# :set/:bind/:config-* commands without having to write a config.py
# file.
#
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Uncomment this to still load settings configured via autoconfig.yml
# config.load_autoconfig()
# Or uncomment this line to load settings from config.py
# Opt out of autoconfig.yml so this file is the single source of truth.
config.load_autoconfig(False)
# Aliases for commands. The keys of the given dictionary are the
# aliases, while the values are the commands they map to.
# Type: Dict
c.aliases = {
    'q': 'quit',
    'w': 'session-save',
    'wq': 'quit --save',
    'x': 'quit --save'
}
# Setting dark mode
config.set("colors.webpage.darkmode.enabled", True)
# Lazily load tabs when restoring a session on startup.
config.set("session.lazy_restore", True)
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'no-3rdparty')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
# Spoof a Firefox user agent for Google Drive only.
config.set('content.headers.user_agent',
           'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0',
           'https://drive.google.com/*')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
#config.set('content.notifications.enabled', True, 'https://www.reddit.com')
# Default zoom level (percent).
config.set("zoom.default", 125)
c.auto_save.session = True
c.content.pdfjs = False
c.content.geolocation = False
# JavaScript is disabled globally; per-site exceptions are added below.
c.content.javascript.enabled = False
c.content.webgl = False
c.content.canvas_reading = True
config.set('content.autoplay', False)
###############################################################################
#  __ _ _            _      _
# / _(_) | ___ _ __ (_) ___| | _____ _ __
# | |_| | |/ _ \ '_ \| |/ __| |/ / _ \ '__|
# | _| | | __/ |_) | | (__| < __/ |
# |_| |_|_|\___| .__/|_|\___|_|\_\___|_|
# |_|
###############################################################################
# Using zenity as file picker
# config.set('fileselect.handler', 'external')
# config.set('fileselect.folder.command',
# ["zenity", "--file-selection", "--directory"])
# config.set('fileselect.multiple_files.command',
# ["zenity", "--file-selection", "--multiple"])
# config.set('fileselect.single_file.command', ["zenity", "--file-selection"])
# Using lf as file picker (via the custom `filepicker` wrapper script below).
####################
HOME = os.environ['HOME']
####################
config.set('fileselect.handler', 'external')
config.set('fileselect.folder.command',
           [f"{HOME}/.config/qutebrowser/filepicker"])
config.set('fileselect.multiple_files.command',
           [f"{HOME}/.config/qutebrowser/filepicker"])
config.set('fileselect.single_file.command',
           [f"{HOME}/.config/qutebrowser/filepicker"])
###############################################################################
# Per-site JavaScript allow-list (JS is disabled globally above).
config.set('content.javascript.enabled', True, '*://aidoru-online.me/*')
config.set('content.javascript.enabled', True, '*://boards.4chan.org/*')
config.set('content.javascript.enabled', True, '*://boards.4channel.org/*')
config.set('content.javascript.enabled', True, '*://www.showroom-live.com/*')
config.set('content.javascript.enabled', True,
           '*://campaign.showroom-live.com/*')
config.set('content.javascript.enabled', True, '*://localhost/*')
config.set('content.javascript.enabled', True, '*://localhost:*/*')
config.set('content.javascript.enabled', True,
           'file:///home/wasif/build/other-projects/Bento/index.html')
config.set('content.javascript.enabled', True, '*://gitlab.com/*')
config.set('content.javascript.enabled', True, '*://github.com/*')
config.set('content.javascript.enabled', True, '*://invidious.kavin.rocks/*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'chrome-devtools://*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome-devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome://*/*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'qute://*/*')
# homepage
# c.url.start_pages = ["about:blank"]
# c.url.default_page = "about:blank"
# fork new tabs to the background, rather than following them
# c.tabs.background = True
# only show the tab bar if there are multiple tabs
# c.tabs.show = 'multiple'
# only show the statusbar if in a mode
# c.statusbar.show = 'in-mode'
# various delays (milliseconds)
c.tabs.show_switching_delay = 2000
c.messages.timeout = 5000
c.keyhint.delay = 0
# default text editor
c.editor.command = ['alacritty', '-e', 'nvim', '{file}']
c.editor.encoding = 'utf-8'
# use the new ad-blocker (hosts file + Brave adblock)
c.content.blocking.method = 'both'
# desktop notifications (currently disabled)
c.content.notifications.enabled = False
# default character encoding
c.content.default_encoding = 'utf-8'
# Directory to save downloads to. If unset, a sensible OS-specific
# default is used.
# Type: Directory
c.downloads.location.directory = "~/Downloads"
c.downloads.location.suggestion = 'both'
# When to show the tab bar.
# Type: String
# Valid values:
# - always: Always show the tab bar.
# - never: Always hide the tab bar.
# - multiple: Hide the tab bar if only one tab is open.
# - switching: Show the tab bar when switching tabs.
c.tabs.show = 'always'
# Setting default page for when opening new tabs or new windows with
# commands like :open -t and :open -w .
c.url.default_page = 'file:///home/wasif/build/other-projects/Bento/index.html'
# Search engines which can be used via the address bar. Maps a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term, use
# `{{` and `}}` for literal `{`/`}` braces. The following further
# placeholds are defined to configure how special characters in the
# search terms are replaced by safe characters (called 'quoting'): *
# `{}` and `{semiquoted}` quote everything except slashes; this is the
# most sensible choice for almost all search engines (for the search
# term `slash/and&` this placeholder expands to `slash/and%26amp`).
# * `{quoted}` quotes all characters (for `slash/and&` this
# placeholder expands to `slash%2Fand%26amp`). * `{unquoted}` quotes
# nothing (for `slash/and&` this placeholder expands to
# `slash/and&`). The search engine named `DEFAULT` is used when
# `url.auto_search` is turned on and something else than a URL was
# entered to be opened. Other search engines can be used by prepending
# the search engine name to the search term, e.g. `:open google
# qutebrowser`.
# Type: Dict
c.url.searchengines = {
    'DEFAULT': 'https://searx.be/?q={}',
    'am': 'https://www.amazon.com/s?k={}',
    'aw': 'https://wiki.archlinux.org/?search={}',
    'goog': 'https://www.google.com/search?q={}',
    'd': 'https://www.duckduckgo.com/?q={}',
    'hoog': 'https://hoogle.haskell.org/?hoogle={}',
    're': 'https://www.reddit.com/r/{}',
    'ub': 'https://www.urbandictionary.com/define.php?term={}',
    'wiki': 'https://en.wikipedia.org/wiki/{}',
    'yt': 'https://invidious.kavin.rocks/results?search_query={}',
    'red': 'https://searx.be/?q={}+site%3Areddit.com',
}
# --- Colors (all currently left at defaults; kept for easy re-enabling) ---
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
# Type: List of QtColor, or QtColor
# c.colors.completion.fg = ['#9cc4ff', 'white', 'white']
# Background color of the completion widget for odd rows.
## Type: QssColor
# c.colors.completion.odd.bg = '#1c1f24'
# Background color of the completion widget for even rows.
## Type: QssColor
# c.colors.completion.even.bg = '#232429'
# Foreground color of completion widget category headers.
## Type: QtColor
# c.colors.completion.category.fg = '#e1acff'
# Background color of the completion widget category headers.
## Type: QssColor
# c.colors.completion.category.bg = 'qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #000000, stop:1 #232429)'
# Top border color of the completion widget category headers.
## Type: QssColor
# c.colors.completion.category.border.top = '#3f4147'
# Bottom border color of the completion widget category headers.
## Type: QssColor
# c.colors.completion.category.border.bottom = '#3f4147'
# Foreground color of the selected completion item.
## Type: QtColor
# c.colors.completion.item.selected.fg = '#282c34'
# Background color of the selected completion item.
## Type: QssColor
# c.colors.completion.item.selected.bg = '#ecbe7b'
# Foreground color of the matched text in the selected completion item.
## Type: QtColor
# c.colors.completion.item.selected.match.fg = '#c678dd'
# Foreground color of the matched text in the completion.
## Type: QtColor
# c.colors.completion.match.fg = '#c678dd'
# Color of the scrollbar handle in the completion view.
## Type: QssColor
#c.colors.completion.scrollbar.fg = 'white'
# Background color for the download bar.
## Type: QssColor
# c.colors.downloads.bar.bg = '#282c34'
# Background color for downloads with errors.
## Type: QtColor
# c.colors.downloads.error.bg = '#ff6c6b'
# Font color for hints.
## Type: QssColor
# c.colors.hints.fg = '#282c34'
# Font color for the matched part of hints.
## Type: QtColor
# c.colors.hints.match.fg = '#98be65'
# Background color of an info message.
## Type: QssColor
# c.colors.messages.info.bg = '#282c34'
# Background color of the statusbar.
## Type: QssColor
# c.colors.statusbar.normal.bg = '#282c34'
# Foreground color of the statusbar in insert mode.
## Type: QssColor
#c.colors.statusbar.insert.fg = 'white'
# Background color of the statusbar in insert mode.
## Type: QssColor
# c.colors.statusbar.insert.bg = '#497920'
# Background color of the statusbar in passthrough mode.
## Type: QssColor
# c.colors.statusbar.passthrough.bg = '#34426f'
# Background color of the statusbar in command mode.
## Type: QssColor
# c.colors.statusbar.command.bg = '#282c34'
# Foreground color of the URL in the statusbar when there's a warning.
## Type: QssColor
#c.colors.statusbar.url.warn.fg = 'yellow'
# Background color of the tab bar.
## Type: QssColor
# c.colors.tabs.bar.bg = '#1c1f34'
# Background color of unselected odd tabs.
## Type: QtColor
# c.colors.tabs.odd.bg = '#282c34'
# Background color of unselected even tabs.
## Type: QtColor
# c.colors.tabs.even.bg = '#282c34'
# Background color of selected odd tabs.
## Type: QtColor
# c.colors.tabs.selected.odd.bg = '#282c34'
# Background color of selected even tabs.
## Type: QtColor
# c.colors.tabs.selected.even.bg = '#282c34'
# Background color of pinned unselected odd tabs.
## Type: QtColor
#c.colors.tabs.pinned.odd.bg = 'seagreen'
# Background color of pinned unselected even tabs.
## Type: QtColor
#c.colors.tabs.pinned.even.bg = 'darkseagreen'
# Background color of pinned selected odd tabs.
## Type: QtColor
# c.colors.tabs.pinned.selected.odd.bg = '#282c34'
# Background color of pinned selected even tabs.
## Type: QtColor
# c.colors.tabs.pinned.selected.even.bg = '#282c34'
# --- Fonts ---
# Default font families to use. Whenever "default_family" is used in a
# font setting, it's replaced with the fonts listed here. If set to an
# empty value, a system-specific monospace default is used.
# Type: List of Font, or Font
c.fonts.default_family = '"Ubuntu Mono Nerd Font"'
# Default font size to use. Whenever "default_size" is used in a font
# setting, it's replaced with the size listed here. Valid values are
# either a float value with a "pt" suffix, or an integer value with a
# "px" suffix.
# Type: String
c.fonts.default_size = '11pt'
# Font used in the completion widget.
# Type: Font
c.fonts.completion.entry = '11pt "Ubuntu Mono Nerd Font"'
# Font used for the debugging console.
# Type: Font
c.fonts.debug_console = '11pt "Ubuntu Mono Nerd Font"'
# Font used for prompts.
# Type: Font
c.fonts.prompts = 'default_size sans-serif'
# Font used in the statusbar.
# Type: Font
c.fonts.statusbar = '11pt "Ubuntu Mono Nerd Font"'
# Bindings to use dmenu rather than qutebrowser's builtin search.
# Bindings for cycling through CSS stylesheets from Solarized Everything CSS:
# https://github.com/alphapapa/solarized-everything-css
config.bind(
    ',ap',
    'config-cycle content.user_stylesheets ~/.config/qutebrowser/themes/css/apprentice/apprentice-all-sites.css ""'
)
config.bind(
    ',dr',
    'config-cycle content.user_stylesheets ~/.config/qutebrowser/themes/css/darculized/darculized-all-sites.css ""'
)
config.bind(
    ',gr',
    'config-cycle content.user_stylesheets ~/.config/qutebrowser/themes/css/gruvbox/gruvbox-all-sites.css ""'
)
config.bind(
    ',sd',
    'config-cycle content.user_stylesheets ~/.config/qutebrowser/themes/css/solarized-dark/solarized-dark-all-sites.css ""'
)
config.bind(
    ',sl',
    'config-cycle content.user_stylesheets ~/.config/qutebrowser/themes/css/solarized-light/solarized-light-all-sites.css ""'
)
# ======================= Redline Insert Mode ============= {{{
# Readline-style cursor movement/editing inside web page text fields.
# Awesome way to open vim from qutebrowser
# c.editor.command = [
# os.environ["TERMINAL"],
# "-e",
# os.environ["EDITOR"],
# "-f",
# "{file}",
# "-c",
# "normal {line}G{column0}1",
# ]
config.bind("<Ctrl-h>", "fake-key <Backspace>", "insert")
config.bind("<Ctrl-a>", "fake-key <Home>", "insert")
config.bind("<Ctrl-e>", "fake-key <End>", "insert")
config.bind("<Ctrl-b>", "fake-key <Left>", "insert")
config.bind("<Mod1-b>", "fake-key <Ctrl-Left>", "insert")
config.bind("<Ctrl-f>", "fake-key <Right>", "insert")
config.bind("<Mod1-f>", "fake-key <Ctrl-Right>", "insert")
config.bind("<Ctrl-p>", "fake-key <Up>", "insert")
config.bind("<Ctrl-n>", "fake-key <Down>", "insert")
config.bind("<Mod1-d>", "fake-key <Ctrl-Delete>", "insert")
config.bind("<Ctrl-d>", "fake-key <Delete>", "insert")
config.bind("<Ctrl-w>", "fake-key <Ctrl-Backspace>", "insert")
config.bind("<Ctrl-u>", "fake-key <Shift-Home><Delete>", "insert")
config.bind("<Ctrl-k>", "fake-key <Shift-End><Delete>", "insert")
config.bind("<Ctrl-x><Ctrl-e>", "edit-text", "insert")
config.bind("<Ctrl-i>", "edit-text", "insert")
config.bind("gF", 'view-source --edit')
# }}}
# Toggle statusbar / tab bar visibility.
config.bind('xb', 'config-cycle statusbar.show always never')
config.bind('xt', 'config-cycle tabs.show always never')
config.bind(
    'xx',
    'config-cycle statusbar.show always never;; config-cycle tabs.show always never'
)
# bindings
# open link using external application
config.bind("A", 'hint links spawn urlportal.sh {hint-url}')
# open link using external application
config.bind("aa", 'spawn urlportal.sh {url}')
# send link to android
config.bind("ak", 'spawn kdeconnect-handler {url}')
# select from the caret to the end of the document
config.bind("ya", 'mode-enter caret ;; selection-toggle ;; move-to-end-of-document')
# config source
config.bind("cs", 'config-source')
# readability
config.bind("R", 'spawn --userscript readability')
# open link in internet archive
config.bind("wa", 'open https://web.archive.org/web/{url}')
config.bind(";wa",
            'hint links fill :open -t https://web.archive.org/web/{hint-url}')
# download locations
config.bind("gd", 'set downloads.location.directory ~/Downloads ; download')
config.bind(
    ";ls",
    'set downloads.location.directory ~/Pictures/Iwatate-Saho/ ; links download'
)
###############################################################################
#  _ _ _
# _ __ ___ __| (_)_ __ ___ ___| |_
# | '__/ _ \/ _` | | '__/ _ \/ __| __|
# | | | __/ (_| | | | | __/ (__| |_
# |_| \___|\__,_|_|_| \___|\___|\__|
###############################################################################
# Any return value other than a literal 'False' means we redirected
# Maps a host to a callable that mutates the QUrl in place (via setHost)
# to point at a privacy-friendly front-end.
REDIRECT_MAP: typing.Dict[str, typing.Callable[..., typing.Optional[bool]]] = {
    "reddit.com": operator.methodcaller('setHost', 'www.teddit.net'),
    "www.reddit.com": operator.methodcaller('setHost', 'www.teddit.net'),
    "twitter.com": operator.methodcaller('setHost', 'nitter.42l.fr'),
    "www.twitter.com": operator.methodcaller('setHost', 'nitter.42l.fr'),
    "youtube.com": operator.methodcaller('setHost', 'invidious.kavin.rocks'),
    "www.youtube.com": operator.methodcaller('setHost', 'invidious.kavin.rocks'),
    "instagram.com": operator.methodcaller('setHost',
                                           'bibliogram.snopyta.org'),
    "www.instagram.com": operator.methodcaller('setHost',
                                               'bibliogram.snopyta.org')
}
def int_fn(info: interceptor.Request):
    """Redirect top-level requests for known hosts to privacy front-ends."""
    # Only rewrite top-level page loads; leave subresources and
    # data:/blob: URLs untouched.
    if info.resource_type != interceptor.ResourceType.main_frame:
        return
    if info.request_url.scheme() in ("data", "blob"):
        return
    url = info.request_url
    rewrite = REDIRECT_MAP.get(url.host())
    if rewrite is None:
        return
    # The callable mutates `url` in place; a literal False means "skip".
    if rewrite(url) is not False:
        message.info("Redirecting to " + url.toString())
        info.redirect(url)
# Install the redirect hook into qutebrowser's request pipeline.
interceptor.register(int_fn)
###############################################################################
# Default user stylesheet (apprentice theme; cycled by the ,ap binding above).
config.set(
    "content.user_stylesheets",
    "~/.config/qutebrowser/themes/css/apprentice/apprentice-all-sites.css")
# kde environment
os.environ['QT_QPA_PLATFORMTHEME'] = 'kde'
###############################################################################
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-11 17:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Create the ``Mosaico`` model (auto-generated by Django 1.9.6).

    Auto-generated migration — keep the operations verbatim; they are
    replayed on every fresh database.
    """

    initial = True

    dependencies = [
        # Depend on whatever user model the project configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('images', '0003_remove_imagefolder_index'),
    ]

    operations = [
        migrations.CreateModel(
            name='Mosaico',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('target', models.ImageField(upload_to='target/%Y/%m/%d', verbose_name='Target image')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, 'Initial'), (1, 'Rendering'), (2, 'Finished'), (10, 'Error'), (11, 'Canceled')], default=0, verbose_name='Status')),
                ('result_image', models.ImageField(blank=True, null=True, upload_to='mosaico/%Y/%m/%d', verbose_name='Out file')),
                ('images_folder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='images.ImageFolder')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
        ),
    ]
|
import fedml
if __name__ == "__main__":
    # Entry point: start a FedML client for hierarchical cross-silo
    # federated learning (configuration is read by fedml itself).
    fedml.run_hierarchical_cross_silo_client()
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from cafe.engine.models.base \
import AutoMarshallingListModel, AutoMarshallingModel
class NeutronExtension(AutoMarshallingModel):
    """Model for a single Neutron API extension entry."""

    def __init__(self, updated, name, links, namespace, alias, description):
        super(NeutronExtension, self).__init__()
        self.alias = alias
        self.description = description
        self.links = links
        self.name = name
        self.namespace = namespace
        self.updated = updated

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Build an instance from one extension summary (no root tag)."""
        # NOTE: The individual extension summaries do not have a ROOT TAG.
        return cls(**json.loads(serialized_str))
class NeutronExtensions(AutoMarshallingListModel):
    """List model for the ``extensions`` collection of a Neutron response."""

    ROOT_TAG = 'extensions'
    LIST_MODEL = NeutronExtension

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize ``{"extensions": [...]}`` into a NeutronExtensions list.

        A missing or empty ``extensions`` key yields an empty list model.
        """
        json_dict = json.loads(serialized_str)
        extensions = cls()
        # FIX: default to an empty *list* — the ROOT_TAG value is a list of
        # extension dicts, not a dict (behavior for the empty case is the
        # same, but the default now matches the expected payload shape).
        for extension in json_dict.get(cls.ROOT_TAG, []):
            extensions.append(cls.LIST_MODEL(**extension))
        return extensions
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import functools
required_conan_version = ">=1.43.0"
class DrogonConan(ConanFile):
    """Conan recipe for Drogon, a C++14/17/20 HTTP application framework."""
    name = "drogon"
    description = "A C++14/17/20 based HTTP web application framework running on Linux/macOS/Unix/Windows"
    topics = ("http-server", "non-blocking-io", "http-framework", "asynchronous-programming")
    license = "MIT"
    homepage = "https://github.com/drogonframework/drogon"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    generators = "cmake", "cmake_find_package_multi"
    # Feature toggles. The database-backend options only apply when
    # with_orm is enabled; configure() deletes them otherwise.
    options = {
        "shared": [False, True],
        "fPIC": [True, False],
        "with_boost": [True, False],
        "with_ctl": [True, False],
        "with_orm": [True, False],
        "with_profile": [True, False],
        "with_brotli": [True, False],
        "with_postgres": [True, False],
        "with_postgres_batch": [True, False],
        "with_mysql": [True, False],
        "with_sqlite": [True, False],
        "with_redis": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_boost": True,
        "with_ctl": False,
        "with_orm": True,
        "with_profile": False,
        "with_brotli": False,
        "with_postgres": False,
        "with_postgres_batch": False,
        "with_mysql": False,
        "with_sqlite": False,
        "with_redis": False,
    }
    @property
    def _source_subfolder(self):
        # Folder the upstream sources are extracted into (see source()).
        return "source_subfolder"
    def export_sources(self):
        """Ship the wrapper CMakeLists and version-specific patches with the recipe."""
        self.copy("CMakeLists.txt")
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])
    def config_options(self):
        """Drop options that do not apply to the target OS."""
        # fPIC has no meaning for MSVC/Windows builds.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        """Prune options made irrelevant by the shared/ORM choices."""
        if self.options.shared:
            # fPIC is implied for shared builds; trantor's linkage must match.
            del self.options.fPIC
            self.options["trantor"].shared = True
        if not self.options.with_orm:
            # Database backends are only reachable through the ORM layer.
            del self.options.with_postgres
            del self.options.with_postgres_batch
            del self.options.with_mysql
            del self.options.with_sqlite
            del self.options.with_redis
        elif not self.options.with_postgres:
            # Batch mode is a libpq feature; meaningless without postgres.
            del self.options.with_postgres_batch
    @property
    def _compilers_minimum_version(self):
        # Minimum compiler versions known to provide the required C++14.
        return {
            "gcc": "6",
            "Visual Studio": "15.0",
            "clang": "5",
            "apple-clang": "10",
        }
    def validate(self):
        """Reject configurations whose compiler cannot build C++14."""
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "14")
        minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
        if minimum_version:
            if tools.Version(self.settings.compiler.version) < minimum_version:
                raise ConanInvalidConfiguration("{} requires C++14, which your compiler does not support.".format(self.name))
        else:
            # Unknown compiler: warn instead of failing the configuration.
            self.output.warn("{} requires C++14. Your compiler is unknown. Assuming it supports C++14.".format(self.name))
    def requirements(self):
        """Declare dependencies; optional ones follow the surviving options."""
        self.requires("trantor/1.5.5")
        self.requires("jsoncpp/1.9.5")
        self.requires("openssl/1.1.1o")
        self.requires("zlib/1.2.12")
        if self.settings.os == "Linux":
            self.requires("libuuid/1.0.3")
        if self.options.with_profile:
            self.requires("coz/cci.20210322")
        if self.options.with_boost:
            self.requires("boost/1.79.0")
        if self.options.with_brotli:
            self.requires("brotli/1.0.9")
        # get_safe: these options may have been deleted in configure().
        if self.options.get_safe("with_postgres"):
            self.requires("libpq/14.2")
        if self.options.get_safe("with_mysql"):
            self.requires("libmysqlclient/8.0.25")
        if self.options.get_safe("with_sqlite"):
            self.requires("sqlite3/3.38.5")
        if self.options.get_safe("with_redis"):
            self.requires("hiredis/1.0.2")
    def source(self):
        """Fetch the upstream sources into the conventional subfolder."""
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
    # NOTE(review): lru_cache on a bound method keeps the instance alive
    # (ruff B019); this is the conventional conan-center idiom, left as-is.
    @functools.lru_cache(1)
    def _configure_cmake(self):
        """Configure CMake once and reuse it across build()/package()."""
        cmake = CMake(self)
        cmake.definitions["BUILD_CTL"] = self.options.with_ctl
        cmake.definitions["BUILD_EXAMPLES"] = False
        cmake.definitions["BUILD_ORM"] = self.options.with_orm
        cmake.definitions["COZ_PROFILING"] = self.options.with_profile
        cmake.definitions["BUILD_DROGON_SHARED"] = self.options.shared
        cmake.definitions["BUILD_DOC"] = False
        cmake.definitions["BUILD_BROTLI"] = self.options.with_brotli
        cmake.definitions["BUILD_POSTGRESQL"] = self.options.get_safe("with_postgres", False)
        cmake.definitions["BUILD_POSTGRESQL_BATCH"] = self.options.get_safe("with_postgres_batch", False)
        cmake.definitions["BUILD_MYSQL"] = self.options.get_safe("with_mysql", False)
        cmake.definitions["BUILD_SQLITE"] = self.options.get_safe("with_sqlite", False)
        cmake.definitions["BUILD_REDIS"] = self.options.get_safe("with_redis", False)
        cmake.configure()
        return cmake
    def build(self):
        """Apply the recipe patches, then build with the cached CMake config."""
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()
    def package(self):
        """Install the built artifacts and the upstream LICENSE file."""
        self.copy("LICENSE", "licenses", self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # The generated CMake config files are replaced by Conan's own.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
    def package_info(self):
        """Describe the package to consumers (libs, system libs, CMake names)."""
        self.cpp_info.libs = ["drogon"]
        if self.settings.os == "Windows":
            self.cpp_info.system_libs.extend(["rpcrt4", "ws2_32", "crypt32", "advapi32"])
        # NOTE(review): '.major' is compared against the string "8" — confirm
        # tools.Version(...).major yields a string for the pinned Conan version.
        if self.settings.compiler == "gcc" and tools.Version(self.settings.compiler.version).major == "8":
            self.cpp_info.system_libs.append("stdc++fs")
        if self.options.with_ctl:
            bin_path = os.path.join(self.package_folder, "bin")
            self.output.info("Appending PATH environment variable: {}".format(bin_path))
            self.env_info.PATH.append(bin_path)
        self.cpp_info.set_property("cmake_file_name", "Drogon")
        self.cpp_info.set_property("cmake_target_name", "Drogon::Drogon")
        self.cpp_info.filenames["cmake_find_package"] = "Drogon"
        self.cpp_info.filenames["cmake_find_package_multi"] = "Drogon"
        self.cpp_info.names["cmake_find_package"] = "Drogon"
        self.cpp_info.names["cmake_find_package_multi"] = "Drogon"
|
'''
Created on 11 Jan 2020
@author: lordmike
'''
import setup_apps
#import app_source_handler
from setup_apps import util
#import json
#import app_source_handler
import logging
import sys
import LMToyBoxPython
from datetime import datetime
import traceback
from config import Config
def conf_root_logger():
    """Configure the root logger; DEBUG is the default level for everything."""
    logging.basicConfig(level=logging.DEBUG)
def create_formatter(log_log_point: bool = True):
    """Build the shared log formatter.

    When log_log_point is true, the record's file path and line number are
    appended so the emitting code location can be found quickly.
    """
    base = '[%(name)-10s] %(levelname)-4s: %(message)s'
    suffix = ' [%(pathname)s:%(lineno)d]' if log_log_point else ''
    return logging.Formatter(base + suffix)
def create_hand_file(log_file_name: str):
    """Create a DEBUG-level file handler (code-location suffix omitted)."""
    handler = logging.FileHandler(log_file_name)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(create_formatter(log_log_point=False))
    return handler
def create_hand_stdout():
    """Create a DEBUG-level stdout handler with the location-suffixed format."""
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(create_formatter())
    return handler
def create_hand_stderr():
    """Create an ERROR-level stderr handler that always logs the code location."""
    fmt = '[%(name)-10s] %(levelname)-4s: %(message)s [%(pathname)s:%(lineno)d]'
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.setLevel(logging.ERROR)
    handler.setFormatter(logging.Formatter(fmt))
    return handler
def conf_logger(logger_name: str, log_file_name: str = ''):
    """Attach the standard stdout/stderr handlers to the named logger.

    A file handler is added as well when log_file_name is non-empty.
    """
    target = logging.getLogger(logger_name)
    target.addHandler(create_hand_stdout())
    target.addHandler(create_hand_stderr())
    if log_file_name:
        target.addHandler(create_hand_file(log_file_name))
def conf_setup_apps_logger(log_file_name: str=''):
    """Attach the standard handlers to the 'setup_apps' package logger.

    Delegates to conf_logger; the body previously duplicated it verbatim.
    """
    conf_logger('setup_apps', log_file_name)
def conf_app_source_handler_logger(log_file_name: str=''):
    """Attach the standard handlers to the 'app_source_handler' logger.

    Delegates to conf_logger; the body previously duplicated it verbatim.
    """
    conf_logger('app_source_handler', log_file_name)
def conf_LMToyBoxPython_handler_logger(log_file_name: str=''):
    """Attach the standard handlers to the 'LMToyBoxPython' logger.

    Delegates to conf_logger; the body previously duplicated it verbatim.
    """
    conf_logger('LMToyBoxPython', log_file_name)
def create_logger(log_file_name: str = ''):
    """Build this script's own 'test_xml' logger.

    Propagation is disabled so records are not duplicated via the root logger.
    """
    logger = logging.getLogger('test_xml')
    for handler in (create_hand_stdout(), create_hand_stderr()):
        logger.addHandler(handler)
    if log_file_name:
        logger.addHandler(create_hand_file(log_file_name))
    logger.propagate = False
    return logger
#SOURCE_PATH = util.fix_path(util.home_path() + '/LM_ToyBox/setup_apps')
#SOURCE_FILE = 'app_source.xml'
if __name__ == '__main__':
    log_file_name = ''
    conf_root_logger()
    conf_logger('config_test')
    # Read the whole run configuration before attaching package loggers,
    # so a broken config file surfaces immediately.
    conf = Config.read_values_from_file()
    log_to_file = conf.test_xml.log_to_file()
    if log_to_file:
        log_file_name = conf.test_xml.log_file_name()
    run_create_sample = conf.test_xml.create_sample()
    overwrite_sample = conf.test_xml.overwrite_sample()
    run_parse = conf.test_xml.parse()
    run_init = conf.test_xml.init()
    run_download = conf.test_xml.download()
    run_install = conf.test_xml.install()
    run_configure = conf.test_xml.configure()
    conf_setup_apps_logger(log_file_name)
    conf_app_source_handler_logger(log_file_name)
    conf_LMToyBoxPython_handler_logger(log_file_name)
    setup_apps.util.stop_urllib3_logger()
    logger = create_logger(log_file_name)
    logger.info('Start time: ' + str(datetime.now()))
    logger.info('Init message test_xml.py')  # typo fixed: 'messsage'
    logger.error('Error logging test')
    logger.debug('Debug logging test')
    logger.info('setup_apps.revision: ' + str(setup_apps.__revision__))
    try:
        if run_create_sample:
            setup_apps.config.create_sample(overwrite=overwrite_sample)
            setup_apps.config.print_sample()
        if run_parse:
            # TODO: What we should do when config xml file does not exist?
            setup_apps.config.parse()
        if run_init:
            setup_apps.config.init()
        if run_download:
            setup_apps.config.download()
        if run_install:
            setup_apps.config.install()
        if run_configure:
            setup_apps.config.configure()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still terminate the run; logger.exception records the traceback.
        logger.exception('Unexpected error while running the configured steps')
    logger.info('Stop time: ' + str(datetime.now()))
    logger.info('END')
|
from sanic import Sanic
from sanic.response import json
from prometheus_sanic import monitor

# NOTE(review): recent Sanic releases require an application name,
# e.g. Sanic("app") — confirm against the pinned sanic version.
app = Sanic()
async def ping(_request):
    """GET /ping handler; replies with a fixed success payload."""
    payload = {'success': 'you are home'}
    return json(payload)
async def hone(_request):
    """GET /hone handler; replies with a fixed success payload."""
    payload = {'success': 'you are home'}
    return json(payload)
async def user(_request):
    """GET/POST /user handler; replies with a fixed success payload."""
    payload = {'success': 'you are home'}
    return json(payload)
if __name__ == "__main__":
    # Install the Prometheus middleware before the routes are registered;
    # expose_endpoint() presumably adds the metrics route — confirm the
    # path against prometheus_sanic's defaults.
    monitor(
        app,
        multiprocess_mode='all',
        is_middleware=True,
    ).expose_endpoint()
    app.add_route(ping, 'ping', methods=['GET'])
    app.add_route(hone, 'hone', methods=['GET'])
    app.add_route(user, 'user', methods=['GET', 'POST'])
    # Single-worker development server; blocks until interrupted.
    app.run(host="127.0.0.1", port=8000, workers=1)
|
from extra_envs.intervener.base import Intervener
from extra_envs.intervener.point import (PointIntervenerRollout,
PointIntervenerNetwork)
from extra_envs.intervener.half_cheetah import (HalfCheetahMpcIntervener,
HalfCheetahHeuristicIntervener)
|
"""Custom errors."""
class ConfigurationError(Exception):
    """Raised when a user has misconfigured Flask-Stormpath."""
|
from abc import ABC
from qrogue.game.logic.actors import StateVector
from qrogue.game.logic.collectibles import Collectible, Coin
from .enemy import Enemy
class Boss(Enemy, ABC):
    """
    A special Enemy with specified target and reward.
    """

    def __init__(self, target: StateVector, reward: Collectible):
        """
        Creates a boss enemy for the given target StateVector and reward.
        The flee chance is fixed at 0.3 for all bosses.
        :param target:
        :param reward:
        """
        super().__init__(target, reward, flee_chance=0.3)
        self.__defeated = False

    @property
    def is_defeated(self) -> bool:
        """
        :return: whether the boss has been defeated yet or not
        """
        # todo why would is_active not be sufficient?
        return self.__defeated

    def _on_reached(self):
        # todo is this really needed? can't we simply override is_reached()?
        self.__defeated = True

    def flee_check(self) -> bool:
        # Fleeing from a boss always succeeds.
        return True
class DummyBoss(Boss):
    """Placeholder boss: targets the 3-qubit zero state, rewards 3 coins."""

    def __init__(self):
        target = StateVector([1, 0, 0, 0, 0, 0, 0, 0])
        super(DummyBoss, self).__init__(target, Coin(3))
|
from django import template
from allauth.account.utils import user_display
from allauth.account.models import UserProfile
register = template.Library()
"""
Display profile:
- Avatar
- Full Name - Email
- Like - Dislike
- Comment/Idioms Stat
- Rank
"""
def profile_tag(parser, token):
    """Template tag entry point: pick the full or small profile node.

    Usage: ``{% profile_tag small %}`` for the compact box; any other
    value renders the full profile.
    """
    try:
        # renamed from 'type' to avoid shadowing the builtin
        tag_name, tag_type = token.split_contents()
    except ValueError:
        # split_contents() raises ValueError on a wrong argument count;
        # the previous bare 'except:' also hid unrelated failures.
        raise template.TemplateSyntaxError("%r tag requires exactly one argument" % token.contents.split()[0])
    if tag_type == 'small':
        return ProfileSmall()
    return Profile()
class Profile(template.Node):
    """Renders the full profile box for the current request's user."""

    template_name = 'account/tag/profile.html'

    def __init__(self):
        pass

    def render(self, context):
        tpl = context.template.engine.get_template(self.template_name)
        user = context.request.user
        if not user.id:
            # Anonymous users get no profile box.
            return ''
        profile, __ = UserProfile.objects.get_or_create(user=user)
        context.update({'user': user, 'profile': profile})
        return tpl.render(context)
class ProfileSmall(Profile):
    """Renders the compact profile box.

    Only the template differs from Profile, so inherit its render()
    instead of duplicating it verbatim as before.
    """
    template_name = 'account/tag/profile_small.html'
# Expose the tag to templates as {% profile_tag <type> %}.
register.tag('profile_tag', profile_tag)
|
#kpbocheneke@gmail.com
import re
VOWELS = "AEIOUY"
CONSONANTS = "BCDFGHJKLMNPQRSTVWXZ"


def is_alternating(word):
    """Return True when *word* strictly alternates vowels and consonants.

    Characters in neither set (digits, punctuation, accented letters)
    break the pattern immediately. Assumes a non-empty word.
    """
    # 'required' is the set the current character must belong to; it
    # flips after every accepted character.
    required = VOWELS if word[0].upper() in VOWELS else CONSONANTS
    for ch in word:
        if ch.upper() not in required:
            return False
        required = CONSONANTS if required is VOWELS else VOWELS
    return True
def striped_words(text):
    """Count the words of *text* that alternate vowels and consonants.

    Words are the runs between non-word characters; single-character
    words are ignored.
    """
    # raw string fixes the invalid '\W' escape warning; the generator
    # avoids materialising an intermediate list inside sum().
    return sum(1 for word in re.split(r"\W+", text)
               if len(word) > 1 and is_alternating(word))
if __name__ == '__main__':
    # Self-checks only; the auto-tester does not rely on them.
    cases = (
        ("My name is ...", 3, "All words are striped"),
        ("Hello world", 0, "No one"),
        ("A quantity of striped words.", 1, "Only of"),
        ("Dog,cat,mouse,bird.Human.", 3, "Dog, cat and human"),
    )
    for text, expected, message in cases:
        assert striped_words(text) == expected, message
|
import arcpy
import os
class Toolbox(object):
    """Toolbox definition; ArcGIS names the toolbox after the .pyt file."""

    def __init__(self):
        self.label = "Create Points Along Lines"
        self.alias = "alonglines"
        # Tool classes exposed by this toolbox.
        self.tools = [CreatePointsAlongLines]
class CreatePointsAlongLines(object):
    """Geoprocessing tool: construct points at intervals along line features."""
    def __init__(self):
        self.label = "Create Points Along Lines"
        self.description = "Constructs point features at intervals along line features."
        self.canRunInBackground = True
    def getParameterInfo(self):
        """Define parameter definitions"""
        # 0: input polyline features.
        in_lines = arcpy.Parameter(
            displayName="Input Line Features",
            name="in_line_features",
            datatype="Feature Layer",
            parameterType="Required",
            direction="Input")
        in_lines.filter.list = ['Polyline']
        # 1: output point feature class.
        out_points = arcpy.Parameter(
            displayName="Output Point Feature Class",
            name="out_point_features",
            datatype="Feature Class",
            parameterType="Required",
            direction="Output")
        # 2: spacing between points, in input units — or a 0..1 fraction
        # of line length when parameter 3 is set.
        interval = arcpy.Parameter(
            displayName="Interval (units are in units of input)",
            name="interval",
            datatype="Double",
            parameterType="Required",
            direction="Input")
        # 3: interpret 'interval' as a percentage of the line length.
        # NOTE(review): filter.list/value strings on a Boolean parameter —
        # confirm arcpy honors them, or whether value should be True/False.
        use_percentage = arcpy.Parameter(
            displayName="Use as percentage (or value)",
            name="use_percentage",
            datatype="Boolean",
            parameterType="Optional",
            direction="Input")
        use_percentage.filter.list = ["PERCENTAGE", "VALUE"]
        use_percentage.value = "VALUE"
        # 4: also emit the start and end vertices of each line.
        end_points = arcpy.Parameter(
            displayName="Start and End Points",
            name="end_points",
            datatype="Boolean",
            parameterType="Optional",
            direction="Input")
        end_points.filter.list = ["END_POINTS", "NO_END_POINTS"]
        end_points.value = "NO_END_POINTS"
        return [in_lines, out_points, interval, use_percentage, end_points]
    def isLicensed(self):
        # No extension license required.
        return True
    def updateParameters(self, parameters):
        """Derive the output schema from the input feature class."""
        parameters[1].parameterDependencies = [parameters[0].name]
        parameters[1].schema.clone = True
        parameters[1].schema.geometryTypeRule = "AsSpecified"
        parameters[1].schema.geometryType = "Point"
        # NOTE(review): fieldsRule is assigned twice; the second value
        # ("None") overrides "FirstDependencyFIDs" — confirm intended.
        parameters[1].schema.fieldsRule = "FirstDependencyFIDs"
        parameters[1].schema.fieldsRule = "None"
        # Carry the source feature's OID through in a dedicated field.
        id_field = arcpy.Field()
        id_field.name = "FID_1"
        id_field.type = "Integer"
        parameters[1].schema.additionalFields = [id_field]
        return
    def updateMessages(self, parameters):
        """Provide error messages if interval is invalid"""
        err_percentage = "Percentages must be between 0.0 and 1.0"
        err_value = "Distance value cannot be a negative number"
        if parameters[3].value: # percentage
            if parameters[2].value < 0.0 or parameters[2].value > 1.0:
                parameters[2].setErrorMessage(err_percentage)
        elif parameters[3].value == False: # value
            if parameters[2].value < 0.0:
                parameters[2].setErrorMessage(err_value)
        return
    def execute(self, parameters, messages):
        """The source code of the tool."""
        in_fc = parameters[0].valueAsText
        out_fc = parameters[1].valueAsText
        interval = parameters[2].value
        use_percentage = parameters[3].value
        end_points = parameters[4].value
        desc = arcpy.Describe(in_fc)
        # Create output feature class
        arcpy.CreateFeatureclass_management(
            os.path.dirname(out_fc),
            os.path.basename(out_fc),
            geometry_type="POINT",
            spatial_reference=desc.spatialReference)
        # Add a field to transfer FID from input
        fid_name = "FID_1"
        arcpy.AddField_management(out_fc, fid_name, "LONG")
        # Create new points based on input lines
        with arcpy.da.SearchCursor(
                in_fc, ['SHAPE@', desc.OIDFieldName]) as search_cursor:
            with arcpy.da.InsertCursor(
                    out_fc, ['SHAPE@', fid_name]) as insert_cursor:
                for row in search_cursor:
                    line = row[0]
                    if line: # if null geometry--skip
                        if end_points:
                            insert_cursor.insertRow([line.firstPoint, row[1]])
                        # Walk along the line, inserting a point every
                        # 'interval' units (or fraction of total length).
                        cur_length = interval
                        max_position = 1
                        if not use_percentage:
                            max_position = line.length
                        while cur_length < max_position:
                            insert_cursor.insertRow(
                                [line.positionAlongLine(
                                    cur_length, use_percentage), row[1]])
                            cur_length += interval
                        if end_points:
                            # positionAlongLine(1, True) is the line's end.
                            insert_cursor.insertRow(
                                [line.positionAlongLine(1, True), row[1]])
        return
|
import datetime
from django.views.decorators.csrf import csrf_exempt
from tropo_webapi.views import TropoView
from .models import CallBox
ENTRY_CODE_GREETING = 'Please enter the entry code or press star to call residents.'
ENTRY_CODE_INVALID = 'Invalid entry code.'
CALL_TRANSFER = 'Calling residents.'
GATE_OPEN_MESSAGE = 'Someone just entered through your gate.'
class IncomingCallView(TropoView):
    """Tropo webhook view driving a gate call box.

    Flow on answer: auto-open, or prompt for an entry code, or transfer
    the caller to the residents.
    """
    def post(self, request, *args, **kwargs):
        # Resolve the call box from the Tropo session's 'to' id before the
        # parent class dispatches to the callback handlers.
        self.call_box = CallBox.objects.get(id=self.session['to']['id'])
        return super(IncomingCallView, self).post(request, *args, **kwargs)
    def answer(self, request, *args, **kwargs):
        """Entry point for a new call: choose the gate-opening strategy."""
        if self.call_box.auto_open:
            return self.open_gate(request)
        if self.call_box.entry_code:
            return self.ask_entry_code(request)
        return self.call_residents(request)
    def ask_entry_code(self, request, *args, **kwargs):
        """Prompt for the 4-digit entry code ('*'/'call' maps to residents)."""
        self.ask('[4 DIGITS], call(*, star, call, call residents)', name='entry_code', say=ENTRY_CODE_GREETING, timeout=5, attempts=2)
        self.on('continue', callback=self.check_entry_code)
        # Failed/errored input falls back to calling the residents.
        self.on('incomplete', callback=self.call_residents)
        self.on('error', callback=self.call_residents)
        self.on('hangup', callback=self.hangup)
        return self.render_to_response()
    def check_entry_code(self, request, entry_code, *args, **kwargs):
        """Validate the collected digits; re-prompt on a wrong code."""
        if str(entry_code['value']) == 'call':
            return self.call_residents(request)
        if int(entry_code['value']) == int(self.call_box.entry_code):
            return self.open_gate(request)
        self.say(ENTRY_CODE_INVALID)
        return self.ask_entry_code(request)
    def call_residents(self, request, *args, **kwargs):
        """Transfer the caller to every active, non-SMS resident."""
        call_residents = self.call_box.residents.filter(active=True, by_sms=False).values_list('phone_number', flat=True)
        if call_residents:
            self.say(CALL_TRANSFER)
            # NOTE(review): under Python 3 map() is lazy — confirm the
            # transfer API accepts an iterator, or wrap it in list().
            self.transfer(to=map(str, call_residents), **{
                'from': self.call_box.id,
            })
        return self.render_to_response()
    def open_gate(self, request, *args, **kwargs):
        """Play the gate-open tone, notify subscribed residents, hang up."""
        self.say(self.call_box.open_key_wav)
        for resident in self.call_box.residents.filter(active=True, entry_sms_alert=True):
            self.message(GATE_OPEN_MESSAGE, resident.phone_number, channel='TEXT')
        # NOTE(review): date_last_opened is assigned but never persisted —
        # a self.call_box.save() appears to be missing here; confirm.
        self.call_box.date_last_opened = datetime.datetime.now()
        return self.hangup(request)
    def hangup(self, request, *args, **kwargs):
        """Drop the Tropo session and end the call."""
        self.session.delete()
        super(IncomingCallView, self).hangup()
        return self.render_to_response()
# Webhooks come from Tropo, not a browser form, so CSRF is exempt.
call_incoming = csrf_exempt(IncomingCallView.as_view())
|
# Las dependencias:
from logging import NullHandler
import pandas as pd
import spacy as sp
import os
import PySimpleGUI as sg
import webview as wv
from spacy.matcher import DependencyMatcher as match
from spacy.lang.es.stop_words import STOP_WORDS
from spacy import displacy
import stanza
import spacy_stanza
from string import punctuation
#los siguientes paquetes son para cuando se implemente polyglot:
#import polyglot
#from polyglot.text import Text, Word
sg.theme('Reddit')
# --- Window layout -------------------------------------------------------
# Menu bar. NOTE(review): the trailing commas make menu_def a tuple of two
# lists rather than a flat list; sg.Menu(menu_def) appears to cope, but
# confirm with the PySimpleGUI version in use.
menu_def = ['Opciones', ['Descargar Modelo Spacy para Entidades','Descargar Modelo Spacy para sustantativos, verbos y lemas', 'Descargar Modelo Stanza','Cerrar']],['&Ayuda',['&Ayuda', 'About...']],
# Bugfix: the three "FILETYPE" radios are mutually exclusive, yet all were
# created with default=True; only '.xlsx' keeps the default now.
layout = [ [sg.Menu(menu_def)],
            [sg.Text('Bienvenido al GN Analizer')],
            [sg.Text('Nombre del archivo de salida'), sg.InputText(key="-OUTFILE-"),sg.Radio('.xlsx',"FILETYPE", key="-XLSX-", default=True), sg.Radio('.csv',"FILETYPE", key="-CSV-", default=False), sg.Radio('.json',"FILETYPE", key="-JSON-", default=False)],
            [sg.Text('Orden de palabras en el archivo generado'),sg.Radio('Mantener palabras en filas originales, una columna por opción',"WORDORDER", key="-KEEPROW-", default=True), sg.Radio('Una palabra por fila y una columna para opción',"WORDORDER", key="-ONEWORDROW-", default=False), sg.Radio('Todas las palabras en la misma columna, una fila por palabra',"WORDORDER", key="-KEEPROWONECOL-", default=False), sg.Radio('Mantener filas, todo en una columna',"WORDORDER", key="-ONECOL-", default=False)],
            [sg.Text('¿Dónde querés guardar el archivo?'), sg.InputText(key="-OUTFOLDER-"), sg.FolderBrowse(target="-OUTFOLDER-")],
            [sg.Text('Elegí archivo a analizar'),sg.Input(), sg.FileBrowse(key="-IN-")],
            [sg.Text('¿Cuál es el nombre de la pestaña?'), sg.InputText(key="-SHEET-")],
            [sg.Text('¿Cuál es el nombre de la columna?'), sg.InputText(key="-COL-")],
            [sg.Text('Elegir un motor de NLP'),sg.Radio('Spacy',"ENGINE", key="-SPAC-", default=True), sg.Radio('Stanford Stanza',"ENGINE", key="-STANZ-", default=False), sg.Radio('Freeling',"ENGINE", key="-FREEL-", default=False), sg.Radio('Gensim',"ENGINE", key="-GENSIM-", default=False), sg.Radio('Polyglot',"ENGINE", key="-POLY-", default=False),sg.Radio('TextBLOB',"ENGINE", key="-BLOB-", default=False)],
            [sg.Text('PoS Tags'), sg.Checkbox('Nombres Propios', key="-PROPN-", default=True), sg.Checkbox('Sustantivos y Verbos', key="-NOUNV-", default=True), sg.Checkbox('Entidades Naturales', key="-NER-", default=False), sg.Checkbox('Separar el label en las entidades naturales', key="-NERL-", default=False), sg.Checkbox('Visualizar entidades de cada frase.', key="-DISPLAY-", default=False), sg.Checkbox('Exportar el sentiment.', key="-SENTIMENT-", default=False)],
            [sg.Button('OK'), sg.Button('Cerrar')]]
# The main application window; its events drive the loop below.
window = sg.Window('GN Analizer', layout)
# Hacemos un loop para procesar eventos y tomar los inputs de la ventana como values
while True:
event, values = window.read()
#primero esperamos eventos del dropdown menu:
if event == 'Descargar Modelo Spacy para Entidades':
os.system('python -m spacy download es_core_news_lg')
if event == 'Descargar Modelo Spacy para sustantativos, verbos y lemas':
os.system('python -m spacy download es_dep_news_trf')
if event == 'Descargar Modelo Stanza':
stanza.download('es', processors={'ner': 'conll02'})
if event == 'About...':
sg.popup('Este programa fue creado para probar spacy y otros motores de NLP.', 'Version 0.7', 'PySimpleGUI rocks...')
#despues, si el usuario clickea en Ok procesamos todo lo que sigue
if event == 'OK':
#Lo primero que hacemos despues del OK es chequear que estén todas las opciones completadas y sino prompteo un error
if values["-OUTFILE-"] == "":
sg.popup(f"No determinaste el nombre del archivo que vamos a generar.")
elif values["-OUTFOLDER-"] == "":
sg.popup(f"No determinaste la carpeta en la que se va a guardar el archivo generado.")
elif values["-IN-"] == "" :
sg.popup(f"No seleccionaste un archivo para analizar.")
elif values["-SHEET-"] == "" :
sg.popup(f"No determinaste en qué hoja del archivo está la columna de texto.")
elif values["-COL-"] == "":
sg.popup(f"No determinaste cuál es la columna de texto.")
elif values["-NOUNV-"] == False and values["-PROPN-"] == False and values["-NER-"] == False and values["-DISPLAY-"] == False and values["-SENTIMENT-"] == False:
sg.popup(f"No clickeaste ninguna opción para analizar las Parts of Speech")
#Si se llenaron todas las opciones (ninguna esta vacia) procedemos a ejecutar el programa
else:
original_file = values["-IN-"]
sheet_name = values["-SHEET-"]
column = values["-COL-"]
#A parte de tomar los valores inputeados, usamos os para normalizar la ruta de la carpeta (porque va a cambiar segun el OS) y para joinearla con el nombre del archivo
new_file = os.path.join(os.path.normcase(values["-OUTFOLDER-"]), values["-OUTFILE-"])
#Ahora, segun que opcion clickeo el usuario, vamos a definir el motor y modelo con las variables nlp y nlpner
# Defino las variables:
if values["-SPAC-"] == True:
try:
nlp = sp.load('es_dep_news_trf')
nlpner = sp.load('es_core_news_lg')
#Con spacy usamos dos modelos diferentes, uno sirve para NER y el otro deptrees y lematizacion
except Exception as s:
sg.popup(f"Ups! Ocurrió el siguiente error: {s}, seguramente tengas que descargar el motor de idioma de Spacy en las opciones.")
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
elif values["-STANZ-"] == True:
try:
nlp = spacy_stanza.load_pipeline("es", processors={'ner': 'conll02'})
nlpner = nlp
#con stanza usamos un solo modelo para las dos variables
except Exception as n:
sg.popup(f"Ups! Ocurrió el siguiente error: {n}, seguramente tengas que descargar la el motor de Stanza en las opciones.")
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
elif values["-FREEL-"] == True:
nlp = sp.load('es_dep_news_trf')
nlpner = sp.load('es_core_news_lg')
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
sg.popup(f"Freeling aún no está disponible. Vamos a usar Spacy.")
elif values["-GENSIM-"] == True:
nlp = sp.load('es_dep_news_trf')
nlpner = sp.load('es_core_news_lg')
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
sg.popup(f"GENSIM aún no está disponible. Vamos a usar Spacy.")
elif values["-POLY-"] == True:
nlp = sp.load('es_dep_news_trf')
nlpner = sp.load('es_core_news_lg')
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
sg.popup(f"Polyglot aún no está disponible. Vamos a usar Spacy.")
elif values["-BLOB-"] == True:
nlp = sp.load('es_dep_news_trf')
nlpner = sp.load('es_core_news_lg')
pos_tag=['NOUN', 'VERB']
pos_propn=['PROPN']
sg.popup(f"TextBlob aún no está disponible. Vamos a usar Spacy.")
try:
my_file = pd.read_excel(original_file, sheet_name= sheet_name)
my_file_index = my_file.index
number_of_rows = len(my_file_index)
sg.popup(f"La columna que vamos a analizar tiene {number_of_rows} filas.")
# Ejecuto acá los fors:
i = 1
stopwords= list(STOP_WORDS)
palabras = []
nombres = []
entidades = []
labels = []
columna = []
#tengo tres rutinas, voy a ejecutar la primera si el usuario pidió una palabra por fila y la segunda si quiere todas las palabras de una fila del archivo original por fila
if values["-ONEWORDROW-"] == True:
for row_info in my_file[column]:
sentence = nlp(str(row_info))
sg.OneLineProgressMeter('Avance de la operación.',i, number_of_rows, 'OK')
i=i+1
for token in sentence:
if(token.text in stopwords or token.text in punctuation):
continue
if values["-PROPN-"] == True and (token.pos_ in pos_propn):
nombres.append(str(token.text))
if values["-NOUNV-"] == True and token.pos_ in pos_tag:
palabras.append(token.lemma_.lower())
if values["-NER-"] == True:
sentencener = nlpner(str(row_info))
if values["-NERL-"] == True:
for entidad in sentencener.ents:
entidades.append(str(entidad.text))
labels.append(str(entidad.label_))
else:
for entidad in sentencener.ents:
entidades.append(str(entidad.text +":"+ entidad.label_))
if values["-DISPLAY-"] == True :
options = {"fine_grained": True, "compact": False, "add_lema": True, "color": "blue"}
wv.create_window('Current Spacy Row', html= displacy.render(sentencener, style="ent", options=options))
wv.start()
#Si pidió mantener las mismas filas del archivo original, ejecuto la segunda rutina
elif values["-KEEPROW-"] == True:
for row_info in my_file[column]:
sentence = nlp(str(row_info))
sg.OneLineProgressMeter('Avance de la operación.',i, number_of_rows, 'OK')
i=i+1
nombres_temp = ""
palabras_temp = ""
entidades_temp = ""
labels_temp = ""
for token in sentence:
if(token.text in stopwords or token.text in punctuation):
continue
if values["-PROPN-"] == True and (token.pos_ in pos_propn):
nombres_temp=str(nombres_temp)+","+str(token.text)
if values["-NOUNV-"] == True and token.pos_ in pos_tag:
palabras_temp=str(palabras_temp)+","+str(token.lemma_.lower())
nombres.append(nombres_temp)
palabras.append(palabras_temp)
if values["-NER-"] == True:
sentencener = nlpner(str(row_info))
if values["-NERL-"] == True:
for entidad in sentencener.ents:
entidades_temp = entidades_temp + "," +str(entidad.text)
labels_temp = labels_temp + "," + str(entidad.label_)
else:
for entidad in sentencener.ents:
entidades_temp = entidades_temp + "," +str(entidad.text +":"+ entidad.label_)
if values["-NERL-"] == True:
entidades.append(entidades_temp)
labels.append(labels_temp)
else:
entidades.append(entidades_temp)
if values["-DISPLAY-"] == True :
options = {"fine_grained": True, "compact": False, "add_lema": True, "color": "blue"}
wv.create_window('Current Spacy Row', html= displacy.render(sentencener, style="ent", options=options))
wv.start()
#Si pidió mantener las mismas filas del archivo original pero todo en una sóla columna, ejecuto esta
elif values["-ONECOL-"] == True:
for row_info in my_file[column]:
sentence = nlp(str(row_info))
sg.OneLineProgressMeter('Avance de la operación.',i, number_of_rows, 'OK')
i=i+1
for token in sentence:
if(token.text in stopwords or token.text in punctuation):
continue
if values["-PROPN-"] == True and (token.pos_ in pos_propn):
columna.append(str(token.text))
if values["-NOUNV-"] == True and token.pos_ in pos_tag:
columna.append(token.lemma_.lower())
if values["-NER-"] == True:
sentencener = nlpner(str(row_info))
if values["-NERL-"] == True:
for entidad in sentencener.ents:
columna.append(str(entidad.text +":"+ entidad.label_))
else:
for entidad in sentencener.ents:
columna.append(str(entidad.text))
if values["-DISPLAY-"] == True :
options = {"fine_grained": True, "compact": False, "add_lema": True, "color": "blue"}
wv.create_window('Current Spacy Row', html= displacy.render(sentencener, style="ent", options=options))
wv.start()
#Si pidió tener una fila por palabra y una sóla columna
elif values["-KEEPROWONECOL-"] == True:
for row_info in my_file[column]:
sentence = nlp(str(row_info))
sg.OneLineProgressMeter('Avance de la operación.',i, number_of_rows, 'OK')
i=i+1
columna_temp = ""
for token in sentence:
if(token.text in stopwords or token.text in punctuation):
continue
if values["-PROPN-"] == True and (token.pos_ in pos_propn):
columna_temp=str(columna_temp+","+str(token.text))
if values["-NOUNV-"] == True and token.pos_ in pos_tag:
columna_temp=str(columna_temp+","+str(token.lemma_.lower()))
if values["-NER-"] == True:
sentencener = nlpner(str(row_info))
if values["-NERL-"] == True:
for entidad in sentencener.ents:
columna_temp = str(columna_temp + "," +str(entidad.text))
else:
for entidad in sentencener.ents:
columna_temp = str(columna_temp + "," +str(entidad.text +":"+ entidad.label_))
columna.append(columna_temp)
if values["-DISPLAY-"] == True :
options = {"fine_grained": True, "compact": False, "add_lema": True, "color": "blue"}
wv.create_window('Current Spacy Row', html= displacy.render(sentencener, style="ent", options=options))
wv.start()
        # Now that the lists are built (whole column, words, proper nouns or entities), build the final dataframe according to the options the user selected
if values["-KEEPROWONECOL-"] == True or values["-ONECOL-"] == True:
new_df = pd.DataFrame({"Todas las palabras" : columna})
elif values["-NOUNV-"] == True and values["-PROPN-"] == True and values["-NER-"] == True:
if values["-NERL-"] == True:
new_df1 = pd.DataFrame({"Lemas" : palabras})
new_df2 = pd.DataFrame({"Nombres propios" : nombres})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df4 = pd.DataFrame({"Labels" : labels})
new_df = new_df1.join(new_df2.join(new_df3.join(new_df4)))
else:
new_df1 = pd.DataFrame({"Lemas" : palabras})
new_df2 = pd.DataFrame({"Nombres propios" : nombres})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df = new_df1.join(new_df2.join(new_df3))
elif values["-NOUNV-"] == True and values["-PROPN-"] == True and values["-NER-"] == False:
new_df1 = pd.DataFrame({"Lemas" : palabras})
new_df2 = pd.DataFrame({"Nombres propios" : nombres})
new_df = new_df1.join(new_df2)
elif values["-NOUNV-"] == True and values["-PROPN-"] == False and values["-NER-"] == True :
if values["-NERL-"] == True:
new_df1 = pd.DataFrame({"Lemas" : palabras})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df4 = pd.DataFrame({"Labels" : labels})
new_df = new_df1.join(new_df3.join(new_df4))
else:
new_df1 = pd.DataFrame({"Lemas" : palabras})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df = new_df1.join(new_df3)
elif values["-NOUNV-"] == False and values["-PROPN-"] == True and values["-NER-"] == True :
if values["-NERL-"] == True:
new_df2 = pd.DataFrame({"Nombres propios" : nombres})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df4 = pd.DataFrame({"Labels" : labels})
new_df = new_df2.join(new_df3.join(new_df4))
else:
new_df2 = pd.DataFrame({"Nombres propios" : nombres})
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df = new_df2.join(new_df3)
elif values["-NOUNV-"] == False and values["-PROPN-"] == True and values["-NER-"] == False :
new_df = pd.DataFrame({"Nombres propios" : nombres})
elif values["-NOUNV-"] == True and values["-PROPN-"] == False and values["-NER-"] == False:
new_df = pd.DataFrame({"Lemas" : palabras})
elif values["-NOUNV-"] == False and values["-PROPN-"] == False and values["-NER-"] == True:
if values["-NERL-"] == True:
new_df3 = pd.DataFrame({"Entidades Naturales" : entidades})
new_df4 = pd.DataFrame({"Labels" : labels})
new_df = new_df3.join(new_df4)
else:
new_df = pd.DataFrame({"Entidades Naturales" : entidades})
        # Now, according to the selected output format, generate the final file
if values["-XLSX-"] == True:
new_file = new_file+".xlsx"
new_df.to_excel(new_file,sheet_name='NLP', index = False)
elif values["-CSV-"] == True:
new_file = new_file+".csv"
new_df.to_csv(new_file, index = False)
elif values["-JSON-"] == True:
new_file = new_file+".json"
new_df.to_json(path_or_buf=new_file)
sg.popup(f'Archivo {new_file} creado correctamente.')
except Exception as e:
sg.popup(f"Ups! Ocurrió el siguiente error: {e}")
if event == "Cerrar" or event == sg.WIN_CLOSED:
force_exit = True
break
window.close()
|
import numpy as np
import pandas as pd
class DataFrameSchema:
    """Validates that a pandas DataFrame matches an expected set of columns.

    Column rules are registered with :meth:`add_column` as objects exposing a
    ``name`` attribute and a ``validate(series)`` method (e.g. SeriesSchema).

    Parameters
    ----------
    name : str
        Name of the DataFrame, used in error messages.
    primary_keys : list of str, optional
        Columns that must jointly be unique across rows.
    row_monatonic_increasing : list of str, optional
        Columns whose values must be monotonic increasing when read by
        numeric column label. NOTE(review): validate() never invokes
        _check_row_monatonic_increasing, so this option has no effect unless
        the private check is called directly -- confirm intent.
    """

    def __init__(self, name, primary_keys=None, row_monatonic_increasing=None):
        self.name = name
        self.primary_keys = primary_keys
        self.columns = {}            # column name -> column schema object
        self.required_columns = []   # names that must be present in the frame
        self.row_monatonic_increasing = row_monatonic_increasing

    def add_column(self, column, optional=False):
        """Register a column schema; optional columns may be absent."""
        self.columns[column.name] = column
        if not optional:
            self.required_columns.append(column.name)

    def validate(self, df):
        """Raise if df has unexpected, missing or invalid columns, or
        duplicated primary-key rows."""
        for col in df:
            if col not in self.columns:
                raise UnexpectedColumn("Column {} is not allowed in DataFrame {}.".format(col, self.name))
        for col in self.required_columns:
            if col not in df.columns:
                raise MissingColumnError("Column {} not in DataFrame {}.".format(col, self.name))
        for col in self.columns:
            if col in df.columns:
                self.columns[col].validate(df[col])
        if self.primary_keys is not None:
            self._check_for_repeated_rows(df)

    def _check_for_repeated_rows(self, df):
        """Raise RepeatedRowError if any primary-key combination repeats."""
        cols_in_df = [col for col in self.primary_keys if col in df.columns]
        # Explicit keyword for clarity: deduplicate on the key columns only.
        if len(df.index) != len(df.drop_duplicates(subset=cols_in_df)):
            raise RepeatedRowError('{} should only have one row for each {}.'.format(self.name, ' '.join(cols_in_df)))

    def _check_row_monatonic_increasing(self, df):
        """Check the configured columns are monotonic increasing row-wise,
        ordering rows by their numeric column labels after transposing."""
        df = df.loc[:, self.row_monatonic_increasing]
        df = df.transpose()
        df.index = pd.to_numeric(df.index)
        df = df.sort_index()
        for col in df.columns:
            # Fix: Series.is_monotonic was removed in pandas 2.0;
            # is_monotonic_increasing is the equivalent property.
            if not df[col].is_monotonic_increasing:
                raise BidsNotMonotonicIncreasing('Bids of each unit are not monotonic increasing.')
class SeriesSchema:
    """Validates a single pandas Series (one DataFrame column).

    Parameters
    ----------
    name : str
        Column name, used in error messages.
    data_type : type or dtype
        ``str``, ``callable`` or a numpy/pandas dtype the series must have.
    allowed_values : iterable, optional
        Closed set of permitted values.
    must_be_real_number : bool
        If True, reject inf, -inf and nulls.
    not_negative : bool
        If True, reject values below zero.
    minimum, maximum : optional
        NOTE(review): stored but never checked by validate(); confirm whether
        range enforcement was intended.
    """
    def __init__(self, name, data_type, allowed_values=None, must_be_real_number=False, not_negative=False,
                 minimum=None, maximum=None):
        self.name = name
        self.data_type = data_type
        self.allowed_values = allowed_values
        self.must_be_real_number = must_be_real_number
        self.not_negative = not_negative
        self.min = minimum
        self.max = maximum

    def validate(self, series):
        """Run all configured checks, raising on the first violation."""
        self._check_data_type(series)
        self._check_allowed_values(series)
        self._check_is_real_number(series)
        self._check_is_not_negative(series)

    def _check_data_type(self, series):
        """Element-wise check for str/callable; dtype comparison otherwise."""
        if self.data_type == str:
            if not all(series.apply(lambda x: type(x) == str)):
                raise ColumnDataTypeError('All elements of column {} should have type str'.format(self.name))
        elif self.data_type == callable:
            if not all(series.apply(lambda x: callable(x))):
                raise ColumnDataTypeError('All elements of column {} should have type callable'.format(self.name))
        elif self.data_type != series.dtype:
            raise ColumnDataTypeError('Column {} should have type {}'.format(self.name, self.data_type))

    def _check_allowed_values(self, series):
        """Every value must belong to allowed_values, when configured."""
        if self.allowed_values is not None:
            if not series.isin(self.allowed_values).all():
                raise ColumnValues("The column {} can only contain the values {}.".format(self.name, self.allowed_values))

    def _check_is_real_number(self, series):
        """Reject inf, -inf and null entries when must_be_real_number is set."""
        if self.must_be_real_number:
            if np.inf in series.values:
                raise ColumnValues("Value inf not allowed in column {}.".format(self.name))
            # Fix: np.NINF was removed in NumPy 2.0; -np.inf is equivalent.
            if -np.inf in series.values:
                raise ColumnValues("Value -inf not allowed in column {}.".format(self.name))
            if series.isnull().any():
                raise ColumnValues("Null values not allowed in column {}.".format(self.name))

    def _check_is_not_negative(self, series):
        # Renamed from _check_is_not_negtaive (typo) -- private helper only.
        if self.not_negative:
            if series.min() < 0.0:
                raise ColumnValues("Negative values not allowed in column '{}'.".format(self.name))
# Error types raised by DataFrameSchema / SeriesSchema validation.
class RepeatedRowError(Exception):
    """Raise for repeated rows (duplicate primary-key combinations)."""
class ColumnDataTypeError(Exception):
    """Raise for columns with incorrect data types."""
class MissingColumnError(Exception):
    """Raise for a required column that is missing."""
class UnexpectedColumn(Exception):
    """Raise for a column not declared in the schema."""
class ColumnValues(Exception):
    """Raise for unallowed column values."""
class BidsNotMonotonicIncreasing(Exception):
    """Raise for non monotonic increasing bids."""
|
from builtins import str
import dmri_segmenter.dmri_brain_extractor as dbe
import dmri_segmenter.make_comparisons as mc
import nibabel as nib
#import numpy as np
import os
def test_get_version_info():
    """get_version_info() should embed git commit/date lines and a version string."""
    vinfo = dbe.get_version_info()
    assert "\ncommit " in vinfo
    assert "\nDate:" in vinfo
    assert "version" in vinfo
def test_feature_vector_classify(fakedata, tmpdir):
    """feature_vector_classify should roughly recover the phantom's TIV."""
    out_dir = str(tmpdir)
    tiv_path = os.path.join(out_dir, 't1tiv.nii')
    # Write the phantom mask to disk so it can be passed in by filename.
    dbe.save_mask(fakedata.phantom, fakedata.aff, tiv_path)
    assert os.path.isfile(tiv_path)
    brain, csf, holes, posterity = dbe.feature_vector_classify(
        fakedata.data, fakedata.aff, fakedata.bvals,
        smoothrad=4.0, Dt=0.0014, Dcsf=0.0021,
        t1wtiv=tiv_path, t1fwhm=[0.5, 1.0, 0.75])
    assert brain.shape == fakedata.phantom.shape
    # Since fakedata is an approximation of a 20C water phantom, don't expect
    # dbe to do too well at classifying the different tissue types -- only the
    # combined intracranial volume is checked for overlap.
    whole_tiv = brain + csf + holes
    assert mc.jaccard_index(fakedata.phantom, whole_tiv) > 0.9
    assert "Classifier loaded from " in posterity
    assert "\nThe classifier is a" in posterity
    assert "rained from" in posterity
def test_get_dmri_brain_and_tiv(fakedata, tmpdir):
    """get_dmri_brain_and_tiv should write both masks and segment the phantom."""
    out_dir = str(tmpdir)
    ec_path = os.path.join(out_dir, 'ec.nii')
    nib.save(nib.nifti1.Nifti1Image(fakedata.data, fakedata.aff), ec_path)
    ec_img = nib.load(ec_path)
    brain_path = os.path.join(out_dir, 'br.nii')
    tiv_path = os.path.join(out_dir, 'tiv.nii')
    _, tiv = dbe.get_dmri_brain_and_tiv(fakedata.data, ec_img, brain_path,
                                        tiv_path, fakedata.bvals, isFLAIR=False)
    assert os.path.isfile(brain_path)
    assert os.path.isfile(tiv_path)
    # Spot-check a strided line of voxels through the middle of the phantom.
    assert (tiv[::10, 45, 4] == [0, 0, 1, 1, 1, 1, 1, 0]).all()
|
#!/usr/bin/env python
# coding: utf-8
import os
import requests
import time
from bs4 import BeautifulSoup
import json
def log(tag, msg):
    """Print a namespaced log line: ``[B2-dl] [<tag>] : <msg>``.

    The first parameter was renamed from ``type`` (which shadowed the
    builtin); all call sites in this file pass it positionally.
    """
    print(f"[B2-dl] [{tag}] : {msg}")
def getInput(soup, name):
    """Return the value attribute of the <input> named *name*, or '' if absent."""
    field = soup.find('input', {'name': name})
    return field['value'] if field is not None else ''
def getSelect(soup, name):
    """Return the value of the selected <option> in the <select> named *name*.

    Returns '' when the select is missing or no option carries ``selected``.
    """
    dropdown = soup.find('select', {'name': name})
    if dropdown is None:
        return ''
    for choice in dropdown.findAll('option'):
        if choice.has_attr("selected"):
            return choice['value']
    return ''
class NinoDownloaderError(ValueError):
    """Raised when login against the Open2b admin backend fails."""
    pass
class NinoDownloader:
    """Scrapes customer records from an Open2b admin backend.

    Logs in on construction; :meth:`downloadAnags` then walks the paginated
    customer grid, fetching each user's main record plus billing/shipping
    addresses, and dumps the results to ``downloads/results_*.json``.
    """
    WEBSITE_URL = ""
    login_endpoint = "/open2b/admin/index.asp"
    customers_endpoint = "/open2b/admin/module/user/index.asp"
    customer_endpoint = "/open2b/admin/module/user/user.asp?UserType=0"
    address_endpoint = "/open2b/admin/module/user/user_addresses.asp?UserType=0"
    session = False
    client_url = False
    json = False
    chunk = -1

    def __init__(self, username, password, client_url, chunk):
        """Log in to *client_url*; raise NinoDownloaderError on bad credentials.

        chunk = -1 disables chunked output; otherwise a JSON file is written
        every *chunk* pages.
        """
        self.WEBSITE_URL = client_url
        self.chunk = int(chunk)
        # Fix: `users` was a class-level list shared by every instance; give
        # each downloader its own accumulator.
        self.users = []
        if self.chunk != -1:
            log("info", "Chunk mode, they'll be created json files every %d pages" % self.chunk)
        body = {
            'Username': username,
            'Password': password,
            'Login': ' Login'
        }
        self.login_url = self.WEBSITE_URL + "/" + self.login_endpoint
        self.session = requests.Session()
        # NOTE(review): verify=False disables TLS certificate checking --
        # acceptable only for a trusted internal host.
        login = self.session.post(self.login_url, data=body, verify=False)
        soup = BeautifulSoup(login.text, "html.parser")
        # The login <div> is only rendered again when authentication failed.
        loginForm = soup.find("div", {
            "id": "login"
        })
        if loginForm is not None:
            raise NinoDownloaderError("Login is not valid")

    def downloadMainAnag(self, user_id):
        """Fetch the main registry fields for one user id."""
        user_url = self.WEBSITE_URL + "/" + self.customer_endpoint + "&Id=" + str(user_id)
        us_page = self.session.get(user_url)
        soup = BeautifulSoup(us_page.text, "html.parser")
        main = {"data": {field: getInput(soup, field) for field in [
            'Code',
            'CompanyName',
            'FirstName',
            'LastName',
            'PersonalCode',
            'CompanyCode2',
            'Birthday',
            'Email',
            'PhoneNumber',
            'MobileNumber',
            'FaxNumber',
        ]}}
        main['data']['Gender'] = getSelect(soup, "Gender")
        return main['data']

    def downloadBillingAnag(self, user_id):
        """Fetch billing and shipping addresses for one user id."""
        user_url = self.WEBSITE_URL + "/" + self.address_endpoint + "&Id=" + str(user_id)
        us_page = self.session.get(user_url)
        soup = BeautifulSoup(us_page.text, "html.parser")
        form = soup.find("form", {
            "name": "User"
        })
        addresses = {}
        rows = form.find("table").findAll("tr")
        CompanyName = rows[0].findAll("td")[-1].text
        FullName = rows[1].findAll("td")[-1].text
        VatNumber = rows[2].findAll("td")[-1].text
        addresses["billingAddress"] = {field: getInput(soup, field) for field in [
            'Street1',
            'Street2',
            'City',
            'PostalCode'
        ]}
        # '---' is the backend's placeholder for an empty field.
        addresses["billingAddress"]['CompanyName'] = CompanyName if CompanyName != '---' else ''
        # Fix: FullName was gated on CompanyName and VatNumber was assigned
        # FullName (copy-paste errors); each field now checks and stores itself.
        addresses["billingAddress"]['FullName'] = FullName if FullName != '---' else ''
        addresses["billingAddress"]['VatNumber'] = VatNumber if VatNumber != '---' else ''
        addresses["billingAddress"]['StateProv'] = getSelect(soup, "StateProv")
        addresses["billingAddress"]['Country'] = getSelect(soup, "Country")
        addresses["shippingAddress"] = {field: getInput(soup, field) for field in [
            'ShipName',
            'ShipStreet1',
            'ShipStreet2',
            'ShipCity',
            'ShipPostalCode'
        ]}
        addresses["shippingAddress"]['ShipStateProv'] = getSelect(soup, "ShipStateProv")
        addresses["shippingAddress"]['ShipCountry'] = getSelect(soup, "ShipCountry")
        return addresses

    def downloadFromTable(self, table, page):
        """Collect every user listed in one grid page into self.users."""
        table_body = table.find("tbody")
        rows = table_body.findAll("tr")
        for row in rows:
            cols = row.findAll("td")
            # The last cell carries a hidden input holding the numeric user id.
            user_id = int(cols[-1].find("input")['value'])
            log("info", "[Page %d] Found user. Email = %s, Id = %d" % (page, cols[2].text, user_id))
            user = {
                "id": user_id,
                "main": self.downloadMainAnag(user_id),
                "addresses": self.downloadBillingAnag(user_id)
            }
            self.users.append(user)

    def createFile(self, pos):
        """Dump accumulated users to downloads/results_<pos>.json and reset."""
        log("info", "JSON creation with data")
        # Fix: the format was "%d", which raised TypeError for the final
        # chunk where pos is the string "last".
        with open("downloads/results_%s.json" % pos, 'w+') as f:
            json.dump(self.users, f, sort_keys=True, indent=4, ensure_ascii=False)
        self.users = []

    def downloadAnags(self):
        """Walk the paginated customer grid until the empty-grid marker appears."""
        log("info", "Open Json File")
        page = 1
        user_url = self.WEBSITE_URL + "/" + self.customers_endpoint
        while True:
            log("info", "Fetch from page %d" % page)
            anag_page = self.session.get(user_url + "?page=" + str(page))
            soup = BeautifulSoup(anag_page.text, "html.parser")
            table = soup.find("table", {
                "class": "grid"
            })
            # A "no rows" cell marks the first page past the last customer.
            if table.find("td", {
                "class": "gridNoRowsMessage"
            }) is not None:
                log("info", "Yeahhh ended now.")
                self.createFile("last")
                break
            self.downloadFromTable(table, page)
            if self.chunk != -1:
                if page % self.chunk == 0:
                    # Fix: integer division; `/` yields a float in Python 3.
                    self.createFile(page // self.chunk)
            page = page + 1
|
# This script experiments with exporting the decoder (to ONNX).
import argparse
from numpy import mod
import torch
import os
import yaml
# Mixing tracing and scripting
import sys
import onnx
import onnxruntime
import numpy as np
from wenet.transformer.asr_model import init_asr_model
from wenet.utils.checkpoint import load_checkpoint
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array, detaching from autograd first
    when the tensor requires grad."""
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()
def output_decoder_onnx(decoder_model,decoder_model_path):
    """Export `decoder_model` to an ONNX file at `decoder_model_path`.

    Traces the model with one random example (batch 16) and declares dynamic
    axes so batch size, encoder length and hypothesis length may vary at
    inference time.
    """
    # Random inputs at 5 encoder lengths; only inputs[0]/encoder_masks[0]
    # are used for tracing -- the list mirrors the test inputs used in
    # check_decoder_onnx_and_pytorch.
    inputs = [torch.randn(16,60*(i+1),256) for i in range(5)]
    encoder_masks = [torch.ones(16,1,60*(i+1),dtype=torch.bool) for i in range(5)]
    hyps_pad = (abs(torch.randn(16, 30))*1000).ceil().long()
    hyps_lens = torch.arange(0,
                             32,
                             step=2,
                             dtype=torch.long)
    # First entry would otherwise be 0 -- presumably every hypothesis must
    # have length >= 1; confirm against the decoder's contract.
    hyps_lens[0] = 1
    # following is output onnx_decoder model code
    dummy_input1= inputs[0]
    encoder_mask = encoder_masks[0]
    torch.onnx.export(decoder_model,
                    (dummy_input1, encoder_mask, hyps_pad,hyps_lens),
                    decoder_model_path,
                    export_params=True,
                    opset_version=12,
                    do_constant_folding=True,
                    input_names=['input','encoder_mask', 'hyps_pad','hyps_lens'],
                    output_names=['output','o1','olens'],
                    dynamic_axes={'input': {0:'batch_size',1:'subsample_len'},
                                    'encoder_mask':{0:'batch_size',2:'subsample_len'},
                                    'hyps_pad':{0:'batch_size',1:'hyp_max_len'},
                                    'hyps_lens':{0:'batch_size'},
                                    'output': {0:'batch_size',1:'hyp_max_len'},
                                    'olens':{0:'batch_size',1:'hyp_max_len'},
                                    # 'show_x':{0:'batch_size',1:'hyp_max_len'}
                                    },
                    verbose=True
                    )
def check_decoder_onnx_and_pytorch(decoder_model,decoder_model_path):
    """Sanity-check the exported decoder ONNX model against PyTorch.

    1. Runs the PyTorch decoder twice on the same random inputs and asserts
       the two passes agree (determinism check).
    2. Structurally validates the ONNX file with onnx.checker.
    3. Runs the ONNX model via onnxruntime on the same inputs and asserts
       the outputs match PyTorch within rtol=1e-03 / atol=1e-05.
    """
    # following is test torch decoder's function forward code
    inputs = [torch.randn(16,60*(i+1),256) for i in range(5)]
    encoder_masks = [torch.ones(16,1,60*(i+1),dtype=torch.bool) for i in range(5)]
    hyps_pad = (abs(torch.randn(16, 30))*1000).ceil().long()
    hyps_lens = torch.arange(0,
                             32,
                             step=2,
                             dtype=torch.long)
    # inputs = [torch.randn(20,60*(i+1),256) for i in range(5)]
    # encoder_masks = [torch.ones(20,1,60*(i+1),dtype=torch.bool) for i in range(5)]
    # hyps_pad = (abs(torch.randn(20, 38))*1000).ceil().long()
    # hyps_lens = torch.arange(0,
    #                         40,
    #                         step=2,
    #                         dtype=torch.long)
    # First entry would otherwise be 0 -- presumably every hypothesis must
    # have length >= 1; confirm against the decoder's contract.
    hyps_lens[0] = 1
    torch_outputs = []
    torch_shows = []
    for i in range(5):
        # compute ONNX Runtime output prediction
        dummy_input1 = inputs[i]
        encoder_mask = encoder_masks[i]
        decoder_out,_ , _ = decoder_model(dummy_input1, encoder_mask, hyps_pad,hyps_lens)
        torch_outputs.append(decoder_out)
        # torch_shows.append(try_x)
    # above is test torch decoder's function forward code
    # following is another test torch decoder's function forward code for check result whether close
    torch_outputs_another = []
    for i in range(5):
        # compute ONNX Runtime output prediction
        dummy_input1 = inputs[i]
        encoder_mask = encoder_masks[i]
        decoder_out1,_ , _ = decoder_model(dummy_input1, encoder_mask, hyps_pad,hyps_lens)
        torch_outputs_another.append(decoder_out1)
        np.testing.assert_allclose(to_numpy(torch_outputs[i]), to_numpy(torch_outputs_another[i]), rtol=1e-03, atol=1e-05)
    # above is another test torch decoder's function forward code for check result whether close
    onnx_model = onnx.load(decoder_model_path)
    onnx.checker.check_model(onnx_model)
    # Print a human readable representation of the graph
    onnx.helper.printable_graph(onnx_model.graph)
    print("decoder onnx_model check pass!")
    # The code below verifies that the exported ONNX model produces the same
    # results as the original PyTorch model.
    import onnxruntime
    ort_session = onnxruntime.InferenceSession(decoder_model_path)
    # print(len(ort_session.get_inputs()))
    print(decoder_model_path + " onnx model has " + str(len(ort_session.get_inputs())) + " args")
    for i in range(5):
        # compute ONNX Runtime output prediction
        dummy_input1 = inputs[i]
        encoder_mask = encoder_masks[i]
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input1),
                      ort_session.get_inputs()[1].name: to_numpy(encoder_mask),
                      ort_session.get_inputs()[2].name: to_numpy(hyps_pad),
                      ort_session.get_inputs()[3].name: to_numpy(hyps_lens),
                      }
        ort_outs = ort_session.run(None, ort_inputs)
        np.testing.assert_allclose(to_numpy(torch_outputs[i]), ort_outs[0], rtol=1e-03, atol=1e-05,err_msg='{0}'.format(i))
        # np.testing.assert_allclose(to_numpy(torch_shows[i]), ort_outs[3], rtol=1e-03, atol=1e-05,err_msg='{0}'.format(i))
    print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def main():
    """CLI entry point: load an ASR checkpoint and export its decoder to ONNX.

    Requires --config (YAML model config), --checkpoint (trained weights) and
    --output_dir (where decoder.onnx is written); the exported model is then
    cross-checked against PyTorch.
    """
    parser = argparse.ArgumentParser(description='export your script model')
    parser.add_argument('--config', required=True, help='config file')
    parser.add_argument('--checkpoint', required=True, help='checkpoint model')
    parser.add_argument('--output_dir', required=True, help='output dir')
    args = parser.parse_args()
    output_dir = args.output_dir
    # Fix: create the directory with the stdlib instead of shelling out to
    # `mkdir -p` (portable, and no shell interpretation of the path argument).
    os.makedirs(output_dir, exist_ok=True)
    # No need gpu for model export
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    # Load the trained model and put it in inference mode for export.
    model = init_asr_model(configs)
    load_checkpoint(model, args.checkpoint)
    model.eval()
    model.set_onnx_mode(True)
    decoder_model = model.decoder
    decoder_model.eval()
    decoder_model_path = os.path.join(output_dir, 'decoder.onnx')
    output_decoder_onnx(decoder_model, decoder_model_path)
    check_onnx = True
    if check_onnx:
        check_decoder_onnx_and_pytorch(decoder_model, decoder_model_path)


if __name__ == '__main__':
    main()
|
from collections import deque
from itertools import islice
from problem0005 import product
def sliding_window(iterable, n):
    """Yield a deque holding each length-``n`` window of *iterable*.

    Fix/generalization: the original called ``next()`` directly on the
    argument, so only iterators worked (a plain list raised TypeError);
    wrapping in ``iter()`` accepts any iterable. If the iterable yields fewer
    than ``n`` items, the first window is right-padded with ``None``.

    NOTE: the *same* deque object is yielded each time and mutated in place;
    copy it (e.g. ``tuple(w)``) if windows must be retained.
    """
    it = iter(iterable)
    d = deque([next(it, None) for _ in range(n)], maxlen=n)
    yield d
    for e in it:
        d.append(e)
        yield d
if __name__ == '__main__':
    # Project Euler #8: the 1000-digit number, one digit per character.
    # map() yields a lazy iterator, which sliding_window consumes exactly once.
    number_series = map(int, list('''
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
'''.replace('\n', '')))
    # Largest product over every window of 13 adjacent digits.
    print(max(product(s) for s in sliding_window(number_series, 13)))
|
import agentlogging
def demo():
    """
    Demo the logging functionality.
    """
    # Exercise every severity level on both logger configurations.
    for heading, mode in (("Development", "dev"), ("Production", "prod")):
        print("=== %s Logging ===" % heading)
        logger = agentlogging.get_logger(mode)
        logger.debug("This is a DEBUG statement")
        logger.info("This is an INFO statement")
        logger.warning("This is a WARNING statement.")
        logger.error("This is an ERROR statement.")
        logger.critical("This is a CRITICAL statement.")
    print("=== System Stream ===")
    print("This is a STANDARD OUT statement.")
# NOTE(review): runs on plain import as well as direct execution; an
# `if __name__ == "__main__":` guard would make the module importable quietly,
# but would change the current import-time behavior -- confirm before adding.
demo()
|
# +
from ase.io import read, Trajectory
import sys
import os
def reduce_traj(traj, x):
    """Thin an ASE ``.traj`` file, keeping every x-th frame.

    Writes ``<name>_r<x>.traj`` next to the input and deletes the original on
    success. Prints a diagnostic and returns without writing when *traj* is
    not a ``.traj`` path or does not exist.
    """
    if not traj.endswith('.traj'):
        print(f'{traj} not a traj file')
        return
    if not os.path.isfile(traj):
        print(f'{traj} not found')
        return
    reduced = traj.replace('.traj', f'_r{x}.traj')
    new = Trajectory(reduced, 'w')
    try:
        # '::x' is ASE's slice syntax: every x-th frame of the trajectory.
        for atoms in read(traj, f'::{x}'):
            new.write(atoms)
    finally:
        new.close()
    # Fix: replaces `assert os.system(f'rm -f {traj}') == 0` -- asserts are
    # stripped under -O, and os.remove is portable and shell-free.
    os.remove(traj)
    print(f'{traj} -> {reduced}')
if __name__ == '__main__':
    # CLI: reduce_traj.py <stride> <file1.traj> [<file2.traj> ...]
    stride = int(sys.argv[1])
    # Fix: the original used enumerate() but never used the index.
    for traj_path in sys.argv[2:]:
        reduce_traj(traj_path, stride)
|
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
from distutils.core import setup, Extension
# Extension module built from the generated binding plus the C++ implementation.
strings = Extension(
    'strings',
    sources = ['strings-binding.cpp', 'strings.cpp'],
)
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# migrating the import to setuptools would leave this setup() call unchanged.
setup(
    name='PyBindGen-strings',
    description='PyBindGen strings',
    ext_modules=[strings],
)
|
# Smoke test: importing A from the private submodule and printing it confirms
# that my_package._module1 resolves correctly.
from my_package._module1 import A
print(A)
|
"""
Contains type definitions for :py:class:`encomp.units.Quantity` objects.
If ``encomp.settings.SETTINGS.type_checking`` is ``True``,
these types will be enforced everywhere.
The dimensionalities defined in this module can be combined with ``*`` and ``/``.
Some commonly used derived dimensionalities (like density) are defined for convenience.
"""
from typing import Generic, TypeVar
from typing import Union
from decimal import Decimal
import numpy as np
import pandas as pd
from uncertainties.core import AffineScalarFunc
from pint.unit import UnitsContainer
R = TypeVar('R')  # type parameter slot so Dimensionality can be subscripted
# for mypy compatibility
class Dimensionality(UnitsContainer, Generic[R]):
    """Generic subclass of ``pint.unit.UnitsContainer`` used for static typing."""
    pass
# type alias for the magnitude input to Quantity
# also accept Decimal and AffineScalarFunc (from uncertainties package)
MagnitudeValue = Union[float, int, Decimal, AffineScalarFunc]
Magnitude = Union[MagnitudeValue,
                  list[MagnitudeValue],
                  tuple[MagnitudeValue, ...],
                  np.ndarray,
                  pd.Series]
# base dimensionalities: the 7 base dimensions in the SI system and dimensionless
# NOTE: these must be defined as Dimensionality(...) * Dimensionless to avoid issues with mypy
_Dimensionless: Dimensionality = Dimensionality()
Dimensionless = Dimensionality() * _Dimensionless
Length = Dimensionality({'[length]': 1}) * _Dimensionless
Mass = Dimensionality({'[mass]': 1}) * _Dimensionless
Time = Dimensionality({'[time]': 1}) * _Dimensionless
Temperature = Dimensionality({'[temperature]': 1}) * _Dimensionless
Substance = Dimensionality({'[substance]': 1}) * _Dimensionless
Current = Dimensionality({'[current]': 1}) * _Dimensionless
Luminosity = Dimensionality({'[luminosity]': 1}) * _Dimensionless
# derived dimensionalities
Area = Length**2
Volume = Length**3
Pressure = Mass / Length / Time**2
MassFlow = Mass / Time
VolumeFlow = Volume / Time
Density = Mass / Volume
Energy = Mass * Length**2 / Time**2
Power = Energy / Time
Velocity = Length / Time
DynamicViscosity = Mass / Length / Time
KinematicViscosity = Length**2 / Time
Frequency = 1 / Time
MolarMass = Mass / Substance
# these dimensionalities might have different names depending on context
HeatCapacity = Energy / Mass / Temperature
ThermalConductivity = Power / Length / Temperature
HeatTransferCoefficient = Power / Area / Temperature
# Readable-name lookup used by get_dimensionality_name().
# NOTE(review): MolarMass, HeatCapacity, ThermalConductivity and
# HeatTransferCoefficient have no entry here, so they fall back to str();
# confirm this omission is intentional.
_DIMENSIONALITIES: dict[UnitsContainer, str] = {
    Dimensionless: 'Dimensionless',
    Length: 'Length',
    Mass: 'Mass',
    Time: 'Time',
    Temperature: 'Temperature',
    Substance: 'Substance',
    Current: 'Current',
    Luminosity: 'Luminosity',
    Area: 'Area',
    Volume: 'Volume',
    Pressure: 'Pressure',
    MassFlow: 'MassFlow',
    VolumeFlow: 'VolumeFlow',
    Density: 'Density',
    Energy: 'Energy',
    Power: 'Power',
    Velocity: 'Velocity',
    DynamicViscosity: 'DynamicViscosity',
    KinematicViscosity: 'KinematicViscosity',
    Frequency: 'Frequency'
}
# reverse lookup: readable name -> dimensionality
_DIMENSIONALITIES_REV = {
    b: a for a, b in _DIMENSIONALITIES.items()}
_BASE_SI_UNITS: tuple[str, ...] = ('m', 'kg', 's', 'K', 'mol', 'A', 'cd')
def get_dimensionality_name(dim: UnitsContainer) -> str:
    """Map *dim* to its registered readable name.

    Falls back to ``str(dim)`` for dimensionalities that have no entry in
    ``_DIMENSIONALITIES``.

    Parameters
    ----------
    dim : UnitsContainer
        input dimensionality

    Returns
    -------
    str
        Readable name, or str representation of the input
    """
    return _DIMENSIONALITIES.get(dim, str(dim))
|
# Parts or the whole documentation of this module
# are copied from the respective module:
# libcloud/compute/drivers/nephoscale.py
# see also:
# https://github.com/apache/libcloud/tree/trunk/libcloud/compute/drivers/nephoscale.py
#
# Apache Libcloud is licensed under the Apache 2.0 license.
# For more information, please see LICENSE and NOTICE file or:
# http://www.apache.org/licenses/LICENSE-2.0
from javaimpl.compute.ComputeContextImpl import ComputeContextImpl
from javaimpl.compute.utils import none_check, wrap_listing, wrap_exception, jlist_str_to_pylist
from javaimpl.compute.utils import jlist_obj_to_pylist, get_property, get_property_list
from javaimpl.compute.utils import jmap_to_pymap, jlist_map_to_pylist_map
from org.askalon.jlibcloud.compute.driverSpecific.nephoscale import NephoscaleNodeTemplateImpl
from javaimpl.compute.base.NodeImpl import NodeImpl
from org.askalon.jlibcloud.compute.driverSpecific.nephoscale import NephoscaleComputeContext
class NephoscaleComputeContextImpl(ComputeContextImpl, NephoscaleComputeContext):
def __init__(self, builder):
ComputeContext.__init__(self, builder)
def createNode(self, node_temp):
"""Creates the node, and sets the ssh key, console key
NephoScale will respond with a 200-200 response after sending a valid
request. If nowait=True is specified in the args, we then ask a few
times until the server is created and assigned a public IP address,
so that deploy_node can be run
>>> from libcloud.compute.providers import get_driver
>>> driver = get_driver('nephoscale')
>>> conn = driver('nepho_user','nepho_password')
>>> conn.list_nodes()
>>> name = 'staging-server'
>>> size = conn.list_sizes()[0]
<NodeSize: id=27, ...name=CS025 - 0.25GB, 10GB, ...>
>>> image = conn.list_images()[9]
<NodeImage: id=49, name=Linux Ubuntu Server 10.04 LTS 64-bit, ...>
>>> server_keys = conn.ex_list_keypairs(key_group=1)[0]
<NodeKey: id=71211, name=markos>
>>> server_key = conn.ex_list_keypairs(key_group=1)[0].id
70867
>>> console_keys = conn.ex_list_keypairs(key_group=4)[0]
<NodeKey: id=71213, name=mistio28434>
>>> console_key = conn.ex_list_keypairs(key_group=4)[0].id
70907
>>> node = conn.create_node(name=name, size=size, image=image, \
console_key=console_key, server_key=server_key)
We can also create an ssh key, plus a console key and
deploy node with them
>>> server_key = conn.ex_create_keypair(name, public_key='123')
71211
>>> console_key = conn.ex_create_keypair(name, key_group=4)
71213
We can increase the number of connect attempts to wait until
the node is created, so that deploy_node has ip address to
deploy the script
We can also specify the location
>>> location = conn.list_locations()[0]
>>> node = conn.create_node(name=name,
... size=size,
... image=image,
... console_key=console_key,
... server_key=server_key,
... connect_attempts=10,
... nowait=True,
... zone=location.id)
"""
try:
kwargs = self._eval_template(node_temp)
kwargs = self._parse_nephoscale_template(node_temp, kwargs)
return wrap_listing(self.conn.create_node(**kwargs), NodeImpl)
except Exception, ex:
raise wrap_exception(ex)
def deployNode(self, node_temp):
try:
kwargs = self._eval_template(node_temp)
kwargs = self._eval_deploy_template(node_temp, kwargs)
kwargs = self._parse_nephoscale_template(node_temp, kwargs)
return wrap_listing(self.conn.deploy_node(**kwargs), NodeImpl)
except Exception, ex:
raise wrap_exception(ex)
def _parse_nephoscale_template(self, node_temp, kwargs):
name = node_temp.getName()
kwargs = get_property(self, name, 'name',
kwargs,lambda x : x)
size = node_temp.getSize()
kwargs = get_property(self, size, 'size',
kwargs,lambda x : x.obj)
image = node_temp.getImage()
kwargs = get_property(self, image, 'image',
kwargs,lambda x : x.obj)
server_key = node_temp.getServerKey()
kwargs = get_property(self, server_key, 'server_key',
kwargs,lambda x : x)
console_key = node_temp.getConsoleKey()
kwargs = get_property(self, console_key, 'console_key',
kwargs,lambda x : x)
zone = node_temp.getZone()
kwargs = get_property(self, zone, 'zone',
kwargs,lambda x : x)
return kwargs
def getTemplateBuilder(self):
return NephoscaleNodeTemplateImpl.newBuilder()
def exStartNode(self, node):
"""start a stopped node"""
try:
if node:
node = node.obj
return self.conn.ex_start_node(node)
except Exception, ex:
raise wrap_exception(ex)
def exStopNode(self, node):
"""stop a running node"""
try:
if node:
node = node.obj
return self.conn.ex_stop_node(node)
except Exception, ex:
raise wrap_exception(ex)
def exListKeypairs(self, ssh=False, password=False, key_group=None):
"""
List available console and server keys
There are two types of keys for NephoScale, ssh and password keys.
If run without arguments, lists all keys. Otherwise list only
ssh keys, or only password keys.
Password keys with key_group 4 are console keys. When a server
is created, it has two keys, one password or ssh key, and
one password console key.
:keyword ssh: if specified, show ssh keys only (optional)
:type ssh: ``bool``
:keyword password: if specified, show password keys only (optional)
:type password: ``bool``
:keyword key_group: if specified, show keys with this key_group only
eg key_group=4 for console password keys (optional)
:type key_group: ``int``
:rtype: ``list`` of :class:`NodeKey`
"""
try:
if not key_group:
key_group = None
if not password:
password = False
if not ssh:
ssh = False
return wrap_listing(self.conn.ex_list_keypairs(ssh, password, key_group), NodeKeyImpl)
except Exception, ex:
raise wrap_exception(ex)
def exCreateKeypair(self, name, public_key=None, password=None, key_group=None):
"""Creates a key, ssh or password, for server or console
The group for the key (key_group) is 1 for Server and 4 for Console
Returns the id of the created key
"""
try:
if not key_group:
key_group = None
if not password:
password = None
if not public_key:
public_key = None
return self.conn.ex_create_keypair(name, public_key, password, key_group)
except Exception, ex:
raise wrap_exception(ex)
def exDeleteKeypair(self, key_id, ssh=False):
"""Delete an ssh key or password given it's id
"""
try:
if not ssh:
ssh = False
return self.conn.ex_delete_keypair(key_id, ssh)
except Exception, ex:
raise wrap_exception(ex)
from org.askalon.jlibcloud.compute.driverSpecific.nephoscale import NodeKey
class NodeKeyImpl(NodeKey):
    """Wraps a libcloud NodeKey, exposing its fields with ' ' as the
    placeholder for missing or None values."""

    # (wrapper attribute, attribute on the wrapped libcloud object)
    _FIELDS = (('idp', 'id'),
               ('namep', 'name'),
               ('key_groupp', 'key_group'),
               ('passwordp', 'password'),
               ('public_keyp', 'public_key'))

    def __init__(self, obj):
        self.obj = obj
        for attr, field in self._FIELDS:
            if hasattr(obj, field):
                setattr(self, attr, none_check(getattr(obj, field), ' '))
            else:
                setattr(self, attr, ' ')
        # Fix: the original tested hasattr(obj, '__repr__()') -- a bogus
        # attribute name that never exists -- so it always fell back to
        # str(obj). Use repr() directly, as intended.
        self.reprp = repr(obj)

    def getId(self):
        return self.idp

    def getName(self):
        return self.namep

    def getKeyGroup(self):
        return self.key_groupp

    def getPassword(self):
        return self.passwordp

    def getPublicKey(self):
        return self.public_keyp

    def toString(self):
        return self.reprp
|
# Copyright (C) 2016 Ross Wightman. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
import tensorflow as tf
# Default kwargs handed to the initial-state initializer (tf.truncated_normal).
# NOTE(review): this dict is a shared module-level object used as a default
# argument below -- callers must copy it before overriding entries like 'dtype'.
_default_initializer_params = {
    'stddev': 0.1,
    'dtype': tf.float32,
}
def bidir_lstm(
        inputs,
        num_units,
        num_layers=1,
        initializer_fn=tf.truncated_normal,
        initializer_params=_default_initializer_params,
        dtype=tf.float32,
        scope=None
):
    """Run an (optionally stacked) bidirectional LSTM over `inputs`.

    Args:
        inputs: batch-major tensor; assumed (batch, time, features) -- TODO confirm.
        num_units: LSTM cell size per direction.
        num_layers: number of stacked layers per direction.
        initializer_fn: fn(shape, **params) producing initial-state tensors.
        initializer_params: extra kwargs for initializer_fn (never mutated here).
        dtype: dtype for the initial state and the RNN.
        scope: optional variable scope passed to the RNN.

    Returns:
        Batch-major output tensor, last dim 2 * num_units (fw ++ bw).
    """
    shape = inputs.get_shape().as_list()
    batch_size = shape[0]
    # the pre-1.0 static RNN API takes a time-major list of tensors
    inputs_unpacked = tf.unpack(inputs, axis=1)
    cell_fw = tf.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell(num_units=num_units)
    cell_bw = tf.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell(num_units=num_units)
    if num_layers > 1:
        # NOTE(review): reusing one cell object per layer is the pre-TF-1.0 idiom
        cell_fw = tf.nn.rnn_cell.MultiRNNCell([cell_fw] * num_layers)
        cell_bw = tf.nn.rnn_cell.MultiRNNCell([cell_bw] * num_layers)
    # bug fix: copy before setting 'dtype' -- the default argument is a shared
    # module-level dict and was previously mutated in place on every call
    params = dict(initializer_params) if initializer_params else {}
    params['dtype'] = dtype
    if isinstance(cell_fw.state_size, tuple):
        initial_state_fw = tuple(
            initializer_fn([batch_size, s], **params) for s in cell_fw.state_size)
        initial_state_bw = tuple(
            initializer_fn([batch_size, s], **params) for s in cell_bw.state_size)
    else:
        initial_state_fw = initializer_fn(shape=[batch_size, cell_fw.state_size], **params)
        initial_state_bw = initializer_fn(shape=[batch_size, cell_bw.state_size], **params)
    outputs, _, _ = tf.nn.bidirectional_rnn(
        cell_fw,
        cell_bw,
        inputs_unpacked,
        initial_state_fw=initial_state_fw,
        initial_state_bw=initial_state_bw,
        dtype=dtype,
        scope=scope)
    # re-pack the time-major list back into a batch-major tensor
    outputs = tf.pack(outputs, axis=1)
    return outputs
def lstm(
        inputs,
        num_units,
        num_layers=1,
        initializer_fn=tf.truncated_normal,
        initializer_params=_default_initializer_params,
        dtype=tf.float32,
        scope=None
):
    """Run an (optionally stacked) unidirectional LSTM over `inputs`.

    Args:
        inputs: batch-major tensor; assumed (batch, time, features) -- TODO confirm.
        num_units: LSTM cell size.
        num_layers: number of stacked layers.
        initializer_fn: fn(shape, **params) producing initial-state tensors.
        initializer_params: extra kwargs for initializer_fn (never mutated here).
        dtype: dtype for the initial state and the RNN.
        scope: optional variable scope passed to the RNN.

    Returns:
        Batch-major output tensor, last dim num_units.
    """
    print('input shape', inputs.get_shape())
    shape = inputs.get_shape().as_list()
    batch_size = shape[0]
    # the pre-1.0 static RNN API takes a time-major list of tensors
    inputs_unpacked = tf.unpack(inputs, axis=1)
    cell = tf.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell(num_units=num_units)
    print('cell state size', cell.state_size)
    if num_layers > 1:
        # NOTE(review): reusing one cell object per layer is the pre-TF-1.0 idiom
        cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers)
    # bug fix: copy before setting 'dtype' -- the default argument is a shared
    # module-level dict and was previously mutated in place on every call
    params = dict(initializer_params) if initializer_params else {}
    params['dtype'] = dtype
    if isinstance(cell.state_size, tuple):
        # bug fix: the initializer params (stddev, dtype) were silently dropped
        # in this branch, unlike bidir_lstm's identical code path
        initial_state = tuple(initializer_fn([batch_size, s], **params) for s in cell.state_size)
    else:
        initial_state = initializer_fn(shape=[batch_size, cell.state_size], **params)
    outputs, _, _ = tf.nn.rnn(
        cell,
        inputs_unpacked,
        initial_state=initial_state,
        dtype=dtype,
        scope=scope)
    # re-pack the time-major list back into a batch-major tensor
    outputs = tf.pack(outputs, axis=1)
    print('output shape', outputs.get_shape())
    return outputs
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .otBase import BaseTTXConverter
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
class table__p_r_o_p(BaseTTXConverter):
    """The AAT glyph-properties table, tag 'prop'.

    All parsing/compiling behavior is inherited from BaseTTXConverter; this
    subclass only binds the mangled table-tag class name. See the Apple
    TrueType Reference Manual, chapter 6, 'prop' (link above).
    """
    pass
|
from pylabnet.utils.helper_methods import load_device_config, get_ip
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.generic_server import GenericServer
from pylabnet.network.core.client_base import ClientBase
import os
class Dummy:
    """Minimal stand-in for a hardware driver; exposes no functionality."""
    pass
class Client(ClientBase):
    """Client for the dummy hardware server; all behavior comes from ClientBase."""
    pass
def launch(**kwargs):
    """ Launches a dummy hardware driver and instantiates server """
    logger = kwargs['logger']
    logger.info(f'Launching with config {kwargs["config"]}')
    # config file name is derived from this module's file name (sans '.py')
    config = load_device_config(
        os.path.basename(__file__)[:-3],
        kwargs['config'],
        logger
    )
    device = Dummy()
    logger.info(f'Created dummy object with configuration parameters {config}')
    # wire the dummy driver into a service and expose it over a generic server
    service = ServiceBase()
    service.assign_module(module=device)
    service.assign_logger(logger=logger)
    server = GenericServer(
        service=service,
        host=get_ip(),
        port=kwargs['port']
    )
    server.start()
|
import unittest
import os
import copy
import sys
from time import sleep
from appium import webdriver
from helpers import report_to_sauce, ANDROID_BASE_CAPS, EXECUTOR
from selenium.common.exceptions import WebDriverException
# Run standard unittest base.
class TestAndroidCreateSession(unittest.TestCase):
    """Checks that an Android Appium session can be created and cleanly destroyed."""

    def tearDown(self):
        # report this session's outcome to Sauce Labs after every test
        report_to_sauce(self.driver.session_id)

    def test_should_create_and_destroy_android_session(self):
        caps = copy.copy(ANDROID_BASE_CAPS)
        caps['name'] = 'test_should_create_and_destroy_android_session'
        self.driver = webdriver.Remote(
            command_executor=EXECUTOR,
            desired_capabilities=caps
        )
        self.driver.implicitly_wait(10)
        # make sure the right package and activity were started
        # (assertEqual: assertEquals is a deprecated alias)
        self.assertEqual('io.appium.android.apis', self.driver.current_package)
        self.assertEqual('.ApiDemos', self.driver.current_activity)
        self.driver.quit()
        sleep(5)
        # should not be able to use the driver anymore
        with self.assertRaises(WebDriverException) as excinfo:
            self.driver.title
        self.assertIn('has already finished', str(excinfo.exception.msg))
|
# from enum import Enum, auto
# Explicit public API of this token-constants module. The commented-out
# entries belong to the disabled Enum-based design kept below for reference.
__all__ = [
    # 'TokenKind',
    # 'TK_UNEXPECTED',
    # 'TK_NONE',
    # 'TK_EOF',
    # 'TK_LITERAL_INT',
    # 'TK_LITERAL_FLOAT',
    # 'TK_OP_PLUS',
    # 'TK_OP_MINUS',
    # 'TK_OP_MULT',
    # 'TK_OP_DIV',
    # 'TK_OP_MOD',
    # 'TK_UNAOP_POS',
    # 'TK_UNAOP_NEG',
    # 'TK_LPAREN',
    # 'TK_RPAREN',
    'SW_MAINFUNCTION',
    'KW_VARIABLE',
    'KW_CONSTANT',
    'KW_ENUMERATION',
    'KW_IF',
    'KW_ELSE',
    'KW_PRINT',
    'KW_FUNCTION',
    'KW_RETURN',
    'KW_AND',
    'KW_OR',
    'KW_XOR',
    'KW_NOT',
    'KW_IS',
    'KW_WHILE',
    'KW_DO',
    'KW_UNTIL',
    'KW_FOR',
    'KW_IN',
    'KW_OF',
    'KW_REPEAT',
    'KW_CONTINUE',
    'KW_BREAK',
    'TT_NAME',
    'TT_KW',
    'TT_INT',
    'TT_FLOAT',
    'TT_STR',
    'TT_LPAREN',
    'TT_RPAREN',
    'TT_LBRACE',
    'TT_RBRACE',
    'TT_LBRACKET',
    'TT_RBRACKET',
    'TT_PLUS',
    'TT_MINUS',
    'TT_MULT',
    'TT_DIV',
    'TT_MOD',
    'TT_COMMA',
    'TT_SEMI',
    'TT_COLON',
    'TT_DOT',
    'TT_EQUAL',
    'TT_QUESTION',
    'OP_EQ',
    'OP_NE',
    'OP_GT',
    'OP_GE',
    'OP_LT',
    'OP_LE',
    'OP_AND',
    'OP_OR',
    'OP_XOR',
    'OP_NOT',
    'OP_ASN',
    'OP_ADD',
    'OP_SUB',
    'OP_DIV',
    'OP_MUL',
    'OP_MOD',
    'OP_TER',
    'OP_INC',
    'OP_DEC',
    'TY_VOID',
    'TY_BOOL',
    'TY_INT8',
    'TY_INT16',
    'TY_INT32',
    'TY_INT64',
    'TY_UINT8',
    'TY_UINT16',
    'TY_UINT32',
    'TY_UINT64',
    'TY_FLOAT16',
    'TY_FLOAT32',
    'TY_FLOAT64',
    'TY_FLOAT80',
    'SFX_INT8',
    'SFX_INT16',
    'SFX_INT32',
    'SFX_INT64',
    'SFX_UINT8',
    'SFX_UINT16',
    'SFX_UINT32',
    'SFX_UINT64',
    'SFX_FLOAT16',
    'SFX_FLOAT32',
    'SFX_FLOAT64',
    'SFX_FLOAT80',
    'SUFFIXES',
    'KEYWORDS',
    'OPERATORS',
    'PRIMITIVES',
    'TYPE_MODIFIERS',
]
# ---------------------------------------------------------
# region CONSTANTS & ENUMS
# Special word: the program entry-point function name (the language uses
# Portuguese keywords throughout).
SW_MAINFUNCTION = 'principal'
# --- keyword spellings ---
KW_VARIABLE = "var"
KW_CONSTANT = "constante"
KW_ENUMERATION = "enumeração"
KW_IF = "se"
KW_ELSE = "senão"
KW_PRINT = "escreva"
KW_FUNCTION = "função"
KW_RETURN = "retorne"
KW_AND = "e"
KW_OR = "ou"
KW_XOR = "oux"
# NOTE(review): the English "not" among otherwise-Portuguese keywords --
# possibly intended to be "não"; confirm before changing.
KW_NOT = "not"
KW_IS = "é"
KW_WHILE = "enquanto"
KW_DO = "faça"
KW_UNTIL = "até"
KW_FOR = "para"
KW_IN = "em"
KW_OF = "de"
KW_REPEAT = "repita"
KW_CONTINUE = "continue"
KW_BREAK = "pare"
# NOTE(review): this harvests the constant *names* ('KW_IF', ...) from
# globals(), not their string values ("se", ...) -- confirm callers expect
# names rather than keyword spellings. Same applies to the lists below.
KEYWORDS = [name for name in globals() if name.startswith('KW_')]
# --- token types ---
TT_NAME = "NAME"
TT_KW = "KEYWORD"
TT_INT = "INTEGER"
TT_FLOAT = "FLOAT"
TT_STR = "STRING"
TT_LPAREN = "("
TT_RPAREN = ")"
TT_LBRACE = "{"
TT_RBRACE = "}"
TT_LBRACKET = "["
TT_RBRACKET = "]"
TT_PLUS = "+"
TT_MINUS = "-"
TT_MULT = "*"
TT_DIV = "/"
TT_MOD = "%"
TT_COMMA = ","
TT_SEMI = ";"
TT_COLON = ":"
TT_DOT = "."
TT_EQUAL = '='
TT_QUESTION = '?'
# --- operator spellings ---
OP_EQ = "=="
OP_NE = "!="
OP_GT = ">"
OP_GE = ">="
OP_LT = "<"
OP_LE = "<="
OP_AND = "&"
OP_OR = "|"
OP_XOR = "^"
OP_NOT = "~"
OP_ASN = "="
OP_ADD = "+"
OP_SUB = "-"
OP_DIV = "/"
OP_MUL = "*"
OP_MOD = "%"
OP_TER = "?"
OP_INC = "++"
OP_DEC = "--"
# constant names (see note above KEYWORDS)
OPERATORS = [name for name in globals() if name.startswith('OP_')]
# --- primitive type names ---
TY_VOID = "vazio"
TY_BOOL = "booleano"
TY_INT8 = "int8s"
TY_INT16 = "int16s"
TY_INT32 = "int32s"
TY_INT64 = "int64s"
TY_UINT8 = "int8d"
TY_UINT16 = "int16d"
TY_UINT32 = "int32d"
TY_UINT64 = "int64d"
TY_FLOAT16 = "real16"
TY_FLOAT32 = "real32"
TY_FLOAT64 = "real64"
TY_FLOAT80 = "real80"
# constant names (see note above KEYWORDS)
PRIMITIVES = [name for name in globals() if name.startswith('TY_')]
# --- numeric-literal suffixes ---
SFX_INT8 = "u"
SFX_INT16 = "c"
SFX_INT32 = "i"
SFX_INT64 = "l"
SFX_UINT8 = "ud"
SFX_UINT16 = "cd"
SFX_UINT32 = "id"
SFX_UINT64 = "ld"
SFX_FLOAT16 = "m"
SFX_FLOAT32 = "r"
SFX_FLOAT64 = "d"
SFX_FLOAT80 = "x"
# constant names (see note above KEYWORDS)
SUFFIXES = [name for name in globals() if name.startswith('SFX_')]
# --- type modifiers (signed/unsigned) ---
TYMOD_ISIGNED = "sinalado"
TYMOD_IUNSIGNED = "dessinalado"
# constant names (see note above KEYWORDS)
TYPE_MODIFIERS = [name for name in globals() if name.startswith('TYMOD_')]
#
# class TokenKind(Enum):
# UNEXPECTED = auto()
# NONE = auto()
# EOF = auto()
# LITERAL_INT = auto()
# LITERAL_FLOAT = auto()
# OP_PLUS = auto()
# OP_MINUS = auto()
# OP_MULT = auto()
# OP_DIV = auto()
# OP_MOD = auto()
# UNAOP_POS = auto()
# UNAOP_NEG = auto()
# LPAREN = auto()
# RPAREN = auto()
#
#
# TK_UNEXPECTED = TokenKind.UNEXPECTED
# TK_NONE = TokenKind.NONE
# TK_EOF = TokenKind.EOF
# TK_LITERAL_INT = TokenKind.LITERAL_INT
# TK_LITERAL_FLOAT = TokenKind.LITERAL_FLOAT
# TK_OP_PLUS = TokenKind.OP_PLUS
# TK_OP_MINUS = TokenKind.OP_MINUS
# TK_OP_MULT = TokenKind.OP_MULT
# TK_OP_DIV = TokenKind.OP_DIV
# TK_OP_MOD = TokenKind.OP_MOD
# TK_UNAOP_POS = TokenKind.UNAOP_POS
# TK_UNAOP_NEG = TokenKind.UNAOP_NEG
# TK_LPAREN = TokenKind.LPAREN
# TK_RPAREN = TokenKind.RPAREN
# def export(prefix: str, enumeration: Type[Enum]):
# globals()[enumeration.__name__] = enumeration
# for name in enumeration.__members__:
# export_name = f'{prefix}_{name}'
# globals()[export_name] = enumeration.__members__[name]
# __all__.append(export_name)
# endregion (constants)
# ---------------------------------------------------------
# region FUNCTIONS
# endregion (functions)
# ---------------------------------------------------------
# region CLASSES
# endregion (classes)
# ---------------------------------------------------------
# export('TT', TokenType)
# print(TK_NONE)
|
# Codeforces "Anton and Danik": decide who won more of the n games.
n = int(input())  # declared game count; the tally below works off the string itself
games = input()
anton_wins = games.count("A")
# exactly as before, every non-'A' character counts toward Danik
danik_wins = len(games) - anton_wins
if anton_wins > danik_wins:
    print("Anton")
elif anton_wins == danik_wins:
    print("Friendship")
else:
    print("Danik")
|
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
def remove_all_inplace(values, item):
    """Remove every occurrence of item from values, mutating the list in place."""
    # slice assignment keeps the list object identity while replacing its contents
    values[:] = [element for element in values if element != item]
def remove_all_inplace_improved(list, value):
    """Remove every occurrence of value from list, mutating it in place.

    NOTE(review): the parameter name shadows the builtin `list`; kept as-is to
    preserve the public signature for keyword callers.
    """
    # EAFP: keep removing until remove() signals the value is gone
    while True:
        try:
            list.remove(value)
        except ValueError:
            break
def remove_all_v2(values, item):
    """Return a new list with every occurrence of item filtered out."""
    return list(filter(lambda candidate: candidate != item, values))
def remove_all_v3(values, item):
    """Return a new list with every occurrence of item filtered out.

    Bug fix: a leftover debug print of the (unevaluated) filter object --
    which only showed '<filter object at 0x...>' -- was removed.
    """
    return list(filter(lambda x: x != item, values))
def remove_all_fast(values, item):
    """Remove all occurrences of item with a single compaction pass.

    Kept elements are shifted to the front of `values` (which is therefore
    partially mutated) and the compacted prefix is returned as a new list.
    """
    write_idx = 0
    # plain iteration: the index from the previous enumerate() was never used
    for value in values:
        if value != item:
            values[write_idx] = value
            write_idx += 1
    return values[:write_idx]
def collect_all(values, condition):
    """Return every element of values for which condition(element) is truthy."""
    matches = []
    for candidate in values:
        if not condition(candidate):
            continue
        matches.append(candidate)
    return matches
def collect_all_v2(values, condition):
    """Comprehension-based variant of collect_all."""
    return [candidate for candidate in values if condition(candidate)]
def collect_all_v3(values, condition):
    """filter()-based variant of collect_all."""
    return [kept for kept in filter(condition, values)]
def main():
    """Demonstrate each removal/collection variant on the same sample data."""
    sample = ["Tim", "Tom", "Mike", "Mike", "Mike"]

    def is_mike(value):
        return value == "Mike"

    names = list(sample)
    remove_all_inplace(names, "Mike")
    print(names)

    names = list(sample)
    remove_all_inplace_improved(names, "Mike")
    print(names)

    print(remove_all_v2(list(sample), "Mike"))

    print(remove_all_v3(list(sample), "Mike"))

    print(remove_all_fast(list(sample), "Mike"))

    print(collect_all(list(sample), is_mike))

    print(collect_all_v2(list(sample), is_mike))

    print(collect_all_v3(list(sample), is_mike))


if __name__ == "__main__":
    main()
|
from model.contact import Contact
import re
from model.functions import clear_double_space
class ContactHelper:
    """Page-object helper wrapping all contact operations of the addressbook UI.

    Every method drives the application through `self.app.wd` (a Selenium
    WebDriver). `contact_cache` memoizes the home-page contact list and is
    set to None by every mutating operation.
    """

    def __init__(self, app):
        self.app = app

    def sumbmit_creation(self):
        """Click the submit button of the contact-creation form.

        NOTE(review): the method name keeps its historical typo ('sumbmit')
        so existing callers keep working.
        """
        wd = self.app.wd
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()

    def create(self, contact):
        """Open the creation form, fill it from `contact` and submit it."""
        self.open_contact_page()
        # the creation form has exactly the same fields as the edit form, so
        # the shared filler is reused instead of duplicating ~90 lines
        self.fill_contact_form(contact)
        self.sumbmit_creation()
        self.contact_cache = None

    def open_contact_page(self):
        """Navigate to the 'add new' contact form unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/addressbook/edit.php") and len(wd.find_elements_by_name("submit")) > 0):
            wd.find_element_by_link_text("add new").click()
        # NOTE(review): this second, unconditional click re-opens the form even
        # when the check above found it already open; preserved as-is -- confirm
        # whether it is intentional before removing.
        wd.find_element_by_link_text("add new").click()

    def open_home_page(self):
        """Navigate to the contact-list (home) page unless already there."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/addressbook/") and len(wd.find_elements_by_name("add")) > 0):
            wd.find_element_by_link_text("home").click()
        # NOTE(review): unconditional second click, see open_contact_page.
        wd.find_element_by_link_text("home").click()

    def delete_some_contact(self, index):
        """Delete the first contact on the home page.

        NOTE(review): `index` is ignored and 0 is always used -- confirm the
        intent before changing; callers may rely on this.
        """
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the contact at position `index` on the home page."""
        from selenium.webdriver.common.keys import Keys
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        # submit deletion, then confirm the javascript alert by sending ENTER
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        wd.find_element_by_xpath("html/body").send_keys(Keys.ENTER)
        self.open_home_page()
        self.contact_cache = None

    def delete_contact_by_id(self, id):
        """Delete the contact whose selection-checkbox value equals `id`."""
        from selenium.webdriver.common.keys import Keys
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_id(id)
        # submit deletion, then confirm the javascript alert by sending ENTER
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        wd.find_element_by_xpath("html/body").send_keys(Keys.ENTER)
        self.open_home_page()
        self.contact_cache = None

    def select_contact_by_index(self, index):
        """Open the edit view of the contact at position `index`."""
        wd = self.app.wd
        wd.find_elements_by_xpath("//img[@title='Edit']")[index].click()

    def select_contact_by_id(self, id):
        """Tick the selection checkbox of the contact with the given id."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def select_contact_by_id_for_edit(self, id):
        """Open the edit view of the contact with the given id."""
        wd = self.app.wd
        wd.find_element_by_xpath("//a[@href='edit.php?id=%s']/img[@title='Edit']" % id).click()

    def edit_contact_by_index(self, contact, index):
        """Replace the contact at position `index` with the data in `contact`."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        self.fill_contact_form(contact)
        wd.find_element_by_xpath("//input[@value='Update']").click()
        self.contact_cache = None

    def edit_contact_by_id(self, id, contact):
        """Replace the contact with the given id with the data in `contact`."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_id_for_edit(id)
        self.fill_contact_form(contact)
        wd.find_element_by_xpath("//input[@value='Update']").click()
        self.contact_cache = None

    def _fill_text_field(self, field_name, text):
        """Click, clear and re-type the text input named `field_name`."""
        wd = self.app.wd
        wd.find_element_by_name(field_name).click()
        wd.find_element_by_name(field_name).clear()
        wd.find_element_by_name(field_name).send_keys(text)

    def _select_option(self, option_xpath):
        """Click the option located by `option_xpath` unless already selected."""
        wd = self.app.wd
        if not wd.find_element_by_xpath(option_xpath).is_selected():
            wd.find_element_by_xpath(option_xpath).click()

    def fill_contact_form(self, contact):
        """Fill every field of the create/edit form from the `contact` model.

        The previous copy-pasted click/clear/send_keys triples are factored
        into _fill_text_field; the call sequence is unchanged.
        """
        wd = self.app.wd
        self._fill_text_field("firstname", contact.firstname)
        self._fill_text_field("middlename", contact.middlename)
        self._fill_text_field("lastname", contact.lastname)
        self._fill_text_field("nickname", contact.nickname)
        # file input: only send_keys with a path, no click/clear needed
        wd.find_element_by_name("photo").send_keys(contact.photo_directory)
        self._fill_text_field("title", contact.title)
        self._fill_text_field("company", contact.company)
        self._fill_text_field("address", contact.address)
        self._fill_text_field("home", contact.home_number)
        self._fill_text_field("mobile", contact.mobile_number)
        self._fill_text_field("work", contact.work_number)
        self._fill_text_field("fax", contact.fax)
        self._fill_text_field("email", contact.email1)
        self._fill_text_field("email2", contact.email2)
        self._fill_text_field("email3", contact.email3)
        self._fill_text_field("homepage", contact.homepage)
        # birthday: day/month are option elements located by xpath
        self._select_option(contact.day_of_birth)
        self._select_option(contact.month_of_birth)
        self._fill_text_field("byear", contact.year_of_birth)
        # anniversary (spelled 'annivesary' throughout the model object)
        self._select_option(contact.day_of_annivesary)
        self._select_option(contact.month_of_annivesary)
        self._fill_text_field("ayear", contact.year_of_annivesary)
        self._fill_text_field("address2", contact.address2)
        self._fill_text_field("phone2", contact.home_number2)
        self._fill_text_field("notes", contact.note)

    def count(self):
        """Return the number of contacts listed on the home page."""
        wd = self.app.wd
        self.open_home_page()
        return len(wd.find_elements_by_name("selected[]"))

    # cached result of get_contact_list(); None means 'stale, re-scrape'
    contact_cache = None

    def get_contact_list(self):
        """Return (and cache) Contact stubs scraped from the home-page table."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.open_home_page()
            self.contact_cache = []
            for element in wd.find_elements_by_name("entry"):
                cells = element.find_elements_by_tag_name("td")
                firstname = cells[2].text
                lastname = cells[1].text
                address = cells[3].text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                all_phones = cells[5].text
                all_emailes = cells[4].text
                # fingerprint of the visible row data, used for comparisons
                # (renamed from `hash`, which shadowed the builtin)
                row_hash = lastname + firstname + address + all_emailes + all_phones
                self.contact_cache.append(
                    Contact(firstname=firstname, middlename=None, lastname=lastname,
                            nickname=None, company=None, address=address, id=id,
                            all_phones_from_home_page=all_phones,
                            all_emailes_from_home_page=all_emailes, hash=row_hash))
        return list(self.contact_cache)

    def open_contact_to_edit_by_index(self, index):
        """Click the Edit link (8th cell) of the row at position `index`."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()

    def open_contact_view_by_index(self, index):
        """Click the details link (7th cell) of the row at position `index`."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def get_contact_info_from_edit_page(self, index):
        """Scrape a Contact from the edit form of the row at position `index`."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        home_number = wd.find_element_by_name("home").get_attribute("value")
        work_number = wd.find_element_by_name("work").get_attribute("value")
        mobile_number = wd.find_element_by_name("mobile").get_attribute("value")
        home_number2 = wd.find_element_by_name("phone2").get_attribute("value")
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, address=address, id=id, home_number=home_number,
                       work_number=work_number, mobile_number=mobile_number, home_number2=home_number2,
                       email1=email1, email2=email2, email3=email3)

    def get_contact_from_view_page(self, index):
        """Scrape the phone numbers from the read-only details view at `index`."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        home_number = re.search("H: (.*)", text).group(1)
        work_number = re.search("W: (.*)", text).group(1)
        mobile_number = re.search("M: (.*)", text).group(1)
        home_number2 = re.search("P: (.*)", text).group(1)
        return Contact(home_number=home_number, mobile_number=mobile_number, work_number=work_number, home_number2=home_number2)

    def delete_spaces(self, contact):
        """Return a copy of `contact` with doubled and edge spaces removed."""
        return Contact(id=contact.id,
                       lastname=clear_double_space(contact.lastname).strip(),
                       firstname=clear_double_space(contact.firstname).strip(),
                       address=clear_double_space(contact.address).strip(),
                       home_number=clear_double_space(contact.home_number).strip(),
                       mobile_number=clear_double_space(contact.mobile_number).strip(),
                       work_number=clear_double_space(contact.work_number).strip(),
                       home_number2=clear_double_space(contact.home_number2).strip(),
                       email1=clear_double_space(contact.email1).strip(),
                       email2=clear_double_space(contact.email2).strip(),
                       email3=clear_double_space(contact.email3).strip())
|
#!/usr/bin/env python3
import inf_common as IC
import hyperparams as HP
import torch
# keep each process single-threaded; parallelism comes from multiprocessing below
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
from torch import Tensor
import time
from typing import Dict, List, Tuple, Optional
from collections import defaultdict
from collections import ChainMap
import sys,random,itertools
import numpy as np
import os
# size of the worker-process pool
NUMPROCESSES = 25
# upper bound on concurrently outstanding jobs (one per worker)
MAX_ACTIVE_TASKS = NUMPROCESSES
# True: ship problem data through the queue; False: rely on fork-inherited globals
DATA_THROUGH_QUEUE = False
def copy_parts_and_zero_grad_in_copy(parts, parts_copies):
    """Sync each copy's weights from the master parts, then clear the copies' grads."""
    for source, target in zip(parts, parts_copies):
        target.load_state_dict(source.state_dict())
    for param in parts_copies.parameters():
        # mirrors Optimizer.zero_grad: detach, then zero in place
        if param.grad is not None:
            param.grad.detach_()
            param.grad.zero_()
def copy_grads_back_from_param(parts,parts_copies):
    """Transfer gradients computed in a worker's copy back onto the master parts.

    NOTE(review): the copy's *parameter tensors* hold the gradient values here
    (eval_and_or_learn_on_one writes each grad into the parameter in place
    before the copy travels back through the queue), so assigning the copied
    parameter object itself as `.grad` is intentional -- confirm when touching.
    """
    for param, param_copy in zip(parts.parameters(),parts_copies.parameters()):
        # print("Copy",param_copy)
        # print("Copy.grad",param_copy.grad)
        param.grad = param_copy
def eval_and_or_learn_on_one(myparts,data,training):
    """Evaluate (and, when training, backprop) the model on one problem.

    Args:
        myparts: the network parts; assumed to expose .parameters() -- TODO confirm type.
        data: tuple (init, deriv, pars, pos_vals, neg_vals, tot_pos, tot_neg).
        training: when True, run backward() and smuggle the gradients out by
            overwriting each parameter's values with its gradient in place
            (gradients themselves don't survive the multiprocessing Queue).

    Returns:
        (loss, posOK_sum, negOK_sum, tot_pos, tot_neg, myparts) where myparts'
        parameter tensors contain *gradients* after a training call.
    """
    # probname = prob_data_list[idx][0]
    # data = prob_data_list[idx][1]
    (init,deriv,pars,pos_vals,neg_vals,tot_pos,tot_neg) = data
    # print("Datum of size",len(init)+len(deriv))
    model = IC.LearningModel(*myparts,init,deriv,pars,pos_vals,neg_vals,tot_pos,tot_neg)
    # print("Model created")
    if training:
        model.train()
    else:
        model.eval()
    (loss_sum,posOK_sum,negOK_sum) = model()
    # print("Model evaluated")
    if training:
        loss_sum.backward()
        # put grad into actual tensor to be returned below (gradients don't go through the Queue)
        for param in myparts.parameters():
            grad = param.grad
            param.requires_grad = False # to allow the in-place operation just below
            if grad is not None:
                param.copy_(grad)
            else:
                param.zero_()
            param.requires_grad = True # to be ready for the next learning when assigned to a new job
        # print("Training finished")
    return (loss_sum[0].item(),posOK_sum,negOK_sum,tot_pos,tot_neg,myparts)
# Dataset shared with forked worker processes (populated by the parent before
# forking when DATA_THROUGH_QUEUE is False). The `global` statement that used
# to precede this assignment was a no-op at module scope and has been dropped.
common_data = None
def worker(q_in, q_out):
    """Child-process loop: pull a job from q_in, train/evaluate, push results to q_out.

    Runs forever; the parent terminates workers externally. When data does not
    travel through the queue, the problem is looked up in the fork-inherited
    `common_data` (presumably indexed by the boolean training flag as
    train/validation split -- TODO confirm).
    """
    global common_data
    while True:
        start_time = time.time()
        if DATA_THROUGH_QUEUE:
            # the problem data itself travels through the queue
            (idx,data,myparts,training) = q_in.get()
        else:
            # only the index travels; data was inherited via fork
            (idx,myparts,training) = q_in.get()
            metainfo,data = common_data[training][idx]
        (loss_sum,posOK_sum,negOK_sum,tot_pos,tot_neg,myparts) = eval_and_or_learn_on_one(myparts,data,training)
        q_out.put((idx,loss_sum,posOK_sum,negOK_sum,tot_pos,tot_neg,myparts,start_time,time.time()))
def get_size_from_idx(idx, actual_data_list):
    """Return the problem size: combined length of the first two data fields."""
    _, payload = actual_data_list[idx]
    return len(payload[0]) + len(payload[1])
def big_go_last(feed_idx_sequence, actual_data_list):
    """Partition the feed sequence by problem size and return only the small ones.

    NOTE(review): despite the name, indices whose size exceeds WHAT_IS_BIG are
    currently *dropped* -- the original `small + big` concatenation is disabled.
    Confirm this filtering is still the intended behavior.
    """
    WHAT_IS_BIG = 10000
    big, small = [], []
    for idx in feed_idx_sequence:
        bucket = big if get_size_from_idx(idx, actual_data_list) > WHAT_IS_BIG else small
        bucket.append(idx)
    print("big_go_last", len(small), len(big))
    return small
def loop_it_out(start_time, t, parts_copies, feed_sequence, actual_data_list, training):
    """Dispatch every index in `feed_sequence` to the worker pool and collect results.

    `parts_copies` doubles as a pool of idle model copies (its length counts
    idle children). Relies on the module-level q_in/q_out, master_parts,
    optimizer and MAX_ACTIVE_TASKS.

    Returns the updated (t, stats, weights) triple, where
      stats   = np.array([loss_sum, posOK_sum, negOK_sum]) accumulated,
      weights = np.array([pos_weight, neg_weight]) accumulated.
    """
    stats = np.zeros(3)  # loss_sum, posOK_sum, negOK_sum
    weights = np.zeros(2)  # pos_weight, neg_weight
    while feed_sequence or len(parts_copies) < MAX_ACTIVE_TASKS:
        # we use parts_copies as a counter of idle children in the pool
        while parts_copies and feed_sequence:
            parts_copy = parts_copies.pop()
            idx = feed_sequence.pop()
            ((probname,probweight),data) = actual_data_list[idx]
            copy_parts_and_zero_grad_in_copy(master_parts,parts_copy)
            t += 1
            print(time.time() - start_time,"time_idx",t,"starting {} job on problem".format("training" if training else "validation"),idx,"of size",len(data[0])+len(data[1]),"and weight",probweight)
            # bug fix: the flag sent to the worker was hard-coded True, so even
            # validation jobs trained the model; pass the actual `training` flag
            if DATA_THROUGH_QUEUE:
                message = (idx,data,parts_copy,training)
            else:
                message = (idx,parts_copy,training)
            q_in.put(message)
            print(time.time() - start_time,"put finished",flush=True)
            print()
        print(time.time() - start_time,"about to call get")
        (idx,loss_sum,posOK_sum,negOK_sum,tot_pos,tot_neg,his_parts,time_start,time_end) = q_out.get() # this may block
        print(time.time() - start_time,"get finished")
        parts_copies.append(his_parts) # increase the ``counter'' again
        if training:
            copy_grads_back_from_param(master_parts,his_parts)
            print(time.time() - start_time,"copy_grads_back_from_param finished")
            optimizer.step()
            print(time.time() - start_time,"optimizer.step() finished")
        # bug fix: the finished job's weights were always read from the global
        # train_data_list, even during validation; use actual_data_list instead
        ((probname,probweight),data) = actual_data_list[idx]
        pos_weight,neg_weight = data[-2],data[-1]
        print(time.time() - start_time,"job finished at on problem",idx,"started",time_start-start_time,"finished",time_end-start_time,"took",time_end-time_start,flush=True)
        print("Of weight",tot_pos,tot_neg,tot_pos+tot_neg)
        print("Debug",loss_sum,posOK_sum,negOK_sum)
        print("Local:",loss_sum/(tot_pos+tot_neg),posOK_sum/tot_pos if tot_pos > 0.0 else 1.0,negOK_sum/tot_neg if tot_neg > 0.0 else 1.0)
        print()
        stats += (loss_sum,posOK_sum,negOK_sum)
        weights += (pos_weight,neg_weight)
    # bug fix: the function previously fell off the end returning None, but
    # callers unpack its result as (t, stats, weights)
    return (t, stats, weights)
if __name__ == "__main__":
    # Experiments with pytorch and torch script
    # what can be learned from a super-simple TreeNN
    # which distinguishes:
    # 1) conj, user_ax, theory_ax_kind in the leaves
    # 2) what inference leads to this in the tree nodes
    #
    # Learn in parallel using a Pool of processes, or something similar
    #
    # probably needs to be run with "ulimit -Sn 3000" or something large
    #
    # To be called as in: ./multi_inf_parallel.py <folder_in> <folder_out> <initial-model>
    #
    # it expects <folder_in> to contain "training_data.pt" and "validation_data.pt"
    # (and maybe also "data_sign.pt")
    #
    # if <initial-model> is not specified,
    # it creates a new one in <folder_out> using the same naming scheme as initializer.py
    #
    # The log, the plot, and intermediate models are also saved in <folder_out>

    # global redirect of prints to the just open "logfile"
    # log = open("{}/run{}".format(sys.argv[2],IC.name_learning_regime_suffix()), 'w')
    # sys.stdout = log
    # sys.stderr = log
    start_time = time.time()
    train_data_list = torch.load("{}/training_data.pt".format(sys.argv[1]))
    print("Loaded train data:",len(train_data_list))
    valid_data_list = torch.load("{}/validation_data.pt".format(sys.argv[1]))
    print("Loaded valid data:",len(valid_data_list))
    if len(sys.argv) >= 4:
        master_parts = torch.load(sys.argv[3])
        print("Loaded model parts",sys.argv[3])
    else:
        thax_sign,sine_sign,deriv_arits,thax_to_str = torch.load("{}/data_sign.pt".format(sys.argv[1]))
        master_parts = IC.get_initial_model(thax_sign,sine_sign,deriv_arits)
        model_name = "{}/initial{}".format(sys.argv[2],IC.name_initial_model_suffix())
        torch.save(master_parts,model_name)
        print("Created model parts and saved to",model_name)
    if HP.TRR == HP.TestRiskRegimen_OVERFIT:
        # merge validation data back to training set (and ditch early stopping regularization)
        train_data_list += valid_data_list
        valid_data_list = []
        print("Merged valid with train; final:",len(train_data_list))
    print()
    print(time.time() - start_time,"Initialization finished")
    # indexed by the boolean `training` flag in worker(): [valid, train]
    common_data = [valid_data_list,train_data_list]
    epoch = 0
    # in addition to the "official model" as named above, we checkpoint it as epoch0 here.
    model_name = "{}/model-epoch{}.pt".format(sys.argv[2],epoch)
    torch.save(master_parts,model_name)
    parts_copies = [] # have as many copies as MAX_ACTIVE_TASKS; they are somehow shared among the processes via Queue, so only one process should touch one at a time
    for i in range(MAX_ACTIVE_TASKS):
        parts_copies.append(torch.load(model_name)) # currently, don't know how to reliably deep-copy in memory (with pickling, all seems fine)
    q_in = torch.multiprocessing.Queue()
    q_out = torch.multiprocessing.Queue()
    my_processes = []
    for i in range(NUMPROCESSES):
        p = torch.multiprocessing.Process(target=worker, args=(q_in,q_out))
        p.start()
        my_processes.append(p)
    if HP.OPTIMIZER == HP.Optimizer_SGD: # could also play with momentum and its dampening here!
        optimizer = torch.optim.SGD(master_parts.parameters(), lr=HP.LEARN_RATE)
    elif HP.OPTIMIZER == HP.Optimizer_ADAM:
        optimizer = torch.optim.Adam(master_parts.parameters(), lr=HP.LEARN_RATE)
    times = []
    train_losses = []
    train_posrates = []
    train_negrates = []
    valid_losses = []
    valid_posrates = []
    valid_negrates = []
    TRAIN_SAMPLES_PER_EPOCH = 1000
    VALID_SAMPLES_PER_EPOCH = 200
    t = 0
    while True:
        epoch += 1
        if epoch > 150:
            break
        times.append(epoch)
        # bug fix: the original one-liner was missing a closing parenthesis (a
        # syntax error); also wrap the fallback in list() so the sequence
        # supports pop() inside loop_it_out
        if len(train_data_list) > TRAIN_SAMPLES_PER_EPOCH:
            train_feed_sequence = random.sample(range(len(train_data_list)),TRAIN_SAMPLES_PER_EPOCH)
        else:
            train_feed_sequence = list(range(len(train_data_list)))
        train_feed_sequence = big_go_last(train_feed_sequence,train_data_list) # largest go last, because loop_it_out pops from the end
        (t,stats,weights) = loop_it_out(start_time,t,parts_copies,train_feed_sequence,train_data_list,True) # True for training
        print()
        print("Epoch",epoch,"training finished at",time.time() - start_time)
        model_name = "{}/model-epoch{}.pt".format(sys.argv[2],epoch)
        print("Saving model to:",model_name)
        torch.save(master_parts,model_name)
        print()
        print("stats-weights",stats,weights)
        loss = stats[0]/(weights[0]+weights[1])
        posRate = stats[1]/weights[0]
        negRate = stats[2]/weights[1]
        print("Training stats:",loss,posRate,negRate,flush=True)
        train_losses.append(loss)
        train_posrates.append(posRate)
        train_negrates.append(negRate)
        print("Validating...")
        # bug fix: wrap the fallback in list() — a bare range object has no .sort()
        if len(valid_data_list) > VALID_SAMPLES_PER_EPOCH:
            valid_feed_sequence = random.sample(range(len(valid_data_list)),VALID_SAMPLES_PER_EPOCH)
        else:
            valid_feed_sequence = list(range(len(valid_data_list)))
        valid_feed_sequence.sort(key=lambda idx : get_size_from_idx(idx,valid_data_list)) # largest go last, because loop_it_out pops from the end
        (t,stats,weights) = loop_it_out(start_time,t,parts_copies,valid_feed_sequence,valid_data_list,False) # False for evaluating
        print()
        print("Epoch",epoch,"validation finished at",time.time() - start_time)
        print("stats-weights",stats,weights)
        loss = stats[0]/(weights[0]+weights[1])
        posRate = stats[1]/weights[0]
        negRate = stats[2]/weights[1]
        print("Validation stats:",loss,posRate,negRate,flush=True)
        valid_losses.append(loss)
        valid_posrates.append(posRate)
        valid_negrates.append(negRate)
        # plotting
        IC.plot_one("{}/plot.png".format(sys.argv[2]),times,train_losses,train_posrates,train_negrates,valid_losses,valid_posrates,valid_negrates)
    # a final "cleanup"
    for p in my_processes:
        p.kill()
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
import random
from PointCloud import PointCloud
class Mersenne(PointCloud):
    """
    Point generator backed by Python's standard Mersenne Twister PRNG
    (the {random} module from the standard library)
    """

    # interface
    def points(self, n, box):
        """
        Yield {n} points drawn uniformly at random from the interior of {box}
        """
        # one point per draw: a coordinate per interval of the box
        yield from (
            [random.uniform(*interval) for interval in box]
            for _ in range(n)
        )  #@\label{line:mt:generators:yield}@
        # all done
        return  #@\label{line:mt:generators:return}@

# end of file
# end of file
|
# backup-1.py
import os
#====================================================================
# Walk the current directory tree and print the S3 object key that each
# (non-hidden) file would be uploaded under, prefixed by the folder's own name.
cwd = os.getcwd()
prefix = cwd.split('/')[-1] # get current folder's name; not all stuff leading to it
# visit the current directory and every one within it, recursively
for here, dirnames, filenames in os.walk(cwd):
    for filename in filenames:
        # don't upload hidden files
        if filename[0] == '.':
            continue
        # absolute, full path to the file on disk
        file_abspath = here + '/' + filename
        # S3 object key: the path relative to cwd
        key = file_abspath[len(cwd):]
        if key[0] == '/': # cleaner S3 keys on Linux
            key = key[1:] # remove leading slash
        # prepend the prefix so files aren't all dumped straight into bucket root
        key = prefix + '/' + key
        # bug fix: `print key` is Python-2 syntax and a SyntaxError on Python 3
        print(key)
|
#!/usr/bin/env python3
from src.main.utils.Logger import Logger
from src.test.base.BaseTest import BaseTest
from src.main.base.pages.LoginPage import LoginPage
from src.main.exceptions.PageException import PageException
import unittest
class TestRecoverPasswordPage(BaseTest, unittest.TestCase):
    """UI test: recovering the password for a username that matches no account
    must surface Salesforce's "can't find a username" error message."""
    # NOTE(review): `global logger` at class scope promotes the name to module
    # level; a plain module-level `logger = Logger(__name__)` would be clearer.
    global logger
    logger = Logger(__name__)
    # page under test and the credentials used by the scenario
    URL = "https://login.salesforce.com/?"
    VALID_USERNAME = "gal"
    VALID_PASSWORD = "Aa123456"
    # exact error text Salesforce shows for an unknown username
    EXPECTED_ERR_MSG = \
        "We can’t find a username that matches what you entered. " \
        "Verify that your username is an email address (for example, username@company.com)."
    '''
    [Description]
    test_invalid_attempt_to_authenticate
    :cvar -> This test scenario will simulate a valid login authentication
    '''
    def test_attempt_to_recover_password(self):
        # Open the login page, enter the recover-password flow, submit a
        # username and assert the expected error message comes back.
        try:
            st = self.timer()  # wall-clock start for the duration log below
            logger.debug("{} - test_invalid_attempt_to_authenticate".format(__name__))
            driver = self.driver
            logger.info("{} - GET -> {}".format(__name__, self.URL))
            driver.get(self.URL)
            lp = LoginPage(driver)
            rp = lp.recover_password()
            msg_txt = rp.recover(self.VALID_USERNAME)
            logger.debug("{} - AssertEquals -> expected: {} | actual: {}".format(__name__, self.EXPECTED_ERR_MSG,
                                                                                msg_txt))
            self.assertEqual(self.EXPECTED_ERR_MSG, msg_txt)
            et = self.timer()
            logger.debug("{} - test_valid_attempt_to_authenticate took {} seconds".format(__name__, et - st))
        except PageException as e:
            # NOTE(review): the exception is logged (as both warn and error) but
            # swallowed, so the test cannot fail on PageException — confirm intended
            logger.warn("{} - WARNING : {}".format(__name__, e))
            logger.error("{} - ERROR : {}".format(__name__, e))


if __name__ == '__main__':
    unittest.main()
|
from pathlib import Path
from unittest import mock
from tests.cli_test_case import CliTestCase
class MinitestTest(CliTestCase):
    """CLI test: `record tests ... minitest <dir>` must upload the gzipped JSON
    payload captured in data/minitest/record_test_result.json."""
    # directory holding the minitest report fixtures
    test_files_dir = Path(__file__).parent.joinpath('../data/minitest/').resolve()
    # expected request payload from a known-good run
    result_file_path = test_files_dir.joinpath('record_test_result.json')

    @mock.patch('requests.request')
    def test_record_test_minitest(self, mock_post):
        # run the CLI against the fixture directory (trailing slash kept —
        # presumably required by the command's path handling; TODO confirm)
        result = self.cli('record', 'tests', '--session', self.session, 'minitest', str(self.test_files_dir) + "/")
        self.assertEqual(result.exit_code, 0)
        # compare the gzipped JSON body of the mocked HTTP request, ignoring key order
        payload = self.gzipped_json_payload(mock_post)
        expected = self.load_json_from_file(self.result_file_path)
        self.assert_json_orderless_equal(expected, payload)
|
import os
import sys
import unittest
import warnings
from .. import cpu_matcher
from autocnet.examples import get_path
from autocnet.graph.network import CandidateGraph
sys.path.append(os.path.abspath('..'))
# NOTE(review): the entire TestMatcher suite below is wrapped in a module-level
# string literal, i.e. it is DISABLED and never executed (it depends on cv2 /
# SIFT availability). Re-enable by removing the surrounding quotes.
"""class TestMatcher(unittest.TestCase):
def setUp(self):
im1 = cv2.imread(get_path('AS15-M-0296_SML.png'))
im2 = cv2.imread(get_path('AS15-M-0297_SML.png'))
self.fd = {}
sift = cv2.xfeatures2d.SIFT_create(10)
self.fd['AS15-M-0296_SML.png'] = sift.detectAndCompute(im1, None)
self.fd['AS15-M-0297_SML.png'] = sift.detectAndCompute(im2, None)
def test_flann_match_k_eq_2(self):
fmatcher = cpu_matcher.FlannMatcher()
source_image = self.fd['AS15-M-0296_SML.png']
fmatcher.add(source_image[1], 0)
self.assertTrue(len(fmatcher.nid_lookup), 1)
fmatcher.train()
with warnings.catch_warnings(record=True) as w:
fmatcher.query(self.fd['AS15-M-0296_SML.png'][1], 0, k=2)
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
def test_cpu_match(self):
# Build a graph
adjacency = get_path('two_image_adjacency.json')
basepath = get_path('Apollo15')
cang = CandidateGraph.from_adjacency(adjacency, extractor_method='vlfeat', basepath=basepath)
# Extract features
cang.extract_features(extractor_parameters={'nfeatures': 700})
# Make sure cpu matcher is used for test
edges = list()
from autocnet.matcher.cpu_matcher import match as match
for s, d in cang.edges():
cang[s][d]['data']._match = match
edges.append(cang[s][d])
# Assert none of the edges have masks yet
for edge in edges:
self.assertTrue(edge['data'].masks.empty)
# Match & outlier detect
cang.match()
cang.symmetry_checks()
# Grab the length of a matches df
match_len = len(edges[0]['data'].matches.index)
# Assert symmetry check is now in all edge masks
for edge in edges:
self.assertTrue('symmetry' in edge['data'].masks)
# Assert matches have been populated
for edge in edges:
self.assertTrue(not edge['data'].matches.empty)
# Re-match
cang.match()
# Assert that new matches have been added on to old ones
self.assertEqual(len(edges[0]['data'].matches.index), match_len * 2)
# Assert that the match cleared the masks df
for edge in edges:
self.assertTrue(edge['data'].masks.empty)
def tearDown(self):
pass"""
|
from selenium import webdriver
import pandas as pd
import time
from Scripts.params import *
# `ticker` is star-imported from Scripts.params
website = 'http://www.nasdaq.com/symbol/' + ticker + '/news-headlines'
def getText(someList):
    """Return the .text attribute of every element in someList, in order."""
    return [element.text for element in someList]
# Scrape every page of news headlines (title + date) for the ticker and dump
# the result to Headlines.csv.
dr = webdriver.Chrome('/usr/local/bin/chromedriver')
dr.get(website)
titles = []
dates = []
# renamed from `next`, which shadowed the builtin
next_button = dr.find_elements_by_class_name('pagination__next')
while next_button[0].is_enabled():
    titles.append(getText(dr.find_elements_by_class_name('quote-news-headlines__item-title')))
    dates.append(getText(dr.find_elements_by_class_name('quote-news-headlines__date')))
    # click via JS to avoid overlay/interactability issues
    dr.execute_script("arguments[0].click();", next_button[0])
    time.sleep(3)  # give the next page of headlines time to load
# NOTE(review): each DataFrame row holds one list-of-lists per page — confirm
# this is the intended CSV layout rather than one headline per row.
table = pd.DataFrame([titles, dates])
table.to_csv("Headlines.csv")
# bug fix: the browser was never closed, leaking a chromedriver process
dr.quit()
|
"""Schema calibration set"""
from pydantic import BaseModel
class CalibrationSet(BaseModel):
    """Pydantic schema for one calibration-set record."""
    # unique identifier of the calibration set
    calibration_set_identifier: str
    # multiplicative calibration factor
    calibration_factor: float

    class Config:
        # allow construction directly from ORM objects (pydantic v1 `orm_mode`)
        orm_mode = True
|
"""python emulator"""
from PySide2.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QWidget
from PySide2.QtGui import QFont
class RegisterWidget(QWidget):
    """A labelled CPU-register display: a caption next to a narrow line edit."""

    def __init__(self, text):
        QWidget.__init__(self)
        # caption on the left; only set when a non-empty text was given
        self.label = QLabel()
        if text:
            self.label.setText(text)
        # fixed-width monospace field holding the register's value
        self.text_input = QLineEdit()
        self.text_input.setFixedWidth(35)
        self.text_input.setFont(QFont('Fira Code', 8, QFont.Medium))
        row = QHBoxLayout()
        for child in (self.label, self.text_input):
            row.addWidget(child)
        row.addStretch()
        self.setLayout(row)

    def set_text(self, text):
        """Update the caption shown next to the register value."""
        self.label.setText(text)

    def set_value(self, value):
        """Display `value` in the register's line edit."""
        self.text_input.setText(value)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class Contact(models.Model):
    """Address-book entry; attached to projects through the ProjectContact
    through-model (which carries the role of the contact on each project)."""
    projects = models.ManyToManyField(
        "project.Project", through="ProjectContact", related_name="contacts"
    )
    business = models.CharField(_("Business"), max_length=512, blank=True)
    salutation = models.CharField(_("Salutation"), max_length=512, blank=True)
    first_name = models.CharField(_("First Name"), max_length=512, blank=True)
    last_name = models.CharField(_("Last Name"), max_length=512, blank=True)
    phone = models.CharField(_("Phone"), max_length=512, blank=True)
    email = models.EmailField(_("Email"), blank=True)
    website = models.URLField(_("Website"), blank=True)
    address = models.CharField(_("Address"), max_length=512, blank=True)
    postal_code = models.CharField(_("Postal Code"), max_length=512, blank=True)
    city = models.CharField(_("City"), max_length=512, blank=True)
    country = models.CharField(
        _("Country"), max_length=512, default=settings.DEFAULT_COUNTRY
    )
    # when True, address_label is (re)generated from the address fields —
    # presumably by a form/signal elsewhere; TODO confirm where
    is_address_label_generated = models.BooleanField(
        _("auto-generate address label"), default=True
    )
    address_label = models.TextField(_("Address Label"), blank=True)
    notes = models.TextField(_("Notes"), blank=True)

    class Meta:
        verbose_name = _("Contact")
        verbose_name_plural = _("Contacts")
        ordering = ["first_name", "last_name"]

    def __str__(self):
        return "{} {}".format(self.first_name, self.last_name)
class ProjectContact(models.Model):
    """Through-model linking a Contact to a Project with a role (association)
    and a per-project billable flag."""
    # closed set of roles a contact can play on a project
    CUSTOMER = "customer"
    CONTRACTOR = "contractor"
    SUPPLIER = "supplier"
    ARCHITECT = "architect"
    OTHER = "other"
    ASSOCIATION_TYPE = (
        (CUSTOMER, _("Customer")),
        (CONTRACTOR, _("Contractor")),
        (SUPPLIER, _("Supplier")),
        (ARCHITECT, _("Architect")),
        (OTHER, _("Other")),
    )
    association = models.CharField(
        _("Association"), max_length=128, choices=ASSOCIATION_TYPE, default=CUSTOMER
    )
    is_billable = models.BooleanField(_("Is Billable?"), default=False)
    project = models.ForeignKey(
        "project.Project", related_name="project_contacts", on_delete=models.CASCADE
    )
    contact = models.ForeignKey(
        Contact, related_name="project_contacts", on_delete=models.CASCADE
    )
    notes = models.TextField(_("Notes"), blank=True)

    class Meta:
        verbose_name = _("Project Contact")
        verbose_name_plural = _("Project Contacts")
        ordering = ["association", "id"]

    def save(self, **kwargs):
        # Enforce "at most one billable contact per project": clear the flag on
        # every sibling row first, then persist this row with its flag intact.
        if self.is_billable:
            self.project.project_contacts.update(is_billable=False)
        return super(ProjectContact, self).save(**kwargs)
|
#Contains machine learning pipeline methods
#Based on Chiang et. al, 2014
import numpy as np
import cPickle
from scipy.linalg import norm
from sklearn.linear_model import LogisticRegression
import random
#Split data into folds for cross-validation
#Input: edges in dataset [list of 2-tuples]
# Number of folds (k in k-fold cross validation) [int]
#Output: k disjoint sets of vertices whose
#union is the set of all edges (nonzero entries in matrix) [list of lists]
#Split data into folds for cross-validation
#Input: edges in dataset [list of 2-tuples]
#       Number of folds (k in k-fold cross validation) [int]
#Output: k disjoint sets of edges whose
#union is the set of all edges (nonzero entries in matrix) [list of lists]
def kfold_CV_split(data_points, num_folds=10):
    # NOTE: shuffles the caller's list in place
    random.shuffle(data_points) #shuffle data points into random order
    # bug fix: use floor division — plain `/` yields a float on Python 3,
    # which breaks the slice indices below (identical result on Python 2)
    fold_size = len(data_points)//num_folds
    folds = list() #data points (edges) in each fold
    for fold_index in range(num_folds): #append evenly sized folds of data
        if fold_index == num_folds - 1: #last fold--append remaining data
            folds.append(data_points[fold_size*fold_index:])
        else:
            folds.append(data_points[fold_size*fold_index:fold_size*(fold_index + 1)])
    return folds
#Join folds other than the one use for testing to construct training dataset
#Input: all folds [list of lists]
# Index of fold to leave out [int]
#Output: List of training points (all other folds) [list]
#Join folds other than the one used for testing to construct training dataset
#Input: all folds [list of lists]
#       Index of fold to leave out [int]
#Output: List of training points (all other folds) [list]
def join_folds(folds, fold_to_leave_out):
    training_data = []
    for fold_index, fold in enumerate(folds):
        if fold_index == fold_to_leave_out:
            continue  # held out for testing
        training_data += list(fold)  # copy by value
    return training_data
#Given test predictions and labels, evaluate metrics like accuracy
#Input: predictions [np array]
# labels [np array]
#Output: accuracy [float 0-1]
# false positive rate [float 0-1]
#Action: print diagnostics too
#Given test predictions and labels, evaluate metrics like accuracy
#Input: predictions [np array of +/-1]
#       labels [np array of +/-1]
#Output: accuracy [float 0-1]
#        false positive rate [float 0-1]
#Action: print diagnostics too
def evaluate(predictions, labels):
    #average prediction tells you if mostly one label predicted
    print("Predictions: avg %f" % np.mean(predictions))
    print("Labels: avg %f" % np.mean(labels))
    accuracy = np.mean(predictions == labels)
    print("Accuracy: %f" % accuracy)
    #false positives: prediction 1 but actual label -1
    #(was `predictions == labels + 2`, which encodes the same test for +/-1
    #labels but silently breaks for any other label encoding)
    num_false_positives = np.sum(np.logical_and(predictions == 1, labels == -1))
    #test predictions and labels both -1
    num_true_negatives = np.sum(np.logical_and(predictions == -1, labels == -1))
    #bug fix: the old try/except ZeroDivisionError never fired with numpy scalar
    #denominators (numpy emits a warning and returns nan instead of raising),
    #and the Python-2 print statement inside it was a Py3 syntax error
    false_positive_rate = 0
    denominator = int(num_false_positives) + int(num_true_negatives)
    if denominator:
        false_positive_rate = float(num_false_positives) / denominator
    else:
        print("OK...so no false positives and no true negatives? hmmm...")
    print("False positive rate: %f" % false_positive_rate)
    return accuracy, false_positive_rate
#get unique edges in adjacency matrix
#Input: adjacency matrix [sparse csr matrix]
#Output: list of unique edges [list of 2-tuples of ints]
#get unique edges in adjacency matrix
#Input: adjacency matrix [sparse csr matrix]
#Output: list of unique edges [list of 2-tuples of ints]
def get_unique_edges(adj_matrix):
    rows, cols = adj_matrix.nonzero()
    unique_edges = set()
    for r, c in zip(rows, cols):
        # keep only one orientation of each undirected edge
        if (r, c) not in unique_edges and (c, r) not in unique_edges:
            unique_edges.add((r, c))
    return list(unique_edges)
|
import cupy as cp
def to_periodogram(signal):
    """
    Returns periodogram of signal for finding frequencies that have high energy.
    :param signal: signal (time domain)
    :type signal: cudf.Series
    :return: CuPy array representing periodogram
    :rtype: cupy.ndarray
    """
    # cudf Series -> CuPy array via the DLPack bridge
    values = cp.fromDlpack(signal.to_dlpack())
    # standardize (zero mean, unit variance)
    standardized = (values - cp.mean(values)) / cp.std(values)
    # FFT, then squared magnitude scaled by 1/N gives the periodogram
    spectrum = cp.fft.fft(standardized)
    return (1 / len(signal)) * (cp.absolute(spectrum) ** 2)
def filter_periodogram(prdg, p_value):
    """
    Select important frequencies by filtering periodogram by p-value. Filtered out frequencies are set to zero.
    :param prdg: periodogram to be filtered
    :type prdg: cupy.ndarray
    :param p_value: significance threshold for keeping a frequency
    :type p_value: float
    :return: CuPy array representing the filtered periodogram
    :rtype: cupy.ndarray
    """
    filtered_prdg = cp.copy(prdg)
    # bug fix: the threshold previously hard-coded 0.001, silently ignoring the
    # `p_value` argument; use the parameter as documented
    threshold = cp.mean(filtered_prdg) * (-1) * cp.log(p_value)
    filtered_prdg[filtered_prdg < threshold] = 0
    return filtered_prdg
def to_time_domain(prdg):
    """
    Convert the signal back to time domain.
    :param prdg: periodogram (frequency domain)
    :type prdg: cupy.ndarray
    :return: CuPy array representing reconstructed signal
    :rtype: cupy.ndarray
    """
    # inverse FFT, magnitude only (yields the autocorrelation-like signal)
    return cp.abs(cp.fft.ifft(prdg))
|
from django.contrib import admin
from commit_month.models import CommitMonth
# Expose CommitMonth in the Django admin with the default ModelAdmin.
admin.site.register(CommitMonth)
|
"""
Lasso
"""
from blackbox_selectinf.usecase.Lasso import LassoClass
from blackbox_selectinf.learning.learning import learn_select_prob, get_weight, get_CI
import numpy as np
import argparse
import pickle
from scipy.stats import norm
import matplotlib.pyplot as plt
import torch
from selectinf.distributions.discrete_family import discrete_family
# Command-line configuration for the Lasso selective-inference experiment.
parser = argparse.ArgumentParser(description='Lasso with nonparametric bootstrap')
parser.add_argument('--data_type', type=str, default='linear')  # 'linear' or 'binary'
parser.add_argument('--basis_type', type=str, default='simple')
parser.add_argument('--idx', type=int, default=0)  # first simulation seed
parser.add_argument('--lbd', type=float, default=4)  # lasso penalty
parser.add_argument('--indep', action='store_true', default=False)
parser.add_argument('--n', type=int, default=30)  # sample size
parser.add_argument('--p', type=int, default=50)  # number of features
parser.add_argument('--n_b', type=int, default=30)  # bootstrap sample size
parser.add_argument('--nrep', type=int, default=1)  # number of repetitions
parser.add_argument('--max_it', type=int, default=1)  # max learning recursions
parser.add_argument('--savemodel', action='store_true', default=False)
parser.add_argument('--modelname', type=str, default='model_')
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--ntrain', type=int, default=50000)  # training-set size per recursion
parser.add_argument('--logname', type=str, default='log')
parser.add_argument('--loadmodel', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--thre', type=float, default=0.99)
parser.add_argument('--consec_epochs', type=int, default=5)
parser.add_argument('--nonnull', action='store_true', default=False)  # use nonzero signal
args = parser.parse_args()
def main():
    """Run `args.nrep` Lasso selective-inference simulations.

    For each seed: generate data, fit the Lasso, then compute and log four
    confidence intervals per selected coordinate — Lee et al., the learned
    (neural-net) interval, the exact ("true") interval from the KKT
    probabilities, and the naive normal interval. Each run's log dict is
    pickled to '{logname}_n_{n}_p_{p}_nb_{n_b}_{seed}.pickle'.
    """
    n = args.n
    n_b = args.n_b
    ntrain = args.ntrain
    p = args.p
    beta = np.zeros(p)
    if args.nonnull:
        beta[:int(p/4)] = 5 / np.sqrt(n)
    data_type = args.data_type
    basis_type = args.basis_type
    lbd = args.lbd
    logs = [dict() for x in range(args.nrep)]
    for j in range(args.idx, args.idx + args.nrep):
        print("Starting simulation", j)
        logs[j - args.idx]['seed'] = j
        np.random.seed(j)
        if data_type == 'linear':
            X = np.random.randn(n, p)
            Y = X @ beta + np.random.randn(n)
        elif data_type == 'binary':
            X = np.random.randn(n, p)
            prob = 1 / (1 + np.exp(- X @ beta))
            Y = np.random.binomial(1, prob, n)
        else:
            raise AssertionError("invalid data_type")
        lassoClass = LassoClass(X, Y, lbd, data_type, basis_type)
        num_select = lassoClass.num_select
        beta_E = beta[lassoClass.E]
        E = lassoClass.E
        print("select:", np.sum(E), E)
        # Lee et al. baseline interval
        interval_lee = lassoClass.interval_lee()
        print('lee et al:', interval_lee)
        logs[j - args.idx]['interval_lee'] = interval_lee
        logs[j - args.idx]['covered_lee'] = []
        for i in range(num_select):
            if interval_lee[i, 0] <= beta_E[i] <= interval_lee[i, 1]:
                logs[j - args.idx]['covered_lee'].append(1)
            else:
                logs[j - args.idx]['covered_lee'].append(0)
        # train the selection-probability network
        Z_data_np = lassoClass.basis(X, Y, basis_type)
        Z_data = torch.tensor(Z_data_np, dtype=torch.float)
        training_data = lassoClass.gen_train_data(ntrain=ntrain, n_b=n_b, remove_D0=args.indep, perturb=False)
        Z_train = training_data['Z_train']
        W_train = training_data['W_train']
        gamma = training_data['gamma']
        pos_ind = np.arange(0, len(W_train))[W_train == 1]
        print("fraction of positive data:", np.mean(W_train))
        logs[j - args.idx]['ones'] = np.mean(W_train)
        if args.indep:
            gamma_D0 = training_data['gamma_D0']
            Z_data = Z_data - gamma_D0 @ lassoClass.D_0
            Z_data = torch.tensor(Z_data, dtype=torch.float)
        net = None
        max_it = args.max_it
        for ii in range(max_it):
            print("recursion", ii)
            net, flag, pr_data = learn_select_prob(Z_train, W_train, Z_data=Z_data, net=net, thre=args.thre,
                                                   consec_epochs=args.consec_epochs, num_epochs=args.epochs,
                                                   batch_size=args.batch_size, verbose=args.verbose, print_every=100)
            if flag == 1:
                print("Succeeded learning!")
                break
            if ii == max_it - 1:
                break
            else:  # generate more data and retry
                print("generate more data")
                training_data = lassoClass.gen_train_data(ntrain=ntrain, n_b=n_b, remove_D0=args.indep, perturb=True)
                Z_train_new = training_data['Z_train']
                W_train_new = training_data['W_train']
                Z_train = np.concatenate([Z_train, Z_train_new])
                W_train = np.concatenate([W_train, W_train_new])
                print("fraction of positive data:", np.mean(W_train))
        logs[j - args.idx]['flag'] = flag
        logs[j - args.idx]['pr_data'] = pr_data
        theta_data = lassoClass.test_statistic(X, Y)
        N_0 = Z_data - gamma @ theta_data
        target_var = np.diag(lassoClass.Sigma1)
        target_sd = np.sqrt(target_var)
        # per-coordinate grid of +/- 10 sd around the observed statistic
        gamma_list = np.linspace(-10 * target_sd, 10 * target_sd, 101)
        interval_nn = np.zeros([num_select, 2])
        logs[j - args.idx]['covered_nn'] = []
        weight_val = np.zeros([num_select, 101])
        for k in range(num_select):
            Gamma_k = lassoClass.Sigma1[:, k] / lassoClass.Sigma1[k, k]
            Gamma_k *= 0
            Gamma_k[k] = 1
            target_theta_k = theta_data[k] + gamma_list[:, k]
            # bug fix: `target_theta` was undefined here (NameError); the grid
            # defined on the previous line, `target_theta_k`, was intended
            weight_val[k, :] = get_weight(net, target_theta_k, N_0, gamma)
            interval = get_CI(target_theta_k, weight_val[k, :], target_var[k], theta_data[k])
            interval_nn[k, :] = interval
            if interval_nn[k, 0] <= beta_E[k] <= interval_nn[k, 1]:
                logs[j - args.idx]['covered_nn'].append(1)
            else:
                logs[j - args.idx]['covered_nn'].append(0)
        logs[j - args.idx]['interval_nn'] = interval_nn
        logs[j - args.idx]['width_nn'] = interval_nn[:, 1] - interval_nn[:, 0]
        ##################################################
        # check learning (disabled diagnostics)
        if False:
            count = 0
            nb = 50
            pval = [[] for x in range(num_select)]
            for ell in range(int(nb / np.mean(W_train))):
                idx_b = np.random.choice(n, n_b, replace=True)
                X_b = X[idx_b, :]
                Y_b = Y[idx_b]
                if not np.all(lassoClass.select(X_b, Y_b) == lassoClass.sign):
                    continue
                else:
                    count += 1
                d_M = lassoClass.test_statistic(X_b, Y_b)
                observed_target = d_M
                for k in range(num_select):
                    target_theta_k = d_M[k] + gamma_list[:, k]
                    target_theta_0 = np.tile(d_M, [101, 1])
                    target_theta_0[:, k] = target_theta_k
                    weight_val_0 = get_weight(net, target_theta_0, N_0, gamma)
                    weight_val_2 = weight_val_0 * norm.pdf((target_theta_0[:, k] - observed_target[k]) / target_sd[k])
                    exp_family = discrete_family(target_theta_0.reshape(-1), weight_val_2.reshape(-1))
                    hypothesis = theta_data[k]
                    pivot = exp_family.cdf((hypothesis - observed_target[k]) / target_var[k], x=observed_target[k])
                    pivot = 2 * min(pivot, 1 - pivot)
                    pval[k].append(pivot)
                if count == nb:
                    break
            pval = np.array(pval)
            # logs[j - args.idx]['pval'] = pval
            logs[j - args.idx]['false_rej'] = np.sum(pval <= 0.05, 1) / count
            # print(pval)
            # print("reject:", np.sum(pval <= 0.05, 1) / count)
        ##################################################
        # true interval from the exact KKT selection probabilities
        if True:
            D_0 = lassoClass.D_0
            prob_gamma = np.zeros([num_select, 101])
            interval_true = np.zeros([num_select, 2])
            logs[j - args.idx]['covered_true'] = []
            fig, ax = plt.subplots(ncols=num_select, figsize=(4 * num_select, 5))
            for i in range(num_select):
                e_i = np.zeros(num_select)
                e_i[i] = 1
                for jj in range(101):
                    if data_type == 'linear':
                        prob_gamma[i, jj] = lassoClass.linear_KKT(theta_data + gamma_list[jj, i] * e_i, D_0 * np.sqrt(n))
                    else:
                        prob_gamma[i, jj] = lassoClass.logistic_KKT(theta_data + gamma_list[jj, i] * e_i, D_0 * np.sqrt(n))
                D_M_gamma = theta_data + np.outer(gamma_list[:, i], e_i)
                interval_true[i, :] = get_CI(D_M_gamma[:, i], prob_gamma[i, :], target_var[i], theta_data[i])
                print(i, interval_true[i, :])
                if interval_true[i, 0] <= beta_E[i] <= interval_true[i, 1]:
                    logs[j - args.idx]['covered_true'].append(1)
                else:
                    logs[j - args.idx]['covered_true'].append(0)
                # plot learned vs true selection probability curves
                if num_select == 1:
                    plt.plot(D_M_gamma[:, i], weight_val[i, :], label='nn')
                    plt.plot(D_M_gamma[:, i], prob_gamma[i, :], label='truth', ls='--')
                    plt.legend()
                else:
                    ax[i].plot(D_M_gamma[:, i], weight_val[i, :], label='nn')
                    ax[i].plot(D_M_gamma[:, i], prob_gamma[i, :], label='truth', ls='--')
                    ax[i].legend()
            plt.savefig('{}_n_{}_p_{}_nb_{}_{}.png'.format(args.logname, n, p, n_b, j))
            print('interval_true', interval_true)
            logs[j - args.idx]['interval_true'] = interval_true
            logs[j - args.idx]['width_true'] = interval_true[:, 1] - interval_true[:, 0]
        logs[j - args.idx]['beta_true'] = beta
        logs[j - args.idx]['E'] = E
        logs[j - args.idx]['beta_E'] = beta_E
        logs[j - args.idx]['beta_hat'] = theta_data
        #########################################
        # naive normal interval (ignores selection)
        logs[j - args.idx]['covered_naive'] = []
        interval_naive = np.zeros([num_select, 2])
        for i in range(num_select):
            interval_naive[i, :] = tuple((norm.ppf(0.025) * target_sd[i], -norm.ppf(0.025) * target_sd[i]) + lassoClass.beta_ls[i])
            if interval_naive[i, 0] <= beta_E[i] <= interval_naive[i, 1]:
                logs[j - args.idx]['covered_naive'].append(1)
            else:
                logs[j - args.idx]['covered_naive'].append(0)
        logs[j - args.idx]['interval_naive'] = interval_naive
        logs[j - args.idx]['width_naive'] = interval_naive[:, 1] - interval_naive[:, 0]
        path = open('{}_n_{}_p_{}_nb_{}_{}.pickle'.format(args.logname, n, p, n_b, j), 'wb')
        pickle.dump(logs[j - args.idx], path)
        path.close()
        print(logs[j - args.idx])
# script entry point
if __name__ == "__main__":
    main()
|
# -*- coding:utf-8 -*-
from PIL import Image
import os
def cut(image_path, save_path, vx, vy):
    """Tile every image in `image_path` into overlapping vx-by-vy crops.

    Crops slide with a fixed 300-px stride; extra crops flush to the right and
    bottom edges cover the remainder. Tiles are saved to `save_path` as
    '<basename>_NNNNNN.png'.

    NOTE(review): the returned `count` accumulates each image's final `n`,
    which is one past the number of crops saved for that image — confirm
    whether the total is meant to be exact.
    """
    count = 0
    im_name = os.listdir(image_path)
    paths = []
    for name in im_name:
        path = os.path.join(image_path, name)
        paths += [path]
    for i, path in enumerate(paths):
        name = (path.split('/')[-1]).split('.')[0]
        name2 = save_path + name + '_'
        im = Image.open(path)
        w = im.size[0]
        h = im.size[1]
        # print(w, h)
        # stride (offset) between consecutive crops
        dx = 300
        dy = 300
        n = 1
        # start cropping from the top-left corner
        x1 = 0
        y1 = 0
        x2 = vx
        y2 = vy
        # sweep vertically (x runs along the image height here)
        while x2 <= h:
            while y2 <= w:
                name3 = name2 + '%06d' % (n) + ".png"
                # print(n, x1, y1, x2, y2)
                im2 = im.crop((y1, x1, y2, x2))
                im2.save(name3)
                y1 = y1 + dy
                y2 = y1 + vy
                n = n + 1
            if y2 >= w:
                # extra crop flush with the right edge to cover the remainder
                name3 = name2 + '%06d' % (n) + ".png"
                # print(n, x1, y1, x2, y2)
                y1 = w - vy
                y2 = w
                im2 = im.crop((y1, x1, y2, x2))
                im2.save(name3)
                # print n, x1, y1, x2, y2
                n = n + 1
            x1 = x1 + dx
            x2 = x1 + vx
            y1 = 0
            y2 = vy
        # final row flush with the bottom edge
        x1 = h - vx
        x2 = h
        y1 = 0
        y2 = vy
        while y2 <= w:
            name3 = name2 + '%06d' % (n) + ".png"
            # print(n, x1, y1, x2, y2)
            im2 = im.crop((y1, x1, y2, x2))
            im2.save(name3)
            y1 = y1 + dy
            y2 = y1 + vy
            n = n + 1
        if y2 >= w:
            # bottom-right corner crop
            name3 = name2 + '%06d' % (n) + ".png"
            # print(n, x1, y1, x2, y2)
            y1 = w - vy
            y2 = w
            im2 = im.crop((y1, x1, y2, x2))
            im2.save(name3)
            n = n + 1
        print(i + 1, '/', len(paths))
        count += n
    return count
if __name__ == "__main__":
    # 'F:/DL_Code/STANet-master/path-to-LEVIR-CD-test/label/'
    image_path = 'F:/DL_Code/data/data_2/SZTAKI/Szada/SZTAKI_train/label/'
    save_path = 'F:/DL_Code/data/data_2/SZTAKI/Szada/SZTAKI_160_train/label/'
    # crop window size: vx, vy
    # large
    res = cut(image_path, save_path, 320, 479)
    # medium
    # res = cut(id,120,120)
    # small
    # res = cut(id,80,80)
    print('all sub image:', res)
|
# Read an integer matrix from the user, echo it, then print the sum of each
# row and of each column.
matrix=[]
row=int(input('Enter size of row: '))
column=int(input('enter size of column: '))
for i in range(row):
    a=[]
    for j in range(column):
        element=int(input(f'enter elements of the matrix at postion row({i})column({j}): '))
        a.append(element)
        print()
    matrix.append(a)
print('Elements of matrix: ')
for i in range(row):
    for j in range(column):
        print(matrix[i][j],end=" ")
    print()
#calculates sum of each row of given matrix
for i in range(row):
    sumRow=0
    for j in range(column):
        sumRow=sumRow+matrix[i][j]
    print(f'sum of row {i+1} is: {sumRow}')
#calculates sum of each column of given matrix
#bug fix: the outer loop must run over columns and the inner over rows —
#the original iterated rows in both, which crashed (or summed the wrong
#entries) for any non-square matrix
for i in range(column):
    sumColumn=0
    for j in range(row):
        sumColumn=sumColumn+matrix[j][i]
    print(f'sum of column {i+1} is: {sumColumn}')
|
def buildModel_LSTM_64_16(inputshape, outputs, options, softmax=True):
    """Build and compile a small bidirectional recurrent classifier.

    Architecture: Bidirectional GRU(64, return_sequences) -> Dropout ->
    Bidirectional LSTM(16) -> Dropout -> Dense(outputs).

    NOTE(review): despite the ``LSTM_64_16`` name, the first recurrent
    layer is a GRU, not an LSTM -- confirm this is intentional.

    :param inputshape: per-sample input shape, e.g. (timesteps, features)
        -- TODO confirm against callers
    :param outputs: number of units in the final Dense layer
    :param options: object providing ``dropout`` and ``loss`` attributes
    :param softmax: apply softmax on the output layer when True, otherwise
        leave the final activation linear
    :returns: a compiled ``tf.keras.Sequential`` model
    """
    import tensorflow as tf
    # Metric object created up front so it can be passed by reference to
    # compile() below.
    tf_recall = tf.keras.metrics.Recall()
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Bidirectional(tf.keras.layers.GRU(
        64,
        name="BiDiIn",
        return_sequences=True,
        input_shape=inputshape), input_shape=inputshape))
    model.add(tf.keras.layers.Dropout(options.dropout))
    model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
        16,
        return_sequences=False,
        input_shape=inputshape)))
    model.add(tf.keras.layers.Dropout(options.dropout))
    model.add(tf.keras.layers.Dense(
        name="ExpressionClass",
        units=outputs,
        activation='softmax' if softmax else None))
    model.compile(
        optimizer='adam',
        loss=options.loss,
        metrics=['accuracy',
                 'AUC',
                 tf_recall])
    return model
|
import torch
import numpy as np
def camera_ringnet(cam):
    """Build a camera-parameter dict from a RingNet-style camera vector.

    ``cam[0]`` is used as the focal length (replicated for x and y) and
    ``cam[1:3]`` as the principal point; distortion, translation and
    rotation are all zeroed.
    """
    return {
        'c': cam[1:3],
        'k': np.zeros(5),
        'f': cam[0] * np.ones(2),
        't': np.zeros(3),
        'r': np.zeros(3),
    }
def camera_dynamic(h_w, translation):
    """Camera parameters for a frame of size ``h_w`` = (height, width).

    The focal length is scaled relative to a 256 px reference height; the
    rotation flips the camera 180 degrees about the x axis.
    """
    height, width = h_w
    focal = (height / 256) * 4754.97941935
    params = {
        'c': np.array([width / 2, height / 2]),
        'k': np.array([-0.19816071, 0.92822711, 0, 0, 0]),
        'f': np.array([focal, focal]),
    }
    params['t'] = translation
    params['r'] = np.array([np.pi, 0., 0.])
    return params
def camera_ringnetpp(h_w, trans, focal):
    """Camera parameters with an explicit focal length and translation.

    ``h_w`` is (height, width); distortion is zeroed and the rotation
    flips the camera 180 degrees about the y axis.
    """
    height, width = h_w
    return {
        'c': np.array([width / 2, height / 2]),
        'k': np.zeros(5),
        'f': focal * np.ones(2),
        't': trans,
        'r': np.array([0., np.pi, 0.]),
    }
if __name__ == '__main__':
    # config = get_config()
    # ##
    # config.model_name = 'optimize_flame'
    # config.resume_training = True
    # config.batch_size = 1
    # config.dataset_path = {
    #     'vgg2': 'dataset_loaders/vggface2_train_list_max_normal_100_ring_3_3_serial.npy'
    # }
    # config.ring_elements = 1
    #
    # # generate
    # # generate_rendering(config)
    # camera_test(config)
    # Place the camera 2.5 units along +z.
    cam_t = np.array([0., 0., 0.]) + np.array([0., 0., 2.5])
    camera_params = camera_dynamic((256, 256), cam_t)
    # Random batch of 3D point sets on the GPU (requires CUDA).
    points_3d = torch.from_numpy(np.random.uniform(-1, 1, (32, 4, 3)).astype('float32')).cuda()
    # NOTE(review): ``batch_perspective_proj`` is neither defined nor
    # imported in this file, so this line raises NameError at runtime --
    # confirm the intended import.
    points_2d = batch_perspective_proj(points_3d, camera_params)
    print(points_2d.shape)
|
# Play a built-in sound effect on the LEGO SPIKE hub's speaker.
from spike import App
app = App()
app.play_sound('Cat Meow 1')
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from subprocess import PIPE
import salt.modules.openscap as openscap
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OpenscapTestCase(TestCase):
    """Unit tests for ``salt.modules.openscap.xccdf``.

    All external collaborators (Caller, shutil.rmtree, tempfile.mkdtemp,
    Popen) are mocked, so no real oscap process or filesystem is touched.
    """
    # Fixed fake values shared by the assertions below.
    random_temp_dir = '/tmp/unique-name'
    policy_file = '/usr/share/openscap/policy-file-xccdf.xml'

    def setUp(self):
        """Patch out filesystem and salt collaborators for every test."""
        patchers = [
            patch('salt.modules.openscap.Caller', MagicMock()),
            patch('salt.modules.openscap.shutil.rmtree', Mock()),
            patch(
                'salt.modules.openscap.tempfile.mkdtemp',
                Mock(return_value=self.random_temp_dir)
            ),
        ]
        for patcher in patchers:
            self.apply_patch(patcher)

    def apply_patch(self, patcher):
        """Start *patcher* and register its teardown with addCleanup."""
        patcher.start()
        self.addCleanup(patcher.stop)

    @patch(
        'salt.modules.openscap.Popen',
        MagicMock(
            return_value=Mock(
                **{'returncode': 0, 'communicate.return_value': ('', '')}
            )
        )
    )
    def test_openscap_xccdf_eval_success(self):
        """A clean oscap run (rc 0) reports success and uploads results."""
        response = openscap.xccdf(
            'eval --profile Default {0}'.format(self.policy_file))
        self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
        expected_cmd = [
            'oscap',
            'xccdf',
            'eval',
            '--oval-results',
            '--results', 'results.xml',
            '--report', 'report.html',
            '--profile', 'Default',
            self.policy_file
        ]
        openscap.Popen.assert_called_once_with(
            expected_cmd,
            cwd=openscap.tempfile.mkdtemp.return_value,
            stderr=PIPE,
            stdout=PIPE)
        openscap.Caller().cmd.assert_called_once_with(
            'cp.push_dir', self.random_temp_dir)
        self.assertEqual(openscap.shutil.rmtree.call_count, 1)
        self.assertEqual(
            response,
            {
                'upload_dir': self.random_temp_dir,
                'error': '',
                'success': True,
                'returncode': 0
            }
        )

    @patch(
        'salt.modules.openscap.Popen',
        MagicMock(
            return_value=Mock(
                **{'returncode': 2, 'communicate.return_value': ('', 'some error')}
            )
        )
    )
    def test_openscap_xccdf_eval_success_with_failing_rules(self):
        """rc 2 (rules failed) still counts as success, with stderr kept."""
        response = openscap.xccdf(
            'eval --profile Default {0}'.format(self.policy_file))
        self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
        expected_cmd = [
            'oscap',
            'xccdf',
            'eval',
            '--oval-results',
            '--results', 'results.xml',
            '--report', 'report.html',
            '--profile', 'Default',
            self.policy_file
        ]
        openscap.Popen.assert_called_once_with(
            expected_cmd,
            cwd=openscap.tempfile.mkdtemp.return_value,
            stderr=PIPE,
            stdout=PIPE)
        openscap.Caller().cmd.assert_called_once_with(
            'cp.push_dir', self.random_temp_dir)
        self.assertEqual(openscap.shutil.rmtree.call_count, 1)
        self.assertEqual(
            response,
            {
                'upload_dir': self.random_temp_dir,
                'error': 'some error',
                'success': True,
                'returncode': 2
            }
        )

    def test_openscap_xccdf_eval_fail_no_profile(self):
        """A missing --profile argument is rejected before running oscap."""
        response = openscap.xccdf(
            'eval --param Default /unknown/param')
        self.assertEqual(
            response,
            {
                'error': 'argument --profile is required',
                'upload_dir': None,
                'success': False,
                'returncode': None
            }
        )

    @patch(
        'salt.modules.openscap.Popen',
        MagicMock(
            return_value=Mock(
                **{'returncode': 2, 'communicate.return_value': ('', 'some error')}
            )
        )
    )
    def test_openscap_xccdf_eval_success_ignore_unknown_params(self):
        """Unknown CLI params are dropped from the generated oscap command."""
        response = openscap.xccdf(
            'eval --profile Default --param Default /policy/file')
        self.assertEqual(
            response,
            {
                'upload_dir': self.random_temp_dir,
                'error': 'some error',
                'success': True,
                'returncode': 2
            }
        )
        expected_cmd = [
            'oscap',
            'xccdf',
            'eval',
            '--oval-results',
            '--results', 'results.xml',
            '--report', 'report.html',
            '--profile', 'Default',
            '/policy/file'
        ]
        openscap.Popen.assert_called_once_with(
            expected_cmd,
            cwd=openscap.tempfile.mkdtemp.return_value,
            stderr=PIPE,
            stdout=PIPE)

    @patch(
        'salt.modules.openscap.Popen',
        MagicMock(
            return_value=Mock(**{
                'returncode': 1,
                'communicate.return_value': ('', 'evaluation error')
            })
        )
    )
    def test_openscap_xccdf_eval_evaluation_error(self):
        """rc 1 (oscap itself errored) yields failure and no upload dir."""
        response = openscap.xccdf(
            'eval --profile Default {0}'.format(self.policy_file))
        self.assertEqual(
            response,
            {
                'upload_dir': None,
                'error': 'evaluation error',
                'success': False,
                'returncode': 1
            }
        )

    def test_openscap_xccdf_eval_fail_not_implemented_action(self):
        """Only the 'eval' action is supported; others are rejected."""
        response = openscap.xccdf('info {0}'.format(self.policy_file))
        self.assertEqual(
            response,
            {
                'upload_dir': None,
                'error': "argument action: invalid choice: 'info' (choose from 'eval')",
                'success': False,
                'returncode': None
            }
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import setuptools
# Execute version.py so ``__version__`` is defined here without importing
# the (possibly not-yet-installed) package itself.
PATH = 'src/specminers/version.py'
PATH = os.path.join(os.path.dirname(__file__), PATH)
with open(PATH, 'r') as fh:
    exec(fh.read())
# Only the version is passed dynamically; other metadata presumably lives
# in setup.cfg -- confirm.
setuptools.setup(version=__version__)  # noqa: F821 -- bound by exec above
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
from aws_shelltools import __version__
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='aws-shelltools',
version=__version__,
description='Yet another set of scripts and shell functions for managing AWS profiles and cross account access.',
long_description=long_description,
url='https://github.com/ashleygould/aws-shelltools',
author='Ashley Gould',
author_email='agould@ucop.edu',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='aws awscli session',
packages=find_packages(exclude=['scratch', 'notes' ]),
install_requires=[
'awscli',
'boto3',
'botocore',
'docopt',
'aws-orgs',
],
package_data={
'aws_shelltools': ['shell_functions/*'],
},
entry_points={
'console_scripts': [
'awstoken=aws_shelltools.awstoken:main',
'awsassumerole=aws_shelltools.awsassumerole:main',
'aws-make-config=aws_shelltools.awsconfig:main',
'aws-shelltools-setup=aws_shelltools.shelltools_setup:main',
],
},
)
|
supported_currencies = ('USD', 'EUR', 'ILS')
|
# coding=utf-8
import logging
import six
from fakturownia import core
log = logging.getLogger(__name__)
class BaseEndpoint(object):
    """Thin CRUD entry point bound to an API client.

    Subclasses supply ``model``, the resource class this endpoint
    instantiates, creates and fetches.
    """

    def __init__(self, api_client):
        self.api_client = api_client

    def create(self, **kwargs):
        """Instantiate the model with ``kwargs`` and POST it to the API."""
        instance = self.model(self.api_client, **kwargs)
        return instance.post()

    def __getitem__(self, key):
        """Fetch a single resource by id, e.g. ``endpoint[42]``."""
        instance = self.model(api_client=self.api_client, id=key)
        return instance.get()
class BaseModel(object):
    """Base class for API resources backed by a plain ``_data`` dict.

    Attribute reads/writes are proxied to ``_data`` (see ``__getattr__`` /
    ``__setattr__``); subclasses are expected to provide ``_endpoint``,
    ``_data_wrap`` and ``_readonly`` -- TODO confirm against subclasses.
    """

    def __init__(self, api_client, **kwargs):
        # Accept either a ready client or a string (presumably an API
        # token/URL -- it is handed to core.ApiClient as-is).
        if isinstance(api_client, six.string_types):
            api_client = core.ApiClient(api_client)
        # object.__setattr__ bypasses the data-proxying __setattr__ below,
        # keeping these two attributes real instance attributes.
        super(BaseModel, self).__setattr__('_api_client', kwargs.pop('api_client', api_client))
        super(BaseModel, self).__setattr__('_data', {'id': None})
        # Remaining kwargs become entries in _data via __setattr__.
        for k, v in kwargs.items():
            setattr(self, k, v)

    def get_raw_data(self):
        """Return the underlying data dict (not a copy)."""
        return self._data

    def update_data(self, **kwargs):
        """Merge ``kwargs`` into the local data dict without an API call."""
        self._update_data(kwargs)

    def post(self, **kwargs):
        """Create the resource server-side and absorb the response."""
        data = self.prepare_post_data(**kwargs)
        response = self._api_client.post(self.get_endpoint(), data=data)
        self._update_data(response)
        return self

    def put(self, **kwargs):
        """Update the resource server-side and absorb the response."""
        data = self.prepare_post_data(**kwargs)
        response = self._api_client.put(self.get_endpoint(), data=data)
        self._update_data(response)
        return self

    def prepare_post_data(self, **kwargs):
        """Build the wrapped payload, preserving id and dropping readonly keys."""
        data = kwargs or self._data.copy()
        if 'id' in self._data and 'id' not in data:
            data['id'] = self._data['id']
        for key in self._readonly:
            if key in data:
                log.warning("Removing readonly key from payload data: %s: %s", key, data.pop(key))
        return {self._data_wrap: data}

    def get(self):
        """Fetch the full resource from the API into ``_data``."""
        response = self._api_client.get(self.get_endpoint())
        self._update_data(response)
        return self

    def delete(self):
        """Delete the resource server-side; local data is left untouched."""
        self._api_client.delete(self.get_endpoint())
        return self

    def get_endpoint(self, extra=''):
        """Return the item URL when an id is set, else the collection URL."""
        if self.id:
            return '{}/{}{}.json'.format(self._endpoint, self.id, extra)
        return self._endpoint + ".json"

    def _update_data(self, data):
        # Guard against a server response that refers to a different object.
        new_id = data.get('id', None)
        if self.id and new_id:
            assert self.id == new_id, 'Existing id does not match update data {}!={}'.format(self.id, new_id)
        self._data.update(data)

    def __setattr__(self, name, value):
        # Real attributes (instance or class level) behave normally;
        # everything else is stored in the _data dict.
        if name in self.__dict__.keys() or name in self.__class__.__dict__.keys():
            return super(BaseModel, self).__setattr__(name, value)
        self._data[name] = value

    def __getattr__(self, key):
        # Only called for names not found normally: look them up in _data.
        if key not in self._data:
            msg = '{} instance does not have {} key in data dictionary, you may have to call get to fetch full data dict.'
            raise AttributeError(msg.format(self.__class__.__name__, key))
        return self._data[key]

    # Allow dict-style access: model['invoice_number'].
    __getitem__ = __getattr__
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import Comment
from .serializers import CommentSerializer
from authors.apps.articles.models import Article
class CommentsAPIView(APIView):
    """
    Create a view to post, update, delete and get all comments
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = CommentSerializer

    def get(self, request, **kwargs):
        """Return every comment on the article identified by ``slug``.

        Responds 404 when the article does not exist or has no comments,
        otherwise 200 with the comments and their count.
        """
        slug = self.kwargs['slug']
        article = Article.objects.filter(slug=slug).first()
        if not article:
            return Response(
                {
                    "comment": {
                        "error": "Article not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        queryset = Comment.objects.filter(article=article.id)
        if not queryset:
            return Response(
                {
                    "comment": {
                        "error": "You dont have any comments yet"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        # NOTE(review): passing stored objects via ``data=`` and calling
        # is_valid() is unusual; kept to preserve the existing payload shape.
        serializer = self.serializer_class(data=queryset, many=True)
        serializer.is_valid()
        # BUG FIX: an unreachable second ``return Response(...)`` that
        # followed this return has been removed.
        return Response({
            "comment": serializer.data,
            "commentsCount": queryset.count()
        },
            status=status.HTTP_200_OK
        )

    def post(self, request, **kwargs):
        """Create a comment on the article identified by ``slug``.

        NOTE(review): ``Article.objects.get`` raises DoesNotExist (HTTP 500)
        for an unknown slug, unlike get() above which returns 404 -- confirm.
        """
        slug = self.kwargs['slug']
        article = Article.objects.get(slug=slug)
        comment = request.data.get('comment', {})
        serializer = self.serializer_class(data=comment, partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save(author=request.user, article=article)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class OneCommentAPIView(generics.RetrieveUpdateDestroyAPIView):
    """
    Get, update, and delete a given comment
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = CommentSerializer

    def get(self, request, id, *args, **kwargs):
        """
        Get a comment by id
        """
        slug = self.kwargs['slug']
        article = Article.objects.filter(slug=slug).first()
        if not article:
            return Response(
                {
                    "comment": {
                        "error": "Article not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        queryset = Comment.objects.filter(id=id, article=article.id)
        if not queryset:
            return Response(
                {
                    "comment": {
                        "error": "Comment not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        serializer = self.serializer_class(queryset, many=True)
        # The queryset holds at most one comment; return it unwrapped.
        return Response({
            "comment": serializer.data[0]
        },
            status=status.HTTP_200_OK
        )

    def destroy(self, request, id, *args, **kwargs):
        """
        Delete a comment by id
        """
        slug = self.kwargs['slug']
        article = Article.objects.filter(slug=slug).first()
        if not article:
            return Response(
                {
                    "comment": {
                        "message": "Article not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        queryset = Comment.objects.filter(id=id, article=article.id)
        if not queryset:
            return Response(
                {
                    "comment": {
                        "message": "Comment not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        queryset[0].delete()
        return Response(
            {
                "comment": {
                    "message": "Comment deleted successfully"
                }
            },
            status=status.HTTP_200_OK
        )

    def update(self, request, id, *args, **kwargs):
        """
        Update a comment's body by id
        """
        slug = self.kwargs['slug']
        article = Article.objects.filter(slug=slug).first()
        if not article:
            return Response(
                {
                    "comment": {
                        "message": "Article not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        comment = Comment.objects.filter(id=id, article=article.id).first()
        if not comment:
            return Response(
                {
                    "comment": {
                        "message": "Comment not found"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        if request.user.pk != comment.author.id:
            # Only the author may edit. NOTE(review): this responds 404
            # rather than 403 -- confirm that is intentional.
            return Response(
                {
                    "comment": {
                        "message": "You are not authorized to comment"
                    }
                },
                status=status.HTTP_404_NOT_FOUND
            )
        comment_data = request.data.get('comment', {})
        # NOTE(review): a payload without 'body' raises KeyError (HTTP 500).
        comment.body = comment_data['body']
        comment.save(update_fields=['body'])
        return Response(
            {
                "comment": {
                    "message": "Comment Updated successfully"
                }
            },
            status=status.HTTP_200_OK
        )
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import functools
import uuid
from oslo_log import log as logging
from oslo_utils import strutils
import zaqar.common.api.errors as api_errors
import zaqar.common.api.response as response
from zaqar.i18n import _
LOG = logging.getLogger(__name__)
def sanitize(document, spec=None, doctype=dict):
    """Check *document* against *doctype* and optionally filter it by *spec*.

    :param document: the JSON-decoded payload to validate
    :param spec: iterable of ``(field_name, value_type, default_value)``
        tuples; ``None`` skips validation and returns the document as-is
    :param doctype: expected container type -- ``dict`` (JSONObject) or
        ``list`` (JSONArray)
    :raises DocumentTypeNotSupported: if *document* is not a *doctype*
    :raises TypeError: if *doctype* is neither ``dict`` nor ``list``
    :returns: the document (or each element, for a list) reduced to the
        fields named in *spec*
    """
    if doctype is dict:
        if not isinstance(document, dict):
            raise api_errors.DocumentTypeNotSupported()
        if spec is None:
            return document
        return filter_fields(document, spec)
    elif doctype is list:
        if not isinstance(document, list):
            raise api_errors.DocumentTypeNotSupported()
        if spec is None:
            return document
        return [filter_fields(item, spec) for item in document]
    else:
        raise TypeError(_(u'Doctype must be either a JSONObject or JSONArray'))
def filter_fields(document, spec):
    """Validate and pull the typed fields listed in *spec* from *document*.

    :param document: dict-like object to filter
    :param spec: iterable of ``(field_name, value_type, default_value)``
        tuples; ``value_type`` may be ``'*'`` to accept any type
    :raises BadRequest: if a required field is missing or mistyped
    :returns: a new dict containing exactly the fields named in *spec*
    """
    return {
        field_name: get_checked_field(document, field_name, field_type, default)
        for field_name, field_type, default in spec
    }
def get_checked_field(document, name, value_type, default_value):
    """Look up ``document[name]`` with type checking and a default.

    :param document: dict-like object
    :param name: field name to read
    :param value_type: expected type, or ``'*'`` to accept anything
    :param default_value: value used when the field is absent; ``None``
        makes the field required
    :raises BadRequest: if the field is required but missing, or its
        value is not an instance of *value_type*
    :returns: the (possibly defaulted) value
    """
    try:
        value = document[name]
    except KeyError:
        if default_value is None:
            # Field is required -- surface a client error.
            description = _(u'Missing "{name}" field.').format(name=name)
            raise api_errors.BadRequest(description)
        value = default_value
    # PERF(kgriffs): We do our own little spec thing because it is way
    # faster than jsonschema.
    if value_type == '*' or isinstance(value, value_type):
        return value
    description = _(u'The value of the "{name}" field must be a {vtype}.')
    raise api_errors.BadRequest(
        description.format(name=name, vtype=value_type.__name__))
def get_client_uuid(req):
    """Read the Client-ID header from a request.

    :param req: Request object exposing ``_headers``
    :returns: the header parsed as a ``uuid.UUID`` when well-formed,
        otherwise the raw header string
    """
    client_id = req._headers.get('Client-ID')
    try:
        return uuid.UUID(client_id)
    except ValueError:
        # Not a valid UUID -- fall back to the raw string.
        return client_id
def get_headers(req):
    """Collect known control parameters from the request body.

    Only keys that are present and not None are copied; numeric values
    are coerced with ``int`` and boolean-ish strings are parsed with
    ``strutils.bool_from_string``.
    """
    # TODO(vkmc) We should add a control here to make sure
    # that the headers/request combination is possible
    # e.g. we cannot have messages_post with grace
    body = req._body
    kwargs = {}
    if body.get('marker') is not None:
        kwargs['marker'] = body.get('marker')
    for field in ('limit', 'ttl', 'grace'):
        if body.get(field) is not None:
            kwargs[field] = int(body.get(field))
    for field in ('detailed', 'echo', 'include_claimed', 'include_delayed'):
        if body.get(field) is not None:
            kwargs[field] = strutils.bool_from_string(body.get(field))
    return kwargs
def on_exception_sends_500(func):
    """Decorator turning any unhandled exception into a generic 500 response.

    The wrapped endpoint's result is returned untouched on success; on
    failure the exception is logged and a generic error Response is built.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as ex:
            LOG.exception(ex)
            # args[0] - Endpoints object, args[1] - Request object.
            return error_response(args[1], ex,
                                  {'status': 500},
                                  _("Unexpected error."))
    return wrapper
def error_response(req, exception, headers=None, error=None):
    """Build a Response describing *exception* for the client."""
    payload = {'exception': str(exception), 'error': error}
    return response.Response(req, payload, headers)
def format_message(message, claim_id=None):
    """Project a stored message onto the public wire format.

    Copies only the id/ttl/age/body fields and injects *claim_id*
    (``None`` when the message is unclaimed).
    """
    formatted = {'id': message['id'], 'claim_id': claim_id}
    for field in ('ttl', 'age', 'body'):
        formatted[field] = message[field]
    return formatted
|
"""
Services utils.
"""
from . import (
api,
jwt,
)
|
from ceph_deploy.hosts import util
from mock import Mock
class TestInstallYumPriorities(object):
    """Verify the yum priorities package name chosen per distro/release."""

    def setup(self):
        # Fresh mocks for every test. NOTE(review): ``patch_path`` is
        # assigned but never used in these tests -- consider removing it.
        self.distro = Mock()
        self.patch_path = 'ceph_deploy.hosts.centos.install.pkg_managers.yum'
        self.yum = Mock()

    def test_centos_six(self):
        """CentOS 6 gets the newer plugin-style package name."""
        self.distro.release = ('6', '0')
        self.distro.normalized_name = 'centos'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        assert self.yum.call_args[0][1] == 'yum-plugin-priorities'

    def test_centos_five(self):
        """CentOS 5 gets the legacy package name."""
        self.distro.release = ('5', '0')
        self.distro.normalized_name = 'centos'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        assert self.yum.call_args[0][1] == 'yum-priorities'

    def test_fedora(self):
        """Fedora gets the plugin-style package name."""
        self.distro.release = ('20', '0')
        self.distro.normalized_name = 'fedora'
        util.install_yum_priorities(self.distro, _yum=self.yum)
        assert self.yum.call_args[0][1] == 'yum-plugin-priorities'
|
# Generated by Django 3.1.2 on 2020-11-04 10:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the opcalendar app.

    NOTE: auto-generated by Django -- do not hand-edit the operations;
    create a follow-up migration for schema changes instead.
    """

    initial = True
    dependencies = [
        ("eveonline", "0012_index_additions"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Unmanaged placeholder model that only carries the app-wide
        # permissions ("managed": False -- no table is managed by Django).
        migrations.CreateModel(
            name="General",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
            ],
            options={
                "permissions": (
                    ("basic_access", "Can access this app"),
                    ("view_public", "Can see public events"),
                    ("view_member", "Can see member events"),
                    ("create_event", "Can create and edit events"),
                    ("manage_event", "Can delete and manage signups"),
                ),
                "managed": False,
                "default_permissions": (),
            },
        ),
        # Event category with a short ticker and a fixed colour choice set.
        migrations.CreateModel(
            name="EventCategory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=150)),
                ("ticker", models.CharField(max_length=10)),
                (
                    "color",
                    models.CharField(
                        choices=[
                            ("green", "Green"),
                            ("red", "Red"),
                            ("orange", "Orange"),
                            ("blue", "Blue"),
                            ("grey", "Grey"),
                            ("yellow", "Yellow"),
                        ],
                        default="green",
                        max_length=6,
                    ),
                ),
            ],
            options={
                "verbose_name": "Category",
                "verbose_name_plural": "Categories",
            },
        ),
        # Outbound webhook target (e.g. for notifications).
        migrations.CreateModel(
            name="WebHook",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=150)),
                ("webhook_url", models.CharField(max_length=500)),
                ("enabled", models.BooleanField()),
            ],
            options={
                "verbose_name": "Webhook",
                "verbose_name_plural": "Webhooks",
            },
        ),
        # Links a fleet signal to the webhook it should fire on.
        migrations.CreateModel(
            name="EventSignal",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("ignore_past_fleets", models.BooleanField(default=True)),
                (
                    "webhook",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="opcalendar.webhook",
                    ),
                ),
            ],
            options={
                "verbose_name": "Fleet Signal",
                "verbose_name_plural": "Fleet Signals",
            },
        ),
        # The calendar event itself, tied to a category, character and user.
        migrations.CreateModel(
            name="Event",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=200)),
                ("doctrine", models.CharField(default="", max_length=254)),
                ("formup_system", models.CharField(default="", max_length=254)),
                ("description", models.TextField()),
                ("start_time", models.DateTimeField()),
                ("end_time", models.DateTimeField()),
                ("fc", models.CharField(default="", max_length=254)),
                (
                    "visibility",
                    models.CharField(
                        choices=[
                            ("Public", "Public access"),
                            ("Member", "Members only access"),
                        ],
                        db_index=True,
                        default="Public",
                        max_length=7,
                    ),
                ),
                (
                    "created_date",
                    models.DateTimeField(default=django.utils.timezone.now),
                ),
                (
                    "eve_character",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="eveonline.evecharacter",
                    ),
                ),
                (
                    "operation_type",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="opcalendar.eventcategory",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Signup join table; a user may sign up to an event at most once.
        migrations.CreateModel(
            name="EventMember",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "event",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="opcalendar.event",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "unique_together": {("event", "user")},
            },
        ),
    ]
|
#!/usr/bin/python
from setuptools import setup, find_packages
# Single-source the version from the package itself.
from cloudfiles.consts import __version__

setup(name='python-cloudfiles',
      version=__version__,
      description='CloudFiles client library for Python',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python',
          'Topic :: Internet :: WWW/HTTP',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System :: Distributed Computing',
          'Topic :: Utilities',
      ],
      author='Rackspace',
      author_email='please_report_on_github@rackspace.com',
      url='https://github.com/rackspace/python-cloudfiles',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[],
      setup_requires=[],
      # Tests are discovered and run via nose.
      test_suite='nose.collector',
      namespace_packages=[],
      )
|
#!/usr/bin/env python
# coding: utf-8
# ## Configurations for Colab
# In[1]:
import sys

# True when running inside Google Colab (the google.colab module is
# importable there).
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
    # Install a virtual framebuffer plus rendering deps so gym can render
    # without a physical display, then start the virtual display.
    get_ipython().system('apt-get install -y xvfb python-opengl > /dev/null 2>&1')
    get_ipython().system('pip install gym pyvirtualdisplay > /dev/null 2>&1')
    get_ipython().system('pip install JSAnimation==0.1')
    get_ipython().system('pip install pyglet==1.3.2')
    from pyvirtualdisplay import Display
    # Start virtual display
    dis = Display(visible=0, size=(400, 400))
    dis.start()
# # 03. Prioritized Experience Replay (PER)
#
# [T. Schaul et al., "Prioritized Experience Replay." arXiv preprint arXiv:1511.05952, 2015.](https://arxiv.org/pdf/1511.05952.pdf)
#
# Using a replay memory leads to design choices at two levels: which experiences to store, and which experiences to replay (and how to do so). This paper addresses only the latter: making the most effective use of the replay memory for learning, assuming that its contents are outside of our control.
#
# The central component of prioritized replay is the criterion by which the importance of each transition is measured. A reasonable approach is to use the magnitude of a transition’s TD error $\delta$, which indicates how ‘surprising’
# or unexpected the transition is. This algorithm stores the last encountered TD error along with each transition in the replay memory. The transition with the largest absolute TD error is replayed from the memory. A Q-learning update
# is applied to this transition, which updates the weights in proportion to the TD error. One thing to note is that new transitions arrive without a known TD-error, so it puts them at maximal priority in order to guarantee that all experience is seen at least once. (see *store* method)
#
# We might use 2 ideas to deal with TD-error: 1. greedy TD-error prioritization, 2. stochastic prioritization. However, greedy TD-error prioritization has a severe drawback. Greedy prioritization focuses on a small subset of the experience: errors shrink slowly, especially when using function approximation, meaning that the initially high error transitions get replayed frequently. This lack of diversity that makes the system prone to over-fitting. To overcome this issue, we will use a stochastic sampling method that interpolates between pure greedy prioritization and uniform random sampling.
#
# $$
# P(i) = \frac{p_i^{\alpha}}{\sum_k p_k^{\alpha}}
# $$
#
# where $p_i > 0$ is the priority of transition $i$. The exponent $\alpha$ determines how much prioritization is used, with $\alpha = 0$ corresponding to the uniform case. In practice, we use an additional term $\epsilon$ in order to guarantee that all transitions can be sampled: $p_i = |\delta_i| + \epsilon$, where $\epsilon$ is a small positive constant.
#
# One more thing. Let's recall one of the main ideas of DQN. To remove correlation of observations, it uses uniformly random sampling from the replay buffer. Prioritized replay introduces bias because it doesn't sample experiences uniformly at random due to the sampling proportion corresponding to TD-error. We can correct this bias by using importance-sampling (IS) weights
#
# $$
# w_i = \big( \frac{1}{N} \cdot \frac{1}{P(i)} \big)^\beta
# $$
#
# that fully compensates for the non-uniform probabilities $P(i)$ if $\beta = 1$. These weights can be folded into the Q-learning update by using $w_i\delta_i$ instead of $\delta_i$. In typical reinforcement learning scenarios, the unbiased nature of the updates is most important near convergence at the end of training, We therefore exploit the flexibility of annealing the amount of importance-sampling correction over time, by defining a schedule on the exponent $\beta$ that reaches 1 only at the end of learning.
# In[2]:
import os
import random
from typing import Dict, List, Tuple
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from IPython.display import clear_output
# Fetch the segment tree helper module when running on Colab and it is
# not already present locally.
if IN_COLAB and not os.path.exists("segment_tree.py"):
    # download segment tree module
    get_ipython().system('wget https://raw.githubusercontent.com/curt-park/rainbow-is-all-you-need/master/segment_tree.py')
from segment_tree import MinSegmentTree, SumSegmentTree
# ## Replay buffer
#
# Please see *01.dqn.ipynb* for detailed description.
# In[3]:
class ReplayBuffer:
    """A fixed-capacity numpy ring buffer of (s, a, r, s', done) transitions.

    Oldest entries are overwritten once ``max_size`` transitions have
    been stored.
    """

    def __init__(self, obs_dim: int, size: int, batch_size: int = 32):
        obs_shape = [size, obs_dim]
        self.obs_buf = np.zeros(obs_shape, dtype=np.float32)
        self.next_obs_buf = np.zeros(obs_shape, dtype=np.float32)
        self.acts_buf = np.zeros([size], dtype=np.float32)
        self.rews_buf = np.zeros([size], dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.max_size = size
        self.batch_size = batch_size
        self.ptr = 0   # next write position
        self.size = 0  # number of valid entries

    def store(
        self,
        obs: np.ndarray,
        act: np.ndarray,
        rew: float,
        next_obs: np.ndarray,
        done: bool,
    ):
        """Write one transition at the current pointer, wrapping around."""
        idx = self.ptr
        self.obs_buf[idx] = obs
        self.next_obs_buf[idx] = next_obs
        self.acts_buf[idx] = act
        self.rews_buf[idx] = rew
        self.done_buf[idx] = done
        self.ptr = (idx + 1) % self.max_size
        if self.size < self.max_size:
            self.size += 1

    def sample_batch(self) -> Dict[str, np.ndarray]:
        """Uniformly sample ``batch_size`` transitions without replacement."""
        idxs = np.random.choice(self.size, size=self.batch_size, replace=False)
        return {
            'obs': self.obs_buf[idxs],
            'next_obs': self.next_obs_buf[idxs],
            'acts': self.acts_buf[idxs],
            'rews': self.rews_buf[idxs],
            'done': self.done_buf[idxs],
        }

    def __len__(self) -> int:
        return self.size
# ## Prioritized replay Buffer
#
# The key concept of PER's implementation is *Segment Tree*. It efficiently stores and samples transitions while managing the priorities of them. We recommend you understand how it works before you move on. Here are references for you:
#
# - In Korean: https://mrsyee.github.io/rl/2019/01/25/PER-sumtree/
# - In English: https://www.geeksforgeeks.org/segment-tree-set-1-sum-of-given-range/
# In[4]:
class PrioritizedReplayBuffer(ReplayBuffer):
    """Prioritized Replay buffer.
    Attributes:
        max_priority (float): max priority
        tree_ptr (int): next index of tree
        alpha (float): alpha parameter for prioritized replay buffer
        sum_tree (SumSegmentTree): sum tree for prior
        min_tree (MinSegmentTree): min tree for min prior to get max weight
    """
    def __init__(
        self,
        obs_dim: int,
        size: int,
        batch_size: int = 32,
        alpha: float = 0.6
    ):
        """Initialization."""
        assert alpha >= 0
        super(PrioritizedReplayBuffer, self).__init__(obs_dim, size, batch_size)
        # New transitions are stored with the running max priority so they
        # are guaranteed to be sampled at least once.
        self.max_priority, self.tree_ptr = 1.0, 0
        self.alpha = alpha
        # capacity must be positive and a power of 2.
        tree_capacity = 1
        while tree_capacity < self.max_size:
            tree_capacity *= 2
        self.sum_tree = SumSegmentTree(tree_capacity)
        self.min_tree = MinSegmentTree(tree_capacity)
    def store(
        self,
        obs: np.ndarray,
        act: int,
        rew: float,
        next_obs: np.ndarray,
        done: bool
    ):
        """Store experience and priority."""
        super().store(obs, act, rew, next_obs, done)
        # Priorities are stored already raised to alpha (sampling uses
        # p_i^alpha proportions).
        self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha
        self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha
        self.tree_ptr = (self.tree_ptr + 1) % self.max_size
    def sample_batch(self, beta: float = 0.4) -> Dict[str, np.ndarray]:
        """Sample a batch of experiences.

        :param beta: importance-sampling exponent (0 = no bias correction,
            1 = full correction).
        """
        assert len(self) >= self.batch_size
        assert beta > 0
        indices = self._sample_proportional()
        obs = self.obs_buf[indices]
        next_obs = self.next_obs_buf[indices]
        acts = self.acts_buf[indices]
        rews = self.rews_buf[indices]
        done = self.done_buf[indices]
        # Importance-sampling weights correct the bias introduced by
        # non-uniform (prioritized) sampling.
        weights = np.array([self._calculate_weight(i, beta) for i in indices])
        return dict(
            obs=obs,
            next_obs=next_obs,
            acts=acts,
            rews=rews,
            done=done,
            weights=weights,
            indices=indices,
        )
    def update_priorities(self, indices: List[int], priorities: np.ndarray):
        """Update priorities of sampled transitions."""
        assert len(indices) == len(priorities)
        for idx, priority in zip(indices, priorities):
            assert priority > 0
            assert 0 <= idx < len(self)
            self.sum_tree[idx] = priority ** self.alpha
            self.min_tree[idx] = priority ** self.alpha
            # Keep the running max so future stores stay sampleable.
            self.max_priority = max(self.max_priority, priority)
    def _sample_proportional(self) -> List[int]:
        """Sample indices based on proportions."""
        indices = []
        p_total = self.sum_tree.sum(0, len(self) - 1)
        # Stratified sampling: one draw from each of batch_size equal
        # probability-mass segments.
        segment = p_total / self.batch_size
        for i in range(self.batch_size):
            a = segment * i
            b = segment * (i + 1)
            upperbound = random.uniform(a, b)
            idx = self.sum_tree.retrieve(upperbound)
            indices.append(idx)
        return indices
    def _calculate_weight(self, idx: int, beta: float):
        """Calculate the weight of the experience at idx."""
        # get max weight (weight of the minimum-probability sample) used
        # to normalize all weights into (0, 1].
        p_min = self.min_tree.min() / self.sum_tree.sum()
        max_weight = (p_min * len(self)) ** (-beta)
        # calculate weights: w_i = (N * P(i))^-beta, normalized by max_weight
        p_sample = self.sum_tree[idx] / self.sum_tree.sum()
        weight = (p_sample * len(self)) ** (-beta)
        weight = weight / max_weight
        return weight
# ## Network
#
# We are going to use a simple network architecture with three fully connected layers and two non-linearity functions (ReLU).
# In[5]:
class Network(nn.Module):
    """A small MLP Q-network: in_dim -> 128 -> 128 -> out_dim with ReLUs."""
    def __init__(self, in_dim: int, out_dim: int):
        """Build the three fully connected layers."""
        super(Network, self).__init__()
        hidden = 128
        stack = [
            nn.Linear(in_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, out_dim),
        ]
        self.layers = nn.Sequential(*stack)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute per-action values for a batch of observations."""
        return self.layers(x)
# ## DQN + PER Agent
#
# Here is a summary of DQNAgent class.
#
# | Method | Note |
# | --- | --- |
# |select_action | select an action from the input state. |
# |step | take an action and return the response of the env. |
# |compute_dqn_loss | return dqn loss. |
# |update_model | update the model by gradient descent. |
# |target_hard_update| hard update from the local model to the target model.|
# |train | train the agent during num_frames. |
# |test | test the agent (1 episode). |
# |plot | plot the training progresses. |
#
#
# All differences from pure DQN are noted with comments - PER.
#
# #### __init__
#
# Here, we use PrioritizedReplayBuffer instead of ReplayBuffer, and hold 2 more parameters, beta and priority epsilon, which are used to calculate weights and new priorities respectively.
#
# #### compute_dqn_loss & update_model
#
# It returns the loss of every sample for importance sampling before averaging. After updating the network, it is necessary to update the priorities of all sampled experiences.
#
# #### train
#
# beta linearly increases to 1 at every training step.
# In[12]:
class DQNAgent:
    """DQN Agent interacting with environment.
    Attribute:
        env (gym.Env): openAI Gym environment
        memory (PrioritizedReplayBuffer): replay memory to store transitions
        batch_size (int): batch size for sampling
        epsilon (float): parameter for epsilon greedy policy
        epsilon_decay (float): step size to decrease epsilon
        max_epsilon (float): max value of epsilon
        min_epsilon (float): min value of epsilon
        target_update (int): period for target model's hard update
        gamma (float): discount factor
        dqn (Network): model to train and select actions
        dqn_target (Network): target model to update
        optimizer (torch.optim): optimizer for training dqn
        transition (list): transition information including
            state, action, reward, next_state, done
        beta (float): determines how much importance sampling is used
        prior_eps (float): guarantees every transition can be sampled
    """
    def __init__(
        self,
        env: gym.Env,
        memory_size: int,
        batch_size: int,
        target_update: int,
        epsilon_decay: float,
        max_epsilon: float = 1.0,
        min_epsilon: float = 0.1,
        gamma: float = 0.99,
        # PER parameters
        alpha: float = 0.2,
        beta: float = 0.6,
        prior_eps: float = 1e-6,
    ):
        """Initialization.
        Args:
            env (gym.Env): openAI Gym environment
            memory_size (int): length of memory
            batch_size (int): batch size for sampling
            target_update (int): period for target model's hard update
            epsilon_decay (float): step size to decrease epsilon
            max_epsilon (float): max value of epsilon
            min_epsilon (float): min value of epsilon
            gamma (float): discount factor
            alpha (float): determines how much prioritization is used
            beta (float): determines how much importance sampling is used
            prior_eps (float): guarantees every transition can be sampled
        """
        obs_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n
        self.env = env
        self.batch_size = batch_size
        self.epsilon = max_epsilon
        self.epsilon_decay = epsilon_decay
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.target_update = target_update
        self.gamma = gamma
        # device: cpu / gpu
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        print(self.device)
        # PER
        # In DQN, We used "ReplayBuffer(obs_dim, memory_size, batch_size)"
        self.beta = beta
        self.prior_eps = prior_eps
        self.memory = PrioritizedReplayBuffer(
            obs_dim, memory_size, batch_size, alpha
        )
        # networks: dqn, dqn_target
        self.dqn = Network(obs_dim, action_dim).to(self.device)
        self.dqn_target = Network(obs_dim, action_dim).to(self.device)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        self.dqn_target.eval()
        # optimizer
        self.optimizer = optim.Adam(self.dqn.parameters())
        # transition to store in memory
        self.transition = list()
        # mode: train / test
        self.is_test = False
    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action from the input state (epsilon-greedy)."""
        # epsilon greedy policy
        if self.epsilon > np.random.random():
            selected_action = self.env.action_space.sample()
        else:
            selected_action = self.dqn(
                torch.FloatTensor(state).to(self.device)
            ).argmax()
            selected_action = selected_action.detach().cpu().numpy()
        if not self.is_test:
            # Remember (state, action); step() appends reward/next_state/done.
            self.transition = [state, selected_action]
        return selected_action
    def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
        """Take an action and return the response of the env."""
        next_state, reward, done, _ = self.env.step(action)
        if not self.is_test:
            self.transition += [reward, next_state, done]
            self.memory.store(*self.transition)
        return next_state, reward, done
    def update_model(self) -> torch.Tensor:
        """Update the model by gradient descent; returns the scalar loss."""
        # PER needs beta to calculate weights
        samples = self.memory.sample_batch(self.beta)
        weights = torch.FloatTensor(
            samples["weights"].reshape(-1, 1)
        ).to(self.device)
        indices = samples["indices"]
        # PER: importance sampling before average
        elementwise_loss = self._compute_dqn_loss(samples)
        loss = torch.mean(elementwise_loss * weights)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # PER: update priorities from the new per-sample losses; prior_eps
        # keeps every priority strictly positive.
        loss_for_prior = elementwise_loss.detach().cpu().numpy()
        new_priorities = loss_for_prior + self.prior_eps
        self.memory.update_priorities(indices, new_priorities)
        return loss.item()
    def train(self, num_frames: int, plotting_interval: int = 200):
        """Train the agent for num_frames environment steps."""
        self.is_test = False
        state = self.env.reset()
        update_cnt = 0
        epsilons = []
        losses = []
        scores = []
        score = 0
        for frame_idx in range(1, num_frames + 1):
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
            # PER: increase beta toward 1 over the course of training
            fraction = min(frame_idx / num_frames, 1.0)
            self.beta = self.beta + fraction * (1.0 - self.beta)
            # if episode ends
            if done:
                # Bug fix: the original called the module-level `env.reset()`
                # instead of this agent's own environment.
                state = self.env.reset()
                scores.append(score)
                score = 0
            # if training is ready
            if len(self.memory) >= self.batch_size:
                loss = self.update_model()
                losses.append(loss)
                update_cnt += 1
                # linearly decrease epsilon
                self.epsilon = max(
                    self.min_epsilon, self.epsilon - (
                        self.max_epsilon - self.min_epsilon
                    ) * self.epsilon_decay
                )
                epsilons.append(self.epsilon)
                # if hard update is needed
                if update_cnt % self.target_update == 0:
                    self._target_hard_update()
            # plotting
            if frame_idx % plotting_interval == 0:
                self._plot(frame_idx, scores, losses, epsilons)
        self.env.close()
    def test(self):
        """Test the agent (1 episode); returns the rendered RGB frames."""
        self.is_test = True
        state = self.env.reset()
        done = False
        score = 0
        frames = []
        while not done:
            frames.append(self.env.render(mode="rgb_array"))
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
        print("score: ", score)
        self.env.close()
        return frames
    def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:
        """Return the element-wise (per-sample, unreduced) dqn loss."""
        device = self.device  # for shortening the following lines
        state = torch.FloatTensor(samples["obs"]).to(device)
        next_state = torch.FloatTensor(samples["next_obs"]).to(device)
        action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(device)
        reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
        done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
        # G_t = r + gamma * v(s_{t+1}) if state != Terminal
        #     = r                      otherwise
        curr_q_value = self.dqn(state).gather(1, action)
        next_q_value = self.dqn_target(
            next_state
        ).max(dim=1, keepdim=True)[0].detach()
        mask = 1 - done
        target = (reward + self.gamma * next_q_value * mask).to(self.device)
        # calculate element-wise dqn loss (reduction deferred so PER can
        # apply per-sample importance weights)
        elementwise_loss = F.smooth_l1_loss(curr_q_value, target, reduction="none")
        return elementwise_loss
    def _target_hard_update(self):
        """Hard update: target <- local."""
        self.dqn_target.load_state_dict(self.dqn.state_dict())
    def _plot(
        self,
        frame_idx: int,
        scores: List[float],
        losses: List[float],
        epsilons: List[float],
    ):
        """Plot the training progresses."""
        clear_output(True)
        plt.figure(figsize=(20, 5))
        plt.subplot(131)
        plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))
        plt.plot(scores)
        plt.subplot(132)
        plt.title('loss')
        plt.plot(losses)
        plt.subplot(133)
        plt.title('epsilons')
        plt.plot(epsilons)
        plt.show()
# ## Environment
#
# You can see the [code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py) and [configurations](https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L53) of CartPole-v0 from OpenAI's repository.
# In[13]:
# environment
env_id = "CartPole-v0"
env = gym.make(env_id)
# ## Set random seed
# In[14]:
# Fixed seed used for every RNG source below.
seed = 777
def seed_torch(seed):
    """Make torch reproducible for the given seed.

    Seeds the global torch RNG and, when cuDNN is enabled, disables
    benchmark-mode kernel autotuning in favor of deterministic kernels.
    """
    torch.manual_seed(seed)
    if torch.backends.cudnn.enabled:
        cudnn = torch.backends.cudnn
        cudnn.benchmark, cudnn.deterministic = False, True
# Seed every RNG source used by this notebook: numpy, Python's random,
# torch (via seed_torch), and the gym environment.
np.random.seed(seed)
random.seed(seed)
seed_torch(seed)
env.seed(seed)
# ## Initialize
# In[15]:
# parameters
num_frames = 10000
memory_size = 1000
batch_size = 32
target_update = 100
# epsilon anneals from max to min over roughly 2000 update steps
epsilon_decay = 1 / 2000
# train
agent = DQNAgent(env, memory_size, batch_size, target_update, epsilon_decay)
# ## Train
# In[16]:
agent.train(num_frames)
# ## Test
#
# Run the trained agent (1 episode).
# In[17]:
frames = agent.test()
# ## Render
# In[18]:
# Imports specifically so we can render outputs in Colab.
from matplotlib import animation
from JSAnimation.IPython_display import display_animation
from IPython.display import display
def display_frames_as_gif(frames):
    """Displays a list of frames as a gif, with controls."""
    # Seed the figure with the first frame; animate() swaps the image data.
    patch = plt.imshow(frames[0])
    plt.axis('off')
    def animate(i):
        patch.set_data(frames[i])
    # 50 ms between frames (~20 fps).
    anim = animation.FuncAnimation(
        plt.gcf(), animate, frames = len(frames), interval=50
    )
    # display_animation comes from the third-party JSAnimation package.
    display(display_animation(anim, default_mode='loop'))
# display
display_frames_as_gif(frames)
|
import click
from flask import current_app
from flask.cli import with_appcontext
@click.group()
def cli():
    """Root command group; subcommands attach via @cli.command()."""
    pass
@cli.command()
@with_appcontext
def delete_expired():
    ''' Deletes expired uploads '''
    click.echo('Deleting expired uploads')
    try:
        current_app.flask_tus.repo.delete_expired()
    except Exception as e:
        # Best-effort maintenance command: report the failure rather than
        # crash the CLI with a traceback.
        click.echo(e)
    else:
        # Fixed: message previously read
        # "Successfully deleted expired uploads deleted" (duplicated word).
        click.echo('Successfully deleted expired uploads')
|
# Check Python Version
import sys
import scipy
import numpy
import matplotlib
import pandas
import sklearn
print('Python: {}'.format(sys.version))
print('scipy: {}'.format(scipy.__version__))
print('numpy: {}'.format(numpy.__version__))
print('matplotlib: {}'.format(matplotlib.__version__))
print('pandas: {}'.format(pandas.__version__))
print('sklearn: {}'.format(sklearn.__version__))
import numpy as np
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# this import fails on modern versions. model_selection (imported below)
# is the supported replacement.
from sklearn import preprocessing, cross_validation
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import pandas as pd
# Load Dataset
# UCI Wisconsin breast cancer data: 10 feature columns plus a 'class'
# label column (values 2/4 per the assertions further below).
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
names = ['id', 'clump_thickness', 'uniform_cell_size', 'uniform_cell_shape',
         'marginal_adhesion', 'single_epithelial_size', 'bare_nuclei',
         'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class']
df = pd.read_csv(url, names=names)
# Preprocess the data
# '?' marks missing values in the raw file; recode them to a large
# sentinel so the models treat them as extreme outliers.
df.replace('?', -99999, inplace=True)
print(df.axes)
# Fixed: the positional `axis` argument to DataFrame.drop was removed in
# pandas 2.0; use the explicit `columns=` keyword instead.
df.drop(columns=['id'], inplace=True)
# Let explore the dataset and do a few visualizations
# Single-row sample (row with index 10).
print(df.loc[10])
# Print the shape of the dataset
print(df.shape)
# Describe the dataset
print(df.describe())
# Plot histograms for each variable
df.hist(figsize = (10, 10))
plt.show()
# Create scatter plot matrix
scatter_matrix(df, figsize = (18,18))
plt.show()
# Create X and Y datasets for training
# (columns= keyword: positional axis was removed in pandas 2.0)
X = np.array(df.drop(columns=['class']))
y = np.array(df['class'])
# Fixed: sklearn.cross_validation was removed in scikit-learn 0.20;
# model_selection (already imported above) provides train_test_split.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
# Testing Options
seed = 8
scoring = 'accuracy'
# Define models to train
models = []
models.append(('KNN', KNeighborsClassifier(n_neighbors = 5)))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
    # Fixed: modern scikit-learn raises if random_state is given while
    # shuffle=False; shuffling matches the intent of seeding the folds.
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    # Fixed: this line was mangled by the notebook export into a bogus
    # get_ipython() call; restore the original format string.
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Make predictions on validation dataset
for name, model in models:
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(name)
    print(accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))
# Accuracy - ratio of correctly predicted observation to the total observations.
# Precision - (false positives) ratio of correctly predicted positive observations to the total predicted positive observations
# Recall (Sensitivity) - (false negatives) ratio of correctly predicted positive observations to the all observations in actual class - yes.
# F1 score - F1 Score is the weighted average of Precision and Recall. Therefore, this score takes both false positives and false
# Fit a final SVM on the training split and score it on the held-out set.
clf = SVC()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
# One hand-crafted sample with the 9 feature values (no id/class columns).
example_measures = np.array([[4,2,1,1,1,2,3,2,1]])
# NOTE(review): this reshape is a no-op -- the array is already (1, 9).
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
|
import os
import sys
import math
import copy
from binary_tree import BinaryTreeNode, BinaryTree, BinarySearchTree
from graph import GraphNode, Graph
# 4.6 find the next node (in-order) of a given node in a Binary Tree
# -> back to root and using in-order travelsal until meet the current node. get the next
def get_next_node(node):
    """Return the in-order successor of `node`, or None if it is last.

    Climbs to the root via parent links, then runs an in-order traversal
    until `node` has been passed; the node visited immediately after it
    is the successor.
    """
    seen_target = [False]  # one-element list: mutable flag shared with the recursion
    return get_next_node_in_order_of_node(node, get_root_node(node), seen_target)
def get_next_node_in_order_of_node(node, visit_node, is_next):
if is_next[0]:
return visit_node
if visit_node == None:
return None
node_next = get_next_node_in_order_of_node(node, visit_node.left, is_next)
if node_next != None:
return node_next
if is_next[0]:
return visit_node
if visit_node == node:
is_next[0] = True
node_next = get_next_node_in_order_of_node(node, visit_node.right, is_next)
if node_next != None:
return node_next
return None
def get_root_node(node):
    """Climb parent links from `node` and return the tree's root.

    Fixed: the original assigned an unused `root` local before the loop.
    """
    while node.parent is not None:
        node = node.parent
    return node
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# node = tree.root.left.right
# next_node = get_next_node(node)
# if next_node != None:
# print(next_node.value)
# else:
# print("None")
# 4.7 build projects
class Project:
    """A build unit with a name, a dependency list, and a build state.

    state is 0 while waiting and 1 once built.
    """
    def __init__(self, name):
        """Create a waiting project with no dependencies."""
        self.name = name
        self.state = 0  # 0: waiting, 1: built
        # Fixed: the original also declared `dependencies`/`state` as
        # class-level attributes; a class-level mutable list risks being
        # shared across instances by accident. Per-instance only now.
        self.dependencies = []  # list of dependency Project objects
def build_projects(projects):
    """Repeatedly build every project whose dependencies are satisfied.

    Returns the list of project names in a valid build order, or False if
    a dependency cycle (or missing dependency) leaves some project unbuilt.
    """
    order = []
    progressed = True
    while progressed:
        progressed = False
        for proj in projects:
            # Only waiting projects are candidates for this pass.
            if proj.state == 0 and build_project(proj):
                order.append(proj.name)
                proj.state = 1
                progressed = True
    # Success only if no project is still waiting.
    if all(proj.state != 0 for proj in projects):
        return order
    return False
def build_project(project):
    """Mark `project` built iff every one of its dependencies is built.

    Returns True on success, False if any dependency is still waiting.
    """
    if all(dep.state == 1 for dep in project.dependencies):
        project.state = 1
        return True
    return False
# a = Project("a")
# b = Project("b")
# c = Project("c")
# d = Project("d")
# e = Project("e")
# f = Project("f")
# d.dependencies.append(a)
# b.dependencies.append(f)
# d.dependencies.append(b)
# a.dependencies.append(f)
# c.dependencies.append(d)
# t = build_projects([a,b,c,d,e,f])
# print(t)
# 4.8 find first common ancestor
# -> get a queue ancestor of node 1 and compare for node 2
def get_common_ancestor(node1, node2):
    """Return the first common ancestor of two nodes (a node counts as its
    own ancestor), or None if the nodes are in different trees.

    Improvement: the original compared every ancestor of node1 against
    every ancestor of node2 (O(n*m)); collecting node2's chain into a set
    makes this O(n + m).
    NOTE(review): assumes nodes use default object hashing/equality
    (no custom __eq__ without __hash__) -- confirm for BinaryTreeNode.
    """
    if node1 == node2:
        return node1
    # All ancestors of node2, including node2 itself.
    chain2 = set()
    walker = node2
    while walker is not None:
        chain2.add(walker)
        walker = walker.parent
    # The first node on node1's chain that also appears on node2's chain
    # is the lowest common ancestor.
    walker = node1
    while walker is not None:
        if walker in chain2:
            return walker
        walker = walker.parent
    return None
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# n1 = tree.root.left.left
# n2 = tree.root.right.left
# common = get_common_ancestor(n1, n2)
# print(common.value)
# 4.9 print all possible array can be create from a binary search tree
def dump_permutation_of_source_array(tree):
    """Print every insertion order that could have produced `tree`."""
    if tree.root == None:
        print("tree is empty")
        return
    # Start the backtracking with the root as the only legal first element.
    _dump_permutation_of_source_array([tree.root], [])
def _dump_permutation_of_source_array(candidate_nodes, visited_nodes):
    # Recursive backtracking: `candidate_nodes` holds nodes whose parents
    # have already been emitted (so any of them may legally come next);
    # `visited_nodes` is the prefix built so far. When no candidates
    # remain, one complete permutation has been produced.
    if len(candidate_nodes) == 0:
        dump_nodes(visited_nodes)
        return
    n = len(candidate_nodes)
    for i in range(0, n):
        # NOTE(review): deepcopy clones the node objects, so the values
        # printed come from copies while the children inserted below come
        # from the original tree. This works because only .value is ever
        # read downstream, but the lists end up mixing copies with
        # originals -- confirm this is intentional.
        _visited_nodes = copy.deepcopy(visited_nodes)
        _candidate_nodes = copy.deepcopy(candidate_nodes)
        _visited_nodes.append(_candidate_nodes[i])
        _candidate_nodes.remove(_candidate_nodes[i])
        node = candidate_nodes[i]
        # The chosen node's children become legal candidates for later slots.
        if node.left != None:
            _candidate_nodes.insert(0, node.left)
        if node.right != None:
            _candidate_nodes.insert(0, node.right)
        _dump_permutation_of_source_array(_candidate_nodes, _visited_nodes)
def dump_nodes(nodes):
    """Print the values of `nodes`, in order, as one candidate source array."""
    print("source:", [node.value for node in nodes])
# Test
# values = [2,1,3,4]
# values1 = [10,5,15,4,6,14,16]
# tree = BinarySearchTree()
# for v in values1:
# tree.append(v)
# dump_permutation_of_source_array(tree)
|
# Simple two-number calculator: reads two integers and an operator from
# stdin, prints the result; +, - and * also report even/odd parity.
number_one = int(input())
number_two = int(input())
symbol = str(input())
if symbol in ("+", "-", "*"):
    # Improvement: the original repeated the same print/parity logic in
    # three identical branches; dispatch on the operator instead.
    operations = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
    }
    result = operations[symbol](number_one, number_two)
    parity = "even" if result % 2 == 0 else "odd"
    print(f"{number_one} {symbol} {number_two} = {result} - {parity}")
elif symbol == "/":
    if number_two == 0:
        print(f"Cannot divide {number_one} by zero")
    else:
        result = number_one / number_two
        print(f"{number_one} / {number_two} = {result:.2f}")
elif symbol == "%":
    if number_two == 0:
        print(f"Cannot divide {number_one} by zero")
    else:
        result = number_one % number_two
        print(f"{number_one} % {number_two} = {result}")
|
import os
import pandas as pd
from bs4 import BeautifulSoup
from Utility import fetchGradeCard, isResultOut
def generateRanks(clgCode, rolldobList):
    """Fetch grade cards for every (rollNo, dob) pair, rank students by
    CGPA, and save the rank list to Downloads/<course>_<sem>.csv.

    :param clgCode: college code, passed through to fetchGradeCard.
    :param rolldobList: iterable of (rollNo, dob) pairs where dob is
        indexable as dob[0], dob[1], dob[2] -- presumably day/month/year;
        confirm against the caller.
    :return: a status message string (an apology if nothing was fetched).
    """
    flag = False          # becomes True once course/sem have been read
    sem = None
    course = None
    invalids = []         # roll numbers whose grade card could not be fetched
    rankData = {'Exam Roll Number':[], 'Name':[], 'CGPA':[]}
    for rollNo,dob in rolldobList:
        # fetchGradeCard is expected to save .temp/<rollNo>.html on
        # success and return 0 or 1 on failure (semantics live in Utility).
        ret = fetchGradeCard(clgCode, rollNo, dob[0], dob[1], dob[2], False)
        if ret == 0 or ret == 1:
            invalids.append(rollNo)
            #print(f'{rollNo} is an invalid exam roll number. Skipping..')
            continue
        soup = BeautifulSoup(open(f'.temp/{rollNo}.html'), "html.parser")
        # Course and semester are identical for all students; read once.
        if flag==False:
            course = soup.find('span', {'id':'lblcourse'}).text
            sem = soup.find('span', {'id':'lblsem'}).text
            #clgName = soup.find('span', {'id':'lblcollege'}).text
            flag=True
        rankData['Exam Roll Number'].append(soup.find('span', {'id':'lblrollno'}).text)
        rankData['Name'].append(soup.find('span', {'id':'lblname'}).text)
        # The CGPA sits in the 4th cell of the last 6 cells of the SGPA
        # table -- presumably stable across result pages; confirm.
        tr = soup.find('table',{"id":"gv_sgpa"}).find_all('td')
        tr = tr[-6:]
        rankData['CGPA'].append(float(tr[3].text))
    if len(invalids) == len(rolldobList):
        return 'Sorry! Results are not out yet.\n'
    else:
        rankDF = pd.DataFrame(rankData)
        # mergesort keeps the sort stable so equal CGPAs preserve order.
        rankDF.sort_values(by=["CGPA"],ascending=False,kind="mergesort",inplace=True)
        rankDF.reset_index(drop=True, inplace=True)
        rankDF.index = [*range(1,len(rankData['Name'])+1)]
        # Dense ranking: ties share a rank and no numbers are skipped.
        rankDF['Rank'] = rankDF['CGPA'].rank(ascending=False,method="dense").astype(int)
        if not os.path.isdir('Downloads'):
            os.mkdir('Downloads')
        rankDF.to_csv(f'Downloads/{course}_{sem}.csv',index=False)
        return "Rank list has been successfully generated and saved in 'Downloads/' folder"
|
from app import db
class TestSuite(db.Model):
    """A uniquely named suite grouping tests."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)
    def __repr__(self):
        """Debug-friendly representation keyed by the suite name."""
        return f'<TestSuite {self.name}>'
class Test(db.Model):
    """One recorded test: identity, location, timing, status and
    failure details."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    classname = db.Column(db.String(64), index=True)
    line = db.Column(db.Integer)
    timestamp = db.Column(db.DateTime)
    status = db.Column(db.Integer)
    failure_message = db.Column(db.String(256))
    stacktrace = db.Column(db.String(256))
    def __repr__(self):
        """Debug-friendly representation keyed by the test name."""
        return f'<Test {self.name}>'
class TestResult(db.Model):
    """A uniquely named test result record."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)
    def __repr__(self):
        """Debug-friendly representation keyed by the result name."""
        return f'<TestResult {self.name}>'
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import autoslug.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the quiz `slug` field as
    # an editable, unique AutoSlugField populated from the title.
    dependencies = [
        ('questions', '0004_multiplechoiceoption_other'),
    ]
    operations = [
        migrations.AlterField(
            model_name='quiz',
            name='slug',
            # NOTE(review): populate_from=b'title' is a bytes literal kept
            # from Python 2-era autogeneration -- confirm it still works
            # with the installed django-autoslug version.
            field=autoslug.fields.AutoSlugField(editable=True, unique=True, max_length=255, populate_from=b'title'),
        ),
    ]
|
#!/usr/bin/env python
# WSGI entry point for the HaaS API: configures the service and exposes
# its Flask app under the conventional name `application`.
# NOTE(review): haas.api appears to be imported only for its side effects
# (nothing from it is referenced here) -- confirm.
import haas.api
from haas import config, model, server
# Configuration and server initialization happen at import time so a WSGI
# container can use `application` immediately.
config.setup('/etc/haas.cfg')
server.init()
from haas.rest import app as application
|
class Entity:
    """
    The definition of an object in the game (door, frame, enemy, ...):
    anything the player can interact with.
    """
    def __init__(self, position: tuple[int, int], sprite: list[str], color: str = ""):
        """
        :param position: (line, column) of the sprite's top-left corner;
            position[0] offsets sprite lines, position[1] offsets characters.
        :param sprite: the lines of characters making up the entity's drawing.
        :param color: optional color tag attached to every rendered cell.
        """
        self.position = position
        self.sprite = sprite
        self.color = color
    def get_to_be_rendered(self) -> set[tuple[int, int, str, str]]:
        """
        Get the coordinates where this entity is to be rendered, and the
        characters (with their color) at those coordinates.

        Fixed docstring: the original claimed a dict subclass of
        {(i, j): character} was returned, but the method builds a set.

        :return: a set of (i, j, character, color) tuples, where (i, j) is
            the absolute coordinate of one sprite character.
        """
        return {
            (i + self.position[0], j + self.position[1], char, self.color)
            for i, line in enumerate(self.sprite)
            for j, char in enumerate(line)
        }
|
#!/Users/wzf/PycharmProjects/Eat_or_not/venv/bin/python
# Thin wrapper around Django's CLI, equivalent to running manage.py.
# NOTE(review): no DJANGO_SETTINGS_MODULE is set here -- presumably the
# environment provides it; confirm.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
"""Provides classes for interacting with iTerm2 sessions."""
import asyncio
import iterm2.api_pb2
import iterm2.app
import iterm2.capabilities
import iterm2.connection
import iterm2.keyboard
import iterm2.notifications
import iterm2.profile
import iterm2.rpc
import iterm2.screen
import iterm2.selection
import iterm2.util
import json
import typing
class SplitPaneException(Exception):
    """Something went wrong when trying to split a pane."""
    # No extra behavior; the type itself carries the meaning.
    pass
class Splitter:
    """A container of split pane sessions where the dividers are all aligned the same way.
    """
    def __init__(self, vertical: bool=False):
        """
        :param vertical: Bool. If true, the divider is vertical, else horizontal.
        """
        self.__vertical = vertical
        # Elements are either Splitter or Session
        self.__children: typing.List[typing.Union['Splitter', 'Session']] = []
        # Elements are Session
        self.__sessions: typing.List['Session'] = []
    @staticmethod
    def from_node(node, connection):
        """Creates a new Splitter from a node.
        :param node: :class:`iterm2.api_pb2.SplitTreeNode`
        :param connection: :class:`~iterm2.connection.Connection`
        :returns: A new Splitter.
        """
        splitter = Splitter(node.vertical)
        for link in node.links:
            # Each link is either a leaf session or a nested split tree.
            if link.HasField("session"):
                session = Session(connection, link)
                splitter.add_child(session)
            else:
                # Recurse to build the nested splitter.
                subsplit = Splitter.from_node(link.node, connection)
                splitter.add_child(subsplit)
        return splitter
    @property
    def vertical(self) -> bool:
        """Are the dividers in this splitter vertical?"""
        return self.__vertical
    def add_child(self, child: typing.Union['Splitter', 'Session']):
        """
        Adds one or more new sessions to a splitter.
        child: A Session or a Splitter.
        """
        self.__children.append(child)
        # Keep the flattened session list in sync with the child tree.
        if isinstance(child, Session):
            self.__sessions.append(child)
        else:
            self.__sessions.extend(child.sessions)
    @property
    def children(self) -> typing.List[typing.Union['Splitter', 'Session']]:
        """
        :returns: This splitter's children. A list of :class:`Session` or :class:`Splitter` objects.
        """
        return self.__children
    @property
    def sessions(self) -> typing.List['Session']:
        """
        :returns: All sessions in this splitter and all nested splitters. A list of :class:`Session` objects.
        """
        return self.__sessions
    def pretty_str(self, indent: str="") -> str:
        """
        :returns: A string describing this splitter. Has newlines.
        """
        string_value = indent + "Splitter %s\n" % (
            "|" if self.vertical else "-")
        for child in self.__children:
            # Children are rendered one indent level deeper.
            string_value += child.pretty_str(" " + indent)
        return string_value
    def update_session(self, session):
        """
        Finds a session with the same ID as session. If it exists, replace the reference with
        session.
        :returns: True if the update occurred.
        """
        # i tracks the index of `child` within self.__children so the
        # matching entry can be replaced in place.
        i = 0
        for child in self.__children:
            if isinstance(child, Session) and child.session_id == session.session_id:
                self.__children[i] = session
                # Update the entry in self.__sessions
                for j in range(len(self.__sessions)):
                    if self.__sessions[j].session_id == session.session_id:
                        self.__sessions[j] = session
                        break
                return True
            elif isinstance(child, Splitter):
                # Recurse into nested splitters; stop at the first match.
                if child.update_session(session):
                    return True
            i += 1
        return False
    def to_protobuf(self):
        # Serialize this splitter (and its whole subtree) back into the
        # SplitTreeNode proto used by the iTerm2 API.
        node = iterm2.api_pb2.SplitTreeNode()
        node.vertical = self.vertical
        def make_link(obj):
            # Leaf sessions become session summaries; nested splitters recurse.
            link = iterm2.api_pb2.SplitTreeNode.SplitTreeLink()
            if isinstance(obj, Session):
                link.session.CopyFrom(obj.to_session_summary_protobuf())
            else:
                link.node.CopyFrom(obj.to_protobuf())
            return link
        links = list(map(make_link, self.children))
        node.links.extend(links)
        return node
class SessionLineInfo:
    """Typed accessors over the raw line-info tuple reported for a session."""
    # Positions of each field within the underlying tuple.
    _MUTABLE_AREA_HEIGHT = 0
    _SCROLLBACK_BUFFER_HEIGHT = 1
    _OVERFLOW = 2
    _FIRST_VISIBLE_LINE_NUMBER = 3
    def __init__(self, line_info):
        self.__line_info = line_info
    @property
    def mutable_area_height(self) -> int:
        """Returns the height of the mutable area of the session."""
        return self.__line_info[self._MUTABLE_AREA_HEIGHT]
    @property
    def scrollback_buffer_height(self) -> int:
        """Returns the height of the immutable area of the session."""
        return self.__line_info[self._SCROLLBACK_BUFFER_HEIGHT]
    @property
    def overflow(self) -> int:
        """Returns the number of lines lost to overflow. These lines were removed after scrollback history became full."""
        return self.__line_info[self._OVERFLOW]
    @property
    def first_visible_line_number(self) -> int:
        """Returns the line number of the first line currently displayed onscreen. Changes when the user scrolls."""
        return self.__line_info[self._FIRST_VISIBLE_LINE_NUMBER]
class Session:
    """
    Represents an iTerm2 session.
    """

    @staticmethod
    def active_proxy(connection: iterm2.connection.Connection) -> 'Session':
        """
        Use this to register notifications against the currently active session.

        :param connection: The connection to iTerm2.

        :returns: A proxy for the currently active session.
        """
        return ProxySession(connection, "active")

    @staticmethod
    def all_proxy(connection: iterm2.connection.Connection):
        """
        Use this to register notifications against all sessions, including those
        not yet created.

        :param connection: The connection to iTerm2.

        :returns: A proxy for all sessions.
        """
        return ProxySession(connection, "all")

    def __init__(self, connection, link, summary=None):
        """
        Do not call this yourself. Use :class:`~iterm2.app.App` instead.

        :param connection: :class:`Connection`
        :param link: :class:`iterm2.api_pb2.SplitTreeNode.SplitTreeLink`
        :param summary: :class:`iterm2.api_pb2.SessionSummary`
        """
        self.connection = connection

        if link is not None:
            # Full info from a split-tree link: a live, visible session.
            self.__session_id = link.session.unique_identifier
            self.frame = link.session.frame
            self.__grid_size = link.session.grid_size
            self.name = link.session.title
            self.buried = False
        elif summary is not None:
            # Minimal info from a summary: a buried session has no frame/grid.
            self.__session_id = summary.unique_identifier
            self.name = summary.title
            self.buried = True
            self.__grid_size = None
            self.frame = None
        self.__preferred_size = self.grid_size

    def __repr__(self):
        return "<Session name=%s id=%s>" % (self.name, self.__session_id)

    def to_session_summary_protobuf(self):
        """Builds a SessionSummary protobuf carrying this session's id and preferred size."""
        summary = iterm2.api_pb2.SessionSummary()
        summary.unique_identifier = self.session_id
        summary.grid_size.width = self.preferred_size.width
        summary.grid_size.height = self.preferred_size.height
        return summary

    def update_from(self, session):
        """Replace internal state with that of another session."""
        self.frame = session.frame
        self.__grid_size = session.grid_size
        self.name = session.name

    def pretty_str(self, indent: str="") -> str:
        """
        :returns: A string describing the session.
        """
        return indent + "Session \"%s\" id=%s %s frame=%s\n" % (
            self.name,
            self.__session_id,
            iterm2.util.size_str(self.grid_size),
            iterm2.util.frame_str(self.frame))

    @property
    def preferred_size(self) -> iterm2.util.Size:
        """The size in cells to resize to when `Tab.async_update_layout()` is called."""
        return self.__preferred_size

    @preferred_size.setter
    def preferred_size(self, value: iterm2.util.Size):
        """Sets the size in cells to resize to when `Tab.async_update_layout()` is called."""
        self.__preferred_size = value

    @property
    def session_id(self) -> str:
        """
        :returns: the globally unique identifier for this session.
        """
        return self.__session_id

    async def async_get_screen_contents(self) -> iterm2.screen.ScreenContents:
        """
        Returns the contents of the mutable area of the screen.

        :returns: A :class:`iterm2.screen.ScreenContents`, containing the screen contents.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        result = await iterm2.rpc.async_get_screen_contents(
            self.connection,
            self.session_id)
        if result.get_buffer_response.status == iterm2.api_pb2.GetBufferResponse.Status.Value("OK"):
            return iterm2.screen.ScreenContents(result.get_buffer_response)
        else:
            raise iterm2.rpc.RPCException(iterm2.api_pb2.GetBufferResponse.Status.Name(result.get_buffer_response.status))

    def get_screen_streamer(self, want_contents: bool=True) -> iterm2.screen.ScreenStreamer:
        """
        Provides a nice interface for receiving updates to the screen.

        The screen is the mutable part of a session (its last lines, excluding
        scrollback history).

        :param want_contents: If `True`, the screen contents will be provided. See :class:`~iterm2.screen.ScreenStreamer` for details.

        :returns: A new screen streamer, suitable for monitoring the contents of this session.

        :Example:

          async with session.get_screen_streamer() as streamer:
            while condition():
              contents = await streamer.async_get()
              do_something(contents)
        """
        return iterm2.screen.ScreenStreamer(self.connection, self.__session_id, want_contents=want_contents)

    async def async_send_text(self, text: str, suppress_broadcast: bool=False) -> None:
        """
        Send text as though the user had typed it.

        :param text: The text to send.
        :param suppress_broadcast: If `True`, text goes only to the specified session even if broadcasting is on.

        .. seealso::
            * Example ":ref:`broadcast_example`"
            * Example ":ref:`targeted_input_example`"
        """
        await iterm2.rpc.async_send_text(self.connection, self.__session_id, text, suppress_broadcast)

    async def async_split_pane(
            self,
            vertical: bool=False,
            before: bool=False,
            profile: typing.Union[None, str]=None,
            profile_customizations: typing.Union[None, iterm2.profile.LocalWriteOnlyProfile]=None) -> 'Session':
        """
        Splits the pane, creating a new session.

        :param vertical: If `True`, the divider is vertical, else horizontal.
        :param before: If `True`, the new session will be to the left of or above the session being split. Otherwise, it will be to the right of or below it.
        :param profile: The profile name to use. `None` for the default profile.
        :param profile_customizations: Changes to the profile that should affect only this session, or `None` to make no changes.

        :returns: A newly created Session.

        :throws: :class:`SplitPaneException` if something goes wrong.

        .. seealso:: Example ":ref:`broadcast_example`"
        """
        if profile_customizations is None:
            custom_dict = None
        else:
            custom_dict = profile_customizations.values

        result = await iterm2.rpc.async_split_pane(
            self.connection,
            self.__session_id,
            vertical,
            before,
            profile,
            profile_customizations=custom_dict)
        if result.split_pane_response.status == iterm2.api_pb2.SplitPaneResponse.Status.Value("OK"):
            new_session_id = result.split_pane_response.session_id[0]
            # The app's session tree must be refreshed before the new session
            # can be looked up by id.
            app = await iterm2.app.async_get_app(self.connection)
            assert(app)
            await app.async_refresh()
            session = app.get_session_by_id(new_session_id)
            if session:
                return session
            raise SplitPaneException("No such session {}".format(new_session_id))
        else:
            raise SplitPaneException(
                iterm2.api_pb2.SplitPaneResponse.Status.Name(result.split_pane_response.status))

    async def async_set_profile_properties(self, write_only_profile: iterm2.profile.LocalWriteOnlyProfile) -> None:
        """
        Sets the value of properties in this session.

        When you use this function the underlying profile is not modified. The session will keep a copy of its profile with these modifications.

        :param write_only_profile: A write-only profile that has the desired changes.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso::
            * Example ":ref:`copycolor_example`"
            * Example ":ref:`settabcolor_example`"
            * Example ":ref:`increase_font_size_example`"
        """
        if iterm2.capabilities.supports_multiple_set_profile_properties(self.connection):
            # Newer hosts accept all assignments in a single RPC.
            assignments = []
            for key, json_value in write_only_profile.values.items():
                assignments += [(key, json_value)]
            response = await iterm2.rpc.async_set_profile_properties_json(
                self.connection,
                self.session_id,
                assignments)
            status = response.set_profile_property_response.status
            if status != iterm2.api_pb2.SetProfilePropertyResponse.Status.Value("OK"):
                raise iterm2.rpc.RPCException(iterm2.api_pb2.SetProfilePropertyResponse.Status.Name(status))
            return

        # Deprecated code path, in use by 3.3.0beta9 and earlier: one RPC per
        # property.
        for key, json_value in write_only_profile.values.items():
            response = await iterm2.rpc.async_set_profile_property_json(
                self.connection,
                self.session_id,
                key,
                json_value)
            status = response.set_profile_property_response.status
            if status != iterm2.api_pb2.SetProfilePropertyResponse.Status.Value("OK"):
                raise iterm2.rpc.RPCException(iterm2.api_pb2.SetProfilePropertyResponse.Status.Name(status))

    async def async_get_profile(self) -> iterm2.profile.Profile:
        """
        Fetches the profile of this session

        :returns: The profile for this session, including any session-local changes not in the underlying profile.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso::
            * Example ":ref:`blending_example`"
            * Example ":ref:`colorhost_example`"
            * Example ":ref:`current_preset_example`"
            * Example ":ref:`random_color_example`"
        """
        response = await iterm2.rpc.async_get_profile(self.connection, self.__session_id)
        status = response.get_profile_property_response.status
        if status == iterm2.api_pb2.GetProfilePropertyResponse.Status.Value("OK"):
            return iterm2.profile.Profile(
                self.__session_id,
                self.connection,
                response.get_profile_property_response.properties)
        else:
            raise iterm2.rpc.RPCException(
                iterm2.api_pb2.GetProfilePropertyResponse.Status.Name(status))

    async def async_set_profile(self, profile: iterm2.profile.Profile):
        """
        Changes this session's profile.

        The profile may be an existing profile, an existing profile with
        modifications, or a previously unknown profile with a unique GUID.

        :param profile: The `~iterm2.profile.Profile` to use.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        await self.async_set_profile_properties(profile.local_write_only_copy)

    async def async_inject(self, data: bytes) -> None:
        """
        Injects data as though it were program output.

        :param data: A byte array to inject.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso:: Example ":ref:`cls_example`"
        """
        response = await iterm2.rpc.async_inject(self.connection, data, [self.__session_id])
        status = response.inject_response.status[0]
        if status != iterm2.api_pb2.InjectResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.InjectResponse.Status.Name(status))

    async def async_activate(self, select_tab: bool=True, order_window_front: bool=True) -> None:
        """
        Makes the session the active session in its tab.

        :param select_tab: Whether the tab this session is in should be selected.
        :param order_window_front: Whether the window this session is in should be brought to the front and given keyboard focus.

        .. seealso:: Example ":ref:`broadcast_example`"
        """
        await iterm2.rpc.async_activate(
            self.connection,
            True,
            select_tab,
            order_window_front,
            session_id=self.__session_id)

    async def async_set_variable(self, name: str, value: typing.Any):
        """
        Sets a user-defined variable in the session.

        See Badges documentation for more information on user-defined variables.

        :param name: The variable's name.
        :param value: The new value to assign.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso:: Example ":ref:`escindicator_example`"
        """
        result = await iterm2.rpc.async_variable(
            self.connection,
            self.__session_id,
            [(name, json.dumps(value))],
            [])
        status = result.variable_response.status
        if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))

    async def async_get_variable(self, name: str) -> typing.Any:
        """
        Fetches a session variable.

        See the Scripting Fundamentals documentation for more information on variables.

        :param name: The variable's name.

        :returns: The variable's value or empty string if it is undefined.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso:: Example ":ref:`colorhost_example`"
        """
        result = await iterm2.rpc.async_variable(self.connection, self.__session_id, [], [name])
        status = result.variable_response.status
        if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))
        else:
            return json.loads(result.variable_response.values[0])

    async def async_restart(self, only_if_exited: bool=False) -> None:
        """
        Restarts a session.

        :param only_if_exited: When `True`, this will raise an exception if the session is still running. When `False`, a running session will be killed and restarted.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        result = await iterm2.rpc.async_restart_session(self.connection, self.__session_id, only_if_exited)
        status = result.restart_session_response.status
        if status != iterm2.api_pb2.RestartSessionResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.RestartSessionResponse.Status.Name(status))

    async def async_close(self, force: bool=False) -> None:
        """
        Closes the session.

        :param force: If `True`, the user will not be prompted for a confirmation.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        result = await iterm2.rpc.async_close(self.connection, sessions=[self.__session_id], force=force)
        status = result.close_response.statuses[0]
        if status != iterm2.api_pb2.CloseResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.CloseResponse.Status.Name(status))

    async def async_set_grid_size(self, size: iterm2.util.Size) -> None:
        """Sets the visible size of a session.

        Note: This is meant for tabs that contain a single pane. If split panes are present, use :func:`~iterm2.tab.Tab.async_update_layout` instead.

        :param size: The new size for the session, in cells.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        Note: This will fail on fullscreen windows."""
        await self._async_set_property("grid_size", size.json)

    @property
    def grid_size(self) -> iterm2.util.Size:
        """Returns the size of the visible part of the session in cells.

        :returns: The size of the visible part of the session in cells.
        """
        return self.__grid_size

    async def async_set_buried(self, buried: bool) -> None:
        """Buries or disinters a session.

        :param buried: If `True`, bury the session. If `False`, disinter it.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        await self._async_set_property("buried", json.dumps(buried))

    async def _async_set_property(self, key, json_value):
        """Sets a property on this session.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        response = await iterm2.rpc.async_set_property(self.connection, key, json_value, session_id=self.session_id)
        status = response.set_property_response.status
        if status != iterm2.api_pb2.SetPropertyResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.SetPropertyResponse.Status.Name(status))
        return response

    async def async_get_selection(self) -> iterm2.selection.Selection:
        """
        :returns: The selected regions of this session. The selection will be empty if there is no selected text.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso:: Example ":ref:`georges_title_example`"
        """
        response = await iterm2.rpc.async_get_selection(self.connection, self.session_id)
        status = response.selection_response.status
        if status != iterm2.api_pb2.SelectionResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.SelectionResponse.Status.Name(status))
        subs = []
        # Convert each protobuf sub-selection into the Python representation.
        for subProto in response.selection_response.get_selection_response.selection.sub_selections:
            start = iterm2.util.Point(
                subProto.windowed_coord_range.coord_range.start.x,
                subProto.windowed_coord_range.coord_range.start.y)
            end = iterm2.util.Point(
                subProto.windowed_coord_range.coord_range.end.x,
                subProto.windowed_coord_range.coord_range.end.y)
            coordRange = iterm2.util.CoordRange(start, end)
            columnRange = iterm2.util.Range(
                subProto.windowed_coord_range.columns.location,
                subProto.windowed_coord_range.columns.length)
            windowedCoordRange = iterm2.util.WindowedCoordRange(coordRange, columnRange)

            sub = iterm2.SubSelection(
                windowedCoordRange,
                iterm2.selection.SelectionMode.fromProtoValue(
                    subProto.selection_mode),
                subProto.connected)
            subs.append(sub)
        return iterm2.Selection(subs)

    async def async_get_selection_text(self, selection: iterm2.selection.Selection) -> str:
        """Fetches the text within a selection region.

        :param selection: A :class:`~iterm2.selection.Selection` defining a region in the session.

        .. seealso::
            * :func:`async_get_selection`.
            * Example ":ref:`georges_title_example`"

        :returns: A string with the selection's contents. Discontiguous selections are combined with newlines."""
        return await selection.async_get_string(
            self.connection,
            self.session_id,
            self.grid_size.width)

    async def async_set_selection(self, selection: iterm2.selection.Selection) -> None:
        """
        :param selection: The regions of text to select.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.

        .. seealso:: Example ":ref:`zoom_on_screen_example`"
        """
        response = await iterm2.rpc.async_set_selection(self.connection, self.session_id, selection)
        status = response.selection_response.status
        if status != iterm2.api_pb2.SelectionResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.SelectionResponse.Status.Name(status))

    async def async_get_line_info(self) -> SessionLineInfo:
        """
        Fetches the number of lines that are visible, in history, and that have been removed after history became full.

        :returns: Information about the session's wrapped lines of text.

        .. seealso:: Example ":ref:`zoom_on_screen_example`"
        """
        response = await iterm2.rpc.async_get_property(self.connection, "number_of_lines", session_id=self.session_id)
        status = response.get_property_response.status
        if status != iterm2.api_pb2.GetPropertyResponse.Status.Value("OK"):
            raise iterm2.rpc.RPCException(iterm2.api_pb2.GetPropertyResponse.Status.Name(status))
        # Renamed local from `dict`, which shadowed the builtin.
        line_counts = json.loads(response.get_property_response.json_value)
        t = (line_counts["grid"], line_counts["history"], line_counts["overflow"], line_counts["first_visible"])
        return SessionLineInfo(t)

    async def async_set_name(self, name: str):
        """Changes the session's name.

        This is equivalent to editing the session's name manually in the Edit Session window.

        :param name: The new name to use.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        invocation = iterm2.util.invocation_string(
            "iterm2.set_name",
            { "name": name })
        await iterm2.rpc.async_invoke_method(self.connection, self.session_id, invocation, -1)

    async def async_run_tmux_command(self, command: str, timeout: float=-1) -> str:
        """Invoke a tmux command and return its result. Raises an exception if this session is not a tmux integration session.

        :param command: The tmux command to run.
        :param timeout: The amount of time to wait for a response, or -1 to use the default.

        :returns: The output from tmux.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        invocation = iterm2.util.invocation_string(
            "iterm2.run_tmux_command",
            { "command": command })
        return await iterm2.rpc.async_invoke_method(self.connection, self.session_id, invocation, timeout)

    async def async_invoke_function(self, invocation: str, timeout: float=-1):
        """
        Invoke an RPC. Could be a function registered by this or another script, or a built-in function.

        This invokes the RPC in the context of this session. Most user-defined RPCs are invoked in a session context (for example, invocations attached to triggers or key bindings). Default variables will be pulled from that scope. If you call a function from the wrong context it may fail because its defaults will not be set properly.

        :param invocation: A function invocation string.
        :param timeout: Max number of seconds to wait. Negative values mean to use the system default timeout.

        :returns: The result of the invocation if successful.

        :throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
        """
        response = await iterm2.rpc.async_invoke_function(
            self.connection,
            invocation,
            session_id=self.session_id,
            timeout=timeout)
        which = response.invoke_function_response.WhichOneof('disposition')
        if which == 'error':
            if response.invoke_function_response.error.status == iterm2.api_pb2.InvokeFunctionResponse.Status.Value("TIMEOUT"):
                raise iterm2.rpc.RPCException("Timeout")
            else:
                raise iterm2.rpc.RPCException("{}: {}".format(
                    iterm2.api_pb2.InvokeFunctionResponse.Status.Name(
                        response.invoke_function_response.error.status),
                    response.invoke_function_response.error.error_reason))
        return json.loads(response.invoke_function_response.success.json_result)
class InvalidSessionId(Exception):
    """Raised when a method is given a session ID it does not accept."""
class ProxySession(Session):
    """A proxy for a Session.

    This is used when you specify an abstract session ID like "all" or "active".

    Since the session or set of sessions that refers to is ever-changing, this
    proxy stands in for the real thing. It may limit functionality since it
    doesn't make sense to, for example, get the screen contents of "all"
    sessions.
    """
    def __init__(self, connection, session_id):
        # NOTE(review): the abstract id (a str, e.g. "active") is forwarded as
        # Session.__init__'s `link` argument, which that constructor otherwise
        # treats as a protobuf with a `.session` attribute — confirm
        # Session.__init__ tolerates a string here.
        super().__init__(connection, session_id)
        # Shadowing note: this name mangles to _ProxySession__session_id, a
        # separate attribute from Session's _Session__session_id.
        self.__session_id = session_id

    def __repr__(self):
        return "<ProxySession %s>" % self.__session_id

    def pretty_str(self, indent=""):
        # One-line description; unlike Session.pretty_str there is no grid or
        # frame to report for an abstract session id.
        return indent + "ProxySession %s" % self.__session_id
|
import os
import numpy as np
import torch
from gym_robothor.envs.robothor_env import RoboThorEnv, env_generator
def reset(env, state_shape, device):
    """Reset *env* and build the initial policy-input dict.

    :param env: environment exposing reset() -> (observation, bearing).
    :param state_shape: shape of the zero-initialized recurrent state tensor.
    :param device: torch device the memory tensors are moved to.
    :returns: dict with "observation", "bear" and a "memory" sub-dict holding
        the recurrent state, episode mask (0.) and previous action (0).
    """
    observation, bearing = env.reset()
    return {
        "observation": observation,
        "bear": bearing,
        "memory": {
            "state": torch.zeros(state_shape, dtype=torch.float32).to(device),
            "mask": torch.tensor(0., dtype=torch.float32).to(device),
            "action": torch.tensor(0, dtype=torch.long).to(device),
        },
    }
def worker(worker_id,
           policy,
           storage,
           ready_to_work,
           queue,
           exit_flag,
           task_config_file="config_files/config_example.json"):
    '''
    Worker function to collect experience based on policy and store the
    experience in storage.

    :param worker_id: id used to index this worker's slice in storage
    :param policy: actor-critic network; called as policy(inputs) and expected
        to return (action, log_prob, _, value, new_recurrent_state)
    :param storage: shared rollout buffer (provides block_size, h_buf, device,
        store() and finish_path())
    :param ready_to_work: event used to synchronize workers with training
    :param queue: message queue to send episode rewards to the learner
    :param exit_flag: flag set by the learner to exit the job
    :param task_config_file: the task configuration file
    :return: None
    '''
    print(f"Worker with Id:{worker_id} pid ({os.getpid()}) starts ...")
    steps_per_epoch = storage.block_size
    print('>>>>>> steps per epoch executed by this worker', steps_per_epoch)
    # Recurrent-state width is inferred from the hidden-state buffer.
    state_shape = storage.h_buf.shape[1]
    device = storage.device
    episode_rewards, episode_steps = [], []
    reward_sum, step_sum = 0., 0
    # Wait for start job
    ready_to_work.wait()
    # Inference only: some NN components (e.g. dropout, batch norm) behave
    # differently in train vs eval mode, so switch to eval explicitly.
    policy.eval()
    for env in env_generator('train_valid_'):
        if exit_flag.value != 1:
            inputs = reset(env, state_shape, device)
            for i in range(steps_per_epoch):
                with torch.no_grad():
                    a_t, logp_t, _, v_t, state_t = policy(inputs)
                # interact with environment
                state, reward, done, info = env.step(a_t.item())
                reward_sum += reward # accumulate reward within one rollout.
                step_sum += 1
                r_t = torch.tensor(reward, dtype=torch.float32).to(device)
                # save experience
                storage.store(worker_id,
                              inputs["observation"],
                              inputs["bear"],
                              a_t,
                              r_t,
                              v_t,
                              logp_t,
                              inputs["memory"]["state"],
                              inputs["memory"]["mask"])
                # prepare inputs for next step
                inputs["observation"] = state
                # NOTE(review): step() returns `info` here while reset()
                # returns a separate bearing value — confirm env.step's info
                # carries the same "bear" payload.
                inputs["bear"] = info
                inputs["memory"]["state"] = state_t
                # mask is 0. right after an episode ends, 1. otherwise
                inputs["memory"]["mask"] = torch.tensor((done+1)%2, dtype=torch.float32).to(device)
                inputs["memory"]["action"] = a_t
                # check terminal state
                if done: # calculate the returns and GAE and reset environment
                    storage.finish_path(worker_id, 0)
                    episode_rewards.append(reward_sum)
                    episode_steps.append(step_sum)
                    inputs = reset(env, state_shape, device)
                    reward_sum, step_sum = 0., 0
            # Episode did not reach its end within this epoch: bootstrap the
            # return with the value estimate of the last state.
            if not done:
                _, _, _, last_val, _ = policy(inputs)
                storage.finish_path(worker_id,last_val)
            queue.put((episode_rewards,episode_steps, worker_id))
            episode_rewards, episode_steps = [], []
            # NOTE(review): reward_sum/step_sum are NOT reset here (only on
            # `done`), so an unfinished episode keeps accumulating across
            # epochs — confirm this is intended.
            # Wait for next job
            ready_to_work.clear()
            ready_to_work.wait()
        else:
            env.close()
            break
    print(f"Worker with pid ({os.getpid()}) finished job")
def tester(model, device, n=5, task_config_file="config_files/NavTaskTrain.json"):
    """Evaluate *model* for n episodes and print per-episode/average rewards.

    :param model: policy network; called as model(inputs) and expected to
        return a 5-tuple whose first item is the action and last item the new
        recurrent state.
    :param device: torch device the input tensors are moved to.
    :param n: number of evaluation episodes to run.
    :param task_config_file: environment configuration file.
    """
    episode_reward = []
    # NOTE(review): 128 is passed to reset() as state_shape — assumes the
    # model's recurrent state is a flat 128-vector; confirm against the model.
    rnn_size = 128
    env = RoboThorEnv(config_file=task_config_file)
    for _ in range(n):
        # Wait for trainer to inform next job
        total_r = 0.
        done = False
        inputs = reset(env, rnn_size, device)
        while not done:
            with torch.no_grad():
                a_t, _, _, _, state_t = model(inputs)
            # interact with environment
            state, reward, done, _ = env.step(a_t.data.item())
            total_r += reward # accumulate reward within one rollout.
            # prepare inputs for next step
            # NOTE(review): the observation is scaled by 1/255 here but stored
            # raw in reset()/worker() — confirm which convention the model
            # expects.
            inputs["observation"] = torch.Tensor(state / 255.).to(device)
            inputs["memory"]["state"] = state_t
            inputs["memory"]["mask"] = torch.tensor((done + 1) % 2, dtype=torch.float32).to(device)
            inputs["memory"]["action"] = a_t
        episode_reward.append(total_r)
        print("Episode reward:", total_r)
    env.close()
    print(f"Average eposide reward ({np.mean(episode_reward)})")
|
import numpy as np


def low_rank_approx(img, k):
    """Return the best rank-k approximation of a 2-D array via truncated SVD.

    :param img: 2-D numpy array.
    :param k: number of singular components to keep.
    :returns: array of the same shape as *img*.
    """
    U, S, Vt = np.linalg.svd(img, full_matrices=False)
    # Keep the k LARGEST singular values, i.e. indices 0..k-1. (The previous
    # code sliced 1:k, silently dropping the dominant component.)
    return (U[:, :k] * S[:k]) @ Vt[:k, :]


def main():
    # Imported lazily so importing this module does not require matplotlib.
    import matplotlib.pyplot as plt

    # Use one color channel as a grayscale image.
    img = plt.imread("img/me1.jpg")[:, :, 0]
    plt.gray()

    plt.figure(1)
    plt.imshow(img)
    print("original size:" + str(img.shape[0] * img.shape[1]))

    m, n = img.shape
    k = 10
    plt.figure(2)
    plt.imshow(low_rank_approx(img, k))
    plt.show()

    # Storage for U[:, :k], the k singular values, and Vt[:k, :].
    size = m * k + k + k * n
    print("compress size:" + str(size))


if __name__ == "__main__":
    main()
|
import sys
import logging
import base64
import inspect
import subprocess
def stream():
    """Read lines from stdin and exec() each one until EOF.

    This function's source is shipped to a remote host by main() and run
    there, turning the ssh stdin pipe into a line-oriented Python REPL.

    SECURITY: exec() runs arbitrary code from stdin — only ever expose this
    to a trusted, authenticated peer (here: the ssh channel set up by main()).
    """
    while True:
        data = sys.stdin.readline()
        if not data:
            # EOF: the controlling side closed the pipe.
            break
        exec(data)
def main(target, python):
    """Bootstrap stream() onto *target* over ssh and run one probe command.

    :param target: ssh destination (e.g. ``user@host`` or ``localhost``).
    :param python: name/path of the Python interpreter on the remote host.
    """
    code = inspect.getsource(stream)
    # Decode to str so the payload is interpolated as text. Previously the
    # raw bytes object was interpolated, which produced a b'...' literal in
    # the remote command and only worked by accident of the bytes repr.
    b64c = base64.b64encode(code.encode()).decode('ascii')
    remote_cmd = (
        '%s -c "import os,sys,base64; '
        'exec(base64.b64decode(\'%s\').decode()); stream()"' % (python, b64c))
    cmd = ['ssh', target, remote_cmd]
    ssh = subprocess.Popen(cmd,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE)
    ssh.stdin.write(b'print(os.uname())\nsys.stdout.flush()\n')
    ssh.stdin.flush()
    print(ssh.stdout.readline().decode())
    # Close stdin so the remote stream() loop sees EOF and exits, then reap
    # the child to avoid leaking the ssh process.
    ssh.stdin.close()
    ssh.wait()
if __name__ == "__main__":
    # Only run the demo when executed directly, not when imported.
    main('localhost', 'python')
|
from pymongo import MongoClient
# Connection defaults for the local MongoDB instance.
MONGO_DB_HOST = 'localhost'
MONGO_DB_PORT = '27017'  # kept as a str: interpolated into the "host:port" address below
DB_NAME = 'tap-news'  # default application database
class MongoDBClient:
    """Minimal convenience wrapper around pymongo's MongoClient."""

    def __init__(self, host=MONGO_DB_HOST, port=MONGO_DB_PORT):
        # MongoClient accepts a single "host:port" address string.
        self.client = MongoClient(f"{host}:{port}")

    def get_db(self, db=DB_NAME):
        """Return the database named *db* (defaults to the app database)."""
        return self.client[db]
|
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
class NotificationType(Enum):
    # Machine-readable notification identifiers. The string values are
    # serialized/persisted, so do not change them.
    RELATIONSHIP_CONFIRMATION_NEEDED = "relationship_confirmation_needed"
    RELATIONSHIP_CONFIRMED = "relationship_confirmed"

    class Labels:
        # Human-readable, lazily-translated labels picked up by
        # django-enumfields; names must mirror the enum members above.
        RELATIONSHIP_CONFIRMATION_NEEDED = _(
            "Legal relationship created, confirmation needed"
        )
        RELATIONSHIP_CONFIRMED = _("Legal relationship confirmed")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.