import random
import yaml
from Character import Character
from Equipment import Equipment
def createCharacter(name, str=0, dex=0, con=0):
c = Character()
c.setStats(str, dex, con, name)
characters.append(c)
def createEquipment(cat, name):
    if cat == 1:
        with open('Data/equipment/weapons/' + name + '.yaml', 'r') as stream:
            # the YAML files store full Equipment objects, so the unsafe loader is required
            e = yaml.load(stream, Loader=yaml.UnsafeLoader)
        equipment.append(e)
        e.isEquipped = False
        print(e.name + " Created")
def listCharacters():
    print(characters)
characters = []
equipment = []
createCharacter("Nick")
c = characters[0]
createEquipment(1, "00001")
createEquipment(1, "00003")
c.equip(equipment[0])
c.equip(equipment[1])
c.unequip(c.equipment[1])
|
import dash_bootstrap_components as dbc
from dash import html
buttons = html.Div(
[
dbc.Button("Regular", color="primary", className="me-1"),
dbc.Button("Active", color="primary", active=True, className="me-1"),
dbc.Button("Disabled", color="primary", disabled=True),
]
)
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def reversedLL(self):
prev = None
current = self.head
_next = None
while current:
_next = current.next
current.next = prev
prev = current
current = _next
self.head = prev
def printList(self):
temp = self.head
while temp:
print(temp.data, end=" ")
temp = temp.next
if __name__ == '__main__':
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(85)
print("Given Linked List")
llist.printList()
llist.reversedLL()
print("\nReversed Linked List")
llist.printList()
|
print ("Extração de Raizes")
a= float(input("Qual o valor do quoficiente a?: "))
b= float(input("Qual o valor de quoficiente b?: "))
c= float(input("Qual o valor de quoficiente c?: "))
delta = b**2-4*a*c
if delta > 0:
x = (-b+(delta**0.5))/(2*a)
x2= (-b-(delta**0.5))/(2*a)
if x2<x:
print("as raízes da equação são", x2, "e", x)
    else:
print("as raízes da equação são", x, "e", x2)
elif delta == 0:
x = (-b+(delta**0.5))/(2*a)
print("a raiz desta equação é",x)
else:
print("esta equação não possui raízes reais")
|
"""
///////////////////////////////////////////////////////
│
│ Filename: automate_1_verify_gz.py
│ Description:
│ To verify the integrity of .gz files in directory
│ and print the corrupted ones' file names.
│ ==================================================
│ Authorship: @cgneo
│ Copyright: Modified BSD License.
│ Made with love by https://github.com/cgneo
│
///////////////////////////////////////////////////////
"""
import os
import gzip
from auto_print_progress import printProgressBar
"""
│ (optional):
│ Please choose the directory containing .gz files for database:
│ For example:
│ directory = './hgtdb'
"""
#============================================================
directory = './'
#============================================================
success = 0
failed = 0
all_file = 0
list_of_files = []
for root, subdirectories, files in os.walk(directory):
subdirectories.sort()
for file in files:
name, extension = os.path.splitext(file)
if extension == '.gz':
list_of_files.append(os.path.join(root, file))
print(f'Finished scanning: {len(list_of_files)} .gz files in total.')
for i in range(len(list_of_files)):
    line_count_test = 0
    try:
        with gzip.open(list_of_files[i], 'rt') as fin:
            for line in fin:
                line_count_test += 1
                if line_count_test >= 2: break
        success += 1
    except Exception as e:
        failed += 1
        print(f'failed at {list_of_files[i]}: {e}')
    all_file += 1
    printProgressBar(i+1, len(list_of_files), prefix='Progress:', suffix=list_of_files[i], length=30)
print()
print(f'Success: {success}/{all_file}, Failed: {failed}/{all_file}')
|
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
minimum,maxglobal = prices[0],0
for i in range(1,len(prices)):
minimum = min(minimum,prices[i])
if prices[i]>minimum:
maxglobal = max(maxglobal, prices[i]-minimum)
return maxglobal
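if __name__ == "__main__":
    # Illustrative checks (example prices, not from the original problem input):
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5  # buy at 1, sell at 6
    assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0     # prices only fall, so no profit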
|
from django.contrib import admin
from django.contrib.auth.models import Group, User
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from piston.models import Consumer
from basketauth.admin import ConsumerAdmin
from subscriptions.admin import SubscriptionAdmin
from subscriptions.models import Subscription
class BasketAdmin(admin.sites.AdminSite):
pass
site = BasketAdmin()
site.register(Group, GroupAdmin)
site.register(User, UserAdmin)
site.register(Consumer, ConsumerAdmin)
site.register(Subscription, SubscriptionAdmin)
|
"""
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 Taylor Alexander, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
import serial
ser = serial.Serial('/dev/ttyACM1', 230400, timeout=1) # open serial port
print(ser.name) # check which port was really used
ser.reset_input_buffer()
ser.reset_output_buffer()
while True:
    print(ser.readline())  # read and print one line from the serial port
ser.close()  # close port (not reached while the loop above runs)
|
"""Statistical tests for multidimensional data in :class:`NDVar` objects"""
__test__ = False
from ._stats.testnd import (t_contrast_rel, corr, ttest_1samp, ttest_ind,
ttest_rel, anova)
|
from django.conf.urls import url
from . import views
from product.views import ProductDetailView
app_name = 'product'
urlpatterns = [
    url(r'^$', views.index, name='index'),
url(r'^(?P<product_id>[0-9]+)/$', ProductDetailView.as_view(), name='detail')
]
|
import os
from moviesnotifier import (
MoviesNotifier, SqlLiteMovieRepository, UrlLibHtmlRetriever,
PrintNotificationListener, IFNotifier, NotificationListenerList,
CorsaroneroWebpageFactory, TntvillageWebpageFactory,
MinSeedPolicy, KeywordPolicy
)
def absolutePathFromRelative(relative):
currentDirectory = os.path.dirname(__file__)
return os.path.join(currentDirectory, relative)
################################### BEGIN
db = SqlLiteMovieRepository(absolutePathFromRelative('production.db'))
htmlRetriever = UrlLibHtmlRetriever()
notificationListeners = NotificationListenerList([
IFNotifier(),
PrintNotificationListener()
])
MoviesNotifier(
db, CorsaroneroWebpageFactory(htmlRetriever),
notificationListeners, MinSeedPolicy(400)
).work()
#MoviesNotifier(
# db, TntvillageWebpageFactory(htmlRetriever),
# notificationListeners, MinSeedPolicy(150)
#).work()
|
"""
Functions to read from files
TODO: move the functions that read label from Dataset into here
"""
import numpy as np
def get_calibration_cam_to_image(cab_f):
for line in open(cab_f):
if 'P2:' in line:
cam_to_img = line.strip().split(' ')
cam_to_img = np.asarray([float(number) for number in cam_to_img[1:]])
cam_to_img = np.reshape(cam_to_img, (3, 4))
return cam_to_img
file_not_found(cab_f)
def get_P(cab_f):
    for line in open(cab_f):
        if 'P_rect_02' in line:
            cam_P = line.strip().split(' ')
            cam_P = np.asarray([float(number) for number in cam_P[1:]])
            return_matrix = cam_P.reshape((3, 4))
            return return_matrix
    # fall back to the other calibration file format
    return get_calibration_cam_to_image(cab_f)
def get_R0(cab_f):
for line in open(cab_f):
if 'R0_rect:' in line:
R0 = line.strip().split(' ')
R0 = np.asarray([float(number) for number in R0[1:]])
R0 = np.reshape(R0, (3, 3))
R0_rect = np.zeros([4, 4])
R0_rect[3, 3] = 1
R0_rect[:3, :3] = R0
return R0_rect
def get_tr_to_velo(cab_f):
for line in open(cab_f):
if 'Tr_velo_to_cam:' in line:
Tr = line.strip().split(' ')
Tr = np.asarray([float(number) for number in Tr[1:]])
Tr = np.reshape(Tr, (3, 4))
Tr_to_velo = np.zeros([4, 4])
Tr_to_velo[3, 3] = 1
Tr_to_velo[:3, :4] = Tr
return Tr_to_velo
def file_not_found(filename):
print("\nError! Can't read calibration file, does %s exist?" % filename)
exit()
|
print("New Pytthon File")
|
import numpy as np
import wfdb
from wfdb import processing
class test_processing:
"""
Test processing functions
"""
def test_resample_single(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
fs_target = 50
new_sig, new_ann = processing.resample_singlechan(
sig[:, 0], ann, fs, fs_target
)
expected_length = int(sig.shape[0] * fs_target / fs)
assert new_sig.shape[0] == expected_length
def test_resample_multi(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
fs_target = 50
new_sig, new_ann = processing.resample_multichan(
sig, ann, fs, fs_target
)
expected_length = int(sig.shape[0] * fs_target / fs)
assert new_sig.shape[0] == expected_length
assert new_sig.shape[1] == sig.shape[1]
def test_normalize_bound(self):
sig, _ = wfdb.rdsamp("sample-data/100")
lb = -5
ub = 15
x = processing.normalize_bound(sig[:, 0], lb, ub)
assert x.shape[0] == sig.shape[0]
assert np.min(x) >= lb
assert np.max(x) <= ub
def test_find_peaks(self):
x = [0, 2, 1, 0, -10, -15, -15, -15, 9, 8, 0, 0, 1, 2, 10]
hp, sp = processing.find_peaks(x)
assert np.array_equal(hp, [1, 8])
assert np.array_equal(sp, [6, 10])
def test_find_peaks_empty(self):
x = []
hp, sp = processing.find_peaks(x)
assert hp.shape == (0,)
assert sp.shape == (0,)
def test_gqrs(self):
record = wfdb.rdrecord(
"sample-data/100",
channels=[0],
sampfrom=9998,
sampto=19998,
physical=False,
)
expected_peaks = [
271,
580,
884,
1181,
1469,
1770,
2055,
2339,
2634,
2939,
3255,
3551,
3831,
4120,
4412,
4700,
5000,
5299,
5596,
5889,
6172,
6454,
6744,
7047,
7347,
7646,
7936,
8216,
8503,
8785,
9070,
9377,
9682,
]
peaks = processing.gqrs_detect(
d_sig=record.d_signal[:, 0],
fs=record.fs,
adc_gain=record.adc_gain[0],
adc_zero=record.adc_zero[0],
threshold=1.0,
)
assert np.array_equal(peaks, expected_peaks)
def test_correct_peaks(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
min_bpm = 10
max_bpm = 350
min_gap = fs * 60 / min_bpm
max_gap = fs * 60 / max_bpm
y_idxs = processing.correct_peaks(
sig=sig[:, 0],
peak_inds=ann.sample,
search_radius=int(max_gap),
smooth_window_size=150,
)
yz = np.zeros(sig.shape[0])
yz[y_idxs] = 1
yz = np.where(yz[:10000] == 1)[0]
expected_peaks = [
77,
370,
663,
947,
1231,
1515,
1809,
2045,
2403,
2706,
2998,
3283,
3560,
3863,
4171,
4466,
4765,
5061,
5347,
5634,
5919,
6215,
6527,
6824,
7106,
7393,
7670,
7953,
8246,
8539,
8837,
9142,
9432,
9710,
9998,
]
assert np.array_equal(yz, expected_peaks)
class test_qrs:
"""
Testing QRS detectors
"""
def test_xqrs(self):
"""
Run XQRS detector on record 100 and compare to reference annotations
"""
sig, fields = wfdb.rdsamp("sample-data/100", channels=[0])
ann_ref = wfdb.rdann("sample-data/100", "atr")
xqrs = processing.XQRS(sig=sig[:, 0], fs=fields["fs"])
xqrs.detect()
comparitor = processing.compare_annotations(
ann_ref.sample[1:], xqrs.qrs_inds, int(0.1 * fields["fs"])
)
assert comparitor.sensitivity > 0.99
assert comparitor.positive_predictivity > 0.99
|
from fractions import Fraction
def sum_fracts(lst):
answer = sum(Fraction(*a) for a in lst)
numerator = answer.numerator
denominator = answer.denominator
if numerator == 0:
return None
elif denominator == 1:
return numerator
return [numerator, denominator]
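if __name__ == "__main__":
    # Illustrative checks (example inputs, not from the original source):
    assert sum_fracts([[1, 2], [1, 3], [1, 4]]) == [13, 12]  # 1/2 + 1/3 + 1/4 = 13/12
    assert sum_fracts([[1, 2], [1, 2]]) == 1                 # whole-number sum returns just the numerator
    assert sum_fracts([[1, 2], [-1, 2]]) is None             # zero sum returns None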
|
"""
Task 2
Construction and Reasoning with Inheritance Networks
"""
import sys
import copy
import itertools
"""
Class Sub-Concept
"""
class concept(object):
#Constructor
def __init__(self, name):
self.name = name
self.outgoingEdges = []
def __str__(self):
return self.name
def __repr__(self):
return "{} has outgoing edges: {}".format(self.name, self.outgoingEdges)
class edge(object):
def __init__(self, superConcept, subConcept, polarity):
self.superConcept = concept(superConcept)
self.subConcept = concept(subConcept)
self.polarity = polarity #True = IS-A , False = IS-NOT-A
def __str__(self):
return "Subconcept: {} - Polarity: {} - Superconcept: {}".format(self.subConcept, self.polarity, self.superConcept)
def __repr__(self):
if(self.polarity == True):
link = "IS-A"
else:
link = "IS-NOT-A"
return "{} {} {}".format(self.subConcept, link, self.superConcept)
#return "Subconcept: {} - Polarity: {} - Superconcept: {}".format(self.subConcept, self.polarity, self.superConcept)
class path(object):
def __init__(self, edges):
self.edges = [edges]
def __str__(self):
return ("{}".format(self.edges))
def __repr__(self):
return ("{}".format(self.edges))
#("{}".format(self.edges))
def checkConcepts(p):
max = len(p.edges)
for i in range(0, max-1):
if(p.edges[i].superConcept == p.edges[i+1].subConcept):
print("valid edge pair")
pass
else:
print("invalid pair")
"""
Data Parsing
"""
conceptList = []
edgeList = []
pathList = []
def parseKb(kb):
numofEdges = len(kb)
for i in range(0, numofEdges):
edgeList.append(parseEdge(kb[i]))
checkOutgoingEdges()
def parseEdge(e):
newEdge = edge(None, None, False)
if "IS-A" in e:
newEdge.polarity = True
concepts = e.split(" IS-A ")
elif "IS-NOT-A" in e:
newEdge.polarity = False
concepts = e.split(" IS-NOT-A ")
else:
print("error in Kb")
concept1 = parseConcept(concepts[0])
concept2 = parseConcept(concepts[1])
newEdge.subConcept = concept1
newEdge.superConcept = concept2
return newEdge
def parseConcept(c):
numOfConcepts = len(conceptList)
for i in range(0, numOfConcepts):
if c == conceptList[i].name:
return conceptList[i]
temp = concept(c)
conceptList.append(temp)
return temp
def checkOutgoingEdges():
numOfConcepts = len(conceptList)
numofEdges = len(edgeList)
for i in range(0, numOfConcepts):
for j in range(0, numofEdges):
if conceptList[i].name == edgeList[j].subConcept.name :
conceptList[i].outgoingEdges.append(edgeList[j].superConcept.name)
def findPath(first, last):
pl = []
fpl = []
#Check for possible paths
for e in edgeList:
if(e.subConcept == first):
p = path(e)
pl.append(p)
#For each path found
for p in pl:
#New Path Parse
curConcept = p.edges[-1].superConcept
curPolarity = p.edges[-1].polarity
#Path stops here since we reached the end of the query
if(curConcept == last):
fpl.append(p)
elif(curPolarity == True):
npl = findPath(curConcept, last)
#One edge found
if (len(npl) == 1):
                p.edges.extend(npl[0].edges)
fpl.append(p)
#Multiple Edges Found
elif(len(npl) > 1):
for np in npl:
cfp = copy.deepcopy(p)
cfp.edges.extend(np.edges)
fpl.append(cfp)
return fpl
def findShortestPath(pl):
numberOfPaths = len(pl)
for i in range(numberOfPaths):
for j in range(numberOfPaths-i-1):
if(len(pl[i].edges) > len(pl[j+1].edges)):
pl[i], pl[j+1] = pl[j+1],pl[i]
return pl[0]
"""
Functions needed to return the shortest path according to inferential distance
"""
redundantEdge = []
def checkForRedundant(pe):
for i in range(len(pe.edges)):
sub = pe.edges[i].subConcept
sup = pe.edges[i].superConcept
conc = findPath(sub , sup)
print(conc)
def checkForPreEmpted(pe):
print(pe)
for i in range(len(pe.edges)):
for j in range(len(edgeList)):
print(pe.edges[-1])
print(edgeList[j].superConcept)
if edgeList[j].subConcept == pe.edges[i].subConcept and edgeList[j].superConcept == pe.edges[-1].superConcept and edgeList[j].polarity != pe.edges[i].polarity:
print("Pre-empted edge found")
return True
return False
def inferentialDistance(pl):
for p in pl:
r1 = checkForPreEmpted(p)
r2 = checkForRedundant(p)
if(r1 == True and r2 == True):
print(p , "is admissable")
else:
print(p , "is not admissable")
"""
An attempt at flattenning the list. ie: remove the extra nested lists
"""
#doesnt work
def flattenList(pl):
list(itertools.chain.from_iterable(x.edges for x in pl))
"""
Main
"""
#open read only file (read and write = "w+")
print(f"Name of the script : {sys.argv[0]=}")
print(f"Arguments of the script : {sys.argv[1:]=}")
file = sys.argv[1:]
print(file)
f= open("%s" %file[0],"r")
kb = f.read().splitlines()
print(kb)
parseKb(kb)
print("\nConcepts:")
print(conceptList)
print("\nEdges:")
print(edgeList)
print("\nPaths:")
print(pathList)
q = input("Enter Query: ")
query = parseEdge(q)
pathList = findPath(query.subConcept, query.superConcept)
print("\nPaths:")
for p in pathList:
print(p)
"""
print("Flattened List")
fpl = flattenList(pathList)
print(fpl)
"""
print("\nShortest Path:")
sp = findShortestPath(pathList)
print(sp)
"""
print("\nInferential Distance:")
id = inferentialDistance(pathList)
print(id)
"""
|
dadosAluno = dict()
dadosAluno['nome'] = str(input('Digite o nome: '))
dadosAluno['média'] = float(input(f'Digite a média do Aluno {dadosAluno["nome"]}: '))
if dadosAluno['média'] >= 7:
dadosAluno['situação'] = 'Aprovado'
elif 5 <= dadosAluno['média'] < 7:
dadosAluno['situação'] = 'Recuperação'
else:
    dadosAluno['situação'] = 'Reprovado'
for k, v in dadosAluno.items():
print(f'O {k} é igual a {v}')
|
import pyglet
import resource, player, i_sprite
win = pyglet.window.Window(fullscreen=True)
# Our batch for holding all objects to be drawn.
b_obj = pyglet.graphics.Batch()
# Write info strings to the bottom of the screen.
help_msg = "Controls: WASD"
help_lbl = pyglet.text.Label(text=help_msg, x=10, y=30, batch=b_obj)
demi_msg = "demigame prototype. Not ready for official release."
demi_lbl = pyglet.text.Label(text=demi_msg, x=10, y=10, batch=b_obj)
# Create sprites / players.
demigod = player.Player(lives=99, name="Demi God", walk_speed=300, mass=300,
player_img=resource.demigod_img, x=10, y=50, batch=b_obj)
guard = i_sprite.ISprite(img=resource.ponyo_img, x=700, y=50, batch=b_obj)
guard.add_duty(i_sprite.ISprite.waddle)
# Account for all of our interactive game objects.
g_objs = [demigod, guard]
# Handle handlers.
for obj in g_objs:
for handler in obj.event_handlers:
win.push_handlers(handler)
def init():
pass
def update(dt):
for obj in g_objs:
obj.update(dt)
@win.event
def on_draw():
win.clear()
b_obj.draw()
if __name__ == "__main__":
init()
pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run()
|
from time import sleep
from tqdm import tqdm
for i in tqdm(range(1,500)):
sleep(0.01)
|
import sys
from common import *
import adcdac # ADC, DAC
import bunch_select # BUN
import ddr # DDR
import detector # DET, BUF
import fir # FIR
import sensors # SE
import sequencer # SEQ
import triggers # TRG
import tune # TUNE
import tune_peaks # PEAK
import tune_follow # FTUN
stringIn('VERSION', PINI = 'YES', DESC = 'TMBF version')
longIn('FPGAVER', PINI = 'YES', DESC = 'FPGA version')
Action('RESTART', DESC = 'Restart EPICS driver')
Action('REBOOT', DESC = 'Reboot IOC')
records.longin('BUNCHES', VAL = BUNCHES_PER_TURN, PINI = 'YES',
DESC = 'Bunches per machine revolution')
WriteRecords(sys.argv[1], Disclaimer(__file__))
|
from .models import (
Architecture,
GadgetSnap,
Release,
ScreenshotURL,
)
from django.contrib import admin
@admin.register(Architecture)
class ArchitectureAdmin(admin.ModelAdmin):
pass
@admin.register(GadgetSnap)
class GadgetSnapAdmin(admin.ModelAdmin):
pass
@admin.register(Release)
class ReleaseAdmin(admin.ModelAdmin):
pass
@admin.register(ScreenshotURL)
class ScreenshotURLAdmin(admin.ModelAdmin):
pass
|
#!/usr/bin/python3
from alpha_vantage.timeseries import TimeSeries
from datetime import datetime, date, timedelta
from pytz import timezone
import subprocess
from time import sleep
import sys
THRESHOLD = 0.5 # In percentage
TIMEZONE = timezone('Europe/Madrid')
# Only execute on weekdays between 9:00 and 18:00
now_utc = datetime.now(timezone('UTC'))
if now_utc.astimezone(TIMEZONE).hour not in range(9, 18) or \
now_utc.astimezone(TIMEZONE).weekday() not in range(0, 5):
exit(1)
# Configure Alpha Vantage library
with open('token.alpha_vantage', 'r') as myfile:
TOKEN = myfile.read().replace('\n', '')
ts = TimeSeries(key=TOKEN, output_format='pandas', indexing_type='date')
for stock_index in sys.argv[1:]:
# Read data from Alpha Vantage
# NOTE! Retrying, because sometimes it fails
retry = 0
while retry < 5:
try:
stock = meta_data = None
print("TRY #%d - Reading symbol '%s' from Alpha Vantage" % (retry, stock_index))
stock, meta_data = ts.get_quote_endpoint(stock_index)
        except Exception as e:
            print("FAIL #%d - Error reading from Alpha Vantage: %s" % (retry, e))
retry += 1
sleep(3)
continue
break
if retry == 5:
msg = "Unable to read data for symbol '%s' from Alpha Vantage, aborting" % (stock_index)
print(msg)
subprocess.call("telegram-send --format markdown -- '%s'" % (msg), shell=True)
continue
# Check if change is over a threshold compared to previous day
curr_val = float(stock['05. price']['Global Quote'])
prev_val = float(stock['08. previous close']['Global Quote'])
delta = curr_val - prev_val
perc_var = 100.0 * delta / prev_val
if abs(perc_var) >= THRESHOLD:
# Send message
message = "!!! (%s) C: %.4f (%+.4f / %+.2f%%)" % (stock_index, curr_val, delta, perc_var)
subprocess.call("telegram-send -- '%s'" % (message), shell=True)
|
a,b=4,3
if True:
    print('{0}'.format(a or b))  # `or` returns the first truthy operand; in C, a||b is 1 when either value is nonzero
    print('{0}'.format(a and b)) # `and` returns 0 if either operand is 0, otherwise the last operand; in C, a&&b is 1 when both are nonzero
    print('{0}'.format(a | b))   # bitwise OR; in C as well, a|b is 7
    print('{0}'.format(a & b))   # bitwise AND; 4 & 3 is 0
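# With a=4 and b=3 this prints: 4, 3, 7, 0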
|
from __future__ import division
import datetime
from math import ceil
import six
from flask_potion.exceptions import ItemNotFound
from flask_potion import fields
class Manager(object):
"""
.. attribute:: supported_comparators
A tuple of names filter comparators supported by this manager.
:param flask_potion.resource.Resource resource: resource class
:param model: model read from ``Meta.model`` or ``None``
"""
supported_comparators = ()
def __init__(self, resource, model):
self.resource = resource
self.model = model
self._init_key_converters(resource, resource.meta)
def _init_key_converters(self, resource, meta):
if 'natural_key' in meta:
from flask_potion.natural_keys import PropertyKey, PropertiesKey
if isinstance(meta.natural_key, str):
meta['key_converters'] += (PropertyKey(meta.natural_key),)
elif isinstance(meta.natural_key, (list, tuple)):
meta['key_converters'] += (PropertiesKey(*meta.natural_key),)
if 'key_converters' in meta:
meta.key_converters = [k.bind(resource) for k in meta['key_converters']]
meta.key_converters_by_type = {}
for nk in meta.key_converters:
if nk.matcher_type() in meta.key_converters_by_type:
raise RuntimeError(
'Multiple keys of type {} defined for {}'.format(nk.matcher_type(), meta.name))
meta.key_converters_by_type[nk.matcher_type()] = nk
@staticmethod
def _get_field_from_python_type(python_type):
try:
return {
str: fields.String,
six.text_type: fields.String,
int: fields.Integer,
float: fields.Number,
bool: fields.Boolean,
list: fields.Array,
dict: fields.Object,
datetime.date: fields.Date,
datetime.datetime: fields.DateTime
}[python_type]
except KeyError:
raise RuntimeError('No appropriate field class for "{}" type found'.format(python_type))
def is_sortable_field(self, field):
return isinstance(field, (fields.String,
fields.Boolean,
fields.Number,
fields.Integer,
fields.Date,
fields.DateTime))
def get_field_comparators(self, field):
pass
def relation_instances(self, item, attribute, target_resource, page=None, per_page=None):
"""
:param item:
:param attribute:
:param target_resource:
:param page:
:param per_page:
:return:
"""
raise NotImplementedError()
def relation_add(self, item, attribute, target_resource, target_item):
"""
:param item:
:param attribute:
:param target_resource:
:param target_item:
:return:
"""
raise NotImplementedError()
def relation_remove(self, item, attribute, target_resource, target_item):
"""
:param item:
:param attribute:
:param target_resource:
:param target_item:
:return:
"""
raise NotImplementedError()
def paginated_instances(self, page, per_page, where=None, sort=None):
"""
:param page:
:param per_page:
:param where:
:param sort:
:return: a :class:`Pagination` object or similar
"""
pass
def instances(self, where=None, sort=None):
"""
:param where:
:param sort:
:return:
"""
pass
def first(self, where=None, sort=None):
"""
:param where:
:param sort:
:return:
:raises exceptions.ItemNotFound:
"""
try:
return self.instances(where, sort)[0]
except IndexError:
raise ItemNotFound(self.resource, where=where)
def create(self, properties, commit=True):
"""
:param properties:
:param commit:
:return:
"""
pass
def read(self, id):
"""
:param id:
:return:
"""
pass
def update(self, item, changes, commit=True):
"""
:param item:
:param changes:
:param commit:
:return:
"""
pass
def delete(self, item):
"""
:param item:
:return:
"""
pass
def delete_by_id(self, id):
"""
:param id:
:return:
"""
return self.delete(self.read(id))
def commit(self):
pass
def begin(self):
pass
class Pagination(object):
"""
A pagination class for list-like instances.
:param items:
:param page:
:param per_page:
:param total:
"""
def __init__(self, items, page, per_page, total):
self.items = items
self.page = page
self.per_page = per_page
self.total = total
@property
def pages(self):
return max(1, int(ceil(self.total / self.per_page)))
@property
def has_prev(self):
return self.page > 1
@property
def has_next(self):
return self.page < self.pages
@classmethod
def from_list(cls, items, page, per_page):
start = per_page * (page - 1)
return Pagination(items[start:start + per_page], page, per_page, len(items))
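if __name__ == "__main__":
    # Minimal sanity check of Pagination.from_list (illustrative values only, not part of the library).
    pagination = Pagination.from_list(list(range(10)), page=2, per_page=3)
    assert pagination.items == [3, 4, 5]
    assert pagination.pages == 4
    assert pagination.has_prev and pagination.has_next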
|
from qiskit import Aer, ClassicalRegister, execute, QuantumCircuit, QuantumRegister
q = QuantumRegister(4) # initialize 4 quantum registers (qubits)
c = ClassicalRegister(4) # initialize 4 classical registers to measure the 4 qubits
qc = QuantumCircuit(q, c) # initialize the circuit
backend = Aer.get_backend('qasm_simulator') # modify this line to run this code on a real quantum computer
print("Quantum RPG Character Generator (Partial)") # this is just a proof-of-concept partial generator
attributes = ("STR", "DEX", "INT", "WIS", "CON", "CHA") # someone played Dungeons & Dragons as a kid... might've been me....
i = 0
while i < 4:
qc.h(q[i]) # put all 4 qubits into superposition states so that each will measure as a 0 or 1 completely at random
i = i + 1
for i in range(6):
qc.measure(q, c) # collapse the superpositions and get 4 random digits
m = str(execute(qc, backend, shots=1, memory=True).result().get_memory()) # store the 4 digits in a variable so they can be manipulated
diceroll = str((int(m[2])*8) + (int(m[3])*4) + (int(m[4])*2) + (int(m[5])*1) + 3) # use the digits as a binary of length 4 and convert it to decimal with a range of 0-15; simulate a 3d6 dice roll by adding 3, giving a range of 3-18
print(attributes[i] + ": " + diceroll)
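# Worked example of the conversion above (illustrative bits, not an actual run):
# a memory string like "['1010']" gives m[2..5] = '1', '0', '1', '0',
# so the value is 1*8 + 0*4 + 1*2 + 0*1 = 10, and adding 3 yields an attribute score of 13.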
|
import mei
import time
import yaml
import scan
import skill
import julius_test
def say_something():
    text = julius_test.listen()  # assumes listen() returns the recognized phrase
    if text == 'ラーメンタイマー' or text == '砂時計':  # "ramen timer" or "hourglass"
        skill.ramen()
    elif text == '癒やして':  # "comfort me"
        skill.care()
    elif text == '計算' or text == '電卓':  # "calculation" or "calculator"
        skill.calculation()
    else:
        scan.scanandsay(text)
if __name__ == '__main__':
    while True:
        say_something()
|
import requests
headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"}
url = "http://127.0.0.1:80/"
password_list = open("wordlist.txt", "r")
for line in password_list:
password = line.strip()
users = ["Admin", "admin"]
for user in users:
        r = requests.post(url, data={"input1": user, "input2": password, "sub": "Submit+Query"})
        content = r.text
        if "Parola nu" in content:
            print("[ + ] Invalid: " + user, password)
        else:
            print("[ + ] Valid: " + user, password)
            break
|
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
from direct.directnotify import DirectNotifyGlobal
class SpeedchatRelayUD(DistributedObjectGlobalUD):
notify = DirectNotifyGlobal.directNotify.newCategory('SpeedchatRelayUD')
def __init__(self, air):
DistributedObjectGlobalUD.__init__(self, air)
|
def count_chars():
    # counts the characters in the input manually (avoids shadowing the built-in len)
    y = input("Enter: ")
    count = 0
    for x in y:
        count = count + 1
    print(count)
    return count
a = count_chars()
print(a)
|
import sys
import numpy as np
from scipy.special import erf
sys.path.append(".")
from Random import Gaussian
# main function for our coin toss Python code
if __name__ == "__main__":
# default number of samples (per experiment)
N = 10
a = -1
b = 1
if '-h' in sys.argv or '--help' in sys.argv:
print ("Usage: %s [options]" % sys.argv[0])
print (" options:")
print (" --help(-h) print options")
print (" -N number of intervals for integration")
print (" -a lower bound of integration")
print (" -b upper bound of integration")
        print()
sys.exit(1)
if '-N' in sys.argv:
p = sys.argv.index('-N')
Ns = int(sys.argv[p+1])
if Ns > 1:
N = Ns
if '-a' in sys.argv:
p = sys.argv.index('-a')
a0 = int(sys.argv[p+1])
if a0 > -100:
a = a0
if '-b' in sys.argv:
p = sys.argv.index('-b')
b0 = int(sys.argv[p+1])
if b0 > a:
b = b0
I_0 = (erf(b*np.sqrt(2)/2)-erf(a*np.sqrt(2)/2))/2
G = Gaussian()
def f(x):
return G.Gaussian_pdf(x)
def trap(N):
h = (b-a) / float(N)
I = 0.5 * h * (f(a) + f(b))
for i in range(1, int(N)):
I += h * f(i * h+a)
return I
I_T = trap(N)
x,w = np.polynomial.legendre.leggauss(N)
x = (x+1)*(b-a)/2+a
w *= (b-a)/2
I_G = np.dot(f(x),w)
print('With '+str(N)+' evaluation of the function' )
print('True value of the integral is '+str(I_0) )
print('Gauss–Legendre quadrature method value is '+str(I_G) )
print('Trapezoidal integration value is '+str(I_T) )
print('Gauss–Legendre quadrature method - trapezoidal integration = '+str(I_G-I_T))
print('Gauss–Legendre quadrature method error = '+str(I_0 - I_G))
print('Trapezoidal integration error = '+str(I_0 - I_T))
|
'''
Monitors file usage and stores last opened files
Possible to search recently used files
Check for existing index file, if none exist, create one, else open it and store in memory
Create event watching for launch file events, run in separate thread
When event found, write name:path of file to index dict, then write to disk file
search function, trying to match input to name in index dict, triggered by tab for auto-completion
return name:path matching input pattern
run function, takes path and starts whatever it is
main()
while loop with input waiting for user, tab for autocompletion if possible
'''
"""
Gets values from the registry of recently used programs and stores them in a dict with name:path
Allows the user to enter a string that is matched against a name and starts the matching program
The code still contains leftovers from earlier ideas that may or may not run
"""
import wmi
import json
import os
import threading
import pythoncom
import winreg
import subprocess
import win32com.client
def watch(file_path):
"""
Watches for creation of new processes, stores in index dict and prints index
:return:
"""
pythoncom.CoInitialize()
index = {}
c = wmi.WMI()
watcher = c.watch_for(
notification_type="Creation",
wmi_class="Win32_Process",
delay_secs=1
)
while True:
stuff = watcher()
os_procs = ('backgroundTaskHost.exe', 'svchost.exe', 'conhost.exe', 'SearchFilterHost.exe', 'python.exe', 'smartscreen.exe', 'SearchProtocolHost.exe')
# check that it's not a standard OS process
if stuff.__getattr__('Name') not in os_procs:
index[stuff.__getattr__('Name')] = stuff.__getattr__('ExecutablePath')
print(index)
write_file(file_path, index)
else:
print('Standard OS task found and skipped')
pass
# print(stuff.__getattr__('Name'))
# print(stuff.__getattr__('ExecutablePath'))
def get_lnk_path(lnk_path):
"""
Gets the full path that a lnk file points to
:param lnk_path:
:return: string with full path
"""
try:
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(lnk_path)
return shortcut.Targetpath
except Exception as e:
print(e)
return None
def get_folder_content(folder_path):
"""
    Gets all files recursively under the given folder path
:param folder_path:
:return: list with full path of each file
"""
paths = []
try:
for root, dirs, files in os.walk(folder_path):
for name in files:
paths.append(root + '\\' + name)
#print(paths)
return paths
except Exception as e:
print(e)
return None
def write_file(path, data):
"""
Writes data as json to file
:param path:
:param data:
:return:
"""
try:
with open(path, 'w') as handle:
json.dump(data, handle)
except Exception as e:
print(e)
def read_file(path):
"""
Reads json file and returns content
:param path:
:return: content of read json file
"""
if os.path.isfile(path):
try:
with open(path, 'r') as handle:
return json.load(handle)
except Exception as e:
print(e)
def search(text, index):
"""
Searches content in index for matching string to text
:param text:
:param index:
:return:
"""
for key, val in index.items():
if text.upper() in key.upper():
# print('Found match!')
res = (key, val)
return res
#if text.upper() in index.keys:
# print('Found match!')
def reg_keys(key_path, hive):
"""
Reads value of all keys under specified key path
:param key_path:
:param hive:
:return: nested list with with key values
"""
result = []
# set reg hive
if hive == 'HKEY_CURRENT_USER':
reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
elif hive == 'HKEY_LOCAL_MACHINE':
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
elif hive == 'HKEY_CURRENT_CONFIG':
reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_CONFIG)
elif hive == 'HKEY_CLASSES_ROOT':
reg = winreg.ConnectRegistry(None, winreg.HKEY_CLASSES_ROOT)
elif hive == 'HKEY_USERS':
reg = winreg.ConnectRegistry(None, winreg.HKEY_USERS)
else:
return None
# open key from path
key = winreg.OpenKey(reg, key_path)
# get amount of sub keys
count = winreg.QueryInfoKey(key)
# iterate over keys
for i in range(count[1]):
# get value of key
res = winreg.EnumValue(key, i)
result.append(res)
# print(res)
return result
def filter_reg(lists):
"""
filters the nested list from registry function
into a dict with
key = name of app
value = path to app
if app has an .exe/.EXE file ending
:param lists:
:return: dict with name:path
"""
res = {}
for a in lists:
# try if .exe matches
try:
# get index of match .exe
ind = a[0].index('.exe')
# if not matches, try .EXE
except ValueError:
try:
# get index of match .exe
ind = a[0].index('.EXE')
            # it's probably a .dll, so set ind to None
            except:
                ind = None
        # add to the result only when a match index was found
        if ind is not None:
# split string from beginning to index + 4 to include .exe
res[a[1]] = (a[0][:(ind + 4)])
return res
def start_exe(path):
"""
opens given exe file as a subprocess
:param path: path to exe file
:return:
"""
try:
subprocess.Popen(path)
except Exception as e:
print(e)
def main():
path = 'data.json'
key_path = r"Software\Classes\Local Settings\Software\Microsoft\Windows\Shell\MuiCache"
key_hive = 'HKEY_CURRENT_USER'
'''
key_values = []
for key in key_path:
key_values.append(filter_reg(reg_keys(key, key_hive)))
'''
key_values = filter_reg(reg_keys(key_path, key_hive))
# get userprofile path from env variable
userprofile = os.environ['USERPROFILE']
recent_files = []
startmenu_files = []
# get content from recents folder
print('getting recent')
recent_content = get_folder_content(userprofile + r"\AppData\Roaming\Microsoft\Windows\Recent")
# get content from startmenu folder
print('getting startmenu')
startmenu = get_folder_content(userprofile + r"\AppData\Roaming\Microsoft\Windows\Start Menu\Programs")
for file in startmenu:
print('processing startmenu')
if '.lnk' in file:
startmenu_files.append(get_lnk_path(file))
# make sure only last 10 files are processed
counter = 0
for file in recent_content:
print(counter)
counter = counter + 1
if counter > 10:
break
if '.lnk' in file:
recent_files.append(get_lnk_path(file))
#reg1 = filter_reg(reg_keys(key_path, key_hive))
write_file(path, key_values)
print(key_values)
while True:
'''
# index = read_file(path)
# print(type(index))
# print(index)
# in_text = input('Start (tab for autocomplete): ')
if index:
search(in_text, index)
else:
print('Index not ready, no results found!')
'''
in_text = input('Start (tab for autocomplete): ')
        if in_text:
            exe_path = search(in_text, key_values)
            in_text = None
            if exe_path:
                print('Starting {}'.format(exe_path[0]))
                start_exe(exe_path[1])
            else:
                print('No matching program found!')
#processThread = threading.Thread(target=watch, args=('data.json',)) # <- note extra ','
#processThread.start()
# t1 = FuncThread(watch, 'data.json')
# t1.start()
if __name__ == '__main__':
main()
# watch()
'''
c = wmi.WMI ()
#filename = r"c:\temp\temp.txt"
#process = c.Win32_Process
#process_id, result = process.Create (CommandLine="notepad.exe " + filename)
watcher = c.watch_for (
notification_type="Creation",
wmi_class="Win32_Process",
delay_secs=1
)
stuff = watcher()
print(stuff.__getattr__('Name'))
print(stuff.__getattr__('ExecutablePath'))
'''
|
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Optional, Union
import common.input_data as input_data
@dataclass
class MaskInstruction:
value: str
@dataclass
class MemorySetInstruction:
address: int
value: int
Instruction = Union[MaskInstruction, MemorySetInstruction]
def to_instruction(data: str) -> Instruction:
if data.startswith("mask"):
return MaskInstruction(data.split(" ")[-1])
address,value = re.split(r"(?:mem\[|\] = )", data)[1:]
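    # e.g. re.split(r"(?:mem\[|\] = )", "mem[8] = 11") -> ['', '8', '11'],
    # so the [1:] slice yields the address and the value as strings.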
return MemorySetInstruction(int(address), int(value))
def get_sum_of_memory(instructions: list[Instruction]) -> int:
mask_value = "X"*36
memory = defaultdict(lambda: "0"*36)
for instruction in instructions:
if isinstance(instruction, MaskInstruction):
mask_value = instruction.value
else:
bitstring = bin(instruction.value)[2:].zfill(36)
bits = [(bit if mask == 'X' else mask) for bit, mask in zip(bitstring, mask_value)]
memory[instruction.address] = "".join(bits)
return sum(int(bits, 2) for bits in memory.values())
def build_address_masks(mask: list[str], accum: Optional[list[str]] = None) -> list[str]:
if accum is None:
accum = []
if mask == []:
return accum
letter, *mask = mask
if letter == "X":
if not accum:
accum = ["0", "1"]
else:
accum = [m + "0" for m in accum] + [m + "1" for m in accum]
else:
if not accum:
accum = [letter]
else:
accum = [m + letter for m in accum]
return build_address_masks(mask, accum)
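# Illustrative expansion (hypothetical 4-bit mask, not from the puzzle input):
# build_address_masks(list("1X0X")) -> ['1000', '1100', '1001', '1101'],
# i.e. every address obtained by substituting 0/1 for each floating 'X' bit.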
def get_sum_of_memory_v2(instructions: list[Instruction]) -> int:
mask_value = "X" * 36
memory: dict[str, int] = defaultdict(lambda:0)
for instruction in instructions:
if isinstance(instruction, MaskInstruction):
mask_value = instruction.value
else:
bitstring = bin(instruction.address)[2:].zfill(36)
bits = [(bit if mask == '0' else mask) for bit, mask in zip(bitstring, mask_value)]
mask_values = build_address_masks(bits)
for address in mask_values:
memory[address] = instruction.value
return sum(memory.values())
INSTRUCTIONS = input_data.read("input/input14.txt", to_instruction)
if __name__ == "__main__":
print(f"Sum of memory: {get_sum_of_memory(INSTRUCTIONS)}")
print(f"Sum of memory v2: {get_sum_of_memory_v2(INSTRUCTIONS)}")
|
# Generated by Django 2.1.3 on 2019-03-28 03:57
import datetime
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Accion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('id_ws', models.IntegerField()),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
('descripcion', tinymce.models.HTMLField()),
('monto', models.IntegerField()),
('latitud', models.DecimalField(decimal_places=3, max_digits=8)),
('longitud', models.DecimalField(decimal_places=3, max_digits=8)),
('borrado', models.BooleanField(default=True)),
('publicado', models.BooleanField(default=True)),
('fecha_creacion', models.DateField(default=datetime.date.today)),
],
),
migrations.CreateModel(
name='Departamento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.CreateModel(
name='Estado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.CreateModel(
name='Financiacion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.CreateModel(
name='localidad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.CreateModel(
name='Municipio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.CreateModel(
name='Organismo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre')),
],
),
migrations.AddField(
model_name='accion',
name='departamento',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Departamento'),
),
migrations.AddField(
model_name='accion',
name='estado',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Estado'),
),
migrations.AddField(
model_name='accion',
name='localidad',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.localidad'),
),
migrations.AddField(
model_name='accion',
name='municipio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Municipio'),
),
migrations.AddField(
model_name='accion',
name='organismo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Organismo'),
),
migrations.AddField(
model_name='accion',
name='tipo_financiacion',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Financiacion'),
),
]
|
import unittest
import os
import sys
lib_path = os.path.abspath('../src/')
sys.path.append(lib_path)
try:
from Comodo.SOM import * # @UnusedWildImport
except ImportError:
from trunk.Comodo.SOM import * # @UnusedWildImport
class TesteSOM(unittest.TestCase):
def setUp(self):
self.som1 = SOM()
def tearDown(self):
self.som1 = None
def testPower(self):
self.assertFalse(self.som1.getEstado(), "O estado deve ser False")
self.som1.ligar()
self.assertTrue(self.som1.getEstado(), "O estado deve ser True")
self.som1.desligar()
self.assertFalse(self.som1.getEstado(), "O estado deve ser False")
def testVolume(self):
self.assertEqual(self.som1.getVolume(), 0, "O volume deve iniciar em 0")
self.som1.upVolume()
self.assertEqual(self.som1.getVolume(), 1, "O volume deve ser 1")
for _ in range(10):
self.som1.upVolume()
self.assertEqual(self.som1.getVolume(), 11, "O volume deve ser 11")
# down volume
self.som1.downVolume()
self.assertEqual(self.som1.getVolume(), 10, "O volume deve ser 10")
for _ in range(5):
self.som1.downVolume()
self.assertEqual(self.som1.getVolume(), 5, "O volume deve ser 5")
def testCanal(self):
self.assertEqual(0, self.som1.getCanal(), "O canal inicial deve ser 0")
self.som1.upCanal()
self.assertEqual(1, self.som1.getCanal(), "O canal deve ser 1")
for _ in range(10):
self.som1.upCanal()
self.assertEqual(11, self.som1.getCanal(), "O canal deve ser 11")
# down canal
self.som1.downCanal()
self.assertEqual(10, self.som1.getCanal(), "O canal deve ser 10")
for _ in range(5):
self.som1.downCanal()
self.assertEqual(5, self.som1.getCanal(), "O canal deve ser 5")
|
import glob
import os
import pathlib
import re
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
# The QGIS classes below come from the QGIS Python environment; this script is
# intended to be run from the QGIS Python console, where `iface` is already defined.
from qgis.core import QgsRasterLayer, QgsFeature, QgsGeometry, QgsWkbTypes
# include the path here to a folder full of rasters that QGIS can load
path_to_rasters = "."
# folder for output sections as PNGs
path_for_outputs = "."
def parse_raster_layer_depths(rfilename):
'''Parse raster layer depths from filename.
Args:
rfilename (str): the filename of the raster.
Returns: a numpy array [top, bottom] of the depths that the
raster layer represents.
e.g. the file named Con006_doi_gm_013.2-017m.ers
represents the conductivity at depths between 13.2 and 17 m,
so this function returns np.array([13.2, 17.0]).
'''
m = re.search(r"gm_(\d\d\d\.\d)-(\d\d\d\.\d)m", rfilename)
assert m
return np.array([float(m.group(1)), float(m.group(2))])
def get_relevant_depth_slice(depth, raster_layers):
for rlayer in raster_layers:
if rlayer["top"] <= depth and rlayer["bottom"] >= depth:
return rlayer
def imrot(arr):
return np.flipud(np.rot90(arr))
def run(
path,
output_path,
dl,
dz,
bottom=None,
name_field="Id",
figsize=None,
cmaps=("magma"),
):
'''Run the script.
Args:
path (str): path to the raster files
output_path (str): where to store cross section pngs
dl (float): section x-axis spacing (arbitrary, smaller is slower)
dz (float): section z-axis spacing (arbitrary, smaller is slower)
name_field (str): name of an attribute from the vector polyline layer
which define the section line. defaults to the polyline "Id".
figsize (tuple): for matplotlib.pyplot.figure()
cmaps (list): for matplotlib.imshow - you can generate plots for
multiple colormaps, or just the one.
This function does:
1. reads in all rasters from path
2. parses a top and bottom z value for each raster
3. iterates over all selected polylines in the QGIS map canvas - these
are the section lines
4. extracts values from the rasters along each section line
5. plots a section for each section line.
'''
rlayers = []
for rfilename in glob.glob(os.path.join(path, "*.ers")):
name = os.path.split(rfilename)[1]
try:
depths = parse_raster_layer_depths(rfilename)
except AssertionError:
pass
else:
print("loading {}...".format(name), end="")
rlayer = {
"layer": QgsRasterLayer(rfilename),
"filename": rfilename,
"name": name,
"top": depths[0],
"bottom": depths[1],
}
print(" {} to {} m".format(rlayer["top"], rlayer["bottom"]))
rlayers.append(rlayer)
get_rlayer = lambda z: get_relevant_depth_slice(z, rlayers)
tops = np.array([r["top"] for r in rlayers])
bottoms = np.array([r["bottom"] for r in rlayers])
if bottom is None:
bottom = max([r["bottom"] for r in rlayers])
slines = []
for sline_feature in iface.activeLayer().selectedFeatures():
geom = sline_feature.geometry()
if geom.type() == QgsWkbTypes.LineGeometry:
lines = geom.asMultiPolyline()
for p0, p1 in lines:
line = p1 - p0
if np.abs(line.x()) > np.abs(line.y()):
sline = {"sect-x-func": lambda pt: pt.x(), "sect-x-name": "Easting"}
else:
sline = {
"sect-x-func": lambda pt: pt.y(),
"sect-x-name": "Northing",
}
sline.update(
{
"attrs": dict(
zip(
sline_feature.fields().names(),
sline_feature.attributes(),
)
),
"length": line.length(),
"feature": sline_feature,
"vector": line,
"origin": p0,
"end": p1,
}
)
print("found section_line: {}".format(sline))
slines.append(sline)
profiles = []
for sline in slines:
nl = int(np.ceil(sline["length"] / dl))
nz = int(np.ceil(bottom / dz))
arr = np.zeros((nl, nz))
profile = {"sect": sline, "name": sline["attrs"][name_field]}
print("extracting section {}={}...".format(name_field, profile["name"]), end="")
point_features = []
for il in range(nl):
l = il * dl
v = sline["vector"].normalized() * l
p = sline["origin"] + v
point = QgsFeature()
point.setGeometry(QgsGeometry.fromPointXY(p))
point_features.append(point)
for iz in range(nz):
z = iz * dz
rlayer = get_rlayer(z)
if rlayer:
val, res = rlayer["layer"].dataProvider().sample(p, 1)
val = float(val)
else:
val = np.nan
arr[il, iz] = val
profile.update({"data": arr})
profiles.append(profile)
print(" done")
# Plot the arr points on the active layer (temporary/scratch QGIS layer)
# Totally optional
# (res, outFeats) = iface.activeLayer().dataProvider().addFeatures(point_features)
# Set colour scale from data
vmin = min([np.nanmin(pr["data"]) for pr in profiles])
vmax = max([np.nanmax(pr["data"]) for pr in profiles])
# or manually
vmin = 10 ** 0
vmax = 10 ** 3
for cmap in cmaps:
for pr in profiles:
fig = plt.figure(figsize=(7, 4))
ax = fig.add_subplot(111)
dfunc = pr["sect"]["sect-x-func"]
left = dfunc(pr["sect"]["origin"])
right = dfunc(pr["sect"]["end"])
extent = [left, right, bottom, 0]
            im = ax.imshow(
                imrot(pr["data"]),
                aspect="auto",
                extent=extent,
                norm=colors.LogNorm(vmin=vmin, vmax=vmax),
                cmap=cmap,
                interpolation="nearest",
            )
cbar = plt.colorbar(im)
cbar.set_label("Conductivity (mS/m)")
x0, x1 = ax.get_xlim()
ax.set_xlim(min((x0, x1)), max((x0, x1)))
ax.set_title("{}: {}".format(name_field, pr["name"]))
ax.set_xlabel(pr["sect"]["sect-x-name"])
ax.set_ylabel("Depth (m)")
fig.tight_layout()
fig.savefig(
str(
pathlib.Path(output_path)
/ "vertsec_{}_{}_{}.png".format(cmap, name_field, pr["name"])
),
dpi=120,
)
run(
path_to_rasters,
path_for_outputs,
dl=50,
dz=5,
bottom=150,
name_field="x-sect-id",
cmaps=("magma", ),
)
# optionally...
plt.show()
# QGIS keeps figures open. Use this to clear it up:
# plt.close("all")
|
from .account_saver import RedisClient
class CookieGenator(object):
def __init__(self,website):
self.website = website
self.accounts = RedisClient('accounts', website)
self.cookies = RedisClient('cookies', website)
self.init_browser()
    def init_browser(self):
        # placeholder: subclasses are expected to set up a browser instance here
        pass
    def close(self):
        # placeholder: release the browser and any other resources once created
        pass
    def __del__(self):
        self.close()
|
from flask import Flask
from flask_restful import Api
from models import Main, sendMail
app = Flask(__name__)
api = Api(app)
api.add_resource(Main, '/')
api.add_resource(sendMail, '/mail')
if __name__ == '__main__':
app.run(debug = True)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 06:19:44 2019
@author: ethan
"""
import os
import numpy as np
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta, timezone
import praw
import numpy as np
import pandas as pd
import json as js
import urllib, json
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models import NumeralTickFormatter
from bokeh.palettes import Inferno256 as colors
creds = pd.read_csv('credentials.csv').T.to_dict()[0]
reddit = praw.Reddit(**creds)
def loadComments(sub = 'Economics', n = 365):
start= dt.now()
url = 'https://api.pushshift.io/reddit/search/comment/'
params = '?q=&subreddit={}&size=500&fields=author,body,created_utc'.format(sub)
response = urllib.request.urlopen(url + params)
data = json.loads(response.read())
df = pd.DataFrame(data['data'])
df['Time'] = df['created_utc'].apply(lambda x: dt.fromtimestamp(x))
df.index = pd.DatetimeIndex(df['Time'])
df = df.sort_index()
last = df.loc[df.index[-1], 'Time']
stop = dt.now() - timedelta (days = n)
print(stop, last)
params = params + '&before={}'
i = 500
while last > stop:
last = df['Time'].min()
print('Last:', last)
#print(last.timestamp(), url + params.format(int(last.timestamp())))
response = urllib.request.urlopen(url + params.format(int(last.timestamp())))
data = json.loads(response.read())
df2 = pd.DataFrame(data['data'])
df2['Time'] = df2['created_utc'].apply(lambda x: dt.fromtimestamp(x))
df2.index = pd.DatetimeIndex(df2['Time'])
df2 = df2.sort_index()
df = df.append(df2, True)
i = i + 500
if i % 20000 == 0:
df.to_pickle('data/sub_DataBase_{}.pkl'.format(i))
df = df2
print(dt.now() - start, i, last - stop)
df.to_pickle('data/sub_DataBase_{}.pkl'.format(i))
return df
# Don't use this unless the DataBase.pkl file gets messed up in some way.
def complieAllSubDBs():
files = ['data/' + file for file in os.listdir('data') if file != 'DataBase.pkl']
df = pd.read_pickle(files.pop())
end = df['Time'].max()
start = df['Time'].min()
for file in files:
df2 = pd.read_pickle(file)
df2 = df2.loc[(df2['Time'] > end) | (df2['Time'] < start), :]
df = df.append(df2, ignore_index = True,sort=False).sort_values('Time')
end = df['Time'].max()
start = df['Time'].min()
print( df['Time'].min(), df['Time'].max(), file)
#break
df.to_pickle('data/DataBase.pkl')
return df
#df = loadComments(n = 365)
df = complieAllSubDBs()
def makeHist():
df['wordCount'] = df['body'].apply(lambda x: len(x.split()))
print('Comment length - Mean: {} Median: {}'.format(df['wordCount'].mean(),df['wordCount'].median()))
output_file('imgs\hist.html')
p = figure(width = 1000,
height = 700,
title= '/r/Economics Comment Length: last 5,500 comments',
x_axis_label = 'Word Count',
y_axis_label = 'Density')
med = df['wordCount'].median()
hist, edges = np.histogram(df['wordCount'], density=True, bins='auto')
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color='blue', line_color="white", line_width = .25)
top = max(hist)
p.line([med,med],[0,top],line_dash = 'dashed', line_width = 2, color = 'black',
legend = 'Median: {}'.format(med))
p.y_range.start = 0
p.y_range.end = top
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.legend.location = 'top_right'
show(p)
df['word_count'] = df['body'].apply(lambda x: len(x.split()))
df.index = df['Time']
df = df.sort_index()
df['ave'] = df['word_count'].rolling("7d").mean()
xdata = df['Time']
ydata = df['ave']
xrng = (xdata.min(),xdata.max())
yrng = (0,ydata.max())
xdata = xdata.dropna()
ydata = ydata.dropna()
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models import NumeralTickFormatter
#from bokeh.palettes import Inferno256 as colors
from bokeh.models import SingleIntervalTicker, LinearAxis
output_file("imgs\wordcount_series.html")
p = figure(width = 1400, height = 700,
title="/r/Economics Average Word Count" ,
x_axis_label = 'Date', x_axis_type='datetime',
y_axis_label = 'Word Count Per Comment', y_range = yrng, x_range = xrng)
p.xgrid.grid_line_color = None
#p.ygrid.grid_line_color = None
p.line(xrng,[0,0], color = 'black')
p.line([0,0],yrng, color = 'black')
#slope, intercept, r_value, p_value, std_err = stats.linregress(xdata, ydata)
p.line(xdata, df['ave'],
legend = '7 Day Moving Average',
color = 'deepskyblue')
#p.circle(xdata, df['word_count'], color = 'deepskyblue', size = 1)
p.xaxis[0].ticker.desired_num_ticks = 20
show(p)
#url = 'https://api.pushshift.io/reddit/search/submission/'
#params = '?q=&subreddit=economics&size=500&fields=num_comments'
#response = urllib.request.urlopen(url + params)
#data = json.loads(response.read())
#df = pd.DataFrame(data['data']).sort_index()
#
#df = df.sort_values('num_comments')
#df['cumsum'] = df['num_comments'].cumsum()
#df['gini'] = df['cumsum']/df['num_comments'].sum()
#output_file('imgs/' + str(dt.now())[:10]+ "_ginic.html")
#
#p = figure(width = 700, height = 700,
# title="/r/Economics Lorentz Curves: Last 500 Submissions" ,
# x_axis_label = 'Cumulative Comments',
# y_axis_label = 'Cumulative Share of Comments', y_range = (0,1), x_range = (1,0))
#p.xgrid.grid_line_color = None
#p.ygrid.grid_line_color = None
#p.xaxis.formatter=NumeralTickFormatter(format="0%")
#p.yaxis.formatter=NumeralTickFormatter(format="0%")
##p.line(xrng,[0,0], color = 'black')
##p.line([0,0],yrng, color = 'black')
#
#def ginicalc(ser):
# tar = np.linspace(0,1,num = ser.size)
# diff = (tar - ser)/ser.size
# print(diff)
# return diff.sum()/.50
#p.line(np.linspace(0,1,num = df['gini'].size), df['gini'],
# legend = 'Gini Coefficient: {:.4f}'.format(ginicalc(df['gini'])),
# color = 'blue')
##show(p)
#output_file('imgs/' + str(dt.now())[:10]+ "_bars.html")
#p = figure(x_range = (0,100), plot_width = 1256,
# title="/r/Economics Submission Comment Share",
# x_axis_label = 'Rank', y_axis_label = 'Share of Comments')
#
#p.vbar(x = range(100),color = 'blue', width = .9,
# top = df.loc[df.index[500:400:-1],'num_comments']/df['num_comments'].sum())
#p.xgrid.grid_line_color = None
#p.y_range.start = 0
#
#
#p.yaxis.formatter=NumeralTickFormatter(format="0.0%")
#show(p)
#print('done')
|
# -*- coding: utf-8 -*-
# -*- mode: python -*-
#"""Functions for file IO"""
#from __future__ import print_function, division, absolute_import
import numpy
import tables
fullFile = tables.open_file(r'C:\Users\lasya\crcns-vim1-lp3wv\data\EstimatedResponses.mat')
print(fullFile.list_nodes())  # Show all variables available
'''
cxReg value = cortex region
0 = other
1 = V1
2 = V2
3 = V3
4 = V3A
5 = V3B
6 = V4
7 = Lateral Occipital Area
'''
def BOLD_testing(subj, cxReg, imageStart=0, imageStop=1750):
assert subj == "S1" or "S2", "please enter a valid subject"
idx = []
resp = []
if subj == "S1":
dat = fullFile.get_node('/dataValS1')[:]
ROI = fullFile.get_node('/roiS1')[:].flatten()
idx = numpy.nonzero(ROI == cxReg)[0]
resp = dat[:, idx]
return resp[imageStart:imageStop]
else:
dat = fullFile.get_node('/dataValS2')[:]
ROI = fullFile.get_node('/roiS2')[:].flatten()
idx = numpy.nonzero(ROI == cxReg)[0]
resp = dat[:, idx]
return resp[imageStart:imageStop]
def BOLD_training(subj, cxReg, imageStart=0, imageStop=1750):
assert subj == "S1" or "S2", "please enter a valid subject"
idx = []
resp = []
if subj == "S1":
dat = fullFile.get_node('/dataTrnS1')[:]
ROI = fullFile.get_node('/roiS1')[:].flatten()
idx = numpy.nonzero(ROI == cxReg)[0]
resp = dat[:, idx]
return resp[imageStart:imageStop]
else:
dat = fullFile.get_node('/dataTrnS2')[:]
ROI = fullFile.get_node('/roiS2')[:].flatten()
idx = numpy.nonzero(ROI == cxReg)[0]
resp = dat[:, idx]
return resp[imageStart:imageStop]
test = BOLD_training("S2", 7, 0, 1)
print(test)
test = BOLD_testing("S2", 7, 0, 1)
print(test)
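# Illustrative sketch (not part of the original analysis; assumes the .mat file
# opened above is available): pull every training response for subject S1 in V1
# (cxReg == 1) and summarize it per voxel. Per the slicing above, rows are images
# and columns are V1 voxels.
# v1_train = BOLD_training("S1", 1)
# print(v1_train.shape, numpy.nanmean(v1_train, axis=0))  # per-voxel mean response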
|
def result(target, candidates):
rt = 0
for candidate in candidates:
diff = len(target) - len(candidate)
for t in target:
if t in candidate:
candidate.remove(t)
rest = len(candidate)
if (diff == 0 and (rest == 0 or rest == 1)) or (diff == 1 and rest == 0) or (diff == -1 and rest == 1):
rt += 1
return rt
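# Quick illustration (hypothetical input, not part of the original stdin-driven I/O):
# with target "abc", the candidates "ab" (one letter missing), "abd" (one letter
# replaced) and "abcd" (one letter added) are all near-matches, so
# result(list("abc"), [list("ab"), list("abd"), list("abcd")]) would return 3.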
N = int(input())
target = list(input())
candidates = []
for _ in range(N-1):
candidates.append(list(input()))
print(result(target, candidates))
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, verbose_name=u'用户名', on_delete=models.CASCADE, related_name="userprofile")
nickname = models.CharField(max_length=32, verbose_name=u'昵称')
avatar = models.CharField(max_length=300, blank=True, null=True, verbose_name=u'头像')
def __str__(self):
return self.nickname
def __unicode__(self):
return self.nickname
def get_info(self):
return {
"id": self.id,
"username": self.user.username,
"nickname": self.nickname if self.nickname else "",
"avatar": self.avatar if self.avatar else "",
"email": self.user.email,
"is_superuser": self.user.is_superuser,
"status": self.user.is_active,
"create_date": self.user.date_joined.strftime("%Y-%m-%d %H:%M"),
# "last_date": self.user.get_latest_by()
"group": [{"id": group.id, "name": group.name} for group in self.user.groups.all()]
}
class Meta:
verbose_name = u'用户资料'
verbose_name_plural = u'用户资料'
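# Usage sketch (assumed, not part of this app's code): get_info() returns a plain
# dict, so a view could serialize it directly, e.g.
#   profile = UserProfile.objects.select_related("user").get(user__username="alice")
#   return JsonResponse(profile.get_info())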
|
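# Prints the first x terms of the Fibonacci sequence (1, 1, 2, 3, 5, ...).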
a=0
b=1
c=1
x=int(input("enter your number"))
for i in range(x):
print(b)
b=a+c
a=c
c=b
|
from webservice.ticketing.autotask.autotask import *
from webservice.ticketing.autotask.autotask_fields import *
|
import math
print(math.asinh(300))
|
# coding=UTF-8
# Receives POST requests
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.options
import socket
import data
from hashlib import sha256
from tornado.options import define, options
from os import path, mkdir
from shutil import copyfile
# Bind address
define('port', default=3000, type=int, help='Server Port')
Disk_addr = ('127.0.0.1', 32648)
# Database initialization
db, cursor = data._init()
hUser = data.C_user()
# Connect to the disk service
def connectDisk(addr):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
state = s.recv(1024)
state = state.decode('utf-8')
# print(state)
if (state == 'what'):
s.send('POST'.encode('utf-8'))
return s
# searchToken && delToken
def s_dToken(username, U_Token):
s = connectDisk(Disk_addr)
s.send(username.encode('utf-8'))
    s.recv(1024)  # wait for the disk service to respond
s.send(U_Token.encode('utf-8'))
state = s.recv(1024)
return state
# Create the directory tree named after the user
def mkUserDir(username):
    parentDir = path.dirname(path.dirname(__file__))  # parent directory
    # Build paths relative to the parent directory (for the user's profile files)
userDir = parentDir + "/users/" + username
indexDir = parentDir + '/index/user/'
mkdir(userDir)
    # Create the remaining profile directories
mkdir((userDir + '/headimg/'))
    copyfile((indexDir + '/headimg/headimg.jpg'), (userDir + '/headimg/headimg.jpg'))  # default avatar
# Define the base handler class (inherited by the request handlers below)
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST')
# POST request handlers
class RegisterHandler(BaseHandler):  # register
def post(self):
U_Token = self.get_argument('Token')
username = self.get_argument('username')
pwd = self.get_argument('pwd')
Email = self.get_argument('Email')
# print('getok')
state = s_dToken(username, U_Token)
state = state.decode('utf-8')
# print(state)
        if pwd is not None:
            if (state == 'True'):
                # verification succeeded; proceed to email verification
                # hash the password
pwd = sha256(pwd.encode('utf-8')).hexdigest()
# print(username, pwd, Email, U_Token)
w_state = hUser.main(db, cursor, username, pwd, Email, 1)
if (w_state == True):
                    # write succeeded
                    # create the directory tree named after the user
mkUserDir(username)
U_Token, username, pwd, Email, state, w_state = None, None, None, None, None, None
self.finish({'message':'ok'})
else:
                    # write failed: internal error
U_Token, username, pwd, Email, state, w_state = None, None, None, None, None, None
self.finish({'message':'error','why':'内部错误'})
elif (state == 'False'):
                # verification failed; discard the data
U_Token, username, pwd, Email, state = None, None, None, None, None
self.finish({'message':'error','why':'Token校验错误'})
else:
U_Token, username, pwd, Email, state = None, None, None, None, None
self.finish({'message':'error','why':'内部错误'})
else:
return False
class LoginHandler(BaseHandler):  # login
def post(self):
username = self.get_argument('username')
        pwd = self.get_argument('pwd')  # the password arrives as a sha256 digest
how = self.get_argument('type')
# print(username, pwd, how)
        if username and pwd and how:
            # second emptiness check (the first check is done on the client side, in the JSON layer)
if (how == 'Email'):
                # email mode
                state, S_username, why = hUser.main(db, cursor, username, pwd, username, 0.2)  # username and Email share one variable name; the need parameter tells them apart
if (state == True):
                    # lookup succeeded; check account activation
username, pwd, how = None, None, None
self.finish({'message':'ok','user':S_username})
else:
                    # lookup failed; refuse the login
username, pwd, how = None, None, None
self.finish({'message':'error','why':why})
elif (how == 'user'):
                # username mode
                state, S_username, why = hUser.main(db, cursor, username, pwd, username, 0.1)  # username and Email share one variable name; the need parameter tells them apart
if (state == True):
                    # lookup succeeded; check account activation
username, pwd, how = None, None, None
self.finish({'message':'ok','user':S_username})
username = None
else:
                    # lookup failed; refuse the login
username, pwd, how = None, None, None
self.finish({'message':'error','why':why})
else:
                # unexpected login type
username, pwd, how = None, None, None
self.finish({'message':'error','why':'内部错误'})
return False
class indexHandler(BaseHandler):
def post(self):
pass
class SubBlogNoteHandler(BaseHandler):  # publish a blog post
def post(self):
userToken = self.get_argument('userToken')
blogNote = self.get_argument('blogNote')
# Main function
def main():
tornado.options.parse_command_line()
    # Define the application
app = tornado.web.Application(
handlers=[
(r'/register', RegisterHandler),
(r'/login', LoginHandler),
(r'/', indexHandler),
]
)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
print('POST System is ready')
tornado.ioloop.IOLoop.instance().start()
main()
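# Client-side sketch (assumed, for illustration only; the host, port and form
# fields mirror the handlers defined above). Kept as comments since main() blocks:
#   import requests
#   requests.post("http://127.0.0.1:3000/login",
#                 data={"username": "alice", "pwd": "<sha256 hex digest>", "type": "user"})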
|
# -*- coding: utf-8 -*-
"""
This is the main module containing the implementation of the SS3 classifier.
(Please, visit https://github.com/sergioburdisso/pyss3 for more info)
"""
from __future__ import print_function
import os
import re
import json
import errno
import numbers
import numpy as np
from io import open
from time import time
from tqdm import tqdm
from math import pow, tanh, log
from sklearn.feature_extraction.text import CountVectorizer
from .util import is_a_collection, Print, VERBOSITY, Preproc as Pp
# python 2 and 3 compatibility
from functools import reduce
from six.moves import xrange
__version__ = "0.6.4"
ENCODING = "utf-8"
PARA_DELTR = "\n"
SENT_DELTR = r"\."
WORD_DELTR = r"\s"
WORD_REGEX = r"\w+(?:'\w+)?"
STR_UNKNOWN, STR_MOST_PROBABLE = "unknown", "most-probable"
STR_OTHERS_CATEGORY = "[others]"
STR_UNKNOWN_CATEGORY = "[unknown]"
IDX_UNKNOWN_CATEGORY = -1
STR_UNKNOWN_WORD = ''
IDX_UNKNOWN_WORD = -1
STR_VANILLA, STR_XAI = "vanilla", "xai"
STR_GV, STR_NORM_GV, STR_NORM_GV_XAI = "gv", "norm_gv", "norm_gv_xai"
STR_MODEL_FOLDER = "ss3_models"
STR_MODEL_EXT = "ss3m"
WEIGHT_SCHEMES_SS3 = ['only_cat', 'diff_all', 'diff_max', 'diff_median', 'diff_mean']
WEIGHT_SCHEMES_TF = ['binary', 'raw_count', 'term_freq', 'log_norm', 'double_norm']
VERBOSITY = VERBOSITY # to allow "from pyss3 import VERBOSITY"
NAME = 0
VOCAB = 1
NEXT = 0
FR = 1
CV = 2
SG = 3
GV = 4
LV = 5
EMPTY_WORD_INFO = [0, 0, 0, 0, 0, 0]
NOISE_FR = 1
MIN_MAD_SD = .03
class SS3:
"""
The SS3 classifier class.
The SS3 classifier was originally defined in Section 3 of
https://dx.doi.org/10.1016/j.eswa.2019.05.023
    (preprint available here: https://arxiv.org/abs/1905.08772)
:param s: the "smoothness"(sigma) hyperparameter value
:type s: float
:param l: the "significance"(lambda) hyperparameter value
:type l: float
:param p: the "sanction"(rho) hyperparameter value
:type p: float
:param a: the alpha hyperparameter value (i.e. all terms with a
confidence value (cv) less than alpha will be ignored during
classification)
:type a: float
:param name: the model's name (to save and load the model from disk)
:type name: str
:param cv_m: method used to compute the confidence value (cv) of each
term (word or n-grams), options are:
"norm_gv_xai", "norm_gv" and "gv" (default: "norm_gv_xai")
:type cv_m: str
:param sg_m: method used to compute the significance (sg) function, options
are: "vanilla" and "xai" (default: "xai")
:type sg_m: str
"""
__name__ = "model"
__models_folder__ = STR_MODEL_FOLDER
__s__ = .45
__l__ = .5
__p__ = 1
__a__ = .0
__multilabel__ = False
__l_update__ = None
__s_update__ = None
__p_update__ = None
__cv_cache__ = None
__last_x_test__ = None
__last_x_test_idx__ = None
__prun_floor__ = 10
__prun_trigger__ = 1000000
__prun_counter__ = 0
__zero_cv__ = None
__parag_delimiter__ = PARA_DELTR
__sent_delimiter__ = SENT_DELTR
__word_delimiter__ = WORD_DELTR
__word_regex__ = WORD_REGEX
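    # A minimal construction sketch (the hyperparameter values below are illustrative only):
    #   clf = SS3(s=0.32, l=1.24, p=1.1, name="topic-model")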
def __init__(
self, s=None, l=None, p=None, a=None,
name="", cv_m=STR_NORM_GV_XAI, sg_m=STR_XAI
):
"""
Class constructor.
:param s: the "smoothness"(sigma) hyperparameter value
:type s: float
:param l: the "significance"(lambda) hyperparameter value
:type l: float
:param p: the "sanction"(rho) hyperparameter value
:type p: float
:param a: the alpha hyperparameter value (i.e. all terms with a
confidence value (cv) less than alpha will be ignored during
classification)
:type a: float
:param name: the model's name (to save and load the model from disk)
:type name: str
:param cv_m: method used to compute the confidence value (cv) of each
term (word or n-grams), options are:
"norm_gv_xai", "norm_gv" and "gv" (default: "norm_gv_xai")
:type cv_m: str
:param sg_m: method used to compute the significance (sg) function, options
are: "vanilla" and "xai" (default: "xai")
:type sg_m: str
:raises: ValueError
"""
self.__name__ = (name or self.__name__).lower()
self.__s__ = self.__s__ if s is None else s
self.__l__ = self.__l__ if l is None else l
self.__p__ = self.__p__ if p is None else p
self.__a__ = self.__a__ if a is None else a
try:
float(self.__s__ + self.__l__ + self.__p__ + self.__a__)
except BaseException:
raise ValueError("hyperparameter values must be numbers")
self.__categories_index__ = {}
self.__categories__ = []
self.__max_fr__ = []
self.__max_gv__ = []
self.__index_to_word__ = {}
self.__word_to_index__ = {}
if cv_m == STR_NORM_GV_XAI:
self.__cv__ = self.__cv_norm_gv_xai__
elif cv_m == STR_NORM_GV:
self.__cv__ = self.__cv_norm_gv__
elif cv_m == STR_GV:
self.__cv__ = self.__gv__
if sg_m == STR_XAI:
self.__sg__ = self.__sg_xai__
elif sg_m == STR_VANILLA:
self.__sg__ = self.__sg_vanilla__
self.__cv_mode__ = cv_m
self.__sg_mode__ = sg_m
self.original_sumop_ngrams = self.summary_op_ngrams
self.original_sumop_sentences = self.summary_op_sentences
self.original_sumop_paragraphs = self.summary_op_paragraphs
def __lv__(self, ngram, icat, cache=True):
"""Local value function."""
if cache:
return self.__trie_node__(ngram, icat)[LV]
else:
try:
ilength = len(ngram) - 1
fr = self.__trie_node__(ngram, icat)[FR]
if fr > NOISE_FR:
max_fr = self.__max_fr__[icat][ilength]
local_value = (fr / float(max_fr)) ** self.__s__
return local_value
else:
return 0
except TypeError:
return 0
except IndexError:
return 0
def __sn__(self, ngram, icat):
"""The sanction (sn) function."""
m_values = [
self.__sg__(ngram, ic)
for ic in xrange(len(self.__categories__)) if ic != icat
]
c = len(self.__categories__)
s = sum([min(v, 1) for v in m_values])
try:
return pow((c - (s + 1)) / ((c - 1) * (s + 1)), self.__p__)
except ZeroDivisionError: # if c <= 1
return 1.
def __sg_vanilla__(self, ngram, icat, cache=True):
"""The original significance (sg) function definition."""
try:
if cache:
return self.__trie_node__(ngram, icat)[SG]
else:
ncats = len(self.__categories__)
l = self.__l__
lvs = [self.__lv__(ngram, ic) for ic in xrange(ncats)]
lv = lvs[icat]
M, sd = mad(lvs, ncats)
if not sd and lv:
return 1.
else:
return sigmoid(lv - M, l * sd)
except TypeError:
return 0.
def __sg_xai__(self, ngram, icat, cache=True):
"""
        A variation of the significance (sg) function.
This version of the sg function adds extra checks to
improve visual explanations.
"""
try:
if cache:
return self.__trie_node__(ngram, icat)[SG]
else:
ncats = len(self.__categories__)
l = self.__l__
lvs = [self.__lv__(ngram, ic) for ic in xrange(ncats)]
lv = lvs[icat]
M, sd = mad(lvs, ncats)
if l * sd <= MIN_MAD_SD:
sd = MIN_MAD_SD / l if l else 0
# stopwords filter
stopword = (M > .2) or (
sum(map(lambda v: v > 0.09, lvs)) == ncats
)
if (stopword and sd <= .1) or (M >= .3):
return 0.
if not sd and lv:
return 1.
return sigmoid(lv - M, l * sd)
except TypeError:
return 0.
def __gv__(self, ngram, icat, cache=True):
"""
The global value (gv) function.
This is the original way of computing the confidence value (cv)
of a term.
"""
if cache:
return self.__trie_node__(ngram, icat)[GV]
else:
lv = self.__lv__(ngram, icat)
weight = self.__sg__(ngram, icat) * self.__sn__(ngram, icat)
return lv * weight
def __cv_norm_gv__(self, ngram, icat, cache=True):
"""
Alternative way of computing the confidence value (cv) of terms.
        This variation normalizes the gv value and uses that value as the cv.
"""
try:
if cache:
return self.__trie_node__(ngram, icat)[CV]
else:
try:
cv = self.__gv__(ngram, icat)
return cv / self.__max_gv__[icat][len(ngram) - 1]
except (ZeroDivisionError, IndexError):
return .0
except TypeError:
return 0
def __cv_norm_gv_xai__(self, ngram, icat, cache=True):
"""
Alternative way of computing the confidence value (cv) of terms.
        This variation not only normalizes the gv value but also adds extra
checks to improve visual explanations.
"""
try:
if cache:
return self.__trie_node__(ngram, icat)[CV]
else:
try:
max_gv = self.__max_gv__[icat][len(ngram) - 1]
if (len(ngram) > 1):
# stopwords guard
n_cats = len(self.__categories__)
cats = xrange(n_cats)
sum_words_gv = sum([
self.__gv__([w], ic) for w in ngram for ic in cats
])
if (sum_words_gv < .05):
return .0
elif len([
w for w in ngram
if self.__gv__([w], icat) >= .01
]) == len(ngram):
gv = self.__gv__(ngram, icat)
return gv / max_gv + sum_words_gv
# return gv / max_gv * len(ngram)
gv = self.__gv__(ngram, icat)
return gv / max_gv
except (ZeroDivisionError, IndexError):
return .0
except TypeError:
return 0
def __apply_fn__(self, fn, ngram, cat=None):
"""Private method used by gv, lv, sn, sg functions."""
if ngram.strip() == '':
return 0
ngram = [self.get_word_index(w)
for w in re.split(self.__word_delimiter__, ngram)
if w]
if cat is None:
return fn(ngram) if IDX_UNKNOWN_WORD not in ngram else 0
icat = self.get_category_index(cat)
if icat == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
return fn(ngram, icat) if IDX_UNKNOWN_WORD not in ngram else 0
def __summary_ops_are_pristine__(self):
"""Return True if summary operators haven't changed."""
return self.original_sumop_ngrams == self.summary_op_ngrams and \
self.original_sumop_sentences == self.summary_op_sentences and \
self.original_sumop_paragraphs == self.summary_op_paragraphs
def __classify_ngram__(self, ngram):
"""Classify the given n-gram."""
cv = [
self.__cv__(ngram, icat)
for icat in xrange(len(self.__categories__))
]
cv[:] = [(v if v > self.__a__ else 0) for v in cv]
return cv
def __classify_sentence__(self, sent, prep, json=False, prep_func=None):
"""Classify the given sentence."""
classify_trans = self.__classify_ngram__
categories = self.__categories__
cats = xrange(len(categories))
word_index = self.get_word_index
word_delimiter = self.__word_delimiter__
word_regex = self.__word_regex__
if not json:
if prep or prep_func is not None:
prep_func = prep_func or Pp.clean_and_ready
sent = prep_func(sent)
sent_words = [
(w, w)
for w in re_split_keep(word_regex, sent)
if w
]
else:
if prep or prep_func is not None:
sent_words = [
(w, Pp.clean_and_ready(w, dots=False) if prep_func is None else prep_func(w))
for w in re_split_keep(word_regex, sent)
if w
]
else:
sent_words = [
(w, w)
for w in re_split_keep(word_regex, sent)
if w
]
if not sent_words:
sent_words = [(u'.', u'.')]
sent_iwords = [word_index(w) for _, w in sent_words]
sent_len = len(sent_iwords)
sent_parsed = []
wcur = 0
while wcur < sent_len:
cats_ngrams_cv = [[0] for icat in cats]
cats_ngrams_offset = [[0] for icat in cats]
cats_ngrams_iword = [[-1] for icat in cats]
cats_max_cv = [.0 for icat in cats]
for icat in cats:
woffset = 0
word_raw = sent_words[wcur + woffset][0]
wordi = sent_iwords[wcur + woffset]
word_info = categories[icat][VOCAB]
if wordi in word_info:
cats_ngrams_cv[icat][0] = word_info[wordi][CV]
word_info = word_info[wordi][NEXT]
cats_ngrams_iword[icat][0] = wordi
cats_ngrams_offset[icat][0] = woffset
# if it is a learned word (not unknown and seen for this category),
# then try to recognize learned n-grams too
if wordi != IDX_UNKNOWN_WORD and wordi in categories[icat][VOCAB]:
# while word or word delimiter (e.g. space)
while wordi != IDX_UNKNOWN_WORD or re.match(word_delimiter, word_raw):
woffset += 1
if wcur + woffset >= sent_len:
break
word_raw = sent_words[wcur + woffset][0]
wordi = sent_iwords[wcur + woffset]
# if word is a word:
if wordi != IDX_UNKNOWN_WORD:
# if this word belongs to this category
if wordi in word_info:
cats_ngrams_cv[icat].append(word_info[wordi][CV])
cats_ngrams_iword[icat].append(wordi)
cats_ngrams_offset[icat].append(woffset)
word_info = word_info[wordi][NEXT]
else:
break
cats_max_cv[icat] = (max(cats_ngrams_cv[icat])
if cats_ngrams_cv[icat] else .0)
max_gv = max(cats_max_cv)
use_ngram = True
if (max_gv > self.__a__):
icat_max_gv = cats_max_cv.index(max_gv)
ngram_max_gv = cats_ngrams_cv[icat_max_gv].index(max_gv)
offset_max_gv = cats_ngrams_offset[icat_max_gv][ngram_max_gv] + 1
max_gv_sum_1_grams = max([
sum([
(categories[ic][VOCAB][wi][CV]
if wi in categories[ic][VOCAB]
else 0)
for wi
in cats_ngrams_iword[ic]
])
for ic in cats
])
if (max_gv_sum_1_grams > max_gv):
use_ngram = False
else:
use_ngram = False
if not use_ngram:
offset_max_gv = 1
icat_max_gv = 0
ngram_max_gv = 0
sent_parsed.append(
(
u"".join([raw_word for raw_word, _ in sent_words[wcur:wcur + offset_max_gv]]),
cats_ngrams_iword[icat_max_gv][:ngram_max_gv + 1]
)
)
wcur += offset_max_gv
get_word = self.get_word
if not json:
words_cvs = [classify_trans(seq) for _, seq in sent_parsed]
if words_cvs:
return self.summary_op_ngrams(words_cvs)
return self.__zero_cv__
else:
get_tip = self.__trie_node__
local_value = self.__lv__
info = [
{
"token": u"→".join(map(get_word, sequence)),
"lexeme": raw_sequence,
"cv": classify_trans(sequence),
"lv": [local_value(sequence, ic) for ic in cats],
"fr": [get_tip(sequence, ic)[FR] for ic in cats]
}
for raw_sequence, sequence in sent_parsed
]
return {
"words": info,
"cv": self.summary_op_ngrams([v["cv"] for v in info]),
"wmv": reduce(vmax, [v["cv"] for v in info]) # word max value
}
def __classify_paragraph__(self, parag, prep, json=False, prep_func=None):
"""Classify the given paragraph."""
if not json:
sents_cvs = [
self.__classify_sentence__(sent, prep=prep, prep_func=prep_func)
for sent in re.split(self.__sent_delimiter__, parag)
if sent
]
if sents_cvs:
return self.summary_op_sentences(sents_cvs)
return self.__zero_cv__
else:
info = [
self.__classify_sentence__(sent, prep=prep, prep_func=prep_func, json=True)
for sent in re_split_keep(self.__sent_delimiter__, parag)
if sent
]
if info:
sents_cvs = [v["cv"] for v in info]
cv = self.summary_op_sentences(sents_cvs)
wmv = reduce(vmax, [v["wmv"] for v in info])
else:
cv = self.__zero_cv__
wmv = cv
return {
"sents": info,
"cv": cv,
"wmv": wmv # word max value
}
def __trie_node__(self, ngram, icat):
"""Get the trie's node for this n-gram."""
try:
word_info = self.__categories__[icat][VOCAB][ngram[0]]
for word in ngram[1:]:
word_info = word_info[NEXT][word]
return word_info
except BaseException:
return EMPTY_WORD_INFO
def __get_category__(self, name):
"""
Given the category name, return the category data.
        If the category name doesn't exist, a new one is created.
"""
try:
return self.__categories_index__[name]
except KeyError:
self.__max_fr__.append([])
self.__max_gv__.append([])
self.__categories_index__[name] = len(self.__categories__)
self.__categories__.append([name, {}]) # name, vocabulary
self.__zero_cv__ = (0,) * len(self.__categories__)
return self.__categories_index__[name]
def __get_category_length__(self, icat):
"""
Return the category length.
The category length is the total number of words seen during training.
"""
size = 0
vocab = self.__categories__[icat][VOCAB]
for word in vocab:
size += vocab[word][FR]
return size
def __get_most_probable_category__(self):
"""Return the index of the most probable category."""
sizes = []
for icat in xrange(len(self.__categories__)):
sizes.append((icat, self.__get_category_length__(icat)))
return sorted(sizes, key=lambda v: v[1])[-1][0]
def __get_vocabularies__(self, icat, vocab, preffix, n_grams, output, ngram_char="_"):
"""Get category list of n-grams with info."""
senq_ilen = len(preffix)
get_name = self.get_word
seq = preffix + [None]
if len(seq) > n_grams:
return
for word in vocab:
seq[-1] = word
if (self.__cv__(seq, icat) > 0):
output[senq_ilen].append(
(
ngram_char.join([get_name(wi) for wi in seq]),
vocab[word][FR],
self.__gv__(seq, icat),
self.__cv__(seq, icat)
)
)
self.__get_vocabularies__(
icat, vocab[word][NEXT], seq, n_grams, output, ngram_char
)
def __get_category_vocab__(self, icat):
"""Get category list of n-grams ordered by confidence value."""
category = self.__categories__[icat]
vocab = category[VOCAB]
w_seqs = ([w] for w in vocab)
vocab_icat = (
(
self.get_word(wseq[0]),
vocab[wseq[0]][FR],
self.__lv__(wseq, icat),
self.__gv__(wseq, icat),
self.__cv__(wseq, icat)
)
for wseq in w_seqs if self.__gv__(wseq, icat) > self.__a__
)
return sorted(vocab_icat, key=lambda k: -k[-1])
def __get_def_cat__(self, def_cat):
"""Given the `def_cat` argument, get the default category value."""
if def_cat is not None and (def_cat not in [STR_MOST_PROBABLE, STR_UNKNOWN] and
self.get_category_index(def_cat) == IDX_UNKNOWN_CATEGORY):
raise ValueError(
"the default category must be 'most-probable', 'unknown', or a category name"
" (current value is '%s')." % str(def_cat)
)
def_cat = None if def_cat == STR_UNKNOWN else def_cat
return self.get_most_probable_category() if def_cat == STR_MOST_PROBABLE else def_cat
def __get_next_iwords__(self, sent, icat):
"""Return the list of possible following words' indexes."""
if not self.get_category_name(icat):
return []
vocab = self.__categories__[icat][VOCAB]
word_index = self.get_word_index
sent = Pp.clean_and_ready(sent)
sent = [
word_index(w)
for w in sent.strip(".").split(".")[-1].split(" ") if w
]
tips = []
for word in sent:
if word is None:
tips[:] = []
continue
tips.append(vocab)
tips[:] = (
tip[word][NEXT]
for tip in tips if word in tip and tip[word][NEXT]
)
if len(tips) == 0:
return []
next_words = tips[0]
next_nbr_words = float(sum([next_words[w][FR] for w in next_words]))
return sorted(
[
(
word1,
next_words[word1][FR],
next_words[word1][FR] / next_nbr_words
)
for word1 in next_words
],
key=lambda k: -k[1]
)
def __prune_cat_trie__(self, vocab, prune=False, min_n=None):
"""Prune the trie of the given category."""
prun_floor = min_n or self.__prun_floor__
remove = []
for word in vocab:
if prune and vocab[word][FR] <= prun_floor:
vocab[word][NEXT] = None
remove.append(word)
else:
self.__prune_cat_trie__(vocab[word][NEXT], prune=True)
for word in remove:
del vocab[word]
def __prune_tries__(self):
"""Prune the trie of every category."""
Print.info("pruning tries...", offset=1)
for category in self.__categories__:
self.__prune_cat_trie__(category[VOCAB])
self.__prun_counter__ = 0
def __cache_lvs__(self, icat, vocab, preffix):
"""Cache all local values."""
for word in vocab:
sequence = preffix + [word]
vocab[word][LV] = self.__lv__(sequence, icat, cache=False)
self.__cache_lvs__(icat, vocab[word][NEXT], sequence)
def __cache_gvs__(self, icat, vocab, preffix):
"""Cache all global values."""
for word in vocab:
sequence = preffix + [word]
vocab[word][GV] = self.__gv__(sequence, icat, cache=False)
self.__cache_gvs__(icat, vocab[word][NEXT], sequence)
def __cache_sg__(self, icat, vocab, preffix):
"""Cache all significance weight values."""
for word in vocab:
sequence = preffix + [word]
vocab[word][SG] = self.__sg__(sequence, icat, cache=False)
self.__cache_sg__(icat, vocab[word][NEXT], sequence)
def __cache_cvs__(self, icat, vocab, preffix):
"""Cache all confidence values."""
for word in vocab:
sequence = preffix + [word]
vocab[word][CV] = self.__cv__(sequence, icat, False)
self.__cache_cvs__(icat, vocab[word][NEXT], sequence)
def __update_max_gvs__(self, icat, vocab, preffix):
"""Update all maximum global values."""
gv = self.__gv__
max_gvs = self.__max_gv__[icat]
sentence_ilength = len(preffix)
sequence = preffix + [None]
for word in vocab:
sequence[-1] = word
sequence_gv = gv(sequence, icat)
if sequence_gv > max_gvs[sentence_ilength]:
max_gvs[sentence_ilength] = sequence_gv
self.__update_max_gvs__(icat, vocab[word][NEXT], sequence)
def __update_needed__(self):
"""Return True if an update is needed, false otherwise."""
return (self.__s__ != self.__s_update__ or
self.__l__ != self.__l_update__ or
self.__p__ != self.__p_update__)
def __save_cat_vocab__(self, icat, path, n_grams):
"""Save the category vocabulary inside ``path``."""
if n_grams == -1:
            n_grams = 20  # used as "infinite" in practice
category = self.__categories__[icat]
cat_name = self.get_category_name(icat)
vocab = category[VOCAB]
vocabularies_out = [[] for _ in xrange(n_grams)]
terms = ["words", "bigrams", "trigrams"]
self.__get_vocabularies__(icat, vocab, [], n_grams, vocabularies_out)
Print.info("saving '%s' vocab" % cat_name)
for ilen in xrange(n_grams):
if vocabularies_out[ilen]:
term = terms[ilen] if ilen <= 2 else "%d-grams" % (ilen + 1)
voc_path = os.path.join(
path, "ss3_vocab_%s(%s).csv" % (cat_name, term)
)
f = open(voc_path, "w+", encoding=ENCODING)
vocabularies_out[ilen].sort(key=lambda k: -k[-1])
f.write(u"%s,%s,%s,%s\n" % ("term", "fr", "gv", "cv"))
for trans in vocabularies_out[ilen]:
f.write(u"%s,%d,%f,%f\n" % tuple(trans))
f.close()
Print.info("\t[ %s stored in '%s'" % (term, voc_path))
def __update_cv_cache__(self):
"""Update numpy darray confidence values cache."""
if self.__cv_cache__ is None:
self.__cv_cache__ = np.zeros((len(self.__index_to_word__), len(self.__categories__)))
cv = self.__cv__
for term_idx, cv_vec in enumerate(self.__cv_cache__):
for cat_idx, _ in enumerate(cv_vec):
try:
cv_vec[cat_idx] = cv([term_idx], cat_idx)
except KeyError:
cv_vec[cat_idx] = 0
def __predict_fast__(
self, x_test, def_cat=STR_MOST_PROBABLE, labels=True,
multilabel=False, proba=False, prep=True, leave_pbar=True
):
"""A faster version of the `predict` method (using numpy)."""
if not def_cat or def_cat == STR_UNKNOWN:
def_cat = IDX_UNKNOWN_CATEGORY
elif def_cat == STR_MOST_PROBABLE:
def_cat = self.__get_most_probable_category__()
else:
def_cat = self.get_category_index(def_cat)
if def_cat == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
# does the special "[others]" category exist? (only used in multilabel classification)
__other_idx__ = self.get_category_index(STR_OTHERS_CATEGORY)
if self.__update_needed__():
self.update_values()
if self.__cv_cache__ is None:
self.__update_cv_cache__()
self.__last_x_test__ = None # could have learned a new word (in `learn`)
cv_cache = self.__cv_cache__
x_test_hash = list_hash(x_test)
if x_test_hash == self.__last_x_test__:
x_test_idx = self.__last_x_test_idx__
else:
self.__last_x_test__ = x_test_hash
self.__last_x_test_idx__ = [None] * len(x_test)
x_test_idx = self.__last_x_test_idx__
word_index = self.get_word_index
for doc_idx, doc in enumerate(tqdm(x_test, desc="Caching documents",
leave=False, disable=Print.is_quiet())):
x_test_idx[doc_idx] = [
word_index(w)
for w
in re.split(self.__word_delimiter__, Pp.clean_and_ready(doc) if prep else doc)
if word_index(w) != IDX_UNKNOWN_WORD
]
y_pred = [None] * len(x_test)
for doc_idx, doc in enumerate(tqdm(x_test_idx, desc="Classification",
leave=leave_pbar, disable=Print.is_quiet())):
if self.__a__ > 0:
doc_cvs = cv_cache[doc]
doc_cvs[doc_cvs <= self.__a__] = 0
pred_cv = np.add.reduce(doc_cvs, 0)
else:
pred_cv = np.add.reduce(cv_cache[doc], 0)
if proba:
y_pred[doc_idx] = list(pred_cv)
continue
if not multilabel:
if pred_cv.sum() == 0:
y_pred[doc_idx] = def_cat
else:
y_pred[doc_idx] = np.argmax(pred_cv)
if labels:
if y_pred[doc_idx] != IDX_UNKNOWN_CATEGORY:
y_pred[doc_idx] = self.__categories__[y_pred[doc_idx]][NAME]
else:
y_pred[doc_idx] = STR_UNKNOWN_CATEGORY
else:
if pred_cv.sum() == 0:
if def_cat == IDX_UNKNOWN_CATEGORY:
y_pred[doc_idx] = []
else:
y_pred[doc_idx] = [self.get_category_name(def_cat) if labels else def_cat]
else:
r = sorted([(i, pred_cv[i])
for i in range(pred_cv.size)],
key=lambda e: -e[1])
if labels:
y_pred[doc_idx] = [self.get_category_name(cat_i)
for cat_i, _ in r[:kmean_multilabel_size(r)]]
else:
y_pred[doc_idx] = [cat_i for cat_i, _ in r[:kmean_multilabel_size(r)]]
# if the special "[others]" category exists
if __other_idx__ != IDX_UNKNOWN_CATEGORY:
                    # if it's among the predicted labels, remove (hide) it
if labels:
if STR_OTHERS_CATEGORY in y_pred[doc_idx]:
y_pred[doc_idx].remove(STR_OTHERS_CATEGORY)
else:
if __other_idx__ in y_pred[doc_idx]:
y_pred[doc_idx].remove(__other_idx__)
return y_pred
def summary_op_ngrams(self, cvs):
"""
Summary operator for n-gram confidence vectors.
By default it returns the addition of all confidence
vectors. However, in case you want to use a custom
summary operator, this function must be replaced
as shown in the following example:
>>> def my_summary_op(cvs):
>>> return cvs[0]
>>> ...
>>> clf = SS3()
>>> ...
>>> clf.summary_op_ngrams = my_summary_op
Note that any function receiving a list of vectors and
returning a single vector could be used. In the above example
the summary operator is replaced by the user-defined
``my_summary_op`` which ignores all confidence vectors
returning only the confidence vector of the first n-gram
(which besides being an illustrative example, makes no real sense).
        :param cvs: a list of n-gram confidence vectors
:type cvs: list (of list of float)
:returns: a sentence confidence vector
:rtype: list (of float)
"""
return reduce(vsum, cvs)
def summary_op_sentences(self, cvs):
"""
Summary operator for sentence confidence vectors.
By default it returns the addition of all confidence
vectors. However, in case you want to use a custom
summary operator, this function must be replaced
as shown in the following example:
>>> def dummy_summary_op(cvs):
>>> return cvs[0]
>>> ...
>>> clf = SS3()
>>> ...
>>> clf.summary_op_sentences = dummy_summary_op
Note that any function receiving a list of vectors and
returning a single vector could be used. In the above example
the summary operator is replaced by the user-defined
``dummy_summary_op`` which ignores all confidence vectors
returning only the confidence vector of the first sentence
(which besides being an illustrative example, makes no real sense).
        :param cvs: a list of sentence confidence vectors
:type cvs: list (of list of float)
:returns: a paragraph confidence vector
:rtype: list (of float)
"""
return reduce(vsum, cvs)
def summary_op_paragraphs(self, cvs):
"""
Summary operator for paragraph confidence vectors.
By default it returns the addition of all confidence
vectors. However, in case you want to use a custom
summary operator, this function must be replaced
as shown in the following example:
>>> def dummy_summary_op(cvs):
>>> return cvs[0]
>>> ...
>>> clf = SS3()
>>> ...
>>> clf.summary_op_paragraphs = dummy_summary_op
Note that any function receiving a list of vectors and
returning a single vector could be used. In the above example
the summary operator is replaced by the user-defined
``dummy_summary_op`` which ignores all confidence vectors
returning only the confidence vector of the first paragraph
(which besides being an illustrative example, makes no real sense).
        :param cvs: a list of paragraph confidence vectors
:type cvs: list (of list of float)
:returns: the document confidence vector
:rtype: list (of float)
"""
return reduce(vsum, cvs)
def get_name(self):
"""
Return the model's name.
:returns: the model's name.
:rtype: str
"""
return self.__name__
def set_name(self, name):
"""
Set the model's name.
:param name: the model's name.
:type name: str
"""
self.__name__ = name
def set_hyperparameters(self, s=None, l=None, p=None, a=None):
"""
Set hyperparameter values.
:param s: the "smoothness" (sigma) hyperparameter
:type s: float
:param l: the "significance" (lambda) hyperparameter
:type l: float
:param p: the "sanction" (rho) hyperparameter
:type p: float
:param a: the alpha hyperparameter (i.e. all terms with a
confidence value (cv) less than alpha will be ignored during
classification)
:type a: float
"""
if s is not None:
self.set_s(s)
if l is not None:
self.set_l(l)
if p is not None:
self.set_p(p)
if a is not None:
self.set_a(a)
def get_hyperparameters(self):
"""
Get hyperparameter values.
:returns: a tuple with hyperparameters current values (s, l, p, a)
:rtype: tuple
"""
return self.__s__, self.__l__, self.__p__, self.__a__
def set_model_path(self, path):
"""
Overwrite the default path from which the model will be loaded (or saved to).
Note: be aware that the PySS3 Command Line tool looks for
a local folder called ``ss3_models`` to load models.
        Therefore, the ``ss3_models`` folder will always be automatically
        appended to the given ``path`` (e.g. if ``path="my/path/"``, it will
be converted into ``my/path/ss3_models``).
:param path: the path
:type path: str
"""
self.__models_folder__ = os.path.join(path, STR_MODEL_FOLDER)
def set_block_delimiters(self, parag=None, sent=None, word=None):
r"""Overwrite the default delimiters used to split input documents into blocks.
        Delimiters can be any regular expression, from simple ones (e.g. ``" "``) to
more complex ones (e.g. ``r"[^\s\w\d]"``).
Note: remember that there are certain reserved characters for regular expression,
for example, the dot (.), in which case use the backslash to indicate you're
        referring to the character itself and not its interpretation (e.g. ``\.``)
e.g.
>>> ss3.set_block_delimiters(word="\s")
>>> ss3.set_block_delimiters(word="\s", parag="\n\n")
>>> ss3.set_block_delimiters(parag="\n---\n")
>>> ss3.set_block_delimiters(sent="\.")
>>> ss3.set_block_delimiters(word="\|")
>>> ss3.set_block_delimiters(word=" ")
:param parag: the paragraph new delimiter
:type parag: str
:param sent: the sentence new delimiter
:type sent: str
:param word: the word new delimiter
:type word: str
"""
if parag:
self.set_delimiter_paragraph(parag)
if sent:
self.set_delimiter_sentence(sent)
if word:
self.set_delimiter_word(word)
def set_delimiter_paragraph(self, regex):
r"""
Set the delimiter used to split documents into paragraphs.
Remember that there are certain reserved characters for regular expression,
for example, the dot (.), in which case use the backslash to indicate you're
        referring to the character itself and not its interpretation (e.g. ``\.``)
:param regex: the regular expression of the new delimiter
:type regex: str
"""
self.__parag_delimiter__ = regex
def set_delimiter_sentence(self, regex):
r"""
Set the delimiter used to split documents into sentences.
Remember that there are certain reserved characters for regular expression,
for example, the dot (.), in which case use the backslash to indicate you're
        referring to the character itself and not its interpretation (e.g. ``\.``)
:param regex: the regular expression of the new delimiter
:type regex: str
"""
self.__sent_delimiter__ = regex
def set_delimiter_word(self, regex):
r"""
Set the delimiter used to split documents into words.
Remember that there are certain reserved characters for regular expression,
for example, the dot (.), in which case use the backslash to indicate you're
        referring to the character itself and not its interpretation (e.g. ``\.``)
:param regex: the regular expression of the new delimiter
:type regex: str
"""
self.__word_delimiter__ = regex
def set_s(self, value):
"""
Set the "smoothness" (sigma) hyperparameter value.
:param value: the hyperparameter value
:type value: float
"""
self.__s__ = float(value)
def get_s(self):
"""
Get the "smoothness" (sigma) hyperparameter value.
:returns: the hyperparameter value
:rtype: float
"""
return self.__s__
def set_l(self, value):
"""
Set the "significance" (lambda) hyperparameter value.
:param value: the hyperparameter value
:type value: float
"""
self.__l__ = float(value)
def get_l(self):
"""
Get the "significance" (lambda) hyperparameter value.
:returns: the hyperparameter value
:rtype: float
"""
return self.__l__
def set_p(self, value):
"""
Set the "sanction" (rho) hyperparameter value.
:param value: the hyperparameter value
:type value: float
"""
self.__p__ = float(value)
def get_p(self):
"""
Get the "sanction" (rho) hyperparameter value.
:returns: the hyperparameter value
:rtype: float
"""
return self.__p__
def set_a(self, value):
"""
Set the alpha hyperparameter value.
All terms with a confidence value (cv) less than alpha
will be ignored during classification.
:param value: the hyperparameter value
:type value: float
"""
self.__a__ = float(value)
def get_a(self):
"""
Get the alpha hyperparameter value.
:returns: the hyperparameter value
:rtype: float
"""
return self.__a__
def get_categories(self, all=False):
"""
Get the list of category names.
:returns: the list of category names
:rtype: list (of str)
"""
return [
self.get_category_name(ci)
for ci in range(len(self.__categories__))
if all or self.get_category_name(ci) != STR_OTHERS_CATEGORY
]
def get_most_probable_category(self):
"""
Get the name of the most probable category.
:returns: the name of the most probable category
:rtype: str
"""
return self.get_category_name(self.__get_most_probable_category__())
def get_ngrams_length(self):
"""
        Return the length of the longest learned n-gram.
        :returns: the length of the longest learned n-gram.
:rtype: int
"""
return len(self.__max_fr__[0]) if len(self.__max_fr__) > 0 else 0
def get_category_index(self, name):
"""
Given its name, return the category index.
:param name: The category name
:type name: str
:returns: the category index (or ``IDX_UNKNOWN_CATEGORY``
if the category doesn't exist).
:rtype: int
"""
try:
return self.__categories_index__[name]
except KeyError:
return IDX_UNKNOWN_CATEGORY
def get_category_name(self, index):
"""
Given its index, return the category name.
:param index: The category index
:type index: int
:returns: the category name (or ``STR_UNKNOWN_CATEGORY``
if the category doesn't exist).
:rtype: str
"""
try:
if isinstance(index, list):
index = index[0]
return self.__categories__[index][NAME]
except IndexError:
return STR_UNKNOWN_CATEGORY
def get_word_index(self, word):
"""
Given a word, return its index.
        :param word: a word
        :type word: str
:returns: the word index (or ``IDX_UNKNOWN_WORD`` if the word doesn't exist).
:rtype: int
"""
try:
return self.__word_to_index__[word]
except KeyError:
return IDX_UNKNOWN_WORD
def get_word(self, index):
"""
Given the index, return the word.
:param index: the word index
:type index: int
:returns: the word (or ``STR_UNKNOWN_WORD`` if the word doesn't exist).
        :rtype: str
"""
return (
self.__index_to_word__[index]
if index in self.__index_to_word__ else STR_UNKNOWN_WORD
)
def get_next_words(self, sent, cat, n=None):
"""
Given a sentence, return the list of ``n`` (possible) following words.
:param sent: a sentence (e.g. "an artificial")
:type sent: str
:param cat: the category name
:type cat: str
:param n: the maximum number of possible answers
:type n: int
:returns: a list of tuples (word, frequency, probability)
:rtype: list (of tuple)
:raises: InvalidCategoryError
"""
icat = self.get_category_index(cat)
if icat == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
guessedwords = [
(self.get_word(iword), fr, P)
for iword, fr, P in self.__get_next_iwords__(sent, icat) if fr
]
if n is not None and guessedwords:
return guessedwords[:n]
return guessedwords
def get_stopwords(self, sg_threshold=.01):
"""
Get the list of (recognized) stopwords.
:param sg_threshold: significance (sg) value used as a threshold to
consider words as stopwords (i.e. words with
sg < ``sg_threshold`` for all categories will
be considered as "stopwords")
:type sg_threshold: float
:returns: a list of stopwords
:rtype: list (of str)
"""
if not self.__categories__:
return
iwords = self.__index_to_word__
sg_threshold = float(sg_threshold or .01)
categories = self.__categories__
cats_len = len(categories)
sg = self.__sg__
stopwords = []
vocab = categories[0][VOCAB]
for word0 in iwords:
word_sg = [
sg([word0], c_i)
for c_i in xrange(cats_len)
]
word_cats_len = len([v for v in word_sg if v < sg_threshold])
if word_cats_len == cats_len:
stopwords.append(word0)
stopwords = [
iwords[w0]
for w0, v
in sorted(
[
(w0, vocab[w0][FR] if w0 in vocab else 0)
for w0 in stopwords
],
key=lambda k: -k[1]
)
]
return stopwords
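    # Usage sketch (assumed): after training, a call like
    #   clf.get_stopwords(sg_threshold=.01)
    # returns the recognized stopwords, most frequent first.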
def save_model(self, path=None):
"""
Save the model to disk.
        If a ``path`` is not given, the default ("./") will be used.
        However, if a ``path`` is given, it will not only be used to save
        the model but will also overwrite the default path by calling the
``SS3``'s ``set_model_path(path)`` method (see ``set_model_path``
method documentation for more detail).
:param path: the path to save the model to
:type path: str
:raises: OSError
"""
if path:
self.set_model_path(path)
stime = time()
Print.info(
"saving model (%s/%s.%s)..."
%
(self.__models_folder__, self.__name__, STR_MODEL_EXT),
False
)
json_file_format = {
"__a__": self.__a__,
"__l__": self.__l__,
"__p__": self.__p__,
"__s__": self.__s__,
"__max_fr__": self.__max_fr__,
"__max_gv__": self.__max_gv__,
"__categories__": self.__categories__,
"__categories_index__": self.__categories_index__,
"__index_to_word__": self.__index_to_word__,
"__word_to_index__": self.__word_to_index__,
"__cv_mode__": self.__cv_mode__,
"__sg_mode__": self.__sg_mode__,
"__multilabel__": self.__multilabel__
}
try:
os.makedirs(self.__models_folder__)
except OSError as ose:
if ose.errno == errno.EEXIST and os.path.isdir(self.__models_folder__):
pass
else:
raise
json_file = open(
"%s/%s.%s" % (
self.__models_folder__,
self.__name__,
STR_MODEL_EXT
), "w", encoding=ENCODING
)
try: # python 3
json_file.write(json.dumps(json_file_format))
except TypeError: # python 2
json_file.write(json.dumps(json_file_format).decode(ENCODING))
json_file.close()
Print.info("(%.1fs)" % (time() - stime))
def load_model(self, path=None):
"""
Load model from disk.
        If a ``path`` is not given, the default ("./") will be used.
        However, if a ``path`` is given, it will not only be used to load
        the model but will also overwrite the default path by calling the
``SS3``'s ``set_model_path(path)`` method (see ``set_model_path``
method documentation for more detail).
:param path: the path to load the model from
:type path: str
:raises: OSError
"""
if path:
self.set_model_path(path)
stime = time()
Print.info("loading '%s' model from disk..." % self.__name__)
json_file = open(
"%s/%s.%s" % (
self.__models_folder__,
self.__name__,
STR_MODEL_EXT
), "r", encoding=ENCODING
)
jmodel = json.loads(json_file.read(), object_hook=key_as_int)
json_file.close()
self.__max_fr__ = jmodel["__max_fr__"]
self.__max_gv__ = jmodel["__max_gv__"]
self.__l__ = jmodel["__l__"]
self.__p__ = jmodel["__p__"]
self.__s__ = jmodel["__s__"]
self.__a__ = jmodel["__a__"]
self.__categories__ = jmodel["__categories__"]
self.__categories_index__ = jmodel["__categories_index__"]
self.__index_to_word__ = jmodel["__index_to_word__"]
self.__word_to_index__ = jmodel["__word_to_index__"]
self.__cv_mode__ = jmodel["__cv_mode__"]
self.__multilabel__ = jmodel["__multilabel__"] if "__multilabel__" in jmodel else False
self.__sg_mode__ = (jmodel["__sg_mode__"]
if "__sg_mode__" in jmodel
else jmodel["__sn_mode__"])
self.__zero_cv__ = (0,) * len(self.__categories__)
self.__s_update__ = self.__s__
self.__l_update__ = self.__l__
self.__p_update__ = self.__p__
Print.info("(%.1fs)" % (time() - stime))
def save_cat_vocab(self, cat, path="./", n_grams=-1):
"""
Save category vocabulary to disk.
:param cat: the category name
:type cat: str
:param path: the path in which to store the vocabulary
:type path: str
:param n_grams: indicates the n-grams to be stored (e.g. only 1-grams,
2-grams, 3-grams, etc.). Default -1 stores all
learned n-grams (1-grams, 2-grams, 3-grams, etc.)
:type n_grams: int
:raises: InvalidCategoryError
"""
if self.get_category_index(cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
self.__save_cat_vocab__(self.get_category_index(cat), path, n_grams)
def save_vocab(self, path="./", n_grams=-1):
"""
Save learned vocabularies to disk.
:param path: the path in which to store the vocabularies
:type path: str
:param n_grams: indicates the n-grams to be stored (e.g. only 1-grams,
2-grams, 3-grams, etc.). Default -1 stores all
learned n-grams (1-grams, 2-grams, 3-grams, etc.)
:type n_grams: int
"""
for icat in xrange(len(self.__categories__)):
self.__save_cat_vocab__(icat, path, n_grams)
def save_wordcloud(self, cat, top_n=100, n_grams=1, path=None, size=1024,
shape="circle", palette="cartocolors.qualitative.Prism_2", color=None,
background_color="white", plot=False):
"""
Create a word cloud and save it to disk as an image.
The word cloud shows the top-n words selected by the confidence value learned by the model.
In addition, individual words are sized by the learned value.
:param cat: the category label
:type cat: str
:param top_n: number of words to be taken into account.
For instance, top 50 words (default: 100).
:type top_n: int
:param n_grams: indicates the word n-grams to be used to create the cloud. For instance,
1 for word cloud, 2 for bigrams cloud, 3 for trigrams cloud, and so on
(default: 1).
:type n_grams: int
:param path: the path to the image file in which to store the word cloud
(e.g. "../../my_wordcloud.jpg").
If no path is given, by default, the image file will be stored in the current
working directory as "wordcloud_topN_CAT(NGRAM).png" where N is the `top_n`
                     value, CAT is the category label, and NGRAM indicates which n-grams populate
                     the cloud.
:type path: str
:param size: the size of the image in pixels (default: 1024)
:type size: int
:param shape: the shape of the cloud (a FontAwesome icon name).
The complete list of allowed icon names are available at
https://fontawesome.com/v5.15/icons?d=gallery&p=1&m=free
(default: "circle")
:type shape: str
:param palette: the color palette used for coloring words by giving the
palettable module and palette name
(list available at https://jiffyclub.github.io/palettable/)
(default: "cartocolors.qualitative.Prism_2")
:type palette: str
:param color: a custom color for words (if given, overrides the color palette).
The color string could be the hex color code (e.g. "#FF5733") or the HTML
color name (e.g. "tomato"). The complete list of HTML color names is available
at https://www.w3schools.com/colors/colors_names.asp
:type color: str
:param background_color: the background color as either the HTML color name or the hex code
(default: "white").
:type background_color: str
:param plot: whether or not to also plot the cloud (after saving the file)
(default: False)
:type plot: bool
:raises: InvalidCategoryError, ValueError
"""
if self.get_category_index(cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
if top_n < 1 or n_grams < 1 or size < 1:
raise ValueError("`top_n`, `n_grams`, and `size` arguments must be positive integers")
import stylecloud
icat = self.get_category_index(cat)
category = self.__categories__[icat]
vocab = category[VOCAB]
vocabularies_out = [[] for _ in xrange(n_grams)]
self.__get_vocabularies__(icat, vocab, [], n_grams, vocabularies_out, "+")
ilen = n_grams - 1
if not vocabularies_out[ilen]:
            Print.info("\t[ empty word cloud: no %d-grams to be shown ]" % n_grams)
return
terms = dict((t, cv)
for t, _, _, cv
in sorted(vocabularies_out[ilen], key=lambda k: -k[-1])[:top_n])
if path is None:
term = ["", "(bigrams)", "(trigrams)"][ilen] if ilen <= 2 else "(%d-grams)" % (ilen + 1)
path = "wordcloud_top%d_%s%s.png" % (top_n, cat, term)
stylecloud.gen_stylecloud(
terms,
icon_name="fas fa-%s" % shape,
output_name=path,
palette=palette,
colors=color,
background_color=background_color,
size=size
)
Print.info("\t[ word cloud stored in '%s' ]" % path)
if plot:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
wc = mpimg.imread(path)
plt.figure(figsize=(size / 100., size / 100.))
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show()
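    # Usage sketch (assumed; "sports" is a hypothetical category label):
    #   clf.save_wordcloud("sports", top_n=50, n_grams=2, shape="cloud", plot=True)
    # stores a bigram cloud as "wordcloud_top50_sports(bigrams).png" and then plots it.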
def update_values(self, force=False):
"""
Update model values (cv, gv, lv, etc.).
:param force: force update (even if hyperparameters haven't changed)
:type force: bool
"""
update = 0
if force or self.__s_update__ != self.__s__:
update = 3
elif self.__l_update__ != self.__l__:
update = 2
elif self.__p_update__ != self.__p__:
update = 1
if update == 0:
Print.info("nothing to update...", offset=1)
return
category_len = len(self.__categories__)
categories = xrange(category_len)
category_names = [self.get_category_name(ic) for ic in categories]
stime = time()
Print.info("about to start updating values...", offset=1)
if update == 3: # only if s has changed
Print.info("caching lv values", offset=1)
for icat in categories:
Print.info(
"lv values for %d (%s)" % (icat, category_names[icat]),
offset=4
)
self.__cache_lvs__(icat, self.__categories__[icat][VOCAB], [])
if update >= 2: # only if s or l have changed
Print.info("caching sg values", offset=1)
for icat in categories:
Print.info(
"sg values for %d (%s)" % (icat, category_names[icat]),
offset=4
)
self.__cache_sg__(icat, self.__categories__[icat][VOCAB], [])
Print.info("caching gv values")
for icat in categories:
Print.info(
"gv values for %d (%s)" % (icat, category_names[icat]),
offset=4
)
self.__cache_gvs__(icat, self.__categories__[icat][VOCAB], [])
if self.__cv_mode__ != STR_GV:
Print.info("updating max gv values", offset=1)
for icat in categories:
Print.info(
"max gv values for %d (%s)" % (icat, category_names[icat]),
offset=4
)
self.__max_gv__[icat] = list(
map(lambda _: 0, self.__max_gv__[icat])
)
self.__update_max_gvs__(
icat, self.__categories__[icat][VOCAB], []
)
Print.info("max gv values have been updated", offset=1)
Print.info("caching confidence values (cvs)", offset=1)
for icat in categories:
Print.info(
"cvs for %d (%s)" % (icat, category_names[icat]),
offset=4
)
self.__cache_cvs__(icat, self.__categories__[icat][VOCAB], [])
Print.info("finished --time: %.1fs" % (time() - stime), offset=1)
self.__s_update__ = self.__s__
self.__l_update__ = self.__l__
self.__p_update__ = self.__p__
if self.__cv_cache__ is not None:
self.__update_cv_cache__()
def print_model_info(self):
"""Print information regarding the model."""
print()
print(" %s: %s\n" % (
Print.style.green(Print.style.ubold("NAME")),
Print.style.warning(self.get_name())
))
def print_hyperparameters_info(self):
"""Print information about hyperparameters."""
print()
print(
" %s:\n" % Print.style.green(Print.style.ubold("HYPERPARAMETERS"))
)
print("\tSmoothness(s):", Print.style.warning(self.__s__))
print("\tSignificance(l):", Print.style.warning(self.__l__))
print("\tSanction(p):", Print.style.warning(self.__p__))
print("")
print("\tAlpha(a):", Print.style.warning(self.__a__))
def print_categories_info(self):
"""Print information about learned categories."""
if not self.__categories__:
print(
"\n %s: None\n"
% Print.style.green(Print.style.ubold("CATEGORIES"))
)
return
cat_len = max([
len(self.get_category_name(ic))
for ic in xrange(len(self.__categories__))
])
cat_len = max(cat_len, 8)
row_template = Print.style.warning("\t{:^%d} " % cat_len)
row_template += "| {:^5} | {:^10} | {:^11} | {:^13} | {:^6} |"
print()
print("\n %s:\n" % Print.style.green(Print.style.ubold("CATEGORIES")))
print(
row_template
.format(
"Category", "Index", "Length",
"Vocab. Size", "Word Max. Fr.", "N-gram"
)
)
print(
(
"\t{:-<%d}-|-{:-<5}-|-{:-<10}-|-{:-<11}-|-{:-<13}-|-{:-<6}-|"
% cat_len
)
.format('', '', '', '', '', '')
)
mpci = self.__get_most_probable_category__()
mpc_size = 0
mpc_total = 0
for icat, category in enumerate(self.__categories__):
icat_size = self.__get_category_length__(icat)
print(
row_template
.format(
category[NAME],
icat, icat_size,
len(category[VOCAB]),
self.__max_fr__[icat][0],
len(self.__max_fr__[icat])
)
)
mpc_total += icat_size
if icat == mpci:
mpc_size = icat_size
print(
"\n\t%s: %s %s"
%
(
Print.style.ubold("Most Probable Category"),
Print.style.warning(self.get_category_name(mpci)),
Print.style.blue("(%.2f%%)" % (100.0 * mpc_size / mpc_total))
)
)
print()
def print_ngram_info(self, ngram):
"""
Print debugging information about a given n-gram.
Namely, print the n-gram frequency (fr), local value (lv),
global value (gv), confidence value (cv), sanction (sn) weight,
significance (sg) weight.
:param ngram: the n-gram (e.g. "machine", "machine learning", etc.)
:type ngram: str
"""
if not self.__categories__:
return
word_index = self.get_word_index
n_gram_str = ngram
ngram = [word_index(w)
for w in re.split(self.__word_delimiter__, ngram)
if w]
print()
print(
" %s: %s (%s)" % (
Print.style.green(
"%d-GRAM" % len(ngram) if len(ngram) > 1 else "WORD"
),
Print.style.warning(n_gram_str),
"is unknown"
if IDX_UNKNOWN_WORD in ngram
else "index: " + str(ngram if len(ngram) > 1 else ngram[0])
)
)
if IDX_UNKNOWN_WORD in ngram:
print()
return
cat_len = max([
len(self.get_category_name(ic))
for ic in xrange(len(self.__categories__))
])
cat_len = max(cat_len, 35)
header_template = Print.style.bold(
" {:<%d} | fr | lv | sg | sn | gv | cv |"
% cat_len
)
print()
print(header_template.format("Category"))
header_template = (
" {:-<%d}-|----------|-------|-------|-------|-------|-------|"
% cat_len
)
print(header_template.format(''))
row_template = (
" %s | {:^8} | {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:.3f} |"
% (Print.style.warning("{:<%d}" % cat_len))
)
for icat in xrange(len(self.__categories__)):
n_gram_tip = self.__trie_node__(ngram, icat)
if n_gram_tip:
print(
row_template
.format(
self.get_category_name(icat)[:35],
n_gram_tip[FR],
self.__lv__(ngram, icat),
self.__sg__(ngram, icat),
self.__sn__(ngram, icat),
self.__gv__(ngram, icat),
self.__cv__(ngram, icat),
)
)
print()
def plot_value_distribution(self, cat):
"""
Plot the category's global and local value distribution.
:param cat: the category name
:type cat: str
:raises: InvalidCategoryError
"""
if self.get_category_index(cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
import matplotlib.pyplot as plt
icat = self.get_category_index(cat)
vocab_metrics = self.__get_category_vocab__(icat)
x = []
y_lv = []
y_gv = []
vocab_metrics_len = len(vocab_metrics)
for i in xrange(vocab_metrics_len):
metric = vocab_metrics[i]
x.append(i + 1)
y_lv.append(metric[2])
y_gv.append(metric[3])
plt.figure(figsize=(20, 10))
plt.title(
"Word Value Distribution (%s)" % self.get_category_name(icat)
)
plt.xlabel("Word Rank")
plt.ylabel("Value")
plt.xlim(right=max(x))
plt.plot(
x, y_lv, "-", label="local value ($lv$)",
linewidth=2, color="#7f7d7e"
)
plt.plot(
x, y_gv, "-", label="global value ($gv$)",
linewidth=4, color="#2ca02c")
plt.legend()
plt.show()
def extract_insight(
self, doc, cat='auto', level='word', window_size=3, min_cv=0.01, sort=True
):
"""
Get the list of text blocks involved in the classification decision.
Given a document, return the pieces of text that were involved in the
classification decision, along with the confidence values associated
with them. If a category is given, perform the process as if the
given category were the one assigned by the classifier.
:param doc: the content of the document
:type doc: str
:param cat: the category in relation to which text blocks are obtained.
If not present, it will automatically use the category assigned
by SS3 after classification.
Options are 'auto' or a given category name. (default: 'auto')
:type cat: str
:param level: the level at which text blocks are going to be extracted.
Options are 'word', 'sentence' or 'paragraph'. (default: 'word')
:type level: str
:param window_size: the number of words, before and after each identified word,
to be also included along with the identified word. For instance,
``window_size=0`` means return only individual words,
``window_size=1`` means also include the word that was
before and the one that was after them. If multiple selected
words are close enough for their word windows to be overlapping,
then those word windows will be merged into a longer and single one.
This argument is ignored when ``level`` is not equal to 'word'.
(default: 3)
:type window_size: int
:param min_cv: the minimum confidence value each text block must have to be
included in the output. (default 0.01)
:type min_cv: float
:param sort: whether to return the text blocks ordered by their confidence value
or not. If ``sort=False`` then blocks will be returned
following the order they had in the input document. (default: True)
:type sort: bool
:returns: a list of pairs (text, confidence value) containing the text (blocks) involved,
and to what degree (*), in the classification decision.
(*) given by the confidence value
:rtype: list
:raises: InvalidCategoryError, ValueError
"""
r = self.classify(doc, json=True)
word_regex = self.__word_regex__
if cat == 'auto':
c_i = r["cvns"][0][0]
else:
c_i = self.get_category_index(cat)
if c_i == IDX_UNKNOWN_CATEGORY:
Print.error(
"The excepted values for the `cat` argument are 'auto' "
"or a valid category name, found '%s' instead" % str(cat),
raises=InvalidCategoryError
)
if level == 'paragraph':
insights = [
(
"".join([word["lexeme"]
for s in p["sents"]
for word in s["words"]]),
p["cv"][c_i]
)
for p in r["pars"]
if p["cv"][c_i] > min_cv
]
elif level == 'sentence':
insights = [
(
"".join([word["lexeme"]
for word in s["words"]]),
s["cv"][c_i]
)
for p in r["pars"] for s in p["sents"]
if s["cv"][c_i] > min_cv
]
elif level == 'word':
ww_size = window_size
insights = []
for p in r["pars"]:
words = [w for s in p["sents"] for w in s["words"]]
w_i = 0
while w_i < len(words):
w = words[w_i]
if w["cv"][c_i] > min_cv:
ww = []
ww_cv = 0
ww_left = min(w_i, ww_size) + 1
w_i -= ww_left - 1
while ww_left > 0 and w_i < len(words):
ww.append(words[w_i]["lexeme"])
ww_cv += words[w_i]["cv"][c_i]
if words[w_i]["cv"][c_i] > min_cv:
ww_left += min(ww_size, (len(words) - 1) - w_i)
if re.search(word_regex, words[w_i]["lexeme"]):
ww_left -= 1
w_i += 1
insights.append(("".join(ww), ww_cv))
else:
w_i += 1
else:
raise ValueError(
"expected values for the `level` argument are "
"'word', 'sentence', or 'paragraph', found '%s' instead."
% str(level)
)
if sort:
insights.sort(key=lambda b_cv: -b_cv[1])
return insights
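    # Usage sketch (hypothetical, not from the library's docs): assuming `clf` is an
    # already-trained SS3 instance with a "sports" category; the confidence value
    # shown below is purely illustrative.
    #
    #   >>> clf.extract_insight("The team won the championship game",
    #   ...                     cat="sports", level="sentence", min_cv=0.01)
    #   [('The team won the championship game', 0.83)]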
def learn(self, doc, cat, n_grams=1, prep=True, update=True):
"""
Learn a new document for a given category.
:param doc: the content of the document
:type doc: str
:param cat: the category name
:type cat: str
:param n_grams: indicates the maximum ``n``-grams to be learned
(e.g. a value of ``1`` means only 1-grams (words),
``2`` means 1-grams and 2-grams,
            ``3`` means 1-grams, 2-grams and 3-grams, and so on).
:type n_grams: int
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:param update: enables model auto-update after learning (default: True)
:type update: bool
"""
self.__cv_cache__ = None
if not doc or cat is None:
return
try:
doc = doc.decode(ENCODING)
except UnicodeEncodeError: # for python 2 compatibility
doc = doc.encode(ENCODING).decode(ENCODING)
except AttributeError:
pass
icat = self.__get_category__(cat)
cat = self.__categories__[icat]
word_to_index = self.__word_to_index__
word_regex = self.__word_regex__
if prep:
Print.info("preprocessing document...", offset=1)
stime = time()
doc = Pp.clean_and_ready(doc)
Print.info("finished --time: %.1fs" % (time() - stime), offset=1)
doc = re.findall("%s|[^%s]+" % (word_regex, self.__word_delimiter__), doc)
text_len = len(doc)
Print.info(
"about to learn new document (%d terms)" % text_len, offset=1
)
vocab = cat[VOCAB] # getting cat vocab
index_to_word = self.__index_to_word__
max_frs = self.__max_fr__[icat]
max_gvs = self.__max_gv__[icat]
stime = time()
Print.info("learning...", offset=1)
tips = []
for word in doc:
if re.match(word_regex, word):
self.__prun_counter__ += 1
# if word doesn't exist yet, then...
try:
word = word_to_index[word]
except KeyError:
new_index = len(word_to_index)
word_to_index[word] = new_index
index_to_word[new_index] = word
word = new_index
tips.append(vocab)
if len(tips) > n_grams:
del tips[0]
tips_length = len(tips)
for i in xrange(tips_length):
tips_i = tips[i]
try:
max_frs[i]
except IndexError:
max_frs.append(1)
max_gvs.append(0)
try:
word_info = tips_i[word]
word_info[FR] += 1
if word_info[FR] > max_frs[(tips_length - 1) - i]:
max_frs[(tips_length - 1) - i] = word_info[FR]
except KeyError:
tips_i[word] = [
{}, # NEXT/VOCAB
1, # FR
0, # CV
0, # SG
0, # GV
0 # LV
]
word_info = tips_i[word]
# print i, index_to_word[ word ], tips_i[word][FR]
tips[i] = word_info[NEXT]
else:
tips[:] = []
if self.__prun_counter__ >= self.__prun_trigger__:
# trie data-structures pruning
self.__prune_tries__()
Print.info("finished --time: %.1fs" % (time() - stime), offset=1)
# updating values
if update:
self.update_values(force=True)
def classify(self, doc, prep=True, sort=True, json=False, prep_func=None):
"""
Classify a given document.
:param doc: the content of the document
:type doc: str
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:param sort: sort the classification result (from best to worst)
:type sort: bool
:param json: return a debugging version of the result in JSON format.
:type json: bool
:param prep_func: the custom preprocessing function to be applied to
the given document before classifying it.
If not given, the default preprocessing function will
be used (as long as ``prep=True``)
:type prep_func: function
:returns: the document confidence vector if ``sort`` is False.
If ``sort`` is True, a list of pairs
(category index, confidence value) ordered by confidence value.
:rtype: list
:raises: EmptyModelError
"""
if not self.__categories__:
raise EmptyModelError
if self.__update_needed__():
self.update_values()
doc = doc or ''
try:
doc = doc.decode(ENCODING)
except UnicodeEncodeError: # for python 2 compatibility
doc = doc.encode(ENCODING).decode(ENCODING)
except BaseException:
pass
if not json:
paragraphs_cvs = [
self.__classify_paragraph__(parag, prep=prep, prep_func=prep_func)
for parag in re.split(self.__parag_delimiter__, doc)
if parag
]
if paragraphs_cvs:
cv = self.summary_op_paragraphs(paragraphs_cvs)
else:
cv = self.__zero_cv__
if sort:
return sorted(
[
(i, cv[i])
for i in xrange(len(cv))
],
key=lambda e: -e[1]
)
return cv
else:
info = [
self.__classify_paragraph__(parag, prep=prep, prep_func=prep_func, json=True)
for parag in re_split_keep(self.__parag_delimiter__, doc)
if parag
]
nbr_cats = len(self.__categories__)
cv = self.summary_op_paragraphs([v["cv"] for v in info])
max_v = max(cv)
if max_v > 1:
norm_cv = map(lambda x: x / max_v, cv)
else:
norm_cv = cv
norm_cv_sorted = sorted(
[(i, nv, cv[i]) for i, nv in enumerate(norm_cv)],
key=lambda e: -e[1]
)
return {
"pars": info,
"cv": cv,
"wmv": reduce(vmax, [v["wmv"] for v in info]),
"cvns": norm_cv_sorted,
"ci": [self.get_category_name(ic) for ic in xrange(nbr_cats)]
}
def classify_label(self, doc, def_cat=STR_MOST_PROBABLE, labels=True, prep=True):
"""
Classify a given document returning the category label.
:param doc: the content of the document
:type doc: str
:param def_cat: default category to be assigned when SS3 is not
able to classify a document. Options are
"most-probable", "unknown" or a given category name.
(default: "most-probable")
:type def_cat: str
:param labels: whether to return the category label or just the
category index (default: True)
:type labels: bool
:param prep: enables the default input preprocessing process (default: True)
:type prep: bool
:returns: the category label or the category index.
:rtype: str or int
:raises: InvalidCategoryError
"""
r = self.classify(doc, sort=True, prep=prep)
if not r or not r[0][1]:
if not def_cat or def_cat == STR_UNKNOWN:
cat = STR_UNKNOWN_CATEGORY
elif def_cat == STR_MOST_PROBABLE:
cat = self.get_most_probable_category()
else:
if self.get_category_index(def_cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
cat = def_cat
else:
cat = self.get_category_name(r[0][0])
return cat if labels else self.get_category_index(cat)
def classify_multilabel(self, doc, def_cat=STR_UNKNOWN, labels=True, prep=True):
"""
Classify a given document returning multiple category labels.
This method could be used to perform multi-label classification. Internally, it
uses k-mean clustering on the confidence vector to select the proper group of
labels.
:param doc: the content of the document
:type doc: str
:param def_cat: default category to be assigned when SS3 is not
able to classify a document. Options are
"most-probable", "unknown" or a given category name.
(default: "unknown")
:type def_cat: str
:param labels: whether to return the category labels or just the
category indexes (default: True)
:type labels: bool
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:returns: the list of category labels (or indexes).
:rtype: list (of str or int)
:raises: InvalidCategoryError
"""
r = self.classify(doc, sort=True, prep=prep)
if not r or not r[0][1]:
if not def_cat or def_cat == STR_UNKNOWN:
return []
elif def_cat == STR_MOST_PROBABLE:
cat = self.get_most_probable_category()
else:
if self.get_category_index(def_cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
cat = def_cat
if cat != STR_OTHERS_CATEGORY:
return [cat] if labels else [self.get_category_index(cat)]
else:
return []
else:
__other_idx__ = self.get_category_index(STR_OTHERS_CATEGORY)
if labels:
result = [self.get_category_name(cat_i)
for cat_i, _ in r[:kmean_multilabel_size(r)]]
# removing "hidden" special category ("[other]")
if __other_idx__ != IDX_UNKNOWN_CATEGORY and STR_OTHERS_CATEGORY in result:
result.remove(STR_OTHERS_CATEGORY)
else:
result = [cat_i for cat_i, _ in r[:kmean_multilabel_size(r)]]
# removing "hidden" special category ("[other]")
if __other_idx__ != IDX_UNKNOWN_CATEGORY and __other_idx__ in result:
result.remove(__other_idx__)
return result
def fit(self, x_train, y_train, n_grams=1, prep=True, leave_pbar=True):
"""
Train the model given a list of documents and category labels.
:param x_train: the list of documents
:type x_train: list (of str)
:param y_train: the list of document labels
:type y_train: list of str for singlelabel classification;
list of list of str for multilabel classification.
:param n_grams: indicates the maximum ``n``-grams to be learned
(e.g. a value of ``1`` means only 1-grams (words),
``2`` means 1-grams and 2-grams,
            ``3`` means 1-grams, 2-grams and 3-grams, and so on).
:type n_grams: int
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:param leave_pbar: controls whether to leave the progress bar or
remove it after finishing.
:type leave_pbar: bool
:raises: ValueError
"""
stime = time()
x_train, y_train = list(x_train), list(y_train)
if len(x_train) != len(y_train):
raise ValueError("`x_train` and `y_train` must have the same length")
if len(y_train) == 0:
raise ValueError("`x_train` and `y_train` are empty")
# if it's a multi-label classification problem
if is_a_collection(y_train[0]):
# flattening y_train
labels = [l for y in y_train for l in y]
self.__multilabel__ = True
else:
labels = y_train
cats = sorted(list(set(labels)))
# if it's a single-label classification problem
if not is_a_collection(y_train[0]):
x_train = [
"".join([
x_train[i]
if x_train[i] and x_train[i][-1] == '\n'
else
x_train[i] + '\n'
for i in xrange(len(x_train))
if y_train[i] == cat
])
for cat in cats
]
y_train = list(cats)
Print.info("about to start training", offset=1)
Print.verbosity_region_begin(VERBOSITY.NORMAL)
progress_bar = tqdm(total=len(x_train), desc="Training",
leave=leave_pbar, disable=Print.is_quiet())
# if it's a multi-label classification problem
if is_a_collection(y_train[0]):
__others__ = [STR_OTHERS_CATEGORY]
for i in range(len(x_train)):
for label in (y_train[i] if y_train[i] else __others__):
self.learn(
x_train[i], label,
n_grams=n_grams, prep=prep, update=False
)
progress_bar.update(1)
else:
for i in range(len(x_train)):
progress_bar.set_description_str("Training on '%s'" % str(y_train[i]))
self.learn(
x_train[i], y_train[i],
n_grams=n_grams, prep=prep, update=False
)
progress_bar.update(1)
progress_bar.close()
self.__prune_tries__()
Print.verbosity_region_end()
Print.info("finished --time: %.1fs" % (time() - stime), offset=1)
self.update_values(force=True)
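    # Usage sketch (hypothetical toy data, not taken from the library's docs):
    #
    #   >>> clf.fit(["I love pizza and pasta", "the team won the game"],
    #   ...         ["food", "sports"])                                   # single-label
    #   >>> clf.fit(["pizza stand at the stadium"], [["food", "sports"]])  # multi-label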
def predict_proba(self, x_test, prep=True, leave_pbar=True):
"""
Classify a list of documents returning a list of confidence vectors.
:param x_test: the list of documents to be classified
:type x_test: list (of str)
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:param leave_pbar: controls whether to leave the progress bar after
finishing or remove it.
:type leave_pbar: bool
:returns: the list of confidence vectors
:rtype: list (of list of float)
:raises: EmptyModelError
"""
if not self.__categories__:
raise EmptyModelError
if self.get_ngrams_length() == 1 and self.__summary_ops_are_pristine__():
return self.__predict_fast__(x_test, prep=prep,
leave_pbar=leave_pbar, proba=True)
x_test = list(x_test)
classify = self.classify
return [
classify(x, sort=False)
for x in tqdm(x_test, desc="Classification", disable=Print.is_quiet())
]
def predict(
self, x_test, def_cat=None,
labels=True, multilabel=False, prep=True, leave_pbar=True
):
"""
Classify a list of documents.
:param x_test: the list of documents to be classified
:type x_test: list (of str)
:param def_cat: default category to be assigned when SS3 is not
able to classify a document. Options are
"most-probable", "unknown" or a given category name.
(default: "most-probable", or "unknown" for
multi-label classification)
:type def_cat: str
:param labels: whether to return the list of category names or just
category indexes
:type labels: bool
:param multilabel: whether to perform multi-label classification or not.
if enabled, for each document returns a ``list`` of labels
instead of a single label (``str``).
If the model was trained using multilabeled data, then this
argument will be ignored and set to True.
:type multilabel: bool
:param prep: enables the default input preprocessing (default: True)
:type prep: bool
:param leave_pbar: controls whether to leave the progress bar or
remove it after finishing.
:type leave_pbar: bool
:returns: if ``labels`` is True, the list of category names,
otherwise, the list of category indexes.
:rtype: list (of int or str)
:raises: EmptyModelError, InvalidCategoryError
"""
if not self.__categories__:
raise EmptyModelError
multilabel = multilabel or self.__multilabel__
if def_cat is None:
def_cat = STR_UNKNOWN if multilabel else STR_MOST_PROBABLE
if not def_cat or def_cat == STR_UNKNOWN:
if not multilabel:
Print.info(
"default category was set to 'unknown' (its index will be -1)",
offset=1
)
else:
if def_cat == STR_MOST_PROBABLE:
Print.info(
"default category was automatically set to '%s' "
"(the most probable one)" % self.get_most_probable_category(),
offset=1
)
else:
Print.info("default category was set to '%s'" % def_cat, offset=1)
if self.get_category_index(def_cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
if self.get_ngrams_length() == 1 and self.__summary_ops_are_pristine__():
return self.__predict_fast__(x_test, def_cat=def_cat, labels=labels,
multilabel=multilabel, prep=prep,
leave_pbar=leave_pbar)
stime = time()
Print.info("about to start classifying test documents", offset=1)
classify = self.classify_label if not multilabel else self.classify_multilabel
x_test = list(x_test)
y_pred = [
classify(doc, def_cat=def_cat, labels=labels, prep=prep)
for doc in tqdm(x_test, desc="Classification",
leave=leave_pbar, disable=Print.is_quiet())
]
Print.info("finished --time: %.1fs" % (time() - stime), offset=1)
return y_pred
def cv(self, ngram, cat):
"""
Return the "confidence value" of a given word n-gram for the given category.
        This value is obtained by applying a final transformation to the global value
        of the given word n-gram using the gv function [*].
        This transformation is given when creating a new SS3 instance (see the
SS3 class constructor's ``cv_m`` argument for more information).
[*] the gv function is defined in Section 3.2.2 of the original paper:
https://arxiv.org/pdf/1905.08772.pdf
Examples:
>>> clf.cv("chicken", "food")
>>> clf.cv("roast chicken", "food")
>>> clf.cv("chicken", "sports")
:param ngram: the word or word n-gram
:type ngram: str
:param cat: the category label
:type cat: str
:returns: the confidence value
:rtype: float
:raises: InvalidCategoryError
"""
return self.__apply_fn__(self.__cv__, ngram, cat)
def gv(self, ngram, cat):
"""
Return the "global value" of a given word n-gram for the given category.
(gv function is defined in Section 3.2.2 of the original paper:
https://arxiv.org/pdf/1905.08772.pdf)
Examples:
>>> clf.gv("chicken", "food")
>>> clf.gv("roast chicken", "food")
>>> clf.gv("chicken", "sports")
:param ngram: the word or word n-gram
:type ngram: str
:param cat: the category label
:type cat: str
:returns: the global value
:rtype: float
:raises: InvalidCategoryError
"""
return self.__apply_fn__(self.__gv__, ngram, cat)
def lv(self, ngram, cat):
"""
Return the "local value" of a given word n-gram for the given category.
(lv function is defined in Section 3.2.2 of the original paper:
https://arxiv.org/pdf/1905.08772.pdf)
Examples:
>>> clf.lv("chicken", "food")
>>> clf.lv("roast chicken", "food")
>>> clf.lv("chicken", "sports")
:param ngram: the word or word n-gram
:type ngram: str
:param cat: the category label
:type cat: str
:returns: the local value
:rtype: float
:raises: InvalidCategoryError
"""
return self.__apply_fn__(self.__lv__, ngram, cat)
def sg(self, ngram, cat):
"""
Return the "significance factor" of a given word n-gram for the given category.
(sg function is defined in Section 3.2.2 of the original paper:
https://arxiv.org/pdf/1905.08772.pdf)
Examples:
>>> clf.sg("chicken", "food")
>>> clf.sg("roast chicken", "food")
>>> clf.sg("chicken", "sports")
:param ngram: the word or word n-gram
:type ngram: str
:param cat: the category label
:type cat: str
:returns: the significance factor
:rtype: float
:raises: InvalidCategoryError
"""
return self.__apply_fn__(self.__sg__, ngram, cat)
def sn(self, ngram, cat):
"""
Return the "sanction factor" of a given word n-gram for the given category.
(sn function is defined in Section 3.2.2 of the original paper:
https://arxiv.org/pdf/1905.08772.pdf)
Examples:
>>> clf.sn("chicken", "food")
>>> clf.sn("roast chicken", "food")
>>> clf.sn("chicken", "sports")
:param ngram: the word or word n-gram
:type ngram: str
:param cat: the category label
:type cat: str
:returns: the sanction factor
:rtype: float
:raises: InvalidCategoryError
"""
return self.__apply_fn__(self.__sn__, ngram, cat)
class SS3Vectorizer(CountVectorizer):
r"""
Convert a collection of text documents to a document-term matrix weighted using an SS3 model.
The weight of a term t in a document d in relation to category c is calculated by multiplying a
    term frequency weight (tf_weight) with an SS3-based weight (ss3_weight), as follows:
        weight(t, d, c) = tf_weight(t, d) * ss3_weight(t, c)
    """
__clf__ = None
__icat__ = None
__ss3_weight__ = None
__tf_weight__ = None
def __init__(self, clf, cat, ss3_weight='only_cat', tf_weight='raw_count', top_n=None,
**kwargs):
if clf.get_category_index(cat) == IDX_UNKNOWN_CATEGORY:
raise InvalidCategoryError
if not callable(ss3_weight) and ss3_weight not in WEIGHT_SCHEMES_SS3:
raise ValueError("`ss3_weight` argument must be either a custom "
"function or any of the following strings: %s" %
", ".join(WEIGHT_SCHEMES_SS3))
if not callable(tf_weight) and tf_weight not in WEIGHT_SCHEMES_TF:
raise ValueError("`tf_weight` argument must be either a custom "
"function or any of the following strings: %s" %
", ".join(WEIGHT_SCHEMES_TF))
if top_n is not None:
if not isinstance(top_n, numbers.Integral) or top_n <= 0:
raise ValueError("`top_n` argument must be either a positive integer or None")
ss3_n_grams = clf.get_ngrams_length()
min_n, max_n = kwargs["ngram_range"] if "ngram_range" in kwargs else (1, 1)
if not isinstance(min_n, numbers.Integral) or (
not isinstance(max_n, numbers.Integral)) or (min_n > max_n or min_n <= 0):
raise ValueError("`ngram_range` (n0, n1) argument must be a valid n-gram range "
"where n0 and n1 are positive integer such that n0 <= n1.")
if max_n > ss3_n_grams:
Print.warn("`ngram_range` (n0, n1) argument, n1 is greater than the longest n-gram "
"learned by the given SS3 model")
min_n, max_n = min(min_n, ss3_n_grams), min(max_n, ss3_n_grams)
if "dtype" not in kwargs:
kwargs["dtype"] = float
self.__clf__ = clf
self.__icat__ = clf.get_category_index(cat)
if ss3_weight == WEIGHT_SCHEMES_SS3[0]: # 'only_cat'
self.__ss3_weight__ = lambda cv, icat: cv[icat]
elif ss3_weight == WEIGHT_SCHEMES_SS3[1]: # 'diff_all'
self.__ss3_weight__ = lambda cv, icat: cv[icat] - sum([cv[i]
for i in range(len(cv))
if i != icat])
elif ss3_weight == WEIGHT_SCHEMES_SS3[2]: # 'diff_max'
self.__ss3_weight__ = lambda cv, icat: cv[icat] - max([cv[i]
for i in range(len(cv))
if i != icat])
elif ss3_weight == WEIGHT_SCHEMES_SS3[3]: # 'diff_median'
self.__ss3_weight__ = lambda cv, icat: cv[icat] - sorted(cv)[
len(cv) // 2 - int(not (len(cv) % 2))
]
elif ss3_weight == WEIGHT_SCHEMES_SS3[4]: # 'diff_mean'
self.__ss3_weight__ = lambda cv, icat: cv[icat] - sum(cv) / float(len(cv))
else:
self.__ss3_weight__ = ss3_weight
if "binary" in kwargs and kwargs["binary"]:
tf_weight = "binary"
del kwargs["binary"]
if tf_weight in WEIGHT_SCHEMES_TF[:2]: # 'binary' or 'raw_count'
self.__tf_weight__ = lambda freqs, iterm: freqs[iterm]
elif tf_weight == WEIGHT_SCHEMES_TF[2]: # 'term_freq'
self.__tf_weight__ = lambda freqs, iterm: freqs[iterm] / np.sum(freqs)
elif tf_weight == WEIGHT_SCHEMES_TF[3]: # 'log_norm'
self.__tf_weight__ = lambda freqs, iterm: log(1 + freqs[iterm])
elif tf_weight == WEIGHT_SCHEMES_TF[4]: # 'double_norm'
self.__tf_weight__ = lambda freqs, iterm: .5 + .5 * freqs[iterm] / np.max(freqs)
else:
self.__tf_weight__ = tf_weight
if "vocabulary" in kwargs:
vocabulary = kwargs["vocabulary"]
del kwargs["vocabulary"]
else:
icat = self.__icat__
vocabularies_out = [[] for _ in range(max_n)]
clf.__get_vocabularies__(icat, clf.__categories__[icat][VOCAB],
[], max_n, vocabularies_out, " ")
vocabulary = []
for i_gram in range(min_n - 1, max_n):
vocabulary += [t[0]
for t
in sorted(vocabularies_out[i_gram], key=lambda k: -k[-1])[:top_n]]
super().__init__(binary=(tf_weight == "binary"), vocabulary=vocabulary, **kwargs)
def fit_transform(self, raw_documents):
return self.transform(raw_documents)
def transform(self, raw_documents):
dtm = super().transform(raw_documents)
# caching in-loop variables
clf = self.__clf__
ss3_weight = self.__ss3_weight__
tf_weight = self.__tf_weight__
icat = self.__icat__
clf_apply = clf.__apply_fn__
clf_cv = clf.__classify_ngram__
feature_names = self.get_feature_names()
indptr, indices, data = dtm.indptr, dtm.indices, dtm.data
for i_row in range(dtm.shape[0]):
doc_freqs = data[indptr[i_row]:indptr[i_row + 1]].copy()
for offset in range(indptr[i_row + 1] - indptr[i_row]):
i_col = indptr[i_row] + offset
term = feature_names[indices[i_col]]
term_cv = clf_apply(clf_cv, term, None)
                data[i_col] = tf_weight(doc_freqs, offset) * ss3_weight(term_cv, icat)  # doc_freqs is row-local, so index it with the row-local offset
return dtm # document-term matrix
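# Usage sketch for SS3Vectorizer (hypothetical; assumes `clf` is a trained SS3 model
# that has a "food" category):
#
#   >>> vec = SS3Vectorizer(clf, "food", ss3_weight='only_cat', tf_weight='raw_count')
#   >>> dtm = vec.fit_transform(["roast chicken with rice", "the team won the game"])
#
# `dtm` is a sparse document-term matrix whose nonzero entries are the term-frequency
# weight multiplied by the SS3-based weight of the term for the "food" category.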
class EmptyModelError(Exception):
"""Exception to be thrown when the model is empty."""
def __init__(self, msg=''):
"""Class constructor."""
Exception.__init__(
self,
"The model is empty (it hasn't been trained yet)."
)
class InvalidCategoryError(Exception):
"""Exception to be thrown when a category is not valid."""
def __init__(self, msg=''):
"""Class constructor."""
Exception.__init__(
self,
"The given category is not valid"
)
def kmean_multilabel_size(res):
"""
    Use k-means to tell where to split the ``SS3.classify``'s output.
Given a ``SS3.classify``'s output (``res``), tell where to partition it
into 2 clusters so that one of the cluster holds the category labels that
the classifier should output when performing multi-label classification.
    To achieve this, it applies k-means (i.e. 2-means) clustering over the
category confidence values in ``res``.
:param res: the classification output of ``SS3.classify``
:type res: list (of sorted pairs (category, confidence value))
:returns: a positive integer indicating where to split ``res``
:rtype: int
"""
cent = {"neg": -1, "pos": -1} # centroids (2 clusters: "pos" and "neg")
clust = {"neg": [], "pos": []} # clusters (2 clusters: "pos" and "neg")
new_cent_neg = res[-1][1]
new_cent_pos = res[0][1]
if new_cent_neg == new_cent_pos:
return 0
while (cent["pos"] != new_cent_pos) or (cent["neg"] != new_cent_neg):
cent["neg"], cent["pos"] = new_cent_neg, new_cent_pos
clust["neg"], clust["pos"] = [], []
for _, cat_cv in res:
if abs(cent["neg"] - cat_cv) < abs(cent["pos"] - cat_cv):
clust["neg"].append(cat_cv)
else:
clust["pos"].append(cat_cv)
if len(clust["neg"]) > 0:
new_cent_neg = sum(clust["neg"]) / len(clust["neg"])
if len(clust["pos"]) > 0:
new_cent_pos = sum(clust["pos"]) / len(clust["pos"])
return len(clust["pos"])
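# Worked example for kmean_multilabel_size (illustrative values only):
#
#   >>> kmean_multilabel_size([("sports", 0.90), ("health", 0.85), ("food", 0.10)])
#   2
#
# i.e. the two high-confidence labels form the "positive" cluster and would be kept
# in a multi-label prediction, while "food" falls into the "negative" cluster.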
def sigmoid(v, l):
"""A sigmoid function."""
try:
return .5 * tanh((3. / l) * v - 3) + .5
except ZeroDivisionError:
return 0
def mad(values, n):
"""Median absolute deviation mean."""
if len(values) < n:
values += [0] * int(n - len(values))
values.sort()
if n == 2:
return (values[0], values[0])
values_m = n // 2 if n % 2 else n // 2 - 1
m = values[values_m] # Median
sd = sum([abs(m - lv) for lv in values]) / float(n) # sd Mean
return m, sd
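# Worked example for mad (illustrative): mad([1, 2, 100], 3) returns (2, 33.0), i.e.
# the median of the values and their mean absolute deviation from that median.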
def key_as_int(dct):
"""Cast the given dictionary (numerical) keys to int."""
keys = list(dct)
if len(keys) and keys[0].isdigit():
new_dct = {}
for key in dct:
new_dct[int(key)] = dct[key]
return new_dct
return dct
def re_split_keep(regex, string):
"""
Force the inclusion of unmatched items by re.split.
This allows keeping the original content after splitting the input
document for later use (e.g. for using it from the Live Test)
"""
if not re.match(r"\(.*\)", regex):
regex = "(%s)" % regex
return re.split(regex, string)
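# Example (illustrative): re_split_keep(r"\n+", "a\n\nb") returns ['a', '\n\n', 'b'],
# whereas a plain re.split(r"\n+", "a\n\nb") would drop the '\n\n' separator.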
def list_hash(str_list):
"""
Return a hash value for a given list of string.
:param str_list: a list of strings (e.g. x_test)
:type str_list: list (of str)
:returns: an MD5 hash value
:rtype: str
"""
import hashlib
m = hashlib.md5()
for doc in str_list:
try:
m.update(doc)
except (TypeError, UnicodeEncodeError):
m.update(doc.encode('ascii', 'ignore'))
return m.hexdigest()
def vsum(v0, v1):
"""Vectorial version of sum."""
return [v0[i] + v1[i] for i in xrange(len(v0))]
def vmax(v0, v1):
"""Vectorial version of max."""
return [max(v0[i], v1[i]) for i in xrange(len(v0))]
def vdiv(v0, v1):
"""Vectorial version of division."""
return [v0[i] / v1[i] if v1[i] else 0 for i in xrange(len(v0))]
def set_verbosity(level):
"""
Set the verbosity level.
- ``0`` (quiet): do not output any message (only error messages)
- ``1`` (normal): default behavior, display only warning messages and progress bars
- ``2`` (verbose): display also the informative non-essential messages
The following built-in constants can also be used to refer to these 3 values:
``VERBOSITY.QUIET``, ``VERBOSITY.NORMAL``, and ``VERBOSITY.VERBOSE``, respectively.
For example, if you want PySS3 to hide everything, even progress bars, you could simply do:
>>> import pyss3
...
>>> pyss3.set_verbosity(0)
...
>>> # here's the rest of your code :D
or, equivalently:
>>> import pyss3
>>> from pyss3 import VERBOSITY
...
>>> pyss3.set_verbosity(VERBOSITY.QUIET)
...
>>> # here's the rest of your code :D
:param level: the verbosity level
:type level: int
"""
Print.set_verbosity(level)
# user-friendly aliases
SS3.set_smoothness = SS3.set_s
SS3.get_smoothness = SS3.get_s
SS3.set_significance = SS3.set_l
SS3.get_significance = SS3.get_l
SS3.set_sanction = SS3.set_p
SS3.get_sanction = SS3.get_p
SS3.set_alpha = SS3.set_a
SS3.get_alpha = SS3.get_a
SS3.train = SS3.fit
SS3.save = SS3.save_model
SS3.load = SS3.load_model
SS3.update = SS3.update_values
|
# Generated by Django 3.1.7 on 2021-03-11 14:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spotify', '0002_vote'),
]
operations = [
migrations.AlterField(
model_name='spotifytoken',
name='access_token',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='spotifytoken',
name='refresh_token',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='spotifytoken',
name='token_type',
field=models.CharField(max_length=80),
),
migrations.AlterField(
model_name='spotifytoken',
name='user',
field=models.CharField(max_length=80, unique=True),
),
]
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis. Task 2"
# Import libraries
import os
import numpy as np  # np.arange is used below for the rho grid
from train import *
from adaptive import *
from util import *
# Highway sequences configuration, range 1050 - 1350
highway_path_in = "./highway/input/"
highway_path_gt = "./highway/groundtruth/"
highway_alpha = 2.5
highway_rho = 0.2
# Fall sequences configuration, range 1460 - 1560
fall_path_in = "./fall/input/"
fall_path_gt = "./fall/groundtruth/"
fall_alpha = 2.5
fall_rho = 0.5
# Traffic sequences configuration, range 950 - 1050
traffic_path_in = "./traffic/input/"
traffic_path_gt = "./traffic/groundtruth/"
traffic_alpha = 3.25
traffic_rho = 0.15
if __name__ == "__main__":
    # Adaptive modelling:
    # the first 50% of the frames are used for training,
    # the remaining 50% are used to adaptively update the background model
alphas = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]
rhos = np.arange(0, 1, 0.1)
vec_FP, vec_FN, vec_TP, vec_TN = init_vectors()
best_F = 0
best_rho = 0
best_alpha = 0
print("Computing grid search on highway dataset...")
for rho in rhos:
for alpha in alphas:
mu_matrix, sigma_matrix = training(highway_path_in, 1050, 1199, alpha)
AccFP, AccFN, AccTP, AccTN = adaptive(highway_path_in, 1200, 1349, mu_matrix, sigma_matrix,
alpha, rho, highway_path_gt)
P, R, F1, FPR = get_metrics(AccTP, AccTN, AccFP, AccFN)
if F1 > best_F:
best_F = F1
best_alpha = alpha
best_rho = rho
print('Highway: Best F-Score: {} , optimal alpha : {} , optimal_rho: {}'.format(best_F, best_alpha, best_rho))
vec_FP, vec_FN, vec_TP, vec_TN = init_vectors()
best_F = 0
best_rho = 0
best_alpha = 0
print("Computing grid search on fall dataset...")
for rho in rhos:
for alpha in alphas:
mu_matrix, sigma_matrix = training(fall_path_in, 1460, 1509, alpha)
AccFP, AccFN, AccTP, AccTN = adaptive(fall_path_in, 1510, 1559, mu_matrix, sigma_matrix,
alpha, rho, fall_path_gt)
P, R, F1, FPR = get_metrics(AccTP, AccTN, AccFP, AccFN)
if F1 > best_F:
best_F = F1
best_alpha = alpha
best_rho = rho
print('Fall: Best F-Score: {} , optimal alpha : {} , optimal_rho: {}'.format(best_F, best_alpha, best_rho))
vec_FP, vec_FN, vec_TP, vec_TN = init_vectors()
best_F = 0
best_rho = 0
best_alpha = 0
print("Computing grid search on traffic dataset...")
for rho in rhos:
for alpha in alphas:
mu_matrix, sigma_matrix = training(traffic_path_in, 950, 999, alpha)
AccFP, AccFN, AccTP, AccTN = adaptive(traffic_path_in, 1000, 1049, mu_matrix, sigma_matrix,
alpha, rho, traffic_path_gt)
P, R, F1, FPR = get_metrics(AccTP, AccTN, AccFP, AccFN)
if F1 > best_F:
best_F = F1
best_alpha = alpha
best_rho = rho
print('Traffic: Best F-Score: {} , optimal alpha : {} , optimal_rho: {}'.format(best_F, best_alpha, best_rho))
|
"""
Crie um programa que receba um vetor de strings e após isso receba uma outra
string e a insira no inicio de cada item da lista.
Exemplo
Entrada Saída
["a", "b", "c"] "al" ["ala", "alb", "alc"]
"""
items = []  # renamed from `list` to avoid shadowing the built-in
index = 3
for i in range(index):
    item = str(input("Enter the value to add to the list: "))
    items.append(item)
string = str(input("Enter the string to prepend: "))
for i in range(index):
    items[i] = string + items[i]
print(items)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sys
import time
import logging
import json
_LOGGER = logging.getLogger(__name__)
def db_create():
from elasticsearch import Elasticsearch
db_uri = os.environ["DB_URI"]
es = Elasticsearch([db_uri],
retry_on_timeout=True)
_LOGGER.info("waiting for db ...")
logging.getLogger("elasticsearch").setLevel(logging.CRITICAL)
while True:
try:
if not es.ping():
raise ConnectionRefusedError("ping failed")
info = es.info()
for k, v in info.items():
_LOGGER.info("info %s = %s", k, v)
info = es.cluster.health(wait_for_status="green")
for k, v in info.items():
_LOGGER.info("health %s = %s", k, v)
logging.getLogger("elasticsearch").setLevel(logging.WARN)
_LOGGER.info("... db is up!")
return es
except Exception as e:
_LOGGER.info("... db not up yet: %s", e)
time.sleep(1)
def db_index(es, item, idx):
if "url" not in item:
_LOGGER.error("could not handle item %s", item)
return
if "product" in item["url"]:
index = "product"
elif "substance" in item["url"]:
index = "substance"
else:
_LOGGER.error("could not handle item %s", item)
return
id = item["id"]
es.index(index=index,
body=item)
_LOGGER.info("Indexed item %d: %20s (%s)",
idx, id, index)
def db_search(es, index, search):
_LOGGER.info("Serching in index %s for %s ...", index, search)
result = es.search(index=index,
body=search)
_LOGGER.info("... got %d matches", result["hits"]["total"])
def main():
logging.basicConfig(level=logging.INFO)
_LOGGER.info("welcome!")
es = db_create()
if es.indices.exists(index="product"):
        count = es.count(index="product")["count"]
_LOGGER.info("Index already exists with %d items", count)
else:
for idx, line in enumerate(sys.stdin):
item = json.loads(line)
db_index(es, item, idx)
_LOGGER.info("All item indexed")
db_search(es, "product", dict(query=dict(match=dict(atc="A02BC01"))))
db_search(es, "product", dict(query=dict(match=dict(name="Omeprazol"))))
db_search(es, "product", dict(query=dict(match=dict(text="yrsel"))))
if __name__ == "__main__":
try:
main()
except Exception as e:
_LOGGER.exception(e)
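# Invocation sketch (assumed, not documented in the original script): it reads one JSON
# item per line from stdin and expects the DB_URI environment variable to point at an
# Elasticsearch instance, e.g.
#
#   DB_URI=http://localhost:9200 python pipeline.py < items.jsonl
#
# (the file names "pipeline.py" and "items.jsonl" are placeholders)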
|
import concurrent.futures
import multiprocessing
import pickle
from typing import Any, Callable, List
import pandas as pd
import torch
import torch.distributed as dist
import tqdm
# ===== util functions ===== #
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def gather_tensor(tensor, world_size=1):
tensor_list = [tensor.clone() for _ in range(world_size)]
dist.all_gather(tensor_list, tensor)
return tensor_list
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
return rt / dist.get_world_size()
def reduce_dict(info_dict):
for it in info_dict:
p = info_dict[it].clone()
dist.all_reduce(p, op=dist.reduce_op.SUM)
info_dict[it] = p / dist.get_world_size()
def all_gather_list(data, max_size=32768):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != all_gather_list._in_buffer.size():
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size) for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 3 > max_size:
raise ValueError(
'encoded data exceeds max_size: {}'.format(enc_size + 3))
assert max_size < 255 * 255 * 256
    in_buffer[0] = enc_size // (255 * 255)  # this encoding works for max_size < 16M
in_buffer[1] = (enc_size % (255 * 255)) // 255
in_buffer[2] = (enc_size % (255 * 255)) % 255
in_buffer[3:enc_size + 3] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
result = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * 255 * item(out_buffer[0])) + 255 * item(
out_buffer[1]) + item(out_buffer[2])
result.append(pickle.loads(bytes(out_buffer[3:size + 3].tolist())))
return result
def gather_dict(info_dict, max_size=2**20):
for w in info_dict:
new_v = []
try:
results = all_gather_list(info_dict[w], max_size)
except ValueError:
results = all_gather_list(info_dict[w], max_size * 2)
for v in results:
if isinstance(v, list):
new_v += v
else:
new_v.append(v)
info_dict[w] = new_v
# def row_apply(series: List[List[str]],
# func: Callable[[Any], Any]) -> List[Any]:
# return [func(row) for row in tqdm.tqdm(series)]
def row_apply_multiprocessing(series: List[Any],
                              func: Callable[[Any], Any]) -> List[Any]:
    # use a quarter of the available cores, but at least one process
    num_processes = max(1, multiprocessing.cpu_count() // 4)
    with concurrent.futures.ProcessPoolExecutor(num_processes) as pool:
        # wrap the executor's map in tqdm so progress is reported while mapping
        return list(tqdm.tqdm(pool.map(func, series, chunksize=1),
                              total=len(series)))
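# Usage sketch (hypothetical): `func` must be picklable, i.e. defined at module level,
# for ProcessPoolExecutor to distribute it, e.g.
#
#   lengths = row_apply_multiprocessing(["abc", "de", "f"], len)  # -> [3, 2, 1]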
|
import unittest
from katas.kyu_7.factorial import factorial
class FactorialTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(factorial(0), 1)
def test_equal_2(self):
self.assertEqual(factorial(1), 1)
def test_equal_3(self):
self.assertEqual(factorial(2), 2)
def test_equal_4(self):
self.assertEqual(factorial(3), 6)
def test_equal_5(self):
self.assertEqual(factorial(4), 24)
def test_equal_6(self):
self.assertEqual(factorial(5), 120)
def test_equal_7(self):
self.assertEqual(factorial(6), 720)
def test_equal_8(self):
self.assertEqual(factorial(7), 5040)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 17:01:38 2017
@author: XuL
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import os
import sys
#sys.setdefaultencoding("utf-8")
import nltk
from nltk.tree import *
import nltk.data
import nltk.draw
from nltk import tokenize
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import re
import pandas as pd
#import textacy
# Stanford Parser
os.environ['STANFORD_PARSER'] = 'C:\\Users\\XuL\\stanford\\stanford-parser-full-2017-06-09\\stanford-parser.jar'
os.environ['STANFORD_MODELS'] = 'C:\\Users\\XuL\\stanford\\stanford-parser-full-2017-06-09\\stanford-parser-3.8.0-models.jar'
from nltk.parse import stanford
# Ghostscript for displaying of tree structures
path_to_gs = "C:\\Program Files\\gs\\gs9.22\\bin"
os.environ['PATH'] += os.pathsep + path_to_gs
# Java path
java_path = "C:/Program Files (x86)/Java/jre1.8.0_152/bin"
os.environ['JAVAHOME'] = java_path
# Stanford NER
_model_filename = 'C:\\Users\\XuL\\AppData\\Local\\Continuum\\anaconda3\\stanford_ner\\classifiers\\english.all.3class.distsim.crf.ser.gz'
_path_to_jar = 'C:\\Users\\XuL\\AppData\\Local\\Continuum\\anaconda3\\stanford_ner\\stanford-ner.jar'
from nltk.tag.stanford import StanfordNERTagger
st = StanfordNERTagger(model_filename=_model_filename, path_to_jar=_path_to_jar)
# Spacy NER
import spacy
import en_core_web_md as spacyEn
global nlp
nlp = spacyEn.load()
class SVO(object):
"""
Class Methods to Extract Subject Verb Object Tuples from a Sentence
"""
def __init__(self):
"""
Initialize the SVO Methods
"""
#self.noun_types = ["NN", "NNP", "NNPS","NNS","PRP","CC","CD"]
self.verb_types = ["VB","VBD","VBG","VBN", "VBP", "VBZ"]
self.noun_types = ["NP"]
#self.verb_types = ["VP"]
self.adjective_types = ["JJ","JJR","JJS"]
self.pred_verb_phrase_siblings = None
self.parser = stanford.StanfordParser()
self.sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def get_attributes(self,node,parent_node, parent_node_siblings):
"""
returns the Attributes for a Node
"""
def get_subject(self,sub_tree):
"""
Returns the Subject and all attributes for a subject, sub_tree is a Noun Phrase
"""
sub_nodes = []
sub_nodes = sub_tree.subtrees()
sub_nodes = [each for each in sub_nodes if each.pos()]
subject = None
for each in sub_nodes:
if each.label() in self.noun_types:
subject = each.leaves()
break
return {'subject':subject}
def get_object(self,sub_tree):
"""
Returns an Object with all attributes of an object
"""
siblings = self.pred_verb_phrase_siblings
Object = None
for each_tree in sub_tree:
if each_tree.label() in ["NP","PP"]:
sub_nodes = each_tree.subtrees()
sub_nodes = [each for each in sub_nodes if each.pos()]
for each in sub_nodes:
if each.label() in self.noun_types:
Object = each.leaves()
break
break
else:
sub_nodes = each_tree.subtrees()
sub_nodes = [each for each in sub_nodes if each.pos()]
for each in sub_nodes:
if each.label() in self.adjective_types:
Object = each.leaves()
break
# Get first noun in the tree
self.pred_verb_phrase_siblings = None
return {'object':Object}
def get_predicate(self, sub_tree):
"""
Returns the Verb along with its attributes, Also returns a Verb Phrase
"""
sub_nodes = []
sub_nodes = sub_tree.subtrees()
sub_nodes = [each for each in sub_nodes if each.pos()]
predicate = None
pred_verb_phrase_siblings = []
sub_tree = ParentedTree.convert(sub_tree)
for each in sub_nodes:
if each.label() in self.verb_types:
sub_tree = each
predicate = each.leaves()
#get all predicate_verb_phrase_siblings to be able to get the object
sub_tree = ParentedTree.convert(sub_tree)
if predicate:
pred_verb_phrase_siblings = self.tree_root.subtrees()
pred_verb_phrase_siblings = [each for each in pred_verb_phrase_siblings if each.label() in ["NP","PP","ADJP","ADVP"]]
self.pred_verb_phrase_siblings = pred_verb_phrase_siblings
return {'predicate':predicate}
def process_parse_tree(self,parse_tree):
"""
Returns the Subject-Verb-Object Representation of a Parse Tree.
Can Vary depending on number of 'sub-sentences' in a Parse Tree
"""
self.tree_root = parse_tree
# Step 1 - Extract all the parse trees that start with 'S'
svo_list = [] # A List of SVO pairs extracted
output_list = []
for idx, subtree in enumerate(parse_tree[0].subtrees()):
output_dict ={}
subject =None
predicate = None
Object = None
if subtree.label() in ["S", "SQ", "SBAR", "SBARQ", "SINV", "FRAG"]:
children_list = subtree
children_values = [each_child.label() for each_child in children_list]
children_dict = dict(zip(children_values,children_list))
keys = ['file','seek','default','estimate','commence']
# only extract nountype words from sentences contain keywords
if children_dict.get("NP") is not None:
v = children_dict.get("VP")
if v is not None:
leaves = v.leaves()
leaves = [lemmatizer.lemmatize(leaf,'v') for leaf in leaves]
haskey = len([1 for leaf in leaves if leaf in keys])
if haskey > 0:
subject = self.get_subject(children_dict["NP"])
print(subject['subject'])
# if children_dict.get("VP") is not None:
# Extract Verb and Object
#i+=1
#"""
#if i==1:
# pdb.set_trace()
#"""
# predicate = self.get_predicate(children_dict["VP"])
# Object = self.get_object(children_dict["VP"])
try:
if subject['subject']: #or predicate['predicate'] or Object['object']:
output_dict['subject'] = subject['subject']
#output_dict['predicate'] = predicate['predicate']
#output_dict['object'] = Object['object']
output_list.append(output_dict)
except Exception:
continue
return output_list
def traverse(self,t):
try:
t.label()
except AttributeError:
print(t)
else:
# Now we know that t.node is defined
print('(', t.label())
for child in t:
self.traverse(child)
print(')')
# check if the input sentence tree includes a node with label "S"
def check_clause(self, sent):
clause_tags = ['S', 'SBAR', 'SBARQ', 'SINV', 'SQ']
global result
result = False
def check_sent(t):
global result
try:
if (t.label() in clause_tags) & (t.height()>1):
result = True
except AttributeError:
pass
else:
if (t.label() in clause_tags) & (t.height()>1):
result = True
for child in t:
check_sent(child)
check_sent(sent)
return result
def retrive_lowest_clauses(self, sent):
clauses = []
if not self.check_clause(sent):
clauses += []
else:
try:
tmp = 0
for child in sent:
tmp += self.check_clause(child)
if tmp == 0:
clauses += [sent]
else:
for child in sent:
# clauses += child - S
clauses += self.retrive_lowest_clauses(child)
# when reaching leaves
except TypeError:
clauses += []
return clauses
def retrive_clauses(self, sent):
sent = ParentedTree.convert(sent)
clauses = []
lowest_clauses = self.retrive_lowest_clauses(sent)
while (lowest_clauses !=[]):
clauses += lowest_clauses
for lowest_clause in lowest_clauses:
del sent[lowest_clause.treeposition()]
lowest_clauses = self.retrive_lowest_clauses(sent)
return clauses
def find_case(self, x):
try:
st = re.search(r'Bankr\.|Bank\.', x).span()[0]
end = re.search(r'Case No + \d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()[1]
case = x[st:end]
except:
try:
rge = re.search(r'Case No +\d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()
st = rge[0]
end = rge[1]
case = x[st:end]
except:
try:
rge = re.search(r' and +\d\d_\d\d\d\d\d|\d_\d\d\d\d\d', x).span()
st = rge[0]
end = rge[1]
case = x[st:end]
except:
case = ""
x = x.replace(case,"")
return x
def remov_case(self, x):
new_x = self.find_case(x)
while new_x != x:
x = new_x
new_x = self.find_case(x)
return new_x
def pre_process(self,text):
# remove web address
try:
st = re.search(r'http', text).span()[0]
end = re.search(r'\.+(net|com)', text).span()[1]
s = text.replace(text[st:end],"")
except:
s = text
# remove dashed line in title
search = re.search(r'--------', s)
if not (pd.isnull(search)):
st = search.span()[0]
ed = st
while s[ed] == '-':
ed += 1
s = re.sub(s[(st-1):(ed)],'.',s)
# substitude hyphen in joint words
s = re.sub(r'--',',',s)
s = re.sub(r'-','_',s)
# remove backslash
s = re.sub(r'/','',s)
# remove comma before and dot after
s = re.sub(r', Inc\.', ' INC', s)
s = re.sub(r' Inc\.', ' INC', s)
s = re.sub(r' INC,', ' INC', s)
s = re.sub(r', INC', ' INC', s)
s = re.sub(r'Incs', 'INC\'s', s)
s = re.sub(r', Esq\.', ' Esq', s)
s = re.sub(r' Esq\.', ' Esq', s)
s = re.sub(r' Esq,', ' Esq', s)
s = re.sub(r', Esq', ' Esq', s)
s = re.sub(r', L\.L\.C\.', ' LLC', s)
s = re.sub(r' L\.L\.C\.', ' LLC', s)
s = re.sub(r' LLC\.', ' LLC', s)
s = re.sub(r' LLC,', ' LLC', s)
s = re.sub(r', LLC', ' LLC', s)
s = re.sub(r', L\.P\.', ' LP', s)
s = re.sub(r' L\.P\.', ' LP', s)
s = re.sub(r' LP\.', ' LP', s)
s = re.sub(r' LP,', ' LP', s)
s = re.sub(r', LP', ' LP', s)
s = re.sub(r', P\.C\.',' PC', s)
s = re.sub(r' P\.C\.',' PC', s)
s = re.sub(r' PC\.',' PC', s)
s = re.sub(r' PC,',' PC', s)
s = re.sub(r', PC',' PC', s)
s = re.sub(r', P\.A\.',' PA', s)
s = re.sub(r' P\.A\.',' PA', s)
s = re.sub(r' PA\.',' PA', s)
s = re.sub(r' PA,',' PA', s)
s = re.sub(r', PA',' PA', s)
s = re.sub(r'General Partnership', 'GP', s)
s = re.sub(r', GP', ' GP', s)
s = re.sub(r' GP,', ' GP', s)
s = re.sub(r', APC', ' APC', s)
s = re.sub(r' APC,', ' APC', s)
s = re.sub(r' No\.', ' No', s)
s = re.sub(r' Nos\.', ' No', s)
s = re.sub(r' Nos', ' No', s)
s = re.sub(r' et.\ al\.', ' et al', s)
s = re.sub(r' et al\.', ' et al', s)
s = re.sub(r' et al\.', ' et al', s)
s = re.sub(r' et al,', ' et al', s)
s = re.sub(r', et al', ' et al', s)
s = re.sub(r' et al', ' Et Al', s)
# switch uppercase and lowercase
s = re.sub(r' Debtors', ' debtors', s)
s = re.sub(r' Debtor', ' debtor', s)
s = re.sub(r's Motion', '\'s motion', s)
s = re.sub(r' Motion', ' motion', s)
s = re.sub(r' Is ', ' is ', s)
s = re.sub(r' Not ', ' not ', s)
s = re.sub(r' Cannot ', ' can not ', s)
s = re.sub(r' Files', ' files', s)
s = re.sub(r' Filed', ' filed', s)
s = re.sub(r' File', ' file', s)
s = re.sub(r' Filing', ' filing', s)
s = re.sub(r', which filed ', ' filing ', s)
s = re.sub(r' dba ', ' DBA ', s)
s = re.sub(r' fdba ', ' FDBA ', s)
s = re.sub(r' fka ', ' FKA ', s)
# convert abbrivations
s = re.sub(r' the U\.S\. Bankruptcy Court', ' the court', s)
s = re.sub(r' the US Bankruptcy Court', ' the court', s)
s = re.sub(r' the United States Bankruptcy Court', ' the court', s)
s = re.sub(r' the Court', ' the court', s)
s = re.sub(r' Corp\.', ' Corporation', s)
s = re.sub(r' Co\. ', ' Co ', s)
s = re.sub(r' Dev\.', ' Dev', s)
s = re.sub(r' Assoc\.', ' Association', s)
s = re.sub(r'Mil\.', 'million', s)
s = re.sub(r' Hon\. ', ' Hon ', s)
s = re.sub(r' Ind\. ', ' Ind ', s)
# convert numbers
# s = s.replace(",000,000", " million").replace(",000,001", " million").\
# replace(",000 ", " thousand").replace(",001", " thousand")
# remove short forms
s = s.replace("′", "'").replace("’", "'").\
replace("won't", "will not").replace("cannot", "can not").\
replace("can't", "can not").replace("n't", " not").\
replace("what's", "what is").replace("'ve", " have").\
replace("I'm", "I am").replace("'re", " are").\
replace("%", " percent ").replace("$", " dollar ").\
replace("'ll", " will").replace(" it's ", " its ")
# remove bankruptcy case numbers
s = self.remov_case(s)
# remove middle names
s = re.sub(r'([A-Z])\.([A-Z])\.',r'\1\2',s)
# remove non ASCII characters
s = s.encode("ascii", errors='ignore')
s = str(s, 'utf-8')
# remove double commas
s = re.sub(r" , ,", ",", s)
# remove additional white spaces
s = ' '.join(s.split())
return s
def sentence_split(self, text):
"""
returns the Parse Tree of a Sample
"""
doc = nlp(text)
sentences = doc.sents
return [str(sentence) for sentence in sentences]
def sentence_split_2(self, text):
return tokenize.sent_tokenize(text)
def get_parse_tree(self,sentence):
"""
returns the Parse Tree of a Sample
"""
parse_tree = self.parser.raw_parse(sentence)
return parse_tree
    def List_To_Tree(self, lst):
        # `basestring` only exists in Python 2; the rest of this file targets Python 3,
        # so plain `str` is used here and the `map` result is materialized into a list.
        if not isinstance(lst, str):
            if len(lst) == 2 and isinstance(lst[0], str) and isinstance(lst[1], str):
                lst = Tree(str(lst[0]).split('+')[0], [str(lst[1])])
            elif isinstance(lst[0], str) and not isinstance(lst[1], str):
                lst = Tree(str(lst[0]).split('+')[0],
                           list(map(self.List_To_Tree, lst[1:len(lst)])))
        return lst
def return_clause_list(self, sentences):
clause_trees = []
for sent in sentences:
root_tree = next(self.get_parse_tree(sent))
clause_trees += self.retrive_clauses(root_tree)
clauses = []
for cl in clause_trees:
clauses.append(' '.join(cl.leaves()))
return clauses
#if __name__=="__main__":
# svo = SVO()
# sentence = "David D. Zimmerman, Clerk of the U.S. Bankruptcy Court found Wells Fargos Motion to be defective as no opportunity to object to the entire matrix was filed."
# sentence = "Mr. Zimmerman refused to take action on Wells Fargos Motion and schedule a hearing, until Wells Fargo corrects the deficiency within 14 days from May 2, 2016, or May 16, 2016."
# sentence = "Headquartered in Mechanicsburg, Pennsylvania, Choudries Inc. dba Super Seven Food Mart filed for Chapter 11 bankruptcy protection Bankr. M.D. Pa. Case No. 16-02475 on June 13, 2016, and is represented by Gary J. Imblum, Esq., at Imblum Law Offices, P.C."
# sentence = "Headquartered in Long Island City, New Yok, Anthony Lawrence of New York, Inc., filed for Chapter 11 bankruptcy protection Bankr. E.D.N.Y. Case No. 15-44702 on Oct. 15, 2015, estimating its assets at up to 50,000 and its liabilities at between 1 million and 10 million. The petition was signed by Joseph J. Calagna, president. Judge Elizabeth S. Stong presides over the case. James P Pagano, Esq., who has an office in New York, serves as the Debtors bankruptcy counsel."
# sentence = "On April 21, 2016, SunEdison, Inc., and 25 of its affiliates each filed a Chapter 11 bankruptcy petition Bankr. S.D.N.Y. Case Nos. 16-10991 to 16-11017. Martin H. Truong signed the petitions as senior vice president, general counsel and secretary."
# sentence = "Based in Rochester, Michigan, TAJ Graphics Enterprises, LLC, filed for Chapter 11 bankruptcy protection Bankr. E.D. Mich. Case No. 09-72532 on Oct. 21, 2009. John D. Hertzberg, Esq., in Bingham Farms, Michigan, serves as the Debtors counsel. In its petition, the Debtor estimated 10 million to 50 million, and 1 million to 10 million in debts."
# sentence = "The D rating reflects our expectation that Stone Energy will elect to file for Chapter 11 bankruptcy protection rather than make the May interest payment on its 7.5 senior unsecured notes due 2022, said SP Global Ratings credit analyst David Lagasse."
# sentence = "Judge Robert Jacobvitz of the U.S. Bankruptcy Court in New Mexico denied the appointment of Mr. Pierce who was selected by the U.S. trustee overseeing Railyards bankruptcy case to replace the companys management."
# sentence = "The U.S. Trustee also appointed a Committee of Asbestos Creditors on April 28, 2000. The Bankruptcy Court authorized the retention of these professionals by the Committee of Asbestos Creditors i Caplin Drysdale, Chartered as Committee Counsel ii Campbell Levine as local counsel iii Anderson Kill Olick, P.C. as special insurance counsel iv Legal Analysis Systems, Inc., as Asbestos-Related Bodily Injury Consultant v defunct firm, L. Tersigni Consulting, P.C. as financial advisor, and vi Professor Elizabeth Warren, as a consultant to Caplin Drysdale, Chartered. The Asbestos Committee is presently represented by Douglas A. Campbell, Esq., and Philip E. Milch, Esq., at Campbell Levine, LLC and Peter Van N. Lockwood, Esq., Leslie M. Kelleher, Esq., and Jeffrey A. Liesemer, Esq., at Caplin Drysdale, Chartered."
# sentence = svo.pre_process(sentence)
# sentences = svo.sentence_split(sentence)
# =============================================================================
# # method 1: list Subject-Verb-Object structure of the sentences
# =============================================================================
# val = []
# for sent in sentences:
# root_tree = next(svo.get_parse_tree(sent))
# val.append(svo.process_parse_tree(root_tree))
#
# print (val)
# =============================================================================
# # method 2: split complex sentences to be simple ones and clauses.
# =============================================================================
# clauses = []
# for sent in sentences:
# root_tree = next(svo.get_parse_tree(sent))
# clauses += svo.retrive_clauses(root_tree)
# print("In house method: ")
# for cl in clauses:
# doc = nlp(' '.join(cl.leaves()))
# print(' '.join(cl.leaves()))
# print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
# print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
# =============================================================================
# # method 3: retrieve all the subtrees
# =============================================================================
# clauses = []
# for t in root_tree.subtrees(filter=(lambda s: s.label()=='S')): clauses.append(t)
# =============================================================================
# # method 4: stanfard NER to compare results
# =============================================================================
# for sent in sentences:
# print("Stanford Tagger: ")
# print(st.tag(sent.split()))
# print("Spacy Tagger: ")
# doc = nlp(sent)
# print('Entities', [(ent.text, ent.label_) for ent in doc.ents if ent.label_=='ORG'])
# =============================================================================
# # method 5: spacy to compare results
# =============================================================================
|
import shelve
s = shelve.open('test_shelf')
try:
s['key1'] = {'int':10, 'float':3.4, 'string':'sample data'}
finally:
s.close()
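# A minimal follow-up sketch (not part of the original snippet): re-open the same
# 'test_shelf' file read-only and print the stored value back.
s = shelve.open('test_shelf', flag='r')
try:
    print(s['key1'])
finally:
    s.close()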
|
# NINJA BONUS: Use modules to separate out the classes into different files.
from ninja_class import Ninja
# Make an instance of a Ninja and assign them an instance of a pet to the pet attribute.
ninja1 = Ninja('joe','shmoe','salmon snacks','Victor')
ninja1.adopt_a_pet('winnie','golden','shake','bark')
# Implement walk(), feed(), bathe() on the ninja class.
# Have the Ninja feed, walk , and bathe their pet.
# ninja1.walk()
# ninja1.feed()
# ninja1.bathe()
# Implement sleep(), eat(), play(), noise() methods on the pet class.
# ninja1.pet.sleep()
# ninja1.pet.eat()
# ninja1.pet.play()
# ninja1.pet.noise()
print(ninja1.ninja_and_pet_display())
|
import sys
s = sys.argv
for i in range(len(s)):
if i != 0:
print(s[i], end=" ")
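# Example invocation (script name is a placeholder): `python echo_args.py hello world`
# prints every argument after the script name separated by spaces: "hello world ".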
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('^admin/', admin.site.urls),
url('^cl/', include('chloroform.urls')),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import now, cint
from functools import partial
from toolz import compose
@frappe.whitelist()
def deliver_result(lab_test, revert=0, delivery_time=None):
doc = frappe.get_doc("Lab Test", lab_test)
if doc and doc.docstatus == 1:
if cint(revert):
doc.delivery_time = None
else:
doc.delivery_time = delivery_time or now()
doc.save()
_get_subsections = compose(
    list,  # materialize the result so truthiness and `in` checks work on Python 3
    partial(map, lambda x: x.get("test_event") or x.get("particulars")),
    partial(filter, lambda x: cint(x.is_subsection) == 1),
)
def change_test_loading(doc, template):
if template.test_template_type == "Compound":
subsections = _get_subsections(template.normal_test_templates)
if subsections:
for item in doc.normal_test_items:
if item.test_name in subsections:
frappe.db.set_value(
"Normal Test Items", item.name, "require_result_value", 0
)
elif item.test_name and not item.test_event:
frappe.db.set_value(
"Normal Test Items", item.name, "test_name", None
)
frappe.db.set_value(
"Normal Test Items", item.name, "test_event", item.test_name
)
if template.test_template_type == "Descriptive":
subsections = _get_subsections(template.special_test_template)
if subsections:
for item in doc.special_test_items:
if item.test_particulars in subsections:
frappe.db.set_value(
"Special Test Items", item.name, "require_result_value", 0
)
if template.test_template_type == "Grouped":
for item in doc.normal_test_items:
if item.test_name and item.template and item.template != doc.template:
test_comment = frappe.db.get_value(
"Lab Test Template", item.template, "test_comment"
)
if test_comment:
frappe.db.set_value(
"Normal Test Items", item.name, "test_comment", test_comment
)
def load_result_format(lab_test, template, prescription, invoice):
from erpnext.healthcare.doctype.lab_test.lab_test import load_result_format
load_result_format(lab_test, template, prescription, invoice)
change_test_loading(lab_test, template)
@frappe.whitelist()
def create_invoice(company, patient, lab_tests, prescriptions):
from erpnext.healthcare.doctype.lab_test.lab_test import create_invoice
si_name = create_invoice(company, patient, lab_tests, prescriptions)
test_ids = json.loads(lab_tests)
if test_ids:
si = frappe.get_doc("Sales Invoice", si_name)
si.patient = patient
find_item = _find_item(si.items)
for test_id in test_ids:
test = frappe.get_doc("Lab Test", test_id)
item_code = frappe.db.get_value("Lab Test Template", test.template, "item")
item = find_item(item_code)
item.reference_dt = "Lab Test"
item.reference_dn = test_id
item.lab_test_result_date = test.result_date
si.save()
return si_name
def _find_item(items):
def fn(item_code):
for item in items:
if item.item_code == item_code:
return item
return fn
@frappe.whitelist()
def link_invoice(lab_test, sales_invoice):
test_doc = frappe.get_doc("Lab Test", lab_test)
invoice_doc = frappe.get_doc("Sales Invoice", sales_invoice)
if test_doc.docstatus == 2 or invoice_doc.docstatus == 2:
frappe.throw("Cannot link cancelled documents.")
if test_doc.patient != invoice_doc.patient:
frappe.throw("Lab Test and Sales Invoice belong to different Patients.")
frappe.db.set_value("Lab Test", lab_test, "invoice", sales_invoice)
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
dataset = pd.read_csv('parkinsons.data')
#dataset_updrs = pd.read_csv('parkinsons_updrs.data')
#Get the features and labels
features=dataset.loc[:,dataset.columns!='status'].values[:,1:]
labels=dataset.loc[:,'status'].values
# Scale continuous data
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import make_column_transformer
preprocess = StandardScaler()
features = preprocess.fit_transform(features)
# Split in train/test
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size = 0.2, random_state = 0)
print(X_test)
print(X_train)
print(y_test)
print(y_train)
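# Follow-up sketch (not part of the original script): fit a simple classifier
# on the split above and report accuracy with the already-imported
# accuracy_score. Any sklearn-style estimator would do; LogisticRegression is
# used here only to avoid extra dependencies.
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("Test accuracy:", accuracy_score(y_test, clf.predict(X_test)))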
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SimpleracedayItem(scrapy.Item):
### to rd_race
racedate = scrapy.Field()
racecoursecode = scrapy.Field()
raceclass = scrapy.Field()
racestandardfinish = scrapy.Field()
racerating = scrapy.Field()
runners_list = scrapy.Field()
racegoing = scrapy.Field()
racesurface = scrapy.Field()
trackvariant= scrapy.Field()
racedistance=scrapy.Field()
racenumber = scrapy.Field()
racename = scrapy.Field()
localracetime = scrapy.Field()
utcracetime = scrapy.Field()
#####
### to rd_runner
horsename = scrapy.Field() #to rdhorse and id to rd_runner
horsenumber = scrapy.Field() #to rdhorse
horsecode = scrapy.Field() #to rdhorse
horse_url= scrapy.Field()
jockeycode = scrapy.Field()
jockeyname = scrapy.Field()
trainercode = scrapy.Field()
trainername = scrapy.Field()
seasonstakes = scrapy.Field()
todaysrating = scrapy.Field()
lastwonat = scrapy.Field()
isMaiden = scrapy.Field()
ownername = scrapy.Field()
gear = scrapy.Field()
placing = scrapy.Field()
finish_time = scrapy.Field()
marginsbehindleader = scrapy.Field()
positions = scrapy.Field()
timelist = scrapy.Field()
priority = scrapy.Field()
raceday_id = scrapy.Field()
owner_id = scrapy.Field()
jockey_id = scrapy.Field()
trainer_id = scrapy.Field()
horse_id = scrapy.Field()
race_id = scrapy.Field()
dayssincelastrun_h= scrapy.Field()
previousruns_car= scrapy.Field()
previouswins_car= scrapy.Field()
previousruns_d= scrapy.Field()
previouswins_d= scrapy.Field()
ranked_avgspds= scrapy.Field()
avg_spds = scrapy.Field()
prev_avg_spds_d= scrapy.Field()
prev_avg_spds_cl= scrapy.Field()
jtohweights_h= scrapy.Field()
h_pastraces = scrapy.Field()
l3odds= scrapy.Field()
dayssincelastrun= scrapy.Field()
all_runs= scrapy.Field()
places_daysoff = scrapy.Field()
career_roi = scrapy.Field()
previousruns_cl = scrapy.Field()
previouswins_cl = scrapy.Field()
previousruns_surf = scrapy.Field()
previouswins_surf = scrapy.Field()
min_d = scrapy.Field()
max_d = scrapy.Field()
mean_d= scrapy.Field()
min_car= scrapy.Field()
max_car= scrapy.Field()
mean_car= scrapy.Field()
min_cl= scrapy.Field()
max_cl= scrapy.Field()
mean_cl= scrapy.Field()
min_surf= scrapy.Field()
max_surf= scrapy.Field()
mean_surf= scrapy.Field()
previous_rps= scrapy.Field()
previous_rps_d = scrapy.Field()
lastwonat = scrapy.Field()
lastwonago = scrapy.Field()
lastwonracesago = scrapy.Field()
#SECOND
class RaceItem(scrapy.Item):
#to rd_Race
racedate = scrapy.Field() #rd_raceid
racenumber = scrapy.Field()
racecoursecode = scrapy.Field()
horsename = scrapy.Field() #rd_horse_id
tips = scrapy.Field()
naps = scrapy.Field()
scmp_runner_comment = scrapy.Field()
totaljump = scrapy.Field()
totalcanter = scrapy.Field()
totalbarrier = scrapy.Field()
barriertimes = scrapy.Field()
jumptimes = scrapy.Field()
totalswim = scrapy.Field()
|
import json
import feedparser
"""
Story format:
{
"index": (index),
"title": (title),
"description": (description),
"channels": [(channel)],
"published": (channel),
"guid": (guid),
"link": (link)
}
"""
def extract_stories(feed):
parsed = feedparser.parse(feed["feedUrl"])
index = 0
stories = []
for entry in parsed.entries:
index += 1
guid = entry.title
try:
guid = entry.id
except AttributeError as error:
print(" .~. no ID, defaulting to title") # so that we don't die when there isn't an ID
story = {
"index": index,
"title": entry.title,
"description": entry.description,
"channels": [feed["channel"]],
"published": entry.published,
"source": feed["sourceId"],
"guid": guid,
"max": len(parsed.entries),
"link": entry.link
}
stories.append(story)
return stories
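# Hedged usage sketch: extract_stories expects a dict with at least "feedUrl",
# "channel" and "sourceId" keys (inferred from the lookups above). The URL and
# ids below are placeholders, not values from the original project.
if __name__ == "__main__":
    example_feed = {
        "feedUrl": "https://example.com/rss.xml",
        "channel": "general",
        "sourceId": "example-source",
    }
    print(json.dumps(extract_stories(example_feed), indent=2))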
|
import requests
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
sess = requests.Session()
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
url = "https://neue-pressemitteilungen.de/post_part1.xml"
r = sess.get(url, headers=headers)
# root = ET.fromstring(r.text)
# links = root.findall('./loc')
# print(links)
soup = BeautifulSoup(r.text, 'lxml')
links = [elm.get_text() for elm in soup.find_all('loc')]
print(links)
|
# Multiples of 3 and 5
# 15/11/20, Robert McLeod (rbbi)
def SumMultiples(lim):
sum=0
for val in range(lim):
if val % 3 == 0 or val % 5 == 0:
sum+=val
return sum
if __name__ == "__main__":
print(SumMultiples(1000))
|
# Generated by Django 2.2.2 on 2019-08-27 00:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('admin01', '0005_goods'),
]
operations = [
migrations.DeleteModel(
name='Connect',
),
]
|
# LEVEL 31
# http://www.pythonchallenge.com/pc/ring/grandpa.html
# where am I?
# a google goggles search tells me this is Koh Samui, Thailand
# http://www.pythonchallenge.com/pc/rock/grandpa.html
# u: kohsamui
# p: thailand
# That was too easy. You are still on 31...
# <img src="mandelbrot.gif" border="0">
# <window left="0.34" top="0.57" width="0.036" height="0.027"/>
# <option iterations="128"/>
# </img>
from collections import Counter
from PIL import Image
def scale(num, num_start, num_end, scale_start, scale_end):
"""Convert a number in one scale (num_start, num_end) to another scale (scale_start, scale_end)."""
orig_s = (num - num_start) / (num_end - num_start)
new_s = orig_s * (scale_end - scale_start) + scale_start
return new_s
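# For example, with the window below (left=0.34, width=0.036), scale(320, 0, 640, 0.34, 0.376)
# returns 0.358: the middle pixel column of a 640-wide image maps to the middle of the window.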
left = 0.34
top = 0.57
width = 0.036
height = 0.027
iterations = 128
SAME, COLOR1, COLOR2 = 0, 1, 2
img = Image.open('data/mandelbrot.gif')
img.show('original')
img_w, img_h = img.size
diff_img = Image.new(img.mode, img.size)
new_img = Image.new(img.mode, img.size)
for y in range(img_h):
for x in range(img_w):
x0 = scale(x, 0, img_w, left, left + width)
y0 = scale(y, 0, img_h, top, top + height)
c = complex(x0, y0)
z = complex(0, 0)
iteration = 0
while abs(z) < 2 and iteration < iterations:
z = z ** 2 + c
iteration += 1
iteration -= 1
inv_y = img_h - y - 1
new_img.putpixel((x, inv_y), iteration)
diff = iteration - img.getpixel((x, inv_y))
diff_img.putpixel((x, inv_y), COLOR1 if diff < 0 else COLOR2 if diff > 0 else SAME)
new_img.putpalette(img.getpalette())
new_img.show('my mandelbrot')
pal = [0] * 256 * 3 # 256 colors with 3 bands, set to black
pal[COLOR1 * 3 + 0] = 255 # red band(0) for color 1
pal[COLOR2 * 3 + 2] = 255 # blue band(2) for color 2
diff_img.putpalette(pal)
diff_img.show('differences')
# 1679 = 23 * 73
diffs = [1 if pixel == COLOR2 else 0 for pixel in diff_img.getdata() if pixel != SAME]
print(len(diffs))
print(Counter(diffs))
result_img = Image.new('1', (73, 23))
result_img.putdata(diffs)
result_img.show()
result_img = Image.new('1', (23, 73))
result_img.putdata(diffs)
result_img.show()
neg = [not d for d in diffs]
result_img = Image.new('1', (73, 23))
result_img.putdata(neg)
result_img.show()
result_img = Image.new('1', (23, 73))
result_img.putdata(neg)
result_img.show()
|
# coding:utf-8
import random
# Worked examples: operations on text files
# Example 1: repeatedly draw a random number from 1 to 122, convert it to the corresponding ASCII character, and write the
# characters that are uppercase letters, lowercase letters, digits or special symbols (e.g. '\n', '\r', '*', '&', '^', '$') to test.txt, stopping once the file position passes 10000.
with open('test.txt', 'w') as f:
while 1:
        # In Python, random.randint(a, b) returns a random integer n in the given range, where a is the lower bound and b the upper bound: a <= n <= b.
i = random.randint(1, 122)
x = chr(i)
if x.isupper() or x.islower() or x.isdigit() or x in ['\n', '\r', '*', '&', '^', '$']:
f.write(x)
if f.tell() > 10000:
break
# Example 2: print the first 100 characters of test.txt and then roughly the last 100 characters.
with open('test.txt', 'r') as f:
print(f.read(100))
    f.seek(9900) # move the file position to offset 9900, close to the end of the ~10000-character file
print(f.read())
# Example 3: print the contents of test.txt line by line.
with open('test.txt', 'r') as f:
for line in f:
print(line)
# Example 4: copy the text data in test.txt into a new text file.
f = open('test.txt', 'r')
g = open('test1.txt', 'w')
for contents in f:
g.write(contents)
f.close()
g.close()
# Example 5: count how many uppercase letters, lowercase letters and digits appear in test.txt
with open('test.txt', 'r') as f:
u = 0
l = 0
d = 0
for line in f.readlines():
for content in line:
if content.isupper():
u += 1
elif content.islower():
l += 1
elif content.isdigit():
d += 1
    print('Uppercase letters: %d, lowercase letters: %d, digits: %d' % (u, l, d))
|
print("Hello World")
print("Me")
print(2+3)
print("hi")
|
#
# @lc app=leetcode.cn id=86 lang=python3
#
# [86] Partition List
#
# @lc code=start
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
from typing import Optional  # needed for the Optional[...] annotations when run outside LeetCode
class Solution:
def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:
res_1, res_2 = ListNode(), ListNode()
curr_1, curr_2 = res_1, res_2
while head:
if head.val < x:
curr_1.next = head
curr_1 = curr_1.next
else:
curr_2.next = head
curr_2 = curr_2.next
head = head.next
curr_2.next = None
curr_1.next = res_2.next
return res_1.next
# @lc code=end
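# Hedged local test harness (not part of the submitted solution): build the
# LeetCode example list 1->4->3->2->5->2, partition around x = 3 and print the
# values, which should come out as [1, 2, 2, 4, 3, 5] since relative order is preserved.
def _build(vals):
    dummy = ListNode()
    cur = dummy
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next
node = Solution().partition(_build([1, 4, 3, 2, 5, 2]), 3)
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)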
|
import random
suits=('Hearts','Diamonds','Spades','Clubs')
ranks=('Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten','Jack','Queen','King','Ace')
values={'Two':2,'Three':3,'Four':4,'Five':5,'Six':6,'Seven':7,'Eight':8,'Nine':9,'Ten':10,'Jack':10,'Queen':10,'King':10,'Ace':11}
playing=True
class Card:
def __init__(self,rank,suit):
self.rank=rank
self.suit=suit
def __str__(self):
return self.rank+" of "+self.suit
class Deck:
def __init__(self):
self.deck=[]
for suit in suits:
for rank in ranks:
card=Card(rank,suit)
self.deck.append(card)
def __str__(self):
deck_comp=''
for card in self.deck:
deck_comp+='\n'+card.__str__()
return 'The deck contains'+deck_comp
def shuffle(self):
random.shuffle(self.deck)
def deal(self):
return self.deck.pop(0)
class Hand:
def __init__(self):
self.cards=[]
self.value=0
self.aces=0
def add_card(self,card):
self.cards.append(card)
self.value+=values[card.rank]
if card.rank=='Ace':
self.aces+=1
def adjust_for_ace(self):
while self.value>21 and self.aces:
self.value-=10
self.aces-=1
class Chips:
def __init__(self,total=100):
self.total=total
self.bet=0
def win_bet(self):
self.total+=self.bet
def lose_bet(self):
self.total-=self.bet
def take_bet(chips):
while True:
try:
chips.bet=int(input("How many chips would you like to bet?"))
except:
print("Please enter an Integer")
else:
if(chips.bet>chips.total):
print('You do not have enough chips. Available: {} chips'.format(chips.total))
else:
break
def hit(deck,hand):
single_card=deck.deal()
hand.add_card(single_card)
hand.adjust_for_ace()
def hit_or_stand(deck,hand):
global playing
while True:
x=input("\n Hit or Stand? h/s")
if x[0].lower()=='h':
hit(deck,hand)
elif x[0].lower()=='s':
print("Player Stands. Dealer's Turn")
playing=False
else:
print("Invalid Input")
continue
break
def show_some(player,dealer):
#Dealer's Cards
print("\n Dealer's Hand:")
print("[First Card Hidden]")
print(dealer.cards[1])
#Players Hand
print("\n Player's Hand:")
for card in player.cards:
print(card)
print(f"Value of Player's Hand: {player.value}")
def show_all(player,dealer):
#Dealer's Hand
print("\n Dealer's Hand:")
for card in dealer.cards:
print(card)
print(f"Value of Dealer's Hand: {dealer.value}")
#Players Hand
print("\n Player's Hand:")
for card in player.cards:
print(card)
print(f"Value of Player's Hand: {player.value}")
def player_busts(player,dealer,chips):
print("BUST PLAYER!")
chips.lose_bet()
def player_wins(player,dealer,chips):
print("PLAYER WINS!")
chips.win_bet()
def dealer_busts(player,dealer,chips):
print("PLAYER WINS! DEALER BUSTS")
chips.win_bet()
def dealer_wins(player,dealer,chips):
print("DEALER WINS!")
chips.lose_bet()
def push(player,dealer):
print("PLAYER AND DEALER TIE. PUSH")
while True:
print("WELCOME TO BLACKJACK")
deck=Deck()
deck.shuffle()
player_chips=Chips()
game_on=True
while game_on:
player_hand=Hand()
player_hand.add_card(deck.deal())
player_hand.add_card(deck.deal())
dealer_hand=Hand()
dealer_hand.add_card(deck.deal())
dealer_hand.add_card(deck.deal())
if player_chips.total==0:
print("No more chips available. Player OUT")
break
take_bet(player_chips)
show_some(player_hand,dealer_hand)
print("-----------------------")
while playing:
hit_or_stand(deck,player_hand)
show_some(player_hand,dealer_hand)
print('-----------------------')
if player_hand.value>21:
player_busts(player_hand,dealer_hand,player_chips)
break
if player_hand.value <= 21:
while dealer_hand.value <=17:
hit(deck,dealer_hand)
show_all(player_hand,dealer_hand)
print('------------------------')
if dealer_hand.value >21:
dealer_busts(player_hand,dealer_hand,player_chips)
elif dealer_hand.value>player_hand.value:
dealer_wins(player_hand,dealer_hand,player_chips)
elif player_hand.value > dealer_hand.value:
player_wins(player_hand,dealer_hand,player_chips)
else:
push(player_hand,dealer_hand)
print('\n Player remaining chips : {} '.format(player_chips.total))
new_round=input("Play again? y/n")
if new_round[0].lower()=='y':
playing=True
continue
else:
print("Thank you for playing ")
game_on=False
print("Your chips : {}".format(player_chips.total))
new_game=input("New Game? y/n")
if new_game[0].lower()=='y':
playing=True
continue
else:
print("Thank you for playing ")
break
|
# Tektronix TBS 1052B-EDU oscilloscope
#
# Acquires data from channels 1 and/or 2,
# using a USB port on the PC and the protocol called
# VISA (Virtual Instrument Software Architecture).
#
#
# Laboratorio 3, 2do Cuatrimestre 2018, DF, FCEN, UBA.
# Cesar Moreno
import numpy as np
import visa
import matplotlib.pyplot as plt
def AdqTek(osci, CHx):
    '''
    Function to acquire data from one channel of the oscilloscope
    Input:
        osci - IO STREAM - Connection object for the oscilloscope
        CHx - INT or STR - Channel to acquire data from
    Output:
        t - Numpy array - Time values of the measured channel
        V - Numpy array - Voltage values of the measured channel
    '''
    assert str(CHx) in ('1', '2'), "The chosen channel is invalid"
    osci.write('DATA:SOURCE CH'+str(CHx)) # Tell it which channel to acquire
    # Transmission mode: binary (i.e. the digitized waveform)
    osci.write('DAT:ENC RPB')
    osci.write('DAT:WID 1')
    # To translate the data the scope returns into voltages (V) we need some scale constants.
    # We also need some constants to build the time axis (t).
    # For that we query the scope for some of its formatting data,
    # the WaveForM Preambles (WFMPre)
    XZE, XIN, YZE, YMU, YOFF = osci.query_ascii_values('WFMPre:XZE?;XIN?;YZE?;YMU?;YOFF?;', separator=';')
    # XZEro >> The time instant corresponding to the first sample.
    # XINcr >> The sampling time step.
    # YZEro >> How much must be added to Y to obtain V.
    # YMUlt >> The vertical scale factor (amplification).
    # YOFFs >> The vertical offset.
    # Ask the scope for the digitized CURVe values (hence binary_values)
    # and, while at it, pack them into a numpy array
    data = osci.query_binary_values('CURV?', datatype='B', container=np.array)
    # Recover the actual recorded voltages using the scale parameters fetched above
    V = (data - YOFF)*YMU + YZE;
    # Build the time vector from the fetched parameters
    XEND = XZE + XIN*len(V);
    t = np.arange(XZE, XEND, XIN)
    return t, V
# First check that the oscilloscope really is properly connected
rm = visa.ResourceManager()
instrumentos = rm.list_resources()
# This lists every available device, one of which should be
# "USB0::0x0699::0x0368::C017044::INSTR", where the terms indicate the
# instrument's "port::brand::model::serial_number".
# With that resource name we open the link to the oscilloscope;
# list_resources() returns a tuple, so take the instrument's entry (here the first one).
with rm.open_resource(instrumentos[0]) as osc:
    '''
    There are two basic ways of interacting with the oscilloscope. One is to ask it for
    information, that is, to send it a question (query). For that it is enough to use
    osc.query("Whatever_you_want_to_ask_the_scope")
    Example: osc.query("*IDN?") << Asks how the oscilloscope identifies itself
    The other thing you may want to do is SET some parameter, or ask the scope to
    carry out some operation; for that use:
    osc.write("Whatever_you_ask_of_the_scope") # Analogous to MATLAB's fprintf
    Example: osc.write("AUTOSet EXECute") << Runs an auto-configuration of the channels
    To know what can be asked of or requested from the oscilloscope you have to read and
    understand the instrument's programmer manual.
    '''
    # Now ask the oscilloscope something basic: to identify itself.
    # If this fails, communication is not correctly established and there is
    # no point in running the rest of the program.
    print(osc.query('*IDN?')) # Ask for the identification and print it to the screen.
    tiempo1, v1 = AdqTek(osc, 1) # Read CH1
    tiempo2, v2 = AdqTek(osc, 2) # Read CH2
    # Plot the acquired values
    plt.figure() # Create a figure for the plot
    plt.plot(tiempo1, v1) # Add the channel 1 points to the plot
    plt.plot(tiempo2, v2) # Add the channel 2 points to the plot
    plt.grid(True) # Add grids on both axes
# Once the with block is exited, the connection to the oscilloscope is closed automatically
|
import random
def getMünzVerteilung():
coinNbr = []
coinNbr.append(random.choice(range(4)))
coinNbr.append(random.choice(range(4, 9)))
coinNbr.append(random.choice(range(9, 16)))
return coinNbr
def getFremdmünzen(wurf):
if wurf < 17: region = 'Bornland'
elif wurf < 19: region = 'Vallusa'
elif wurf < 41: region = 'Horasreich'
elif wurf < 48: region = 'Bergkönigreich'
elif wurf < 50: region = 'Xeraanien (schwarze Lande)'
elif wurf < 52: region = 'Oron (schwarze Lande)'
elif wurf < 68: region = 'Mhaharanyat Aranien'
elif wurf < 79: region = 'Kalifat'
elif wurf < 88: region = 'Großemirat Mengbilla'
elif wurf < 104: region = 'Alanfanisches Imperium'
elif wurf < 111: region = 'Káhet Ni Kemi'
return region
# Determine money and valuables
# add 'Karfunkel' to the list if those should also be found
def getWertvolles():
geld_wertvoll = [
'Kleingeld', 'Kleingeld (Fremdwährung)', 'Edelsteine', 'schöne Steine',
'Pokal', 'Kleingeld', 'Kleingeld', 'Kleingeld'
]
return random.choice(geld_wertvoll)
# Determine building materials and raw resources
def getMetallRohstoffe():
metall_rohstoffe = ['Metalle', 'Hölzer', 'Minerale', 'Bein']
return random.choice(metall_rohstoffe)
# Determine herbs, potions and elixirs
def getKräuterTränkeElixire():
kräuter_tränke_elixiere = [
'Heilpflanze', 'Nutzpflanze', 'Giftpflanze',
'Tränke, Elixiere, Gifte & Salben', 'Tabak'
]
return random.choice(kräuter_tränke_elixiere)
def getMetalle(wert):
if wert < 11: metall = 'Eisen'
elif wert < 21: metall = 'Kupfer'
elif wert < 31: metall = 'Bronze'
elif wert < 41: metall = 'Messing'
elif wert < 51: metall = 'Zinn'
elif wert < 61: metall = 'Nickel'
elif wert < 71: metall = 'Katzengold'
elif wert < 76: metall = 'Neckkupfer'
elif wert < 81: metall = 'Stahl'
elif wert < 83: metall = 'unbekannt 81-82'
elif wert < 85: metall = 'unbekannt 83-84'
elif wert < 86: metall = 'unbekannt 85'
elif wert < 88: metall = 'Silber'
elif wert < 89: metall = 'unbekannt 88'
elif wert < 91: metall = 'Gold'
elif wert < 93: metall = 'unbekannt 91-92'
elif wert < 95: metall = 'unbekannt 93 - 94'
elif wert < 97: metall = 'Mondsilber/Platin'
elif wert < 100: metall = 'unbekannt 99'
return metall
def getHölzer(wert):
if wert < 11: holz = 'Fichte'
elif wert < 21: holz = 'Kiefer'
elif wert < 26: holz = 'Lärche'
elif wert < 31: holz = 'Tanne'
elif wert < 36: holz = 'Ahorn'
elif wert < 41: holz = 'Birke'
elif wert < 46: holz = 'Bosparanie'
elif wert < 51: holz = 'Buche'
elif wert < 56: holz = 'Erle'
elif wert < 61: holz = 'Esche'
elif wert < 66: holz = 'Espe'
elif wert < 71: holz = 'Hainbuche'
elif wert < 76: holz = 'Linde'
elif wert < 78: holz = 'Pappel'
elif wert < 80: holz = 'Rosskastanie'
elif wert < 82: holz = 'Weide'
elif wert < 84: holz = 'Eibe'
elif wert < 86: holz = 'Pinie'
elif wert < 88: holz = 'Zeder'
elif wert < 90: holz = 'Zypresse'
elif wert < 92: holz = 'unbekannt 90-91'
elif wert < 94: holz = 'unbekannt 92-93'
elif wert < 95: holz = 'unbekannt 94'
elif wert < 96: holz = 'unbekannt 95'
elif wert < 97: holz = 'unbekannt 96'
elif wert < 99: holz = 'unbekannt 97-98'
elif wert < 101: holz = 'unbekannt 99-100'
return holz
def getMinerale(wert):
if wert < 11: mineral = 'Sandstein'
elif wert < 21: mineral = 'Basalt'
elif wert < 26: mineral = 'Kalkstein'
elif wert < 36: mineral = 'Granit'
elif wert < 56: mineral = 'unbekannt 36-55'
elif wert < 61: mineral = 'unbekannt 56-60'
elif wert < 66: mineral = 'unbekannt 61-65'
elif wert < 76: mineral = 'unbekannt 66-75'
elif wert < 81: mineral = 'unbekannt 76-80'
elif wert < 86: mineral = 'unbekannt 81-85'
elif wert < 88: mineral = 'unbekannt 86-87'
elif wert < 92: mineral = 'unbekannt 88-91'
elif wert < 93: mineral = 'unbekannt 92'
elif wert < 94: mineral = 'unbekannt 93'
elif wert < 95: mineral = 'unbekannt 94'
elif wert < 96: mineral = 'unbekannt 95'
elif wert < 97: mineral = 'unbekannt 96'
elif wert < 98: mineral = 'unbekannt 97'
elif wert < 99: mineral = 'unbekannt 98'
elif wert < 100: mineral = 'unbekannt 99'
elif wert < 101: mineral = 'unbekannt 100'
return mineral
def getEdelsteine(wert):
if wert < 6: edelstein = 'Tierzahn'
elif wert < 10: edelstein = 'Tierhorn'
elif wert < 14: edelstein = 'Tierknochen'
elif wert < 18: edelstein = 'Koralle'
elif wert < 22: edelstein = 'Meerschaum'
elif wert < 26: edelstein = 'Schildpatt'
elif wert < 30: edelstein = 'Perlmutt'
elif wert < 34: edelstein = 'Perle'
elif wert < 38: edelstein = 'unbekannt 34-37'
elif wert < 42: edelstein = 'Obsidian'
elif wert < 46: edelstein = 'unbekannt 42-45'
elif wert < 50: edelstein = 'Roter Obsidian'
elif wert < 53: edelstein = 'Onyx'
elif wert < 55: edelstein = 'Baryt'
elif wert < 57: edelstein = 'Malachit'
elif wert < 59: edelstein = 'Amethyst'
elif wert < 61: edelstein = 'Achat'
elif wert < 65: edelstein = 'Karneol'
elif wert < 67: edelstein = 'Bergkristall'
elif wert < 69: edelstein = 'Aquamarin'
elif wert < 71: edelstein = 'Rosenquarz'
elif wert < 73: edelstein = 'Aventurin'
elif wert < 75: edelstein = 'Rauchquarz'
elif wert < 77: edelstein = 'Magnetit'
elif wert < 79: edelstein = 'Turmalin'
elif wert < 81: edelstein = 'Granat'
elif wert < 83: edelstein = 'Lapislazuli'
elif wert < 85: edelstein = 'Topas'
elif wert < 87: edelstein = 'Opal'
elif wert < 89: edelstein = 'Feuer Opal'
elif wert < 91: edelstein = 'Rosa Jade'
elif wert < 93: edelstein = 'Perlmutt'
elif wert < 95: edelstein = 'Bernstein'
elif wert < 97: edelstein = 'Zirkon'
elif wert < 98: edelstein = 'Smaragd'
elif wert < 99: edelstein = 'Saphir'
elif wert < 100: edelstein = 'Rubin'
elif wert < 101: edelstein = 'Diamant'
return edelstein
def getGiftigePflanzen(wert):
if wert < 2: pflanze = 'Rattenpilz'
elif wert < 6: pflanze = 'Hollbeere'
return pflanze
def getHeilpflanzen(wert):
if wert < 5: pflanze = 'Donf'
elif wert < 9: pflanze = 'Tarnele'
elif wert < 13: pflanze = 'Wirselkraut'
elif wert < 18: pflanze = 'Einbeere'
return pflanze
def getNutzpflanzen(wert):
if wert < 9: pflanze = 'Alraune'
elif wert < 10: pflanze = 'Alveranie'
elif wert < 11: pflanze = 'Messergras'
elif wert < 16: pflanze = 'Carlog'
elif wert < 21: pflanze = 'Egelschreck'
elif wert < 26: pflanze = 'Gulmond'
elif wert < 35: pflanze = 'Rahjalieb'
return pflanze
def getTEGS(wert):
if wert < 2: pflanze = 'Ghulgift'
elif wert < 3: pflanze = 'Gonede'
elif wert < 4: pflanze = 'Purpurblitz'
elif wert < 5: pflanze = 'Schwarzer Lotos'
elif wert < 7: pflanze = 'Unsichtbarkeitselixier'
elif wert < 9: pflanze = 'Verwandlungselixier'
elif wert < 11: pflanze = 'Boabungaha'
elif wert < 13: pflanze = 'Feuerzunge'
elif wert < 15: pflanze = 'Halbgift'
elif wert < 18: pflanze = 'Rattenpilzgift'
elif wert < 23: pflanze = 'Bannstaub'
elif wert < 28: pflanze = 'Berserkerelixier'
elif wert < 33: pflanze = 'Schwadenbeutel'
elif wert < 38: pflanze = 'Brabacudagift'
elif wert < 43: pflanze = 'Kelmon'
elif wert < 48: pflanze = 'Kukris'
elif wert < 53: pflanze = 'Visarnetgift'
elif wert < 61: pflanze = 'Alchimistensäure'
elif wert < 69: pflanze = 'Antidot'
elif wert < 77: pflanze = 'Liebestrunk'
elif wert < 85: pflanze = 'Schlaftrunk'
elif wert < 93: pflanze = 'Wasserodem'
elif wert < 101: pflanze = 'Zaubertrank'
elif wert < 109: pflanze = 'Mandragora'
elif wert < 117: pflanze = 'Marbos Ruhe'
elif wert < 127: pflanze = 'Heiltrank'
elif wert < 137: pflanze = 'Leuchtkreide'
elif wert < 147: pflanze = 'Pastillen gegen Schmerzen'
elif wert < 157: pflanze = 'Waffenbalsam'
elif wert < 167: pflanze = 'Betäubungsgift'
elif wert < 177: pflanze = 'Höhlenspinnengift'
elif wert < 187: pflanze = 'Hollbeerenbrechmittel'
elif wert < 197: pflanze = 'Höhlenbovistgift'
return pflanze
#########
# Loot
#########
def loot_select(erg):
loot = {
        # Exceptional items
1: {
1: 'magisches Artefakt',
2: 'leuchtender Gegenstand',
3: '3 Abenteuerpunkte',
4: 'unbekannter Gegenstand',
'T': 'Außergewöhnliches'
},
2: {
1: 'Zeitmessung',
2: 'Lokalisationsinstrumente',
3: 'Spiegel',
4: 'Buch',
5: 'Feinwerkzeug',
6: 'Honig',
7: 'Silberflöte',
8: 'Lupe',
9: 'Brennglas',
10: 'Abakus',
11: 'Trommel',
12: 'Gravurwerkzeug',
13: 'Fernrohr, klein',
14: 'Fernrohr, aufschiebbar',
15: 'Brille',
16: 'Kristallkugel',
17: 'Fanfare',
18: 'Laute',
19: 'Harfe',
20: 'Alchemieset',
'T': 'Besonderes'
},
3: {
1: 'Banner, Wappen',
2: 'Brief',
3: 'Spezialität',
4: 'Heimaterde',
5: 'Selbstgebrautes',
6: 'Wasserpfeife',
'T': 'Regionales, Persönliches'
},
6: {
1: 'Zähne',
2: 'Haut, Leder',
3: 'Pfoten, Ohren',
4: 'Federn',
5: 'Horn',
6: 'alchemistische Zutat (Blut, Auge, Schwanz, Speichel)',
'T': 'Tierisches'
},
7: {
1: 'Salzfleisch',
2: 'Dörrobst',
3: 'Trockenobst',
4: 'Körner',
5: 'Wurst',
6: 'Käse',
'T': 'Nahrung, Verderbliches'
},
9: {
1: 'Angelhaken',
2: 'Schnur',
3: 'Kohlestifte',
4: 'Hufeisen',
5: 'Leine',
6: 'Sattel',
7: 'Käfig',
8: 'Fischernetz',
9: 'Stundenglas',
10: 'Kerzenständer',
11: 'Pfeife',
12: 'Tabakdose',
13: 'Holzflöte',
'T': 'Sonstiges, Kram'
},
10: {
1: 'Korsett',
2: 'Hemd, Bluse',
3: 'Hose',
4: 'Handschuhe',
5: 'Kopfbedeckung',
6: 'Socken',
7: 'Unterkleidung',
8: 'Gürtel ',
9: 'Schuhe',
10: 'Mantel, Weste',
11: 'Jacke',
12: 'Tuche',
13: 'Pelze',
14: 'Fell',
15: 'Tunika',
16: 'Umhang',
17: 'Lederschürze mit Taschen',
18: 'Horasiches Ballkleid, teuer',
19: 'Stiefel',
20: 'Robe',
'T': 'Kleidung'
},
11: {
1: 'Ring',
2: 'Armband',
3: 'Halskette',
4: 'Fusskette',
5: 'Stirmreif, Diadem',
6: 'Ohrringe',
7: 'Spangen',
8: 'Fibel',
9: 'Knopf',
10: 'Fächer, Elfenbein und Seide',
'T': 'Schmuck'
},
12: {
1: 'Kohlestifte',
2: 'Federkiele',
3: 'Tusche',
4: 'Lineal',
5: 'Blatt',
6: 'Pergament',
7: 'Pinsel',
8: 'Heft',
9: 'Buch',
10: 'Schriftrolle',
11: 'Federmesser',
12: 'Schiefertafel',
13: 'Siegelwachs',
'T': 'Schreibtischmaterial'
},
13: {
1: 'Messer',
2: 'Hammer',
3: 'Säge',
4: 'Zange',
5: 'Brecheisen',
6: 'Beil',
7: 'Feile',
8: 'Schere',
9: 'Sichel',
10: 'Hobel',
11: 'Handschellen',
12: 'Dreschflegel',
13: 'Hammer',
14: 'Spitzhacke',
15: 'Spaten',
16: 'Holzeimer',
17: 'Tätowierwerkzeug',
'T': 'Werkzeug'
},
14: {
1: 'Figur',
2: 'Puppe',
3: 'Würfel',
4: 'Holzwaffe',
5: 'Jonglierball',
6: 'Kartenspiel',
7: 'Bild',
8: 'Glöckchen',
9: 'Brettspiel, Holz',
'T': 'Spielzeug, Deko'
},
15: {
1: 'Decke',
2: 'Seil, 5 Schritt',
3: 'Seil, 10 Schritt',
4: 'Netz',
5: 'Kette',
6: 'Pflöcke',
7: 'Zelt',
8: 'Schlafsack',
9: 'Wanderstab',
10: 'Hängematte',
11: 'Kletterhaken, 5',
12: 'Wurfhaken',
13: 'Nadel und Garn',
14: 'Proviantpaket, 3 Tage',
15: 'Strickleiter, 10 Schritt',
16: 'Wundnähzeug',
17: 'Verbände',
18: 'Feldflasche',
19: 'Wasserschlauch',
20: 'Wolldecke, dick',
'T': 'Reisebedarf'
},
16: {
1: 'Seife',
2: 'Öl (Reinigung)',
3: 'Kamm',
4: 'Schwamm',
5: 'Schminke',
6: 'Puder',
7: 'Duftflächschen',
8: 'Rasiermesser',
9: 'Lippenrot',
10: 'Bürste',
'T': 'Körperpflege'
},
17: {
1: 'Schatulle',
2: 'Tasche',
3: 'Gürteltasche',
4: 'Sack',
5: 'Beutel',
6: 'Flasche',
7: 'Rucksack',
8: 'Salbendöschen',
9: 'kleiner Rucksack',
10: 'Umhängetasche',
'T': 'Behälter'
},
18: {
1: 'Becher',
2: 'Teller',
3: 'Besteckset',
4: 'Schöpfkelle',
5: 'Topf',
6: 'Pfanne',
7: 'Schlauch (Flüssigkeiten)',
8: 'Brotbeutel',
9: 'Bratspieß',
10: 'Trinkhorn',
'T': 'Geschirr'
},
19: {
1: 'Kerze',
2: 'Pechfackel',
3: 'Öllampe',
4: 'Lampenöl',
5: 'Feuerstein & Stahl',
6: 'Zunderkästchen',
7: 'Laterne',
8: 'Sturmlaterne',
9: 'Stundenkerze',
10: 'Kerzenleuchter',
'T': 'Beleuchtung'
},
20: {
1: 'Schwertscheide',
2: 'Dolchscheide',
3: 'Waffengurt (Wurfmesser)',
4: 'Gehänge (Axt)',
5: 'Köcher (Pfeile)',
6: 'Wetzstahl',
7: 'Waffenpflegeöl',
8: 'Bogensehne',
9: 'Köcher (Bolzen)',
10: 'Schultergurt',
11: 'Salbenfett',
12: 'Armbrustsehne',
'T': 'Waffenzubehör'
},
}
return loot.get(erg, 'Kein gültiger Wurf')
def zustand_select(erg):
zustand = {
        1: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        2: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        3: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        4: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        5: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        6: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        7: 'schlecht, reparaturbedürftig, verbogen, fast leer',
        8: 'gebraucht, nicht neu aber erhalten',
        9: 'gebraucht, nicht neu aber erhalten',
        10: 'gebraucht, nicht neu aber erhalten',
        11: 'gebraucht, nicht neu aber erhalten',
        12: 'gebraucht, nicht neu aber erhalten',
        13: 'gebraucht, nicht neu aber erhalten',
        14: 'gebraucht, nicht neu aber erhalten',
        15: '(fast) wie neu',
        16: '(fast) wie neu',
        17: '(fast) wie neu',
        18: '(fast) wie neu',
        19: 'besonders schönes Stück, sehr gut erhalten',
        20: 'besonders schönes Stück, sehr gut erhalten'
}
return zustand.get(erg, 'Kein gültiger Wurf')
def qualität_select(erg):
qualität = {
1: {
'G': 'sehr klein, ein Karat',
'V': 'einfach',
'A': 1
},
2: {
'G': 'sehr klein, ein Karat',
'V': 'handwerklich gut',
'A': 1
},
3: {
'G': 'sehr klein, ein Karat',
'V': 'einfach',
'A': 2
},
4: {
'G': 'sehr klein, ein Karat',
'V': 'handwerklich gut',
'A': 2
},
5: {
'G': 'sehr klein, ein Karat',
'V': 'meisterlich',
'A': 1
},
6: {
'G': 'klein, zwei Karat',
'V': 'einfach',
'A': 1
},
7: {
'G': 'klein, zwei Karat',
'V': 'handwerklich gut',
'A': 1
},
8: {
'G': 'klein, zwei Karat',
'V': 'einfach',
'A': 2
},
9: {
'G': 'klein, zwei Karat',
'V': 'handwerklich gut',
'A': 2
},
10: {
'G': 'klein, zwei Karat',
'V': 'meisterlich',
'A': 1
},
11: {
'G': 'mittel, fünf Karat',
'V': 'einfach',
'A': 1
},
12: {
'G': 'mittel, fünf Karat',
'V': 'handwerklich gut',
'A': 1
},
13: {
'G': 'mittel, fünf Karat',
'V': 'einfach',
'A': 2
},
14: {
'G': 'mittel, fünf Karat',
'V': 'handwerklich gut',
'A': 2
},
15: {
'G': 'mittel, fünf Karat',
'V': 'meisterlich',
'A': 1
},
16: {
'G': 'groß, zehn Karat',
'V': 'einfach',
'A': 1
},
17: {
'G': 'groß, zehn Karat',
'V': 'handwerklich gut',
'A': 1
},
18: {
'G': 'groß, zehn Karat',
'V': 'einfach',
'A': 2
},
19: {
'G': 'groß, zehn Karat',
'V': 'handwerklich gut',
'A': 2
},
20: {
'G': 'groß, zehn Karat',
'V': 'meisterlich',
'A': 1
},
}
return qualität.get(erg, 'Kein gültiger Wurf')
def karfunkel_select(erg):
karfunkel = {
1: {
'G': 'Stecknadelkopf',
'D': 'Baumdrachen',
'A': 'jungen'
},
2: {
'G': 'Stecknadelkopf',
'D': 'Baumdrachen',
'A': 'ausgewachsen'
},
3: {
'G': 'Stecknadelkopf',
'D': 'Baumdrachen',
'A': 'uralten'
},
4: {
'G': 'Erbsen',
'D': 'Meckerdrachen',
'A': 'jungen'
},
5: {
'G': 'Erbsen',
'D': 'Meckerdrachen',
'A': 'ausgewachsen'
},
6: {
'G': 'Erbsen',
'D': 'Meckerdrachen',
'A': 'uralten'
},
7: {
'G': 'Erbsen',
'D': 'Höhlendrachen',
'A': 'jungen'
},
8: {
'G': 'Erbsen',
'D': 'Höhlendrachen',
'A': 'ausgewachsen'
},
9: {
'G': 'Erbsen',
'D': 'Höhlendrachen',
'A': 'uralten'
},
10: {
'G': 'Daumenkuppen',
'D': 'Westwinddrachen',
'A': 'jungen'
},
11: {
'G': 'Daumenkuppen',
'D': 'Westwinddrachen',
'A': 'ausgewachsen'
},
12: {
'G': 'Daumenkuppen',
'D': 'Westwinddrachen',
'A': 'uralten'
},
13: {
'G': 'Daumenkuppen',
'D': 'Perldrachen',
'A': 'jungen'
},
14: {
'G': 'Daumenkuppen',
'D': 'Perldrachen',
'A': 'ausgewachsen'
},
15: {
'G': 'Daumenkuppen',
'D': 'Perldrachen',
'A': 'uralten'
},
16: {
'G': 'Hühnerei',
'D': 'Kaiserdrachen',
'A': 'jungen'
},
17: {
'G': 'Hühnerei',
'D': 'Kaiserdrachen',
'A': 'ausgewachsen'
},
18: {
'G': 'Hühnerei',
'D': 'Kaiserdrachen',
'A': 'uralten'
},
}
return karfunkel.get(erg, 'Kein gültiger Wurf')
def münzen_select(region):
münzen = {
'Bornland': {
1: 'Deut',
2: 'Groschen',
3: 'Batzen'
},
'Vallusa': {
1: 'Flindrich',
2: 'Stüber',
3: 'Witten'
},
'Horasreich': {
1: 'Heller',
2: 'Silber',
3: 'Dukat'
},
'Bergkönigreich': {
1: 'Atebrox',
2: 'Arganbrox',
3: 'Auromox'
},
'Xeraanien (schwarze Lande)': {
1: 'Splitter',
2: 'Zholvaro',
3: 'Borbaradstaler'
},
'Oron (schwarze Lande)': {
1: 'Heller',
2: 'Silber',
3: 'Dukat'
},
'Mhaharanyat Aranien': {
1: 'Hallah',
2: 'Schekel',
3: 'Dinar'
},
'Kalifat': {
1: 'Muwlat',
2: 'Zechine',
3: 'Marawedi'
},
'Großemirat Mengbilla': {
1: 'Tessar',
2: 'Telár',
3: 'Dekat'
},
'Alanfanisches Imperium': {
1: 'Dirham',
2: 'Oreal',
3: 'Dublone'
},
'Káhet Ni Kemi': {
1: 'Chryskl',
2: 'Hedsch',
3: 'Suvar'
}
}
return münzen.get(region, 'Kein gültiger Wurf')
|
# Hangman Game by Jake Sanders
print("Hangman Game by JSanders | November 16, 2018\n")
# Open wordlist.txt so that we can access the words to choose from
word_list_file = open("wordlist.txt", "r")
# Create a local list of all of the words in wordlist.txt
word_list = word_list_file.readlines()
# Import random module to use for random word generator
from random import seed
from random import randint
# Random value generator
seed()
value = randint(0, len(word_list)-1)
# Using the random value as an index in the word list to pick a random word
raw_random_word = word_list[value]
# Putting the random word into str_random_word to display the random word for my own purposes while coding
str_random_word = raw_random_word[0:len(raw_random_word)-1]
# Putting the random word into a list so that it can be indexed
random_word = list(raw_random_word[0:len(raw_random_word)-1])
# print(str_random_word)
# print(len(random_word))
# Start of game sequence
print("Welcome to Hangman! A random word will be generated for you.")
# Set-up of the hidden word visual so that the player can see the length of the word and correct guesses
# hidden_word = list()
i = 0
space = " "
hidden_word = ["_"]*len(random_word)
# Setting the number of incorrect guesses allowed based on a choice of one of three difficulties by the user
incorrect_guesses = 0
while incorrect_guesses == 0:
difficulty = input("Choose difficulty (easy, medium, or hard): ")
if difficulty == "easy":
incorrect_guesses = 9
elif difficulty == "medium":
incorrect_guesses = 6
elif difficulty == "hard":
incorrect_guesses = 3
else:
print("Please select a valid difficulty.")
print("You will receive " + str(incorrect_guesses) + " incorrect guesses.\n")
# Import ascii_lowercase in order to check validity of user guesses
from string import ascii_lowercase
# Initializing the guesses remaining to be the same as the difficulty defined allowable incorrect guesses.
guesses_left = incorrect_guesses
# Initializing an empty list to which each user guess will be added
all_guesses = list()
# Displaying the visual of the blanks for each unguessed letter
print(space.join(hidden_word))
# THIS IS THE PRIMARY LOGIC LOOP FOR THE GAME
while guesses_left > 0:
guess = input("\nPlease guess a letter: \n")
# Checking if the guess has already been guessed
if guess in all_guesses:
print("You have already guessed this letter.")
continue
# Checking that the guess is a string, that the guess is only one character long and that it is a lowercase letter
    if isinstance(guess, str) and len(guess) == 1 and guess in ascii_lowercase:
all_guesses.append(guess)
index = 0
correct_guess = 0
''' The random word is now searched to see if the guess matches any letters. The initialized variable 'index' is
set to 0 and is increased after each letter of the random word is checked to be the same as the guess. If the
guessed letter matches any letters in the random word, then the hidden word list is modified to add that letter
to the correct spot. The correct_guess counter will also increment up for each matched letter in order to
identify whether to subtract a guess from guesses_left.
'''
while index <= len(str_random_word):
index = str_random_word.find(guess, index)
if index != -1:
hidden_word[index] = guess
correct_guess += 1
elif index == -1:
if correct_guess == 0:
guesses_left -= 1
break
index += 1
        ''' After the random word has been searched for the guessed letter, if the hidden word has been modified to be
        the same as the random word, then the user has correctly guessed all of the letters in the random word.
        '''
if hidden_word == random_word:
print(space.join(hidden_word))
print("\nYOU WIN!")
break
# Otherwise, the modified (or not) hidden word visual is displayed as well as guess information.
print(space.join(hidden_word))
print("\nYou have " + str(guesses_left) + " guesses remaining.")
print("Letters guessed: " + " ".join(all_guesses))
# If the guess did not meet the if statement criteria for a valid guess, then it will give instructions and return
# to the top for another guess.
else:
print("Invalid guess. Enter only one lowercase letter at a time.")
continue
if guesses_left == 0:
print("\nYOU LOSE!")
|
__all__ = [
'SearchReferenceCommitForEvent',
]
from limpyd import fields
from limpyd_jobs import STATUSES
from limpyd_jobs.utils import compute_delayed_until
from gim.core.models import IssueEvent, Commit
from .base import DjangoModelJob
class EventJob(DjangoModelJob):
"""
Abstract job model for jobs based on the Event model
"""
abstract = True
model = IssueEvent
@property
def event(self):
if not hasattr(self, '_event'):
self._event = self.object
return self._event
@property
def repository(self):
if not hasattr(self, '_repository'):
self._repository = self.event.repository
return self._repository
class SearchReferenceCommitForEvent(EventJob):
"""
When an event is a reference to a commit, we may not have it, so we'll
wait because it may have been fetched after the event was received
"""
queue_name = 'search-ref-commit-event'
nb_tries = fields.InstanceHashField()
def run(self, queue):
super(SearchReferenceCommitForEvent, self).run(queue)
try:
event = self.event
except IssueEvent.DoesNotExist:
# The event doesn't exist anymore, we can cancel the job
self.status.hset(STATUSES.CANCELED)
return None
try:
# try to find the matching commit
event.related_object = Commit.objects.filter(
authored_at__lte=event.created_at,
sha=event.commit_sha,
author=event.user
).order_by('-authored_at')[0]
except IndexError:
# the commit was not found
tries = int(self.nb_tries.hget() or 0)
if tries >= 10:
# enough tries, stop now
self.status.hset(STATUSES.CANCELED)
return None
else:
# we'll try again...
self.status.hset(STATUSES.DELAYED)
self.delayed_until.hset(compute_delayed_until(delayed_for=60*(min(tries, 5))))
self.nb_tries.hincrby(1)
return False
# commit found, save the event
event.save()
return True
|
import requests
import xmltodict
from basexml import buscaCliente, DisponibilidaDeServico, ConsultaCEP, getStatusCartaoPostagem, SolicitaEtiquetas,\
GeraDigitoVerificadorEtiquetas
# cliente = buscaCliente()
# x = cliente.contrato('9912208555', '0057018901', 'sigep', 'n5f9t8')
# # z = cliente.xmlData()
# y = cliente.retornoDados()
#
# print y
# servico = DisponibilidaDeServico()
#
# x = servico.contrato('13316672','40169','35669000','13146000', 0057018901)
# y = servico.retornodados()
#
# print y
#
# cep = ConsultaCEP()
#
# x = cep.cep('35668000')
# y = cep.retornodados()
# print y
#
# status = getStatusCartaoPostagem()
# x = status.contrato('0067511082', '14172907', 'jq5y6')
# y = status.retornodados()
# print y
etiquetas = SolicitaEtiquetas()
x = etiquetas.contrato('34028316000103','104707',5,'sigep', 'n5f9t8')
y = etiquetas.retornodados()
print y
|
import speech_recognition as sr
import pyttsx3
r = sr.Recognizer()
sample_rate = 48000
chunk_size = 2048
reply = {
"hi" : "Hi",
"hello" : "hello",
"hey" : "hey",
"your name" : "pybot",
"who" : "pybot"
} #anytopic
with sr.Microphone(sample_rate= sample_rate, chunk_size= chunk_size) as source:
r.adjust_for_ambient_noise(source)
print("Say something")
audio = r.listen(source)
    try:
        recognized = r.recognize_google(audio).lower()
        # reply is a dict, so iterate over its key/value pairs and answer the first matching keyword
        for k, v in reply.items():
            if k in recognized:
                engine = pyttsx3.init()
                engine.say(v)
                engine.runAndWait()
                break
        else:
            engine = pyttsx3.init()
            engine.say("Sorry I couldn't understand that")
            engine.runAndWait()
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
|
import time
def run_time(func):
    def tmp(*args):
        start = time.time()
        result = func(*args)
        end = time.time()
        print(round(end - start, 6))
        return result
    return tmp
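# Hedged usage sketch for the decorator above: wrap any positional-argument
# function; the elapsed wall-clock time is printed after each call and the
# wrapped function's return value is passed through.
@run_time
def busy_sum(n):
    return sum(range(n))
print(busy_sum(1_000_000))  # prints the elapsed time, then the sum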
|
from . import models
from rest_framework import serializers
import json
import isodate
class PlayerNameSerializer(serializers.ModelSerializer):
class Meta:
model = models.PlayerName
fields = ['name']
class SessionSerializer(serializers.ModelSerializer):
class Meta:
model = models.Session
fields = ['ip', 'started_at', 'ended_at']
class PlayerSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
names = PlayerNameSerializer(read_only=True, many=True)
kills = serializers.IntegerField()
deaths = serializers.IntegerField()
playtime = serializers.SerializerMethodField()
def get_playtime(self, obj):
return isodate.duration_isoformat(obj.playtime)
class Meta:
model = models.Player
fields = ['id', 'names', 'kills', 'deaths', 'ff_kills', 'ff_deaths', 'playtime']
class DamageTypeClassSerializer(serializers.ModelSerializer):
class Meta:
model = models.DamageTypeClass
fields = ['id', 'classname']
class LogSerializer(serializers.ModelSerializer):
class Meta:
model = models.Log
fields = ['id', 'crc', 'version', 'created_at']
class RoundSerializer(serializers.ModelSerializer):
log = LogSerializer(read_only=True)
class Meta:
model = models.Round
fields = ['id', 'winner', 'started_at', 'ended_at', 'version', 'map', 'num_players', 'is_interesting', 'num_kills', 'log']
class FragSerializer(serializers.ModelSerializer):
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
def get_killer(self, obj):
return {
'id': obj.killer.id,
'location': obj.killer_location
}
def get_victim(self, obj):
return {
'id': obj.victim.id,
'location': obj.victim_location
}
# TODO: use proper serializermethodfields
class Meta:
model = models.Frag
fields = ['id', 'damage_type', 'distance', 'killer', 'victim']
class VehicleFragSerializer(serializers.ModelSerializer):
killer = serializers.SerializerMethodField()
vehicle = serializers.SerializerMethodField()
def get_killer(self, obj):
return {
'id': obj.killer.id,
'team': obj.killer_team_index,
'pawn': obj.killer_pawn_class.classname if obj.killer_pawn_class else None,
'vehicle': obj.killer_vehicle_class.classname if obj.killer_vehicle_class else None,
'location': [
int(obj.killer_location_x),
int(obj.killer_location_y)
]
}
def get_vehicle(self, obj):
return {
'class': obj.vehicle_class.classname if obj.vehicle_class else None,
'location': [
int(obj.vehicle_location_x),
int(obj.vehicle_location_y)
]
}
class Meta:
model = models.VehicleFrag
fields = ['id', 'damage_type', 'time', 'killer', 'vehicle', 'distance']
class MapSerializer(serializers.ModelSerializer):
class Meta:
model = models.Map
fields = ['id', 'name', 'bounds', 'offset']
class JSONSerializerField(serializers.Field):
"""Serializer for JSONField -- required to make field writable"""
def to_representation(self, value):
json_data = {}
try:
json_data = json.loads(value)
except ValueError as e:
raise e
finally:
return json_data
def to_internal_value(self, data):
return json.dumps(data)
class EventSerializer(serializers.ModelSerializer):
data = JSONSerializerField()
class Meta:
model = models.Event
fields = ['type', 'data']
class PatronSerializer(serializers.ModelSerializer):
class Meta:
model = models.Patron
exclude = []
class AnnouncementSerializer(serializers.ModelSerializer):
class Meta:
model = models.Announcement
exclude = []
class TextMessageSerializer(serializers.ModelSerializer):
class Meta:
model = models.TextMessage
fields = ['id', 'type', 'message', 'sent_at', 'team_index', 'squad_index', 'sender', 'log']
class RallyPointSerializer(serializers.ModelSerializer):
player = serializers.SerializerMethodField()
def get_player(self, obj):
return {'id': obj.player.id, 'name': obj.player.name}
class Meta:
model = models.RallyPoint
exclude = []
fields = ['id', 'team_index', 'squad_index', 'player', 'spawn_count', 'is_established',
'establisher_count', 'destroyed_reason', 'round', 'location', 'created_at', 'lifespan']
|
t = int(input())
while t > 0:
w,h,n = map(int,input().split())
d = 1
while w % 2 == 0:
w = w//2
d = d*2
while h % 2 == 0:
h = h//2
d = d*2
if d >= n:
print("YES")
else:
print("NO")
t =t-1
|
from django.contrib import admin
# Register your models here.
# Register your models here.
from Rebbit.models import *
admin.site.register(Person)
admin.site.register(Sub_rebb)
admin.site.register(Post)
|
s,v,u=map(int,input().split())
print(s*v//u)
|
"""
Dada una lista de enteros y strings, devolver dos listas una con los enteros y otra con las strings
"""
lista=["jajjaj", 3,4,5,"payaso"]
enteros=[]
strings=[]
for item in lista:
tipo=type(item)
if tipo==type("a"):
strings.append(item)
else:
enteros.append(item)
print("Los enteros son: {}\nLas strings son: {}".format(enteros,strings))
|
from typing import Tuple
import mysql.connector
def create_connection(host: str = 'localhost', port: str = '3306',
user: str = 'root', password: str = '1104',
database: str = 'pbz2') -> Tuple:
connection = mysql.connector.connect(
host=host,
port=port,
user=user,
password=password,
database=database
)
cursor = connection.cursor()
return connection, cursor
def close_connection(connection, cursor):
    """
    Closes cursor and connection
    :param connection: connection to mysql database
    :param cursor: cursor for given connection
    """
    cursor.close()  # close the cursor before the connection it belongs to
    connection.close()
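# Hedged usage sketch: open a connection with the defaults above, run a trivial
# query to confirm the selected schema, and clean up. Requires a reachable
# MySQL server with the credentials configured in create_connection().
if __name__ == '__main__':
    conn, cur = create_connection()
    cur.execute("SELECT DATABASE()")
    print(cur.fetchone())
    close_connection(conn, cur)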
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step04_rerun_preliminary_regression_for_presidential_countries
# @Date: 2020/3/20
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m ConstructRegressionFile.Stata.step04_rerun_preliminary_regression_for_presidential_countries
"""
import os
from Constants import Constants as const
from .step02_rerun_preliminary_regression import generate_regression_code
DEP_VARS = ['{}_1'.format(i) for i in
['CAPEX', 'ROA', 'R_B', 'CASH_HOLDING', 'TANGIBILITY', 'TobinQ', 'ln_emp', 'ln_sale']]
CTRL_VARS = 'ln_at TANGIBILITY CAPEX ROA TobinQ ln_GDP ln_GDP_PC NY_GDP_MKTP_KD_ZG FR_INR_LNDP'
if __name__ == '__main__':
ind_vars = list()
for suf in ['Extend', 'ToUnlimit', 'ToLimit', 'Shrink']:
for pre in ['formal', 'real']:
real_key = '{}_{}'.format(pre, suf)
ind_list = [real_key]
for n in range(1, 6):
if n < 4:
ind_list.insert(0, '{}_b{}'.format(real_key, n))
ind_list.append('{}_a{}'.format(real_key, n))
ind_vars.append(' '.join(ind_list))
date_str = '20200320'
save_file = os.path.join(const.STATA_CODE_PATH, '{}_preliminary_code_4.do'.format(date_str))
output_path = os.path.join(const.STATA_RESULT_PATH, '{}_preliminary_4'.format(date_str))
if not os.path.isdir(output_path):
os.makedirs(output_path)
cmd_list = ['clear',
'use "{}"'.format(os.path.join(const.STATA_DATA_PATH, '20200320_term_limit_regression_data.dta')),
'egen country_fe = group(country_iso3)',
'replace R_B_1 = 0 if missing(R_B_1)']
for ind_key in ind_vars:
key_info = ind_key.split(' ')[0][:-3]
output_file = os.path.join(output_path, '{}.txt'.format(key_info))
for dep_key in DEP_VARS:
cmd_list.extend(
generate_regression_code(dep=dep_key, ind=ind_key, ctrl=CTRL_VARS, fe_option='gvkey fyear',
cluster_option='gvkey', output_path=output_file, condition='',
text_option='Firm Dummy, Yes, Year Dummy, Yes, Cluster, Firm',
data_description='tstat bdec(4) tdec(4) rdec(4)'))
with open(save_file, 'w') as f:
f.write('\n'.join(cmd_list))
print('do "{}"'.format(save_file))
|
first_max = int(input())
current_num = int(input())
if current_num == 0:
    second_max = first_max
else:
    second_max = current_num
while current_num != 0:
    if first_max <= current_num:
        (first_max, second_max) = (current_num, first_max)
    elif second_max < current_num:
        second_max = current_num
    current_num = int(input())
print(second_max)
|
import sys
sys.path.append('../../python')
import caffe
from caffe import surgery, score
import numpy as np
import os
import cPickle as pickle
save_format = os.getcwd() + '/out_{}'
# base net
base = 'base.prototxt'
weights = 'vgg16fc.caffemodel'
base_net = caffe.Net(base, weights, caffe.TEST)
# init
caffe.set_mode_gpu()
caffe.set_device(0)
solver = caffe.SGDSolver('solver.prototxt')
# surgeries
surgery.transplant(solver.net, base_net)
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
# load IDs of the TEST phase data
val = np.loadtxt('list.txt', dtype=str)
for _ in range(100):
solver.step(1000)
score.seg_tests(solver, save_format, val, layer='score', gt='label')
|
from django.db import models
from partners.models import Partner
from datetime import date
from localflavor.us.models import USStateField
from django_countries.fields import CountryField
from filebrowser.fields import FileBrowseField
# Create your models here.
class Event(models.Model):
event_name = models.CharField(max_length=200)
is_published = models.BooleanField(default=True, help_text='Check the box to publish event.')
event_start = models.DateField(default=date.today)
event_end = models.DateField(default=date.today)
event_image = FileBrowseField('Event image', max_length=1000, extensions=['.jpg',
'.jpeg',
'.gif',
'.png',
'.tif',
'.tiff'],
blank=True,
null=True)
event_description = models.TextField(blank=True, help_text='Brief description of event.')
event_country = CountryField(blank=True, null=True, default='US')
event_venue = models.CharField(max_length=300, blank=True)
event_street = models.CharField(max_length=200, blank=True)
event_city = models.CharField(max_length=100, blank=True)
event_zip_code = models.IntegerField(blank=True, null=True)
event_state = USStateField(null=True, blank=True, verbose_name='Event state (US only)')
event_partners = models.ManyToManyField(Partner, blank=True)
def __str__(self):
return self.event_name
|
from ibtd.graph import Graph
|
from django.urls import path
from architect.monitor import views
app_name = 'monitor'
urlpatterns = [
path('v1', views.MonitorListView.as_view(),
name='monitor_list'),
path('v1/monitor-check',
views.MonitorCheckView.as_view(),
name='monitor_check'),
path('v1/<monitor_name>', views.MonitorDetailView.as_view(),
name='monitor_detail'),
path('v1/<monitor_name>/sync',
views.MonitorSyncView.as_view(),
name='monitor_sync'),
path('v1/<monitor_name>/graph/<query_name>/<viz_name>',
views.MonitorGraphView.as_view(),
name='monitor_graph'),
path('v1/<monitor_name>/resource/<resource_uid>',
views.ResourceDetailView.as_view(),
name='resource_detail'),
path('v1/<manager_name>/query/<query_name>/<query_type>',
views.MonitorQueryJSONView.as_view(),
name='manager_query'),
]
|
def getArray():
line = input()
n = int(line)
lines = []
for i in range(n):
line = input().strip().split(' ')
line = [float(line[0]), float(line[1])]
lines.append(line)
return lines
#Convert input lines into points in dual plane
def dual(lines):
points = []
for line in lines:
points.append([line[0], -line[1]])
return points
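# The transform above is a common point-line duality: the input line y = a*x + b
# is represented by the dual point (a, -b), so envelope questions about the lines
# become convex-hull questions about the dual points; the hull is later split at
# its leftmost and rightmost points into an upper and a lower chain.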
ori = [0,0]
#Convert a cmp= function into a key= function
def cmp_to_key(mycmp):
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
# Sort points in increasing order of angle wrt the pivot p*
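# compare(a, b) returns the z-component of the cross product of (a - ori) and
# (b - ori); a positive value means a lies counter-clockwise of b around the
# pivot ori, which is the ordering the angular sort relies on.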
def compare(a,b):
global ori
x0 = a[0] - ori[0]
y0 = a[1] - ori[1]
x1 = b[0] - ori[0]
y1 = b[1] - ori[1]
return x0 * y1 - x1 * y0
def sortPoints(points):
chosen = 0
for i in range(1, len(points)):
if(points[i][1] < points[chosen][1]):
chosen = i
global ori
ori = points[chosen]
points.pop(chosen)
points = sorted(points, key = cmp_to_key(compare), reverse=True)
points.insert(0, ori)
return points
# Graham Scan Algorithm
def grahamScan(points):
s = []
s.append(points[0])
s.append(points[1])
s.append(points[2])
for i in range(3, len(points)):
while(len(s) >= 2):
x0 = s[-1][0] - s[-2][0]
y0 = s[-1][1] - s[-2][1]
x1 = points[i][0] - s[-2][0]
y1 = points[i][1] - s[-2][1]
if(x0 * y1 > x1 * y0):
break
s.pop(-1)
s.append(points[i])
return s
lines = getArray()
points = dual(lines)
points = sortPoints(points)
points = grahamScan(points)
x_max = -1e10
x_min = 1e10
maxpos = 0
minpos = 0
# Find the leftmost point and the rightmost point
for p in range(len(points)):
if(points[p][0] > x_max):
x_max = points[p][0]
maxpos = p
if(points[p][0] < x_min):
x_min = points[p][0]
minpos = p
if(minpos > maxpos):
upper = maxpos + 1 + len(points) - minpos
lower = minpos - maxpos + 1
else:
lower = minpos + 1 + len(points) - maxpos
upper = maxpos - minpos + 1
print(upper, lower, end='')
|
"""
Given a string S, remove the vowels 'a', 'e', 'i', 'o', and 'u' from it, and return the new string.
Input: "leetcodeisacommunityforcoders"
Output: "ltcdscmmntyfrcdrs"
"""
class Solution:
def removeVowels(self, S: str) -> str:
required_string=""
for letter in S:
if letter in "aeiou":
continue
else:
required_string = required_string+letter
return required_string
if __name__ == "__main__":
my_solution = Solution()
print(my_solution.removeVowels("Leetcode"))
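# For reference, an equivalent one-liner: "".join(c for c in S if c not in "aeiou").
# Repeated string concatenation in a loop can be quadratic in the worst case,
# while str.join builds the result in linear time.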
|
from django import forms
from eshop_products_attrebute.models import ProductAttribute
from eshop_products.models import Product
colors = [('قرمز', 'قرمز'), ('آبی', 'آبی'), ('زرد', 'زرد'), ('صورتی', 'صورتی'), ('سیاه', 'سیاه')]
sizes = [("X", "X"), ("XL", "XL"), ("XXL", "XXL"), ("XXXL", "XXXL"), ("L", "L")]
class UserAddOrder(forms.Form):
productId = forms.IntegerField(
widget=forms.HiddenInput(),
)
color = forms.ChoiceField(
widget=forms.Select, choices=colors)
size = forms.ChoiceField(
widget=forms.Select, choices=sizes
)
count = forms.IntegerField(
widget=forms.NumberInput(),
initial=1
)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Any, Iterable
from pants.backend.helm.resolve.remotes import HelmRemotes
from pants.backend.helm.target_types import HelmChartTarget, HelmRegistriesField
from pants.core.util_rules.external_tool import TemplatedExternalTool
from pants.engine.platform import Platform
from pants.option.option_types import (
ArgsListOption,
BoolOption,
DictOption,
StrListOption,
StrOption,
)
from pants.util.memo import memoized_method
from pants.util.strutil import bullet_list, help_text, softwrap
_VALID_PASSTHROUGH_FLAGS = [
"--atomic",
"--cleanup-on-fail",
"--create-namespace",
"--debug",
"--dry-run",
"--force",
"--wait",
"--wait-for-jobs",
]
_VALID_PASSTHROUGH_OPTS = [
"--kubeconfig",
"--kube-context",
"--kube-apiserver",
"--kube-as-group",
"--kube-as-user",
"--kube-ca-file",
"--kube-token",
]
class InvalidHelmPassthroughArgs(Exception):
def __init__(self, args: Iterable[str], *, extra_help: str = "") -> None:
super().__init__(
softwrap(
f"""
The following command line arguments are not valid: {' '.join(args)}.
Only the following passthrough arguments are allowed:
{bullet_list([*_VALID_PASSTHROUGH_FLAGS, *_VALID_PASSTHROUGH_OPTS])}
{extra_help}
"""
)
)
registries_help = help_text(
f"""
Configure Helm OCI registries. The schema for a registry entry is as follows:
{{
"registry-alias": {{
"address": "oci://registry-domain:port",
"default": bool,
}},
...
}}
    If no registries are provided in a `{HelmChartTarget.alias}` target, then all default
addresses will be used, if any.
The `{HelmChartTarget.alias}.{HelmRegistriesField.alias}` may be provided with a list of registry
addresses and registry alias prefixed with `@` to be used instead of the defaults.
A configured registry is marked as default either by setting `default = true`
or with an alias of `"default"`.
    Registries also participate in resolving third party Helm charts uploaded to those registries.
"""
)
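# For reference, a registry entry following the schema above would look roughly like this
# in `pants.toml` (the alias and address below are assumptions, shown for illustration only):
#
#   [helm.registries.company-registry1]
#   address = "oci://reg1.company.internal"
#   default = true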
class HelmSubsystem(TemplatedExternalTool):
options_scope = "helm"
help = "The Helm command line (https://helm.sh)"
default_version = "3.12.3"
default_known_versions = [
"3.12.3|linux_arm64|79ef06935fb47e432c0c91bdefd140e5b543ec46376007ca14a52e5ed3023088|14355040",
"3.12.3|linux_x86_64|1b2313cd198d45eab00cc37c38f6b1ca0a948ba279c29e322bdf426d406129b5|16028423",
"3.12.3|macos_arm64|240b0a7da9cae208000eff3d3fb95e0fa1f4903d95be62c3f276f7630b12dae1|16019570",
"3.12.3|macos_x86_64|1bdbbeec5a12dd0c1cd4efd8948a156d33e1e2f51140e2a51e1e5e7b11b81d47|16828211",
"3.12.2|linux_arm64|cfafbae85c31afde88c69f0e5053610c8c455826081c1b2d665d9b44c31b3759|14350624",
"3.12.2|linux_x86_64|2b6efaa009891d3703869f4be80ab86faa33fa83d9d5ff2f6492a8aebe97b219|16028750",
"3.12.2|macos_arm64|b60ee16847e28879ae298a20ba4672fc84f741410f438e645277205824ddbf55|16021202",
"3.12.2|macos_x86_64|6e8bfc84a640e0dc47cc49cfc2d0a482f011f4249e2dff2a7e23c7ef2df1b64e|16824814",
"3.11.3|linux_arm64|0816db0efd033c78c3cc1c37506967947b01965b9c0739fe13ec2b1eea08f601|14475471",
"3.11.3|linux_x86_64|ca2d5d40d4cdfb9a3a6205dd803b5bc8def00bd2f13e5526c127e9b667974a89|15489735",
"3.11.3|macos_arm64|267e4d50b68e8854b9cc44517da9ab2f47dec39787fed9f7eba42080d61ac7f8|15451086",
"3.11.3|macos_x86_64|9d029df37664b50e427442a600e4e065fa75fd74dac996c831ac68359654b2c4|16275303",
"3.11.2|linux_arm64|444b65100e224beee0a3a3a54cb19dad37388fa9217ab2782ba63551c4a2e128|14090242",
"3.11.2|linux_x86_64|781d826daec584f9d50a01f0f7dadfd25a3312217a14aa2fbb85107b014ac8ca|15026301",
"3.11.2|macos_arm64|f61a3aa55827de2d8c64a2063fd744b618b443ed063871b79f52069e90813151|14932800",
"3.11.2|macos_x86_64|404938fd2c6eff9e0dab830b0db943fca9e1572cd3d7ee40904705760faa390f|15759988",
"3.11.1|linux_arm64 |919173e8fb7a3b54d76af9feb92e49e86d5a80c5185020bae8c393fa0f0de1e8|13484900",
"3.11.1|linux_x86_64|0b1be96b66fab4770526f136f5f1a385a47c41923d33aab0dcb500e0f6c1bf7c|15023104",
"3.11.1|macos_arm64 |43d0198a7a2ea2639caafa81bb0596c97bee2d4e40df50b36202343eb4d5c46b|14934852",
"3.11.1|macos_x86_64|2548a90e5cc957ccc5016b47060665a9d2cd4d5b4d61dcc32f5de3144d103826|15757902",
"3.10.0|linux_arm64 |3b72f5f8a60772fb156d0a4ab93272e8da7ef4d18e6421a7020d7c019f521fc1|13055719",
"3.10.0|linux_x86_64|bf56beb418bb529b5e0d6d43d56654c5a03f89c98400b409d1013a33d9586474|14530566",
"3.10.0|macos_arm64 |f7f6558ebc8211824032a7fdcf0d55ad064cb33ec1eeec3d18057b9fe2e04dbe|14446277",
"3.10.0|macos_x86_64|1e7fd528482ac2ef2d79fe300724b3e07ff6f846a2a9b0b0fe6f5fa05691786b|15237557",
"3.8.0|linux_arm64 |23e08035dc0106fe4e0bd85800fd795b2b9ecd9f32187aa16c49b0a917105161|12324642",
"3.8.0|linux_x86_64|8408c91e846c5b9ba15eb6b1a5a79fc22dd4d33ac6ea63388e5698d1b2320c8b|13626774",
"3.8.0|macos_arm64 |751348f1a4a876ffe089fd68df6aea310fd05fe3b163ab76aa62632e327122f3|14078604",
"3.8.0|macos_x86_64|532ddd6213891084873e5c2dcafa577f425ca662a6594a3389e288fc48dc2089|14318316",
]
default_url_template = "https://get.helm.sh/helm-v{version}-{platform}.tar.gz"
default_url_platform_mapping = {
"linux_arm64": "linux-arm64",
"linux_x86_64": "linux-amd64",
"macos_arm64": "darwin-arm64",
"macos_x86_64": "darwin-amd64",
}
_registries = DictOption[Any](help=registries_help, fromfile=True)
lint_strict = BoolOption(default=False, help="Enables strict linting of Helm charts")
default_registry_repository = StrOption(
default=None,
help=softwrap(
"""
Default location where to push Helm charts in the available registries
when no specific one has been given.
If no registry repository is given, charts will be pushed to the root of
the OCI registry.
"""
),
)
extra_env_vars = StrListOption(
help=softwrap(
"""
Additional environment variables that would be made available to all Helm processes
or during value interpolation.
"""
),
advanced=True,
)
tailor = BoolOption(
default=True,
help="If true, add `helm_chart` targets with the `tailor` goal.",
advanced=True,
removal_hint="Use `[helm].tailor_charts` instead.",
removal_version="2.19.0.dev0",
)
tailor_charts = BoolOption(
default=None,
help="If true, add `helm_chart` targets with the `tailor` goal.",
advanced=True,
)
tailor_unittests = BoolOption(
default=True,
help="If true, add `helm_unittest_tests` targets with the `tailor` goal.",
advanced=True,
)
args = ArgsListOption(
example="--dry-run",
passthrough=True,
extra_help=softwrap(
f"""
Additional arguments to pass to Helm command line.
Only a subset of Helm arguments are considered valid as passthrough arguments as most of them
have equivalents in the form of fields of the different target types.
The list of valid arguments is as follows:
{bullet_list([*_VALID_PASSTHROUGH_FLAGS, *_VALID_PASSTHROUGH_OPTS])}
Before attempting to use passthrough arguments, check the reference of each of the available target types
to see what fields are accepted in each of them.
"""
),
)
@memoized_method
def valid_args(self, *, extra_help: str = "") -> tuple[str, ...]:
valid, invalid = _cleanup_passthrough_args(self.args)
if invalid:
raise InvalidHelmPassthroughArgs(invalid, extra_help=extra_help)
return tuple(valid)
def generate_exe(self, plat: Platform) -> str:
mapped_plat = self.default_url_platform_mapping[plat.value]
bin_path = os.path.join(mapped_plat, "helm")
return bin_path
@memoized_method
def remotes(self) -> HelmRemotes:
return HelmRemotes.from_dict(self._registries)
def _cleanup_passthrough_args(args: Iterable[str]) -> tuple[list[str], list[str]]:
valid_args: list[str] = []
removed_args: list[str] = []
skip = False
for arg in args:
if skip:
valid_args.append(arg)
skip = False
continue
if arg in _VALID_PASSTHROUGH_FLAGS:
valid_args.append(arg)
elif "=" in arg and arg.split("=")[0] in _VALID_PASSTHROUGH_OPTS:
valid_args.append(arg)
elif arg in _VALID_PASSTHROUGH_OPTS:
valid_args.append(arg)
skip = True
else:
removed_args.append(arg)
return (valid_args, removed_args)
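# For example (illustrative only):
#   _cleanup_passthrough_args(["--atomic", "--kubeconfig", "~/.kube/config", "--namespace", "uat"])
# returns (["--atomic", "--kubeconfig", "~/.kube/config"], ["--namespace", "uat"]),
# since --kubeconfig consumes the value that follows it while --namespace is not allowed.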
|
import copy
import itertools
import operator
from typing import Literal
Decks = tuple[list[int], list[int]]
def get_player_decks() -> Decks:
with open("input/input22.txt") as input_file:
player_1, player_2 = input_file.read().strip().split("\n\n")
return [int(n) for n in player_1.split('\n')[1:]], [int(n) for n in player_2.split('\n')[1:]]
def play_game(decks: Decks) -> list[int]:
player1, player2 = decks
while player1 != [] and player2 != []:
card1, card2 = player1.pop(0), player2.pop(0)
if card1 > card2:
player1 += [card1, card2]
else:
player2 += [card2, card1]
return player1 if player1 else player2
def play_game_recursively(decks: Decks) -> tuple[Literal[1,2], list[int]]:
previously_seen_hashes: set[int] = set()
player1, player2 = decks
while player1 != [] and player2 != []:
cards = hash((tuple(player1), tuple(player2)))
if cards in previously_seen_hashes:
return (1, player1)
previously_seen_hashes.add(cards)
card1, card2 = player1.pop(0), player2.pop(0)
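        # Recurse into a sub-game only when both players still hold at least as many
        # cards as the value they just drew; otherwise the higher card wins the round.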
if card1 > len(player1) or card2 > len(player2):
winner = 1 if card1 > card2 else 2
else:
# play sub game
winner, _ = play_game_recursively((player1[:card1], player2[:card2]))
if winner == 1:
player1 += [card1, card2]
else:
player2 += [card2, card1]
return (1, player1) if player1 else (2, player2) # type: ignore
def get_winning_score(decks: Decks) -> int:
winning_deck = play_game(copy.deepcopy(decks))
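    # Score: the bottom card is worth 1x its value, the next one up 2x, and so on,
    # so reverse the winning deck and weight each card by its 1-based position.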
return sum(itertools.starmap(operator.mul, enumerate(winning_deck[::-1], start=1)))
def get_winning_score_recursive(decks: Decks) -> int:
_num, winning_deck = play_game_recursively(copy.deepcopy(decks))
return sum(itertools.starmap(operator.mul, enumerate(winning_deck[::-1], start=1)))
DECKS = get_player_decks()
if __name__ == "__main__":
print(get_winning_score(DECKS))
print(get_winning_score_recursive(DECKS))
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.Sinus.SinusUIParameters import SinusUIParameters
from sys import exc_info
class SinusPointsNumberCallBackOperator(CallBackOperator):
def __init__(self, model):
super().__init__(model)
def ConnectCallBack(self, window):
self.window = window
self.setup_callback_and_synchronize_slider(
validator_min=SinusUIParameters.PointsNumberSliderMin,
validator_max=SinusUIParameters.PointsNumberSliderMax,
validator_accuracy=SinusUIParameters.PointsNumberLineEditAccuracy,
line_edit=self.window.PointsNumberlineEdit,
slider_min=SinusUIParameters.PointsNumberSliderMin,
slider_max=SinusUIParameters.PointsNumberSliderMax,
slider=self.window.horizontalSliderPointsNumber,
update_slider_func=self.update_points_number_slider,
update_line_edit_func=self.update_points_number_line_edit
)
def update_points_number_slider(self):
try:
self.update_slider(
line_edit=self.window.PointsNumberlineEdit,
slider=self.window.horizontalSliderPointsNumber,
calc_constant=SinusUIParameters.PointsNumberCalcConstant
)
except:
print(exc_info())
def update_points_number_line_edit(self):
try:
self.update_line_edit(
line_edit=self.window.PointsNumberlineEdit,
slider=self.window.horizontalSliderPointsNumber,
calc_constant=SinusUIParameters.PointsNumberCalcConstant,
update_model_func=self.update_points_number
)
except:
print(exc_info())
def update_points_number(self, val):
self.model.PointsNumber = int(val)
|
import numpy as np
import mglearn
X, y = mglearn.datasets.make_wave(n_samples=100)
bins = np.linspace(start=-3, stop=3, num=11)
print("bins: {}".format(bins))
which_bin = np.digitize(X, bins=bins)
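# np.digitize returns, for each sample, the index of the bin it falls into
# (1 .. len(bins)-1 for values inside the binned range).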
print("\ndata:\n", X[:5])
print("\nwhich_bin:\n", which_bin[:5])
|
import seaborn as sns
from data_paths import paths
import pandas as pd
import glob as glob
import matplotlib.pyplot as plt
from numpy import median
# TODO Have a look at something similar to a facet plot (in R)
# https://stackoverflow.com/questions/25830588/r-lattice-like-plots-with-python-pandas-and-matplotlib
# Let's start with some simple bar plots
# https://seaborn.pydata.org/generated/seaborn.barplot.html
# Read in some data to work with
data_paths = glob.glob(paths["salary"] + "/*")
X_train = pd.read_csv(data_paths[-2])
y_train = pd.read_csv(data_paths[-1])
# This is going to be the "manual" approach, whereby I summarize the data myself
merge_frame = pd.merge(X_train, y_train, how = "inner", on="jobId")
summarized_data = merge_frame[["degree", "salary"]].groupby("degree").median().reset_index()
sns.barplot(x = "degree", y= "salary", data=summarized_data)
plt.show()
# This is the more automated method: seaborn aggregates the raw data itself.
# Note that the estimator function (numpy's median) has to be imported separately.
# Adding a specific bar order is kind of a pain -- but you can pass a list via `order=`.
sns.barplot(x = "degree", y= "salary", data=merge_frame, estimator=median, ci="sd")
plt.show()
# Scatterplot: https://seaborn.pydata.org/generated/seaborn.distplot.html
# TODO this is not going to run on the current data
sns.regplot(features["AGE"], target["Target"], fit_reg=False, color = "green", scatter_kws={'alpha':0.3})
plt.xlabel("Sure")
|
nota1 = float(input('Enter your first grade: '))
nota2 = float(input('Enter your second grade: '))
print('Your average is {}'.format((nota1+nota2)/2))
|
from django.contrib import admin
from .models import Voting, Choice
class ChoiceInline(admin.TabularInline):
model = Choice
class VotingAdmin(admin.ModelAdmin):
inlines = [
ChoiceInline,
]
admin.site.register(Voting, VotingAdmin)
|
#!/usr/bin/python3
import os # for opening and closing files
from pymongo import MongoClient # using mongoDB
client = MongoClient() # this gets us a client to the mongodatabase
# make sure to start mongod somewhere, aim at
# some custom folder
import numpy as np
import matplotlib.pyplot as plt
print("gonna try to plot diagnostic by label")
allLabels = set()
for eachNode in client[nameOfDatabase].nodes.find():
allLabels.add(eachNode['label'])
#THIS IS SET TO DIAGNOSE
for eachLabel in [15]:#range(16,16):#allLabels:
    # accumulate coordinates for every node that carries this label before plotting
    exs, why, nextNodez = [], [], []
    for eachNode in client[nameOfDatabase].nodes.find({'label':eachLabel}):
try:
exs.append(float(eachNode['lon']))
why.append(float(eachNode['lat']))
            #TODO: plot these line segments by color to diagnose why the
            #color/label assignment isn't working
            for eachSubNodeID in eachNode['nextNode']:
                subNode = client[nameOfDatabase].nodes.find_one({'id': eachSubNodeID})
                nextNodez.append((subNode['lon'], subNode['lat']))
except:
            pass
print(nextNodez)
plt.plot(exs,why,marker='o')
plt.show()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from io import BytesIO
from mock import Mock, patch
from wadebug import cli_utils
from wadebug.analytics import Analytics
class TestSendResultsToFB(unittest.TestCase):
def test_should_append_run_id_to_result_if_succeed(self):
mock_success_callback = Mock()
mock_run_id = "1234abcd"
mock_result = {"dummy_action": {"result": "OK"}}
with patch.object(Analytics, "send_event", return_value=mock_run_id):
cli_utils.send_results_to_fb(mock_result, mock_success_callback)
Analytics.send_event.assert_called()
mock_success_callback.assert_called()
assert mock_result["run_id"] == mock_run_id
def test_should_call_failure_callback_with_exception(self):
mock_failure_callback = Mock()
mock_result = {"dummy_action": {"result": "OK"}}
mock_exception = Exception("something goes wrong!")
with patch.object(Analytics, "send_event", side_effect=mock_exception):
cli_utils.send_results_to_fb(
mock_result, failure_callback=mock_failure_callback
)
mock_failure_callback.assert_called_with(mock_exception)
class TestSendLogsToFB(unittest.TestCase):
def test_should_call_success_callback_with_run_id(self):
mock_success_callback = Mock()
mock_run_id = "1234abcd"
dummy_zip_file = BytesIO(b"not important")
with patch.object(Analytics, "send_logs_to_fb", return_value=mock_run_id):
cli_utils.send_logs_to_fb(
dummy_zip_file, success_callback=mock_success_callback
)
Analytics.send_logs_to_fb.assert_called()
mock_success_callback.assert_called_with(mock_run_id)
def test_should_call_failure_callback_with_exception(self):
mock_failure_callback = Mock()
mock_exception = Exception("something goes wrong!")
dummy_zip_file = BytesIO(b"not important")
with patch.object(Analytics, "send_logs_to_fb", side_effect=mock_exception):
cli_utils.send_logs_to_fb(
dummy_zip_file, failure_callback=mock_failure_callback
)
mock_failure_callback.assert_called_with(mock_exception)
|