text
stringlengths 8
6.05M
|
|---|
import os, lfc, sys, commands
from testClass import _test
global testHome
class test_setcomment_ok(_test):
def info(self):
return "Set file comment existing file name"
def prepare(self):
guid = commands.getoutput('uuidgen').split('/n')[0]
self.name = "/grid/dteam/python_setcomment_test"
ret = lfc.lfc_creatg(self.name,guid,0664)
def clean(self):
lfc.lfc_unlink(self.name)
pass
def test(self):
self.comment="Test comment"
comment=" "
ret=lfc.lfc_setcomment(self.name,self.comment)
lfc.lfc_getcomment(self.name, comment)
return (comment.strip(),ret)
def ret(self):
return self.comment + '\x00'
def compare(self, testVal, retVal):
(ret, retRetVal) = retVal
(test, testRetVal) = testVal
retval = True
if (retRetVal == testRetVal):
retval = retval & (ret == test)
else:
retval = False
return retval
class test_update_comment(_test):
def info(self):
return "Update comment on existing file name"
def prepare(self):
guid = commands.getoutput('uuidgen').split('/n')[0]
self.name = "/grid/dteam/python_setcomment_test"
ret = lfc.lfc_creatg(self.name,guid,0664)
lfc.lfc_setcomment(self.name,"Primary comment")
def clean(self):
lfc.lfc_unlink(self.name)
pass
def test(self):
self.comment="Test comment"
comment=" "
ret=lfc.lfc_setcomment(self.name,self.comment)
lfc.lfc_getcomment(self.name, comment)
return (comment.strip(),ret)
def ret(self):
return self.comment + '\x00'
def compare(self, testVal, retVal):
(ret, retRetVal) = retVal
(test, testRetVal) = testVal
retval = True
if (retRetVal == testRetVal):
retval = retval & (ret == test)
else:
retval = False
return retval
class test_nonexisting_name(_test):
    """Try to set a comment on a path that does not exist; expect failure."""
    def __init__(self):
        # Expected status code for the failing call.
        self.retVal = -1
    def info(self):
        return "Set comment on nonexisting file name: "
    def prepare(self):
        # Deliberately bogus path; nothing is created.
        self.name = "/fhgsdjfgsagfhs"
    def test(self):
        self.comment = "Test comment"
        buf = " "
        status = lfc.lfc_setcomment(self.name, self.comment)
        lfc.lfc_getcomment(self.name, buf)
        return (buf.strip(), status)
    def ret(self):
        # No comment can exist for a missing file.
        return ""
    def compare(self, testVal, retVal):
        # Statuses must agree first; then the comment strings must match.
        (expected, expectedStatus) = retVal
        (actual, actualStatus) = testVal
        if expectedStatus != actualStatus:
            return False
        return actual == expected
class lfc_setcomment_test:
def __init__(self):
self.name = "lfc_setcomment_test"
self.tests=[test_setcomment_ok, test_update_comment, test_nonexisting_name]
def run(self):
retVal = True
for testclass in self.tests:
testInstance = testclass()
testInstance.prepare()
ret1 = testInstance.compare(testInstance.test(), (testInstance.ret(), testInstance.getRetVal()))
testInstance.clean()
retVal = retVal & ret1
if ret1:
print "%-60s[OK]" % testInstance.info()
else:
print "%-60s[FAILED]" % testInstance.info()
return retVal
# Point the LFC client library at the test server and namespace.
os.environ['LFC_HOME'] = 'lxb1941.cern.ch:/grid/dteam'
os.environ['LFC_HOST'] = 'lxb1941.cern.ch'
testHome = "python_lfc_test"
# Run the whole suite immediately when this module is executed.
lfc_setcomment_test().run()
|
"""Users are of CustomUser type. Models include all user profile and bio
info as well as user Role and currency tracking/transacting.
Login/Authorization information available here as well"""
import datetime
import os
from django.conf import settings
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from rest_framework.authtoken.models import Token
from boards.models import Board, Comment, BoardLike, CommentLike
# from main.models import FullSizeNPC
from wallet.models import Wallet
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: issue a DRF auth token for every newly created user."""
    if not created:
        return
    Token.objects.create(user=instance)
def get_auth_token(sender):
    """Return the DRF auth Token belonging to *sender* (a user instance).

    Raises:
        ValueError: if the user has no token.
    """
    # EAFP: a single query instead of the original exists() + get() pair.
    # The original `if not token` check after get() was unreachable —
    # get() either returns an object or raises DoesNotExist.
    try:
        return Token.objects.get(user=sender)
    except Token.DoesNotExist:
        # Error message kept from the original implementation.
        raise ValueError("No user found")
class Role(models.Model):
    """A named role with a clearance level, used for authorization checks."""
    title = models.CharField(max_length=250, blank=True, null=True)
    desc = models.TextField(max_length=2500, blank=True, null=True)
    clearance = models.IntegerField(default=1)

    def __str__(self):
        # title is nullable; fall back to "" so __str__ always returns str.
        return self.title or ""

    def save(self, *args, **kwargs) -> None:
        # Normalize the title to lowercase; guard against None since the
        # field is declared null=True (original crashed with AttributeError).
        if self.title:
            self.title = self.title.lower()
        super(Role, self).save(*args, **kwargs)
class MyUserManager(BaseUserManager):
    """Manager providing user and superuser creation for the custom User model."""

    def create_user(self, email, username, password=None, date_of_birth=None, age_verified=None) -> object:
        """
        Creates and saves a User with the given email, date of
        birth and password.
        """
        if not email:
            raise ValueError('Users must have an email address')
        # The raw password is no longer passed into the model kwargs;
        # set_password() below stores the hashed value instead.
        user = self.model(
            email=self.normalize_email(email),
            username=username,
            date_of_birth=date_of_birth,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, username, date_of_birth=None, password=None) -> object:
        """
        Creates and saves a superuser with the given email, date of
        birth and password.
        """
        user = self.create_user(
            email=email,
            username=username,
            password=password,
            date_of_birth=date_of_birth,
        )
        # If user is_admin and no role has been assigned, the placeholder
        # "staff" role is used.  get_or_create replaces the race-prone
        # exists()/create()/get() sequence of the original.
        user_role, _ = Role.objects.get_or_create(
            title="staff",
            defaults={
                "desc": "Basic staff role. No extended authorizations granted",
                "clearance": 1,
            },
        )
        user.role = user_role
        user.is_admin = True
        user.save(using=self._db)
        return user
class Interest(models.Model):
    """A single interest tag that users attach to their profile."""
    # Short unique label; an empty title is the (rejected) default.
    title = models.CharField(
        unique=True,
        max_length=50,
        blank=False,
        null=False,
        default=""
    )

    def __str__(self):
        return self.title
class User(AbstractBaseUser):
    """Custom application user.

    Combines authentication (email/username/password via AbstractBaseUser),
    profile/bio fields, a Role used for clearance checks, a Wallet for
    currency, avatar uploads, and per-user display settings.  save() also
    auto-assigns a default Role and lazily creates a Wallet.
    """

    def upload_to_avatar_dir(self, file):
        """Uploads to avatars directory: avatars/<username>/<file>."""
        uploadTo = os.path.join(f'avatars/{self.username}', file)
        return uploadTo

    def upload_to_thumbnail_dir(self, file):
        """Uploads to thumbnail directory: avatars/<username>/thumbnail/<file>."""
        uploadTo = os.path.join(f'avatars/{self.username}/thumbnail', file)
        return uploadTo

    # --- identity / authentication ---
    email = models.EmailField(
        verbose_name='email address',
        max_length=255,
        unique=True,
    )
    username = models.CharField(
        max_length=250,
        unique=True,
        blank=True,
        null=True,
        default=""
    )
    first_name = models.CharField(max_length=250, default="", blank=True, null=True)
    last_name = models.CharField(max_length=250, default="", blank=True, null=True)
    # Role drives get_clearance(); SET_NULL keeps the user if its role is deleted.
    role = models.ForeignKey(Role, null=True, blank=True, on_delete=models.SET_NULL)
    date_of_birth = models.DateField(blank=True, null=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(blank=True, default=False)
    isAuthenticated = models.BooleanField(default=True)
    joined_on = models.DateField(default=timezone.now)
    online = models.BooleanField(default=False)
    email_confirmed = models.BooleanField(default=False)
    # Theme colors (hex strings) used by the front end.
    primary_color = models.CharField(
        max_length=10,
        default="#8115C4"
    )
    secondary_color = models.CharField(
        max_length=10,
        default="#e67a63"
    )
    # Currency container; the shinies/muns properties below proxy through it.
    wallet = models.ForeignKey(
        Wallet,
        related_name="wallet",
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    # PROFILE
    location = models.CharField(
        max_length=250,
        default="",
        null=True,
        blank=True
    )
    occupation = models.CharField(
        max_length=250,
        default="",
        null=True,
        blank=True
    )
    tag = models.CharField(
        max_length=1200,
        default="",
        null=True,
        blank=True
    )
    bio = models.TextField(
        max_length=2400,
        default="",
        null=True,
        blank=True
    )
    interests = models.ManyToManyField(
        Interest,
        blank=True
    )
    # Symmetric self-relation; see add_friend()/get_friends().
    friends = models.ManyToManyField(
        'self',
        blank=True
    )
    # For usage once Avatar Builder is operational
    # TODO avatar builder model
    avatar = models.ImageField(
        null=True,
        blank=True,
        upload_to=upload_to_avatar_dir,
        default="avatars/DEFAULT/dressedAvatar-300x516.png"
    )
    avatar_thumbnail = models.ImageField(
        null=True,
        blank=True,
        upload_to=upload_to_thumbnail_dir,
        default="avatars/DEFAULT/thumbnail/avatar-120x120.png"
    )
    # For use before avatar Builder becomes active
    # Uses an NPC character as avatar for now
    # npc_avatar = models.ForeignKey(
    #     FullSizeNPC,
    #     on_delete=models.CASCADE,
    #     blank=True,
    #     null=True
    # )
    # Settings (per-user display toggles)
    display_profile_snackbar = models.BooleanField(default=True)
    display_full_name = models.BooleanField(default=False)
    display_date_of_birth = models.BooleanField(default=False)
    display_location = models.BooleanField(default=False)
    display_occupation = models.BooleanField(default=False)
    IS_ANONYMOUS = models.BooleanField(default=False)  # NOTE(review): comment said "anonymous and cannot interact, but is invisible" — confirm intended semantics
    # Daily-chance minigame state; see set_dailyChance().
    dailyChance = models.BooleanField(default=True)
    dailyChanceDate = models.DateTimeField(default=timezone.now)

    # Data
    # Gathered data about user usage and stats
    # Board Count, Comment Count, Has Liked Count
    @property
    def board_count(self) -> int:
        """Returns the amount of boards the user has."""
        user_boards = Board.objects.filter(author=self)
        if user_boards.count() > 0:
            return user_boards.count()
        else:
            return 0

    @property
    def comment_count(self, show=None) -> int:
        """Number of comments authored by this user.

        NOTE(review): properties are accessed without arguments, so `show`
        is always None and the `.all()` branch is dead code.
        """
        user_comments = Comment.objects.filter(author=self)
        if show:
            return user_comments.all()
        return user_comments.count()

    @property
    def has_liked_count(self) -> int:
        """Total likes this user has given (boards + comments)."""
        # TODO Add AvatarLikes once Avatar component is up
        user_has_liked_boards = BoardLike.objects.filter(author=self).count()
        user_has_liked_comments = CommentLike.objects.filter(author=self).count()
        return user_has_liked_boards + user_has_liked_comments

    @property
    def shinies(self):
        """Proxy to the wallet's shinies balance (raises if wallet is None)."""
        shinies = self.wallet.shinies
        return shinies

    @property
    def muns(self):
        """Proxy to the wallet's muns balance (raises if wallet is None)."""
        muns = self.wallet.muns
        return muns

    objects = MyUserManager()
    # Users log in with their username; email and DOB are required at creation.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email', 'date_of_birth']

    def has_perm(self, perm, obj=None):
        # Does the user have a specific permission?
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        # Does the user have permissions to view the app `app_label`?
        # Simplest possible answer: Yes, always
        return True

    @property
    def is_staff(self):
        # Is the user a member of staff?
        # Simplest possible answer: All admins are staff
        return self.is_admin

    def set_dailyChance(self, state=None):
        """Update the daily-chance flag.

        state=None     -> consume the chance (set False, stamp the time)
        state="reset"  -> re-enable the chance if 24h have passed
        state=bool     -> set the flag directly
        Returns True when a change was applied; None otherwise.
        """
        if state is None:
            self.dailyChance = False
            self.dailyChanceDate = timezone.now()
            if self.dailyChance is False:
                return True
        if state == "reset":
            # Compare naive datetimes at minute precision.
            # NOTE(review): mixes datetime.now() with a stored (possibly
            # aware) dailyChanceDate — confirm timezone handling.
            now = datetime.datetime.now()
            last_picked = datetime.datetime(
                self.dailyChanceDate.year,
                self.dailyChanceDate.month,
                self.dailyChanceDate.day,
                self.dailyChanceDate.hour,
                self.dailyChanceDate.minute
            )
            # Add day to last_picked to find next available date
            t = last_picked + datetime.timedelta(hours=24)
            # If 24hrs has passed since the last item pick, will reset
            if t <= now:
                self.dailyChance = True
            else:
                self.dailyChance = False
            return True
        if type(state) == bool:
            self.dailyChance = state
            return True

    def add_friend(self, friend):
        """Add *friend* (a user-like object with .username) to this user's friends.

        Returns True on success, False if no such user exists.
        """
        try:
            new_friend = User.objects.get(username=friend.username)
            self.friends.add(new_friend)
            return True
        except ObjectDoesNotExist:
            return False

    def get_clearance(self, req=None):
        # Checks clearance level required vs clearance of user.
        # With req: returns bool; without: returns the raw clearance int.
        # NOTE(review): raises AttributeError if self.role is None.
        if req is not None and type(req) is int:
            # If required clearance is within bounds
            if 10 >= req >= 0:
                return self.role.clearance >= req
            else:
                raise ValueError("Clearance level out of range")
        else:
            return self.role.clearance

    def get_friends(self):
        # Returns full list of friends from the ManyToMany
        return self.friends.all()

    def check_username(self) -> str:
        # If username includes "@" will split on @ and
        # return the string before @ (email-as-username normalization)
        if '@' in self.username:
            email = self.username
            user = email.split('@')[0]
            return user
        else:
            return self.username

    def __str__(self):
        # NOTE(review): username is nullable; __str__ would return None then.
        return self.username

    def create_wallet(self):
        """Attach an existing wallet for this username, or create one."""
        try:
            wallet = Wallet.objects.get(owner=self.username)
        except ObjectDoesNotExist:
            wallet = Wallet.objects.create(owner=self.username)
        self.wallet = wallet

    def save(self, *args, **kwargs):
        """Normalize username, ensure a role, save, then backfill a wallet.

        If no wallet exists yet, save() recurses exactly once after
        create_wallet() — the second pass sees wallet is not None.
        """
        if user := self.check_username():
            # Format username before saving
            self.username = user
        if not self.role:
            if not self.is_admin:
                # If user is not admin and no role is assigned
                # the role of 'user' will be used- Role will be
                # created if not existent
                role_exists = Role.objects.filter(title="user").exists()
                if not role_exists:
                    Role.objects.create(
                        title="user",
                        desc="User",
                        clearance=0
                    )
                user_role = Role.objects.get(title="user")
                self.role = user_role
        super(User, self).save(*args, **kwargs)
        if self.wallet is None:
            self.create_wallet()
            self.save()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################################
# #
# extract_hrma_focus_data.py: extract data and plot hrma focus related plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Oct 20, 2021 #
# #
#################################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import numpy
import astropy.io.fits as pyfits
import time
import Chandra.Time
import random
#
#--- from ska
#
from Ska.Shell import getenv, bash
# Environment for CIAO/ascds tools invoked through Ska.Shell.bash below.
ascdsenv = getenv('source /home/ascds/.ascrc -r release; source /home/mta/bin/reset_param', shell='tcsh')
#
#--- reading directory list: each line looks like "<path> : <name>"; exec()
#--- binds <name> = <path> at module level (e.g. bin_dir, mta_dir, data_dir)
#
path = '/data/mta/Script/Hrma_src/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec on file contents — acceptable only because
    # dir_list is a locally maintained, trusted config file.
    exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf    #---- contains other functions commonly used in MTA scripts
#
#--- temp writing file name (randomized so concurrent runs do not collide)
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- filtering parameters
#
snr_lim = 6.0        #--- minimum accepted SNR
rmaj_hrc = 500.0     #--- max major axis for hrc sources
rmaj_acis = 15.0     #--- max major axis for acis sources
defoc_lim = 0.01     #--- max |defocus| accepted
#
#--- arc5gl user name
#
arc_user = 'isobe'
#-----------------------------------------------------------------------------------------
#-- extract_hrma_focus_data: extract hrma src2 data --
#-----------------------------------------------------------------------------------------
def extract_hrma_focus_data(start, stop):
    """
    extract hrma src2 data and append results to the archive file
    input:  start   --- start time in format of 01/01/18; default: <blank> --- automatically set the data
            stop    --- stop time in format of 01/01/18
    output: <data_dir>/hrma_src_data
    """
    fits_a, fits_h = extract_data(start, stop)
    # Nothing to do when neither instrument returned files.
    if fits_a or fits_h:
        # Recreate a clean param directory for the CIAO tools.
        os.system('rm -rf param')
        os.system('mkdir -p param')
    if fits_a:
        print_out_data(extract_src_info(fits_a, 'acis'))
    if fits_h:
        print_out_data(extract_src_info(fits_h, 'hrc'))
#-----------------------------------------------------------------------------------------
#-- print_out_data: append extracted data to <data_dir>/hrma_src_data --
#-----------------------------------------------------------------------------------------
def print_out_data(data):
    """
    append extracted data to <data_dir>/hrma_src_data
    input:  data    --- a list of tab-separated result lines (see read_src_file)
    output: updated <data_dir>/hrma_src_data
    """
    if not data:
        return
    # 'with' guarantees the handle is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(data_dir + 'hrma_src_data', 'a') as fo:
        fo.writelines(data)
#-----------------------------------------------------------------------------------------
#-- extract_data: extract data to compute HRMA focus plots --
#-----------------------------------------------------------------------------------------
def extract_data(start, stop):
    """
    extract data to compute HRMA focus plots
    input:  start   ---- start time in the format of mm/dd/yy (e.g. 05/01/15); '' --> auto interval
            stop    ---- stop time in the format of mm/dd/yy
    output: acis*evt2.fits.gz, hrc*evt2.fits.gz
    """
    # Remove any fits files left over from a previous (failed) run.
    os.system('ls * > ' + zspace)
    with open(zspace, 'r') as f:
        listing = f.read()
    mcf.rm_files(zspace)
    if re.search('fits', listing) is not None:
        os.system('rm *fits*')
    # If no time interval is given, default to a one-month interval.
    if start == '':
        start, stop = set_interval()
    # Extract acis and hrc evt2 file lists.
    fits_a = create_fits_list('acis', start, stop)
    fits_h = create_fits_list('hrc', start, stop)
    return [fits_a, fits_h]
#-----------------------------------------------------------------------------------------
#-- set_interval: set time inteval for a month --
#-----------------------------------------------------------------------------------------
def set_interval():
    """
    set time interval for a month
    input:  none but read from <data_dir>/hrma_src_data
    output: (start, stop)   --- 'YYYY-MM-DD' strings; start is the day after
                                the last recorded entry, stop is today (UTC)
    """
    # Start from the day after the last entry (86400 sec = one day).
    stday = find_last_input_date() + 86400
    ldate = Chandra.Time.DateTime(stday).date
    parts = re.split(':', ldate)
    # Keep only <year>:<day-of-year> for strptime.
    idate = parts[0] + ':' + parts[1]
    start = time.strftime('%Y-%m-%d', time.strptime(idate, '%Y:%j'))
    # Today's date in UTC.
    stop = time.strftime("%Y-%m-%d", time.gmtime())
    return (start, stop)
#-----------------------------------------------------------------------------------------
#-- find_last_input_date: find the last entry date --
#-----------------------------------------------------------------------------------------
def find_last_input_date():
    """
    find the last entry date
    input:  none but read from <data_dir>/hrma_src_data
    output: ldate   --- the last entry time in seconds from 1998.1.1
    """
    ifile = data_dir + 'hrma_src_data'
    data = mcf.read_data_file(ifile)
    # Bug fix: the original called f.close() on an undefined name `f`
    # (NameError at runtime); mcf.read_data_file handles the file itself.
    # Column 3 holds the observation stop time (sec from 1998.1.1).
    t_list = [float(re.split(r'\s+', ent)[3]) for ent in data]
    return max(t_list)
#-----------------------------------------------------------------------------------------
#-- create_fits_list: run arc5gl and extract evt2 fits file names of "inst" --
#-----------------------------------------------------------------------------------------
def create_fits_list(inst, start, stop):
    """
    run arc5gl and extract evt2 fits file names of "inst"
    input:  inst    --- instrument, acis or hrc
            start   --- interval start time in format of mm/dd/yy (e.g. 05/01/15)
            stop    --- interval stop time in format of mm/dd/yy
    output: fits_l  --- a list of evt2 fits file names (with '.gz')
    """
    tstart = date_foramat_change(start)
    tstop = date_foramat_change(stop)
    # Browse mode only lists the files; nothing is downloaded here.
    write_arc5gl_input(tstart, tstop, inst=inst, operation='browse')
    cmd2 = ' /proj/sot/ska/bin/arc5gl -user ' + arc_user + ' -script ' + zspace + '> zout'
    os.system(cmd2)
    mcf.rm_files(zspace)
    data = mcf.read_data_file('./zout', remove=1)
    prefix = 'acisf' if inst == 'acis' else 'hrcf'
    fits_l = []
    for ent in data:
        if re.search(prefix, ent) is None:
            continue
        # Skip observations whose id starts with 5 or 6
        # (presumably calibration obsids — TODO confirm).
        if re.search(prefix + '5', ent) or re.search(prefix + '6', ent):
            continue
        # First whitespace-separated token is the file name.
        fits_l.append(re.split(r'\s+', ent)[0] + '.gz')
    # NOTE: the original also built a comma-joined `f_list` string that was
    # never used; it has been removed.
    return fits_l
#-----------------------------------------------------------------------------------------
#-- write_arc5gl_input: write input of arc5gl --
#-----------------------------------------------------------------------------------------
def write_arc5gl_input(tstart, tstop, operation='retrieve', dataset='flight',\
                       inst='hrc', level=2, filetype='evt2', filename=''):
    """
    write input of arc5gl
    input:  tstart      --- start time (arc5gl acceptable format)
            tstop       --- stop time
            operation   --- operation (retrieve/browse): default: retrieve
            dataset     --- dataset; default: flight
            inst        --- instrument; default: hrc
            level       --- level; defalut: 2
            filetype    --- file type; default: evt2
            filename    --- file name; if this is given, tstart and tstop are ignored.
    output: zspace      --- arc5gl command input saved in zspace
    """
    lines = [
        'operation=' + operation,
        'dataset=' + dataset,
        'detector=' + inst,
        'level=' + str(level),
        'filetype=' + filetype,
    ]
    # A concrete filename overrides the time range.
    if filename != '':
        lines.append('filename=' + filename)
    else:
        lines.append('tstart=' + tstart)
        lines.append('tstop=' + tstop)
    lines.append('go\n')
    with open(zspace, 'w') as fo:
        fo.write('\n'.join(lines))
#-----------------------------------------------------------------------------------------
#-- date_foramat_change: convert the data format from 05/01/16 to 2016-05-01T00:00:00 --
#-----------------------------------------------------------------------------------------
def date_foramat_change(date):
    """
    convert a 'YYYY-MM[-DD]' date into 'YYYY-MM-01T00:00:00'
    input:  date    --- date in format of 2016-05 (a trailing day part is ignored)
    output: date    --- date in format of 2016-05-01T00:00:00
    note: the day is always forced to '01' (month-boundary timestamps)
    """
    parts = re.split('-', date)
    return parts[0] + '-' + parts[1] + '-01T00:00:00'
#-----------------------------------------------------------------------------------------
#-- extract_src_info: create src fits file and extract needed information --
#-----------------------------------------------------------------------------------------
def extract_src_info(fits_list, inst):
    """
    create src fits file and extract needed information
    input:  fits_list   --- a list of evt2 fits file names
            inst        --- instrument, acis or hrc
    output: results     --- the list of src data lines (see read_src_file)
    side effects: downloads each evt2 file via arc5gl, creates/removes
                  img.fits, psf.fits, cell.fits in the working directory
    """
    results = []
    print("# of Files: " + str(len(fits_list)))
    for ent in fits_list:
        ###print("FILE: " + ent)
        #
        #--- extract evt2 file (retrieve mode; tstart/tstop ignored when filename given)
        #
        fnam = ent.replace('.gz', '')
        write_arc5gl_input(0, 0, inst=inst, filename=fnam)
        cmd2 = ' /proj/sot/ska/bin//arc5gl -user ' + arc_user + ' -script ' + zspace + '> zout'
        try:
            os.system(cmd2)
            mcf.rm_files(zspace)
        except:
            # retrieval failed: drop the temp script and move on
            mcf.rm_files(zspace)
            continue
        #
        #--- handle only none grating observations
        #
        grating = 'NONE'
        try:
            grating = read_header_value(ent, 'GRATING')
        except:
            continue
        if grating == 'NONE':
            #
            #--- run celldetect script; PERL5LIB is cleared so the ascds
            #--- environment does not leak into the CIAO tools
            #
            cmd1 = '/usr/bin/env PERL5LIB= '
            mc = re.search('acis', ent)
            if mc is not None:
                # acis: bin 4x4; hrc uses a coarser 32x32 binning below
                cmd2 = 'dmcopy "' + ent + '[events][bin x=::4, y=::4]" img.fits clobber=yes'
            else:
                cmd2 = 'dmcopy "' + ent + '[events][bin x=::32, y=::32]" img.fits clobber=yes'
            cmd3 = 'mkpsfmap img.fits psf.fits 0.9 ecf=0.9 clobber=yes'
            cmd4 = 'celldetect mode=h infile=img.fits outfile=cell.fits psffile=psf.fits clobber=yes'
            #
            #-- it seems fixed cell detection is good enough
            #
            bcmd1 = cmd1 + cmd2
            bcmd2 = cmd1 + cmd3
            bcmd3 = cmd1 + cmd4
            try:
                bash(bcmd1, env=ascdsenv)
                bash(bcmd2, env=ascdsenv)
                bash(bcmd3, env=ascdsenv)
            except:
                # best effort: a failed tool run still falls through to
                # read_src_file, which returns [] on a bad cell.fits
                pass
        else:
            # grating observation: discard the file and skip
            mcf.rm_files(ent)
            continue
        mcf.rm_files(ent)
        #
        #--- extract information needed from src2 fits file
        #
        try:
            out = read_src_file('./cell.fits')
        except:
            out = []
        if len(out) > 0:
            results = results + out
        mcf.rm_files('./cell.fits')
    print("# of resulted lists: " + str(len(results)))
    return results
#-----------------------------------------------------------------------------------------
#-- read_src_file: read src file and create table --
#-----------------------------------------------------------------------------------------
def read_src_file(ifits):
    """
    read src file and create table
    input:  ifits   --- src2 fits file name
    output: lsave   --- a list of tab-separated lines, one per detected source:
            obsid   --- obsid
            start   --- start time in sec from 1998.1.1
            stop    --- stop time in sec from 1998.1.1
            simx    --- sim x postion
            simz    --- sim z position
            x       --- sky x
            y       --- sky y
            snr     --- SNR
            ravg    --- the average of major and minor axis
            rnd     --- roundness: <major axis> / <minor axis>
            rotang  --- rotation angle
            psf     --- PSF
            dist    --- distance from the center
            angd    --- angle estimated from x and y
            returns [] whenever a required header is missing, the defocus
            is too large, or the sim position matches no instrument
    """
    #
    #--- get data from header; any missing keyword aborts with []
    #
    try:
        obsid = int(float(read_header_value(ifits, 'OBS_ID')))
    except:
        return []
    try:
        start = float(read_header_value(ifits, 'TSTART'))
        stop = float(read_header_value(ifits, 'TSTOP'))
        sim_x = float(read_header_value(ifits, 'SIM_X'))
        sim_z = float(read_header_value(ifits, 'SIM_Z'))
        defoc = float(read_header_value(ifits, 'DEFOCUS'))
        # roll is converted to radians but not otherwise used below
        roll = float(read_header_value(ifits, 'ROLL_NOM')) * math.pi / 180.0
    except:
        return []
    #
    #--- defocus is too large
    #
    if abs(defoc) > defoc_lim:
        return []
    #
    #--- set params depending on sim z position (instrument selection)
    #
    if sim_z < -210.0:                               #--- acis i
        zoff = -233.6 - sim_z
        xref = 4096.5
        yref = 4086.5
        scale = 0.492                                #--- arcsec/pix
        rmaj_lim = rmaj_acis
        inst = 'acis_i'
    elif (sim_z >= -210.0) and (sim_z < -150.):      #--- acis s
        zoff = -190.1 - sim_z
        xref = 4096.5
        yref = 4086.5
        scale = 0.492                                #--- arcsec/pix
        rmaj_lim = rmaj_acis
        inst = 'acis_s'
    elif (sim_z >= 100.0) and (sim_z < 200.0):       #--- hrc i
        zoff = 126.99 - sim_z
        xref = 16384.5
        yref = 16384.5
        scale = 0.13175                              #--- arcsec/pix
        rmaj_lim = rmaj_hrc
        inst = 'hrc_i'
    elif sim_z >= 200.0:                             #--- hrc s
        zoff = 250.1 - sim_z
        xref = 32768.5
        yref = 32768.5
        scale = 0.13175                              #--- arcsec/pix
        rmaj_lim = rmaj_hrc
        inst = 'hrc_s'
    else:                                            #--- sim is not at the correct position
        return []
    #
    #--- get data from table
    #
    x = read_col_data(ifits, 'cell_x')
    y = read_col_data(ifits, 'cell_y')
    snr = read_col_data(ifits, 'snr')
    r = list(read_col_data(ifits, 'r'))
    rotang = read_col_data(ifits, 'rotang') * 0.01745      #--- convert to rads
    psfratio = read_col_data(ifits, 'psfratio')
    #
    #---- separate major and minor axes ('r' holds [major, minor] pairs)
    #
    rmaj = []
    rmin = []
    for ent in r:
        rmaj.append(float(ent[0]))
        rmin.append(float(ent[1]))
    rmaj = numpy.array(rmaj)
    rmin = numpy.array(rmin)
    #
    #--- remove zero data (boolean mask applied to every column)
    #
    rindx = (rmaj > 0) & (rmin > 0) & (psfratio > 0)
    x = x[rindx]
    y = y[rindx]
    snr = snr[rindx]
    rotang = rotang[rindx]
    psfratio = psfratio[rindx]
    rmaj = rmaj[rindx]
    rmin = rmin[rindx]
    ravg = ((rmaj + rmin) / 2) * scale
    psf = ravg / psfratio
    rnd = rmaj / rmin
    #
    #--- select data satisfying the selection condition (SNR / size limits)
    #
    rindx = numpy.where((snr >= snr_lim) & (rmaj < rmaj_lim))
    x = x[rindx]
    y = y[rindx]
    snr = snr[rindx]
    ravg = ravg[rindx]
    rotang = rotang[rindx]
    psf = psf[rindx]
    rnd = rnd[rindx]
    #
    #--- print out the data
    #
    lsave = []
    for j in range(0, len(x)):
        #
        #--- compute two more elements before printing the results
        #
        dist = scale * (math.sqrt((x[j] - xref)**2 + (y[j] - yref)**2))
        try:
            angd = math.atan((y[j] - yref) / (x[j] - xref))
        except:
            # x == xref would divide by zero; treat as angle 0
            angd = 0
        if angd < 0:
            angd += math.pi
        line = str(obsid) + '\t'
        line = line + inst + '\t'
        line = line + str(int(start)) + '\t'
        line = line + str(int(stop)) + '\t'
        line = line + "%4.6f\t" % sim_x
        line = line + "%4.6f\t" % sim_z
        line = line + "%4.1f\t" % x[j]
        line = line + "%4.1f\t" % y[j]
        # extra tab pads narrow values so the columns stay aligned
        if snr[j] < 10:
            line = line + "%4.5f\t\t" % snr[j]
        else:
            line = line + "%4.5f\t" % snr[j]
        if ravg[j] < 10:
            line = line + "%4.5f\t" % ravg[j]
        else:
            line = line + "%4.5f\t" % ravg[j]
        line = line + "%4.5f\t" % rnd[j]
        line = line + "%4.5f\t" % rotang[j]
        if psf[j] < 10:
            line = line + "%4.5f\t" % psf[j]
        else:
            line = line + "%4.5f\t" % psf[j]
        line = line + "%4.5f\t" % dist
        line = line + "%4.5f\n" % angd
        lsave.append(line)
    return lsave
#-----------------------------------------------------------------------------------------------
#-- read_header_value: read fits header value for a given parameter name --
#-----------------------------------------------------------------------------------------------
def read_header_value(fits, name):
    """
    read fits header value for a given parameter name
    input:  fits    --- fits file name
            name    --- parameter name
    output: val     --- parameter value; "NULL" if the parameter does not exist
    """
    hfits = pyfits.open(fits)
    try:
        # narrowed from a bare except: only a missing keyword maps to "NULL"
        val = hfits[1].header[name.lower()]
    except KeyError:
        val = "NULL"
    finally:
        # close even when an unexpected error is raised (original could leak)
        hfits.close()
    return val
#-----------------------------------------------------------------------------------------------
#-- read_col_data: read data from fits file for given conlum name --
#-----------------------------------------------------------------------------------------------
def read_col_data(fits, name):
    """
    read data from fits file for a given column name
    input:  fits    --- fits file name
            name    --- column name
    output: data    --- numpy array of data
    """
    # the column is materialized inside the with-block, before the file closes
    with pyfits.open(fits) as hfits:
        data = hfits[1].data[name]
    return data
#-----------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Optional command-line interval; blank means "auto (last entry -> today)".
    if len(sys.argv) > 2:
        start, stop = sys.argv[1].strip(), sys.argv[2].strip()
    else:
        start = stop = ''
    extract_hrma_focus_data(start, stop)
# for year in range(1999, 2021):
# for month in range(1, 13):
# if year == 1999 and month < 10:
# continue
#
# if year == 2020 and month > 4:
# exit(1)
# break
#
# nyear = year
# nmonth = month + 1
# if nmonth > 12:
# nmonth = 1
# nyear += 1
#
# start = str(year) + '-' + mcf.add_leading_zero(month)
# stop = str(nyear) + '-' + mcf.add_leading_zero(nmonth)
#
# print('Period: ' + start +'<-->'+ stop)
# extract_hrma_focus_data(start, stop)
|
"""
My solution: works but times out when submitted :(
"""
class Solution:
    def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
        """Count index tuples (i, j, k, l) with
        nums1[i] + nums2[j] + nums3[k] + nums4[l] == 0.

        Meet-in-the-middle: count every nums1+nums2 pair sum, then for each
        nums3+nums4 pair sum look up its negation.  O(n^2) time and space —
        the original materialized all left/right pairs and compared them
        pairwise (O(n^4)), which timed out on submission.
        """
        from collections import Counter
        left_sums = Counter(a + b for a in nums1 for b in nums2)
        return sum(left_sums[-(c + d)] for c in nums3 for d in nums4)
"""
Correct solution is not mine and I don't fully understand it yet.
Assignment: find explainer for solution tomorrow.
"""
class Solution:
    def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
        """Count (i, j, k, l) tuples whose four values sum to zero.

        Tally every nums1+nums2 pair sum, then look up the negation of each
        nums3+nums4 pair sum in that tally.
        """
        pair_sums = {}
        for a in nums1:
            for b in nums2:
                pair_sums[a + b] = pair_sums.get(a + b, 0) + 1
        total = 0
        for x in nums3:
            for y in nums4:
                total += pair_sums.get(-x - y, 0)
        print('Lenths:', total)
        return total
|
from django.shortcuts import render,redirect,HttpResponse
from time import gmtime, strftime
import random
def index(request):
    """Render the main page; seed the gold total and activity log on first visit."""
    session = request.session
    if 'your' not in session:
        session['your'] = 0
        session['activity'] = []
    return render(request, 'index.html')
def colgold(request):
    """Process one gold-collection action posted from the main page.

    Reads POST['gold'] ('farm' | 'cave' | 'house' | 'casino'), rolls a
    random amount, records a human-readable activity line and updates the
    session gold total.  Non-POST requests and unknown/missing actions are
    ignored; the original raised UnboundLocalError (x/active undefined)
    in those cases, and KeyError when 'gold' was absent.
    """
    time = strftime("%Y-%m-%d %H:%M %p", gmtime())
    if request.method == 'POST':
        action = request.POST.get("gold")
        x = None
        if action == 'farm':
            x = random.randint(1, 100)
            active = f"Earned {x} from farm at {time}"
        elif action == 'cave':
            x = random.randint(5, 10)
            active = f"Earned {x} from Cave at {time}"
        elif action == 'house':
            x = random.randint(2, 5)
            active = f"Earned {x} from House at {time}"
        elif action == 'casino':
            # Casino can lose gold (negative roll).
            x = random.randint(-20, 50)
            if x > 0:
                active = f"Earned {x} from casino at {time}"
            else:
                active = f"Lost {x} from casino... ouch... at {time}"
        if x is not None:
            request.session['your'] += x
            # Newest activity first.
            request.session["activity"].insert(0, active)
    return redirect('/')
def reset(request):
    """Wipe all session data (gold total and activity log) and go back home."""
    request.session.clear()
    return redirect('/')
|
"""Support for lienci Gateways."""
""" version 0.3 """
import socket
import json
import time
from collections import defaultdict
from threading import Thread
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP)
CONF_DISCOVERY_RETRY = 'discovery_retry'
DOMAIN = 'lienci'
PY_LIENCI_GATEWAY = "lienci_gw"
BROADCAST_ADDRESS = '<broadcast>'
GATEWAY_UDP_PORT = 6000
DEVICE_DISCOVERY_PORT = 6001
GATEWAY_DISCOVERY_PORT = 6002
SOCKET_BUFSIZE = 1024
HRARTBEAT_PORT = 12345
GATEWAY_SEARCH_TIMEOUT = 5.0
SCENES_SEARCH_TIMEOUT = 3.0
_LOGGER = logging.getLogger(__name__)
class PyLienciGateway(object):
"""PyLienci."""
gateways = defaultdict(list)
def __init__(self, callback_func):
    """Set up gateway-communication state.

    callback_func: callable invoked for incoming messages
                   (presumably by _listen_to_msg — defined outside this view).
    """
    self.callback_func = callback_func
    # True while the heartbeat/listen thread should keep running.
    self._listening = False
    # Socket bound to HRARTBEAT_PORT by listen(); None until then.
    self._listensocket = None
    # UDP socket handed to each discovered LienciGateway.
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Worker threads started by listen(); joined in stop_listen().
    self._threads = []
def discover_gateways(self):
"""Discover gateways using broadcast"""
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
_socket.settimeout(GATEWAY_SEARCH_TIMEOUT)
try:
_socket.sendto('search all'.encode('ascii'),
(BROADCAST_ADDRESS, GATEWAY_DISCOVERY_PORT))
while True:
data, (ip_address, _) = _socket.recvfrom(SOCKET_BUFSIZE)
if len(data) is None:
continue
deviceinfo = data.decode().split(' ')
sid = deviceinfo[4][7:].replace(':','')
_LOGGER.info('Lienci Gateway %s found at IP %s', sid, ip_address)
self.gateways[ip_address] = LienciGateway(ip_address, sid, self._socket)
except socket.timeout:
_LOGGER.info("Gateway discovery finished in 5 seconds")
_socket.close()
def listen(self):
"""Start listening."""
_LOGGER.info('Creating Multicast Socket')
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_socket.bind(('', HRARTBEAT_PORT))
self._listensocket = _socket
self._listening = True
thread = Thread(target=self._listen_to_msg, args=())
self._threads.append(thread)
thread.daemon = True
thread.start()
def stop_listen(self):
"""Stop listening."""
self._listening = False
if self._socket is not None:
_LOGGER.info('Closing socket')
self._socket.close()
self._socket = None
if self._listensocket is not None:
_LOGGER.info('Closing multisocket')
self._listensocket.close()
self._listensocket = None
for thread in self._threads:
thread.join()
def _Heartbeat(self):
if self._listening:
self._listensocket.settimeout(5.0)
for ip in self.gateways:
nid = ip.split('.')[3]
cmd = 'SET;0000000A;{' + nid +'.251.0.1};\x0d\x0a'
try:
self._listensocket.sendto(cmd.encode('ascii'),(ip,GATEWAY_UDP_PORT))
data, (ip_addr, _) = self._listensocket.recvfrom(SOCKET_BUFSIZE)
if data.decode().split(';')[1] == cmd.split(';')[2]:
_LOGGER.info('Heartbeat')
else:
_LOGGER.error('Error Heartbeat send %s, but receive %s', cmd, data)
except socket.timeout:
_LOGGER.error('Set Heartbeat timeout %s', cmd)
cmd = 'GET;{' + nid + '.255.255.255};\x0d\x0a'
try:
self._listensocket.sendto(cmd.encode('ascii'),(ip,GATEWAY_UDP_PORT))
except socket.timeout:
_LOGGER.error('Get info Heartbeat timeout %s', cmd)
self._listensocket.settimeout(10.0)
def _listen_to_msg(self):
t = time.time()
while time.time() - t < 3:
time.sleep(1)
self._Heartbeat()
while self._listening:
if self._listensocket is None:
continue
if time.time() - t >= 300:
t = time.time()
self._Heartbeat()
try:
data, (ip_add, _) = self._listensocket.recvfrom(SOCKET_BUFSIZE)
data = data.decode().split(';')
gateway = self.gateways.get(ip_add)
if gateway is None:
_LOGGER.error('Unknown gateway ip %s', ip_add)
continue
cmd = data[0].split(':')
if cmd[0] == 'fb':
continue
elif cmd[0] == 'FB':
_LOGGER.debug('MCAST (%s) << %s', cmd, data)
if data[1].split('.')[3] == '0}':
addr_id = data[1] + ';' + cmd[1][-3:-2]
self.callback_func(gateway.push_data, cmd[1], addr_id)
else:
self.callback_func(gateway.push_data, cmd[1], data[1])
else:
_LOGGER.error('Unknown multicast data: %s', data)
except socket.timeout:
# _LOGGER.info('No response from Gateway')
continue
class LienciGateway(object):
    """One physical Lienci gateway.

    On construction it discovers the attached devices (TCP, up to 5 tries)
    and scenes (UDP), filling ``devices`` keyed by Home Assistant component
    type.  ``callbacks`` maps a device address id to the entity callbacks
    that should receive pushed state.
    """
    def __init__(self, ip_address, sid, sock):
        # sock: shared UDP socket owned by PyLienciGateway, used for commands
        self.ip_address = ip_address
        self.port = GATEWAY_UDP_PORT
        self.sid = sid
        self.devices = defaultdict(list)
        self.callbacks = defaultdict(list)
        self._socket = sock
        trycount = 5
        for _ in range(trycount):
            _LOGGER.info('Discovering Lienci Devices')
            if self._discover_devices():
                break
        self._discover_scenes()
    def _discover_scenes(self):
        """Query the gateway for scenes over UDP; register each reply whose
        address matches this gateway's node id and class '16'."""
        _socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        _socket.settimeout(SCENES_SEARCH_TIMEOUT)
        # node id is the last octet of the gateway IP
        nid = self.ip_address.split('.')[3]
        cmd = 'GET;{' + nid + '.16.255.255};\x0d\x0a'
        try:
            _socket.sendto(cmd.encode('ascii'),(self.ip_address, self.port))
            while True:
                data, (ip_address, _) = _socket.recvfrom(SOCKET_BUFSIZE)
                if len(data) == 0:
                    continue
                sceneinfo = data.decode().split(';')
                addr_id = sceneinfo[1]
                # "{nid.16.hi.lo}" -> scene number = hi*256 + lo
                info = addr_id[1:-1].split('.')
                if info[0] == nid and info[1] == '16':
                    did = str(int(info[2]) * 256 + int(info[3]))
                    lienci_device = {
                        "model": 'scenes',
                        "sid": self.sid + '_scenes_' + did,
                        "addr_id": addr_id,
                        "name": 'scene_' + nid + '_' + did
                    }
                    self.devices['scene'].append(lienci_device)
                    _LOGGER.debug('Registering scene as: %s', addr_id)
                else:
                    continue
        except socket.timeout:
            # timing out is the normal end of scene discovery
            _LOGGER.info("Scenes discovery finished in 3 seconds")
            _socket.close()
    def _discover_devices(self):
        """Fetch serial.json over TCP and register every known device model.

        Returns True on success, False on any error (caller retries).
        """
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _devices = {}
        _resp = None
        try:
            _socket.connect((self.ip_address, DEVICE_DISCOVERY_PORT))
            _socket.send('read serial.json$'.encode('ascii'))
            total_data=[]
            while True:
                data = _socket.recv(SOCKET_BUFSIZE).decode()
                if not data: break
                total_data.append(data)
            if len(total_data) == 0:
                _LOGGER.error("Devices discovery return json is None")
                return False
            _resp = ''.join(total_data)
            _devices = json.loads(_resp)['serial']
            _socket.close()
        except Exception:
            # connection or JSON error: log the raw payload (may be None)
            _LOGGER.error('Unknown Devices discovery get json data: %s', _resp)
            _socket.close()
            return False
        # model number -> Home Assistant component type
        device_types = {
            'switch': ['ET-R0816A'],
            'light': ['ET-D0201A','ET-R0416D4I'],
            'binary_sensor': ['KP06A']
        }
        for device in _devices:
            model = device['serial'].rstrip()
            version = device['version'].rstrip()  # NOTE(review): unused
            sid = device['mac8'].replace(':','')
            nid = device['nid']
            did = device['id']
            for device_type in device_types:
                if model in device_types[device_type]:
                    if model in ['KP06A']:
                        # keypad: 6 virtual buttons share one physical address
                        for virtual in range(1,7):
                            addr_id = '{' + str(nid) + '.0.' + str(did) + '.0};' + str(virtual)
                            lienci_device = {
                                "model": model,
                                "sid": sid + '_' + str(virtual),
                                "addr_id": addr_id,
                                "name": model + '_' + str(nid) + '_' + str(did) + '_0_' + str(virtual)
                            }
                            self.devices[device_type].append(lienci_device)
                            _LOGGER.debug('Registering device %s, %s as: %s', addr_id, model, device_type)
                    else:
                        # NOTE(review): range(1, portnum) registers portnum-1
                        # channels — confirm whether channels are 1..portnum.
                        for channel in range(1,device['portnum']):
                            addr_id = '{' + str(nid) + '.0.' + str(did) + '.' + str(channel) + '}'
                            lienci_device = {
                                "model": model,
                                "sid": sid + '_' + str(did) + '_' + str(channel),
                                "addr_id": addr_id,
                                "name": model + '_' + str(nid) + '_' + str(did) + '_' + str(channel)
                            }
                            self.devices[device_type].append(lienci_device)
                            _LOGGER.debug('Registering device %s, %s as: %s', addr_id, model, device_type)
        return True
    def _send_cmd(self, cmd, re=None):
        """Send ``cmd`` over UDP.

        With ``re`` set, wait for a reply whose address field equals ``re``
        and return its value part; otherwise return True.  Returns None on
        timeout or mismatch.
        """
        try:
            self._socket.settimeout(10.0)
            _LOGGER.debug(">> %s", cmd.encode('ascii'))
            self._socket.sendto(cmd.encode('ascii'), (self.ip_address, self.port))
            if re:
                data, _ = self._socket.recvfrom(SOCKET_BUFSIZE)
                if data is None:
                    _LOGGER.error("No response from Gateway")
                    return None
                # reply format: "<key>:<value>;<addr_id>;..."
                resp = data.decode().split(';')
                data = resp[0].split(':')
                if resp[1] == re:
                    return data[1]
                else:
                    _LOGGER.error("Non matching response. Expecting %s, but got %s", re, resp[1])
                    return None
            else:
                return True
        except socket.timeout:
            _LOGGER.error("Cannot connect to Gateway")
            return None
    def write_to_hub(self, addr_id, cmd_str):
        """Send data to gateway to turn on / off device."""
        cmd = 'SET;' + cmd_str + ';' + addr_id + ';\x0d\x0a'
        return self._send_cmd(cmd)
    def get_from_hub(self, addr_id):
        """Poll the gateway for ``addr_id`` and push the reply to entities."""
        cmd = 'GET;' + addr_id + ';\x0d\x0a'
        resp = self._send_cmd(cmd, addr_id)
        return self.push_data(resp, addr_id)
    def push_data(self, data, addr_id):
        """Fan ``data`` out to every callback registered for ``addr_id``.

        Returns False when data is None, True otherwise.
        """
        if data is None:
            return False
        for func in self.callbacks[addr_id]:
            func(data)
        return True
def setup(hass, config):
    """Set up the Lienci component."""
    retries = 3
    def lienci_gw_discovered(service, discovery_info):
        """Called when Lienci Gateway device(s) has been found."""
        _LOGGER.info("Discovered: %s", discovery_info)
    discovery.listen(hass, 'lienci', lienci_gw_discovered)
    lienci = hass.data[PY_LIENCI_GATEWAY] = PyLienciGateway(hass.add_job)
    # Retry discovery a few times; stop as soon as one gateway answers.
    for attempt in range(1, retries + 1):
        _LOGGER.info('Discovering Lienci Gateways (Try %s)', attempt)
        lienci.discover_gateways()
        if lienci.gateways:
            break
    if not lienci.gateways:
        _LOGGER.error("No gateway discovered")
        return False
    for component in ['switch','light','binary_sensor','scene']:
        discovery.load_platform(hass, component, DOMAIN, {}, config)
    lienci.listen()
    _LOGGER.debug("Gateways discovered. Listening for broadcasts")
    def stop_lienci(event):
        """Stop Lienci Socket."""
        _LOGGER.info("Shutting down Lienci Hub.")
        lienci.stop_listen()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_lienci)
    return True
class LienciDevice(Entity):
    """Base class for Lienci entities.

    Wires an entity to its gateway: stores hub accessors and registers
    ``push_data`` as the callback for this device's address id.
    """
    def __init__(self, device, lienci_hub):
        """Initialize the lienci device.

        :param device: dict with 'sid', 'name' and 'addr_id' keys
            (built in LienciGateway device/scene discovery)
        :param lienci_hub: the LienciGateway owning this device
        """
        self._state = None
        self._sid = device['sid']
        self._name = device['name']
        self._addr_id = device['addr_id']
        self._write_to_hub = lienci_hub.write_to_hub
        self._get_from_hub = lienci_hub.get_from_hub
        self._device_state_attributes = {}
        # Receive push updates broadcast by the gateway for this address.
        lienci_hub.callbacks[self._addr_id].append(self.push_data)
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._device_state_attributes
    def push_data(self, data):
        """Push from Hub: parse and, if state changed, refresh HA."""
        _LOGGER.debug("PUSH >> %s: %s", self, data)
        if self.parse_data(data):
            try:
                self.schedule_update_ha_state()
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt. The entity may not yet be
                # registered with hass, in which case scheduling raises.
                return
    def parse_data(self, data):
        """Parse data sent by gateway; subclasses must implement."""
        raise NotImplementedError()
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
file_name = 'ieee_30'   # dataset stem (IEEE 30-bus system)
pmu = [9, 12, 25]       # buses that already host a PMU
n = 7                   # number of additional PMUs to place
df_length = 30          # total bus count of the system
# Seed candidate placement: buses 1..n.
test_buses = list(range(1, n + 1))
print(test_buses)
def create_test_buses(n, test_buses, pmu, df, df_length=30):
    """Advance ``test_buses`` in place to the next candidate PMU placement.

    Works like an odometer over bus numbers 1..df_length: the first digit is
    incremented; when a digit has reached ``df_length`` it wraps back to 1
    and the carry moves to the next digit.  Buses that already host a PMU
    (``pmu``) are skipped while incrementing.

    :param n: number of digits (candidate buses) in ``test_buses``
    :param test_buses: current placement, mutated and returned
    :param pmu: bus numbers to skip
    :param df: unused; kept for call-site compatibility
    :param df_length: highest bus number (was a hidden module global;
        now a parameter defaulting to the original value of 30)
    :return: the advanced ``test_buses`` list
    """
    for k in range(n):
        if test_buses[k] == df_length:
            # Digit wrapped: reset to 1, skip PMU buses, carry to next digit.
            test_buses[k] = 1
            while test_buses[k] in pmu:
                test_buses[k] = test_buses[k] + 1
        else:
            # No carry: bump this digit past any PMU buses and stop.
            test_buses[k] = test_buses[k] + 1
            while test_buses[k] in pmu:
                test_buses[k] = test_buses[k] + 1
            break
    return test_buses
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# pmu = [2, 6, 10, 19, 20, 22, 23, 25, 29]
# test_buses = [1, 3, 5, 7]
# test_buses = [9, 13, 14, 17]
def read_file(path):
    """Load a CSV file into a pandas DataFrame.

    ``pd.read_csv`` already returns a DataFrame, so the original extra
    ``pd.DataFrame(...)`` wrap was redundant and has been removed.
    """
    return pd.read_csv(path)
# import files
# NOTE(review): absolute, machine-specific path — breaks on any other host.
path = '/Users/nathanoliver/Desktop/Python/SCADA/csv/01_files_after_cleaning/' + \
    file_name + '.csv'
df = read_file(path)
# One visibility flag per bus, initially 0 (bus not observable).
visible_buses = []
for i in range(df_length):
    visible_buses.append(0)
print(visible_buses)
print(df)
# program to determine visible buses
def check_visibility(df, visible_buses, pmu):
    """Mark buses observable by the PMUs in ``pmu``.

    ``df`` lists one branch per row (from-bus, to-bus in the first two
    columns).  A bus hosting a PMU observes itself and every directly
    connected neighbour.  ``visible_buses`` is a 0/1 list indexed by
    bus number - 1; it is mutated in place and also returned.
    """
    for bus_a, bus_b in zip(df.iloc[:, 0], df.iloc[:, 1]):
        if bus_a in pmu:
            visible_buses[bus_a - 1] = 1
            visible_buses[bus_b - 1] = 1
        if bus_b in pmu:
            visible_buses[bus_a - 1] = 1
            visible_buses[bus_b - 1] = 1
    return visible_buses
# Baseline visibility from the PMUs that are already installed.
visible_buses = check_visibility(df, visible_buses, pmu)
# visible_buses_new = check_visibility(df, visible_buses, test_buses)
# print(visible_buses)
# Brute-force search: step through candidate placements of n extra PMUs
# (odometer-style via create_test_buses) and print any placement that makes
# every bus visible.  NOTE(review): the iteration count is an upper bound;
# confirm it matches the number of states the odometer actually visits.
for i in range((df_length - len(pmu))**n):
    x = visible_buses.copy()
    # print(test_buses)
    test_buses = create_test_buses(n, test_buses, pmu, df)
    # print(test_buses)
    # print(x)
    visible_buses_new = check_visibility(df, x, test_buses)
    # print(visible_buses_new)
    if sum(visible_buses_new) == df_length:
        print(test_buses)
|
from django.db import models
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class Budget(models.Model):
    """
    A yearly budget (NET amount) allocated to one expense code.

    (year, expensecode) pairs are unique.
    """
    amount = models.DecimalField('Amount (NET)', max_digits=11, decimal_places=2)
    year = models.IntegerField('Year', blank=True, null=True)
    expensecode = models.ForeignKey('ExpenseCode', verbose_name='Expense Code', on_delete=models.CASCADE)
    class Meta:
        verbose_name = "Budget"
        verbose_name_plural = "Budgets"
        unique_together = (('year', 'expensecode'),)
    def __str__(self):
        # BUG FIX: __str__ must return a str; ``amount`` is a Decimal and
        # returning it raised TypeError in the admin / shell.
        return str(self.amount)
|
# Read one line from stdin and echo it back with a trailing period.
text = input()
print(f"{text}.")
|
import re
from nltk.stem import PorterStemmer
import spacy
spacy_nlp = spacy.load('en_core_web_sm')
def findIntersection(lst1, lst2):
    """Return the items of ``lst1`` whose lowercased stem occurs in ``lst2``.

    ``lst2`` is assumed to already contain stemmed, lowercased tokens
    (see findAnswer, which stems the question before calling this).
    """
    stemmer = PorterStemmer()
    matches = []
    for token in lst1:
        if stemmer.stem(token.lower()) in lst2:
            matches.append(token)
    return matches
def findAnswer(f, question):
    """Return the sentence of passage ``f`` that shares the most stemmed
    content words with ``question``.

    Sentences are obtained by a naive split on '.'; the question is
    tokenised with spaCy, stop words removed, and the remaining tokens
    stemmed/lowercased before the overlap count.
    """
    sentences = [s for s in f.split(".") if s != '']
    stemmer = PorterStemmer()
    qdoc = spacy_nlp(question)
    question_stems = [
        stemmer.stem(tok.text.lower()) for tok in qdoc if not tok.is_stop
    ]
    overlap = [
        len(findIntersection(sentence.split(), question_stems))
        for sentence in sentences
    ]
    # Pick the sentence with the largest overlap (first one on ties).
    return sentences[overlap.index(max(overlap))]
# Who felt worried
# Who enjoyed looking at the tall trees and lovely flowers
# Apart from tall trees what did he enjoy
'''
f = input("Passage:")
a = input("que")
psg = "Walton was a lawer. He had 3 daughters. His Daughter was a Doctor."
que = "who was doctor ?"
print(findAnswer(psg,que))
'''
|
# -*- coding: utf-8 -*-
import cserial
import camabio
import time, codecs, logging
import inject
from threading import Semaphore
import model
from model.config import Config
"""
Reader nulo
usado cuando se desactiva el reader en la config
"""
class Reader:
    """No-op fingerprint reader used when the hardware is disabled.

    Mirrors the FirmwareReader interface but only logs and sleeps.
    """
    def start(self):
        # Nothing to open; just log.
        logging.debug('Reader Start')
    def stop(self):
        logging.debug('Reader Stop')
    def identify(self):
        # Blocks forever: the null reader never identifies anyone.
        logging.debug('reader identify')
        while True:
            time.sleep(100)
    def enroll(self, need_first=None, need_second=None, need_third=None, need_release=None, error=None, fatal_error=None):
        # Simulate the three-sweep enrollment: fire each callback (if given)
        # with a fixed delay, then report failure as (None, None).
        if need_first is not None:
            need_first()
        time.sleep(5)
        if need_release is not None:
            need_release()
        if need_second is not None:
            need_second()
        time.sleep(5)
        if need_release is not None:
            need_release()
        if need_third is not None:
            need_third()
        time.sleep(5)
        if need_release is not None:
            need_release()
        return (None,None)
class FirmwareReader(Reader):
    """Serial-port fingerprint reader driver (camabio protocol).

    Mutual exclusion between ``identify`` and ``enroll`` uses the
    "passing the baton" semaphore technique: ``entry`` guards the counters,
    ``e``/``i`` park waiting enroll/identify callers, ``ne``/``ni`` count
    the running ones and ``de``/``di`` the delayed (waiting) ones.
    """
    config = inject.attr(Config)
    def __init__(self):
        """Initialize the passing-the-baton semaphores and counters."""
        self.entry = Semaphore(1)
        self.e = Semaphore(0)
        self.ne = 0
        self.de = 0
        self.i = Semaphore(0)
        self.ni = 0
        self.di = 0
    def start(self):
        """Open the serial port configured as ``reader_port``."""
        port = self.config.configs['reader_port']
        cserial.open(port)
    def stop(self):
        """Close the serial port."""
        cserial.close()
    def getEmptyId(self):
        """Return a free fingerprint slot number in the reader.

        Returns None on a protocol error or when no free slot exists.
        """
        data = camabio.createPackage(0x0107,0x2,0x0)
        cserial.write(data)
        time.sleep(0.5)
        resp = cserial.readS(24)
        status = camabio.getAttrFromPackage(camabio.RET,resp)
        if status == camabio.ERR_FAIL:
            logging.warning('error en la respuesta')
            return None
        empty = camabio.getAttrFromPackage(camabio.DATA,resp)
        if empty == camabio.ERR_EMPTY_ID_NOEXIST:
            logging.warning('no existe espacio para huellas adicionales')
            return None
        return empty
    def readTemplate(self,id):
        """Read the template stored in slot ``id`` from the reader.

        Returns ``(number, template)`` — the slot number and the raw
        template bytes — or ``(None, None)`` on any protocol error.
        """
        data = camabio.createPackage(0x10a,0x2,id)
        cserial.write(data)
        time.sleep(0.5)
        resp = cserial.readS(24)
        logging.debug(resp)
        size = camabio.getAttrFromPackage(camabio.DATA,resp)
        time.sleep(0.5)
        rtmpl = cserial.readS(10 + size)
        logging.debug(rtmpl)
        r = camabio.extractResponseDataPackage(rtmpl)
        if r['PREFIX'] != 0x5aa5:
            return (None,None)
        if r['RCM'] != 0x10a:
            return (None,None)
        # Payload: 2-byte little-endian slot number followed by the template.
        number = int.from_bytes(r['DATA'][0:2],byteorder='little')
        template = r['DATA'][2:]
        if (len(template) != (size - 2)):
            return (None,None)
        return (number,template)
    def _signal(self):
        """SIGNAL step of the passing-the-baton technique.

        Wake one delayed enroll or identify caller (enroll has priority
        when both wait), otherwise release the entry lock.
        """
        if self.de > 0 and self.di == 0:
            self.de = self.de - 1
            self.e.release()
        elif self.di > 0 and self.de == 0:
            self.di = self.di - 1
            self.i.release()
        elif self.di > 0 and self.de > 0:
            self.de = self.de - 1
            self.e.release()
        else:
            self.entry.release()
    def identify(self):
        """Identify a person's fingerprint.

        Blocks while an enroll/identify is running, then waits for a finger
        on the sensor.  Returns the matched slot number, or None on error,
        timeout or cancellation.
        """
        # <await (ne == 0 and ni == 0) ni = ni + 1>
        self.entry.acquire()
        if (self.ne > 0 or self.ni > 0):
            self.di = self.di + 1
            self.entry.release()
            logging.debug('identify esperando')
            self.i.acquire()
        self.ni = self.ni + 1
        self._signal()
        logging.debug('reader.identify')
        try:
            data = camabio.createPackage(0x102,0,0)
            cserial.write(data)
            time.sleep(0.5)
            huella = None
            exit = False
            while not exit:
                logging.debug('readding serial')
                resp = cserial.readS(24)
                logging.debug('paquete leido {}'.format(resp))
                ret = camabio.getAttrFromPackage(camabio.RET,resp)
                data = camabio.getAttrFromPackage(camabio.DATA,resp)
                exit = True
                if ret == camabio.ERR_FAIL:
                    if data == camabio.ERR_IDENTIFY:
                        logging.warning('no existe ninguna persona con esa huella')
                    if data == camabio.ERR_ALL_TMPL_EMPTY:
                        logging.warning('no existen huellas en el lector')
                    if data == camabio.ERR_TIME_OUT:
                        logging.warning('timeout')
                    if data == camabio.ERR_BAD_QUALITY:
                        logging.warning('mala calidad de la huella')
                    if data == camabio.ERR_FP_CANCEL:
                        logging.warning('identificación cancelada')
                elif ret == camabio.ERR_SUCCESS:
                    if data == camabio.GD_NEED_RELEASE_FINGER:
                        # finger still on the sensor: keep reading
                        exit = False
                    else:
                        huella = data
                else:
                    logging.warning('respuesta desconocida')
                    logging.warning(codecs.encode(resp,'hex'))
            return huella
        finally:
            self.entry.acquire()
            self.ni = self.ni - 1
            self._signal()
    def enroll(self, need_first=None, need_second=None, need_third=None, need_release=None, error=None, fatal_error=None):
        """Enroll a fingerprint using the reader's three-sweep procedure.

        The ``need_*`` callbacks report progress to the UI; ``error`` reports
        recoverable errors, ``fatal_error`` terminal ones.  Returns
        ``(slot, template)`` on success — the slot number assigned inside the
        reader and the template bytes — or ``(None, None)`` on failure.
        """
        canceled = False
        # <await (ne == 0 and ni == 0) ne = ne + 1>
        self.entry.acquire()
        if (self.ne > 0 or self.de > 0):
            logging.debug('ya existe en ejecución/espera un enrolado')
            self._signal()
            # BUG FIX: was a bare ``return`` (None); callers expect a tuple.
            return (None, None)
        elif (self.ni > 0):
            self.de = self.de + 1
            self.entry.release()
            logging.debug('enroll esperando')
            # Fire a cancel so the in-flight identify releases the reader.
            canceled = True
            data = camabio.createPackage(0x130,0,0)
            cserial.write(data)
            self.e.acquire()
        self.ne = self.ne + 1
        self._signal()
        if canceled:
            resp = cserial.readS(24)
            ret = camabio.getAttrFromPackage(camabio.RET,resp)
            if ret != camabio.ERR_SUCCESS:
                # BUG FIX: this used ``self.status`` which does not exist and
                # raised AttributeError; log the status byte actually read.
                logging.warning('se cancelo {} pero se leyo del serie {}'.format(ret,codecs.encode(resp,'hex')))
        logging.debug('enroll')
        try:
            empty = self.getEmptyId()
            data = camabio.createPackage(0x0103,0x02,empty)
            cserial.write(data)
            time.sleep(0.5)
            fase = 0
            huella = None
            while huella is None:
                resp = cserial.readS(24)
                logging.debug(codecs.encode(resp,'hex'))
                err = camabio.getAttrFromPackage(camabio.RET,resp)
                rdata = camabio.getAttrFromPackage(camabio.DATA,resp)
                if err == camabio.ERR_FAIL:
                    # Error response: dispatch on the error code.
                    if rdata == camabio.ERR_INVALID_TMPL_NO:
                        msg = 'error en el número de huella'
                        logging.warning(msg)
                        if fatal_error:
                            fatal_error(msg)
                        return (None,None)
                    if rdata == camabio.ERR_TMPL_NOT_EMPTY:
                        msg = 'el número de huella no esta vacío'
                        logging.warning(msg)
                        if fatal_error:
                            fatal_error(msg)
                        return (None,None)
                    if rdata == camabio.ERR_TIME_OUT:
                        msg = 'timeout'
                        logging.warning(msg)
                        if error:
                            error(msg)
                        continue
                    if rdata == camabio.ERR_BAD_QUALITY:
                        msg = 'mala calidad'
                        logging.warning(msg)
                        if error:
                            error(msg)
                        continue
                    if rdata == camabio.ERR_GENERALIZE:
                        msg = 'error generalizando las huellas'
                        logging.warning(msg)
                        if fatal_error:
                            fatal_error(msg)
                        return (None,None)
                    if rdata == camabio.ERR_DUPLICATION_ID:
                        pos = camabio.getIntFromPackage(camabio.DATA + 2,resp)
                        msg = 'error, huella duplicada en la posición {}'.format(pos)
                        logging.warning(msg)
                        if fatal_error:
                            fatal_error(msg)
                        return (None,None)
                    if rdata == camabio.ERR_FP_CANCEL:
                        msg = 'Se ha cancelado el comando de enrolado'
                        logging.warning(msg)
                        if fatal_error:
                            fatal_error(msg)
                        return (None,None)
                    msg = 'error desconocido'
                    logging.warning(msg)
                    logging.warning(codecs.encode(resp,'hex'))
                    if fatal_error:
                        fatal_error(msg)
                    return (None,None)
                elif err == camabio.ERR_SUCCESS:
                    # Success: either a progress hint or the final slot number.
                    if rdata == camabio.GD_NEED_FIRST_SWEEP:
                        if need_first:
                            need_first()
                        logging.debug('Necesita primera huella')
                        fase = 1
                        continue
                    if rdata == camabio.GD_NEED_SECOND_SWEEP:
                        if need_second:
                            need_second()
                        logging.debug('Necesita segunda huella')
                        fase = 2
                        continue
                    if rdata == camabio.GD_NEED_THIRD_SWEEP:
                        if need_third:
                            need_third()
                        logging.debug('Necesita tercera huella')
                        fase = 3
                        continue
                    if rdata == camabio.GD_NEED_RELEASE_FINGER:
                        if need_release:
                            need_release()
                        logging.debug('levante el dedo del lector')
                        continue
                    huella = rdata
                else:
                    msg = 'estado desconocido'
                    logging.warning(msg)
                    logging.warning(codecs.encode(resp,'hex'))
                    if fatal_error:
                        fatal_error(msg)
                    return (None,None)
            if huella is None:
                return (None,None)
            (number,template) = self.readTemplate(huella)
            return (huella,template)
        finally:
            self.entry.acquire()
            self.ne = self.ne - 1
            self._signal()
"""
el provider del reader
"""
def getReader():
config = inject.instance(Config)
if config.configs['reader_enable']:
return inject.instance(FirmwareReader)
else:
return inject.instance(Reader)
|
#!/usr/bin/env python
# Standard Imports
import sys
from pprint import pprint
# Object Loader
sys.path.insert(0,'../WebScrapper')
from obj_loader import read_armor_files
# Cassandra Driver
import cassandraDriver as db
KEYSPACE = 'testkeyspace'
def main():
    """Connect to Cassandra, create the armor table, and bulk-insert the
    scraped armor items (skipping placeholder "dummy" entries)."""
    print("Connecting to Cassandra")
    db.connect()
    print("Connected")

    print("Attempting to create Armor Table")
    db.createArmorTable()

    print("Begining Load")
    armor_items, _id_list = read_armor_files()
    print("Load Complete")
    print(armor_items[10])

    for piece in armor_items:
        print("Processing armor piece " + piece.get('Name'))
        if "dummy" in piece.get('Name'):
            continue
        db.insertArmor(piece)
    print("Finished Dumping Files")
'''
Loaded Format
----------------------------------
Armor_Item:
{
'id' : '1248329814',
Name : 'Leather Trousers',
Type : 'All',
Part : 'Head',
Gender : 'Both',
Rarity : '1',
Defense : {
'initial' : '1',
'max' : '71'
}
Slot : '1',
Fire : '-1',
Water : '0',
Ice : '0',
Thunder : '0',
Dragon : '1',
Skills : [
{
'id' : '1234125',
'Name' : 'Gathering',
'Value' : '1'
},
{
'id' : '12314850',
'Name' : 'Whim',
'Value' : '3'
}
],
'Crafting Items' : [
{
'id' : '11235143',
'Name' : 'Warm Pelt'
'Quantity': '1'
},
{
'id' : '12341542323',
'Name' : 'Iron Ore',
'Quanity' : '1'
}
]
}
'''
if __name__ == "__main__":
main()
|
from puddles.puddles import *
|
import os
os.sys.path.insert(0, os.path.abspath('..\settings_folder'))
import settings
from utils import *
import msgs
import pandas as pd
import numpy
import matplotlib.pyplot as plt
def filter(data, key, value):
    """Row-filter a column-oriented dict of parallel lists.

    Keeps only the rows where ``data[key] == value`` and returns a new dict
    with the same keys and the surviving rows, in order.

    NOTE: the name shadows the ``filter`` builtin; kept for the existing
    call sites in this module.

    :param data: dict mapping column name -> list (all lists same length)
    :param key: column to test
    :param value: value a row must have in ``key`` to be kept
    :return: new dict with the same columns, filtered rows
    """
    # Indices of the rows that match (enumerate replaces the manual counter).
    rows = [i for i, cell in enumerate(data[key]) if cell == value]
    # Project every column down to the matching rows.
    return {col: [data[col][i] for i in rows] for col in data}
def main():
    """Summarise per-zone DQN flight statistics from a parsed run log and
    save a step-count histogram image per zone."""
    # set the following params
    msgs.algo = "DQN"
    msgs.mode = "train"
    # parse data
    input_file = "desktop_static_obstacles_final.txt"
    data_file = os.path.join(settings.proj_root_path, "data", "DQN", input_file)
    data = parse_data(data_file)
    # parse the success first
    result = filter(data, 'success', 'True')
    #result = data
    # Split the successful episodes into the four zones.
    result_per_zone = {}
    for zone_idx in range (0,4):
        result_per_zone[zone_idx] = filter(result, 'cur_zone_number', zone_idx)
    so_far_success_count = 0
    for zone_idx in result_per_zone.keys():
        # success_count_within_window appears cumulative: subtract the running
        # total to report each zone's own count — TODO confirm in parse_data.
        print("zone:" + str(zone_idx) + " success_count:" +\
            str(result_per_zone[zone_idx]["success_count_within_window"][-1] - so_far_success_count), end = "||")
        so_far_success_count = result_per_zone[zone_idx]["success_count_within_window"][-1]
        print("zone:" + str(zone_idx) + " distance_traveled:" + str(numpy.mean(result_per_zone[zone_idx]["distance_traveled"])), end = "||")
        print("zone:" + str(zone_idx) + " energy_consumed:" + str(numpy.mean(result_per_zone[zone_idx]["energy_consumed"])), end = "||")
        print("zone:" + str(zone_idx) + " flight_time:" + str(numpy.mean(result_per_zone[zone_idx]["flight_time"])), end = "||"),
        print("zone:" + str(zone_idx) + " stepN:" + str(numpy.mean(result_per_zone[zone_idx]["stepN"])))
        print("zone:" + str(zone_idx) + " std:" + str(numpy.std(result_per_zone[zone_idx]["stepN"])))
        print(result_per_zone[zone_idx]["goal"])
        # Mean Euclidean magnitude of the goal vectors for this zone.
        print("zone:" + str(zone_idx) + " end_mag_mean:" + str(numpy.mean(list(map(lambda x: numpy.linalg.norm(x), result_per_zone[zone_idx]["goal"])))))
        #print("zone:" + str(zone_idx) + " end_mean:" + str(numpy.mean(result_per_zone[zone_idx]["goal"])))
        # NOTE(review): plt.hist is called on the same figure every iteration
        # without clearing, so later zone images overlay the earlier
        # histograms — confirm whether that is intended.
        plt.hist(result_per_zone[zone_idx]["stepN"], range(0,200, 10))
        plt.title("zone:" + str(zone_idx) + " data")
        #plt.figure()
        plt.savefig(str(zone_idx) + input_file.replace("txt", "png"))
    # dumping into a csv
    """
    data_frame = pd.DataFrame(data)
    data_frame.to_csv(data_file.replace("txt", "csv"), index=False)
    data_file = os.path.join(settings.proj_root_path, "data", "DQN", "filterd.txt")
    data_frame = pd.DataFrame(result_zone2)
    data_frame.to_csv(data_file.replace("txt", "csv"), index=False)
    """
if __name__ == "__main__":
    main()
|
# Generated by Django 2.1 on 2018-10-02 13:41
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1): add the rich-text ``desc`` field to
    Organization and refresh field definitions (verbose names, null-ability)
    on several Organization and News fields. Do not edit by hand."""
    dependencies = [
        ('mainapp', '0029_auto_20181002_1315'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='desc',
            field=tinymce.models.HTMLField(blank=True, null=True, verbose_name='О компании'),
        ),
        migrations.AlterField(
            model_name='news',
            name='desc',
            field=tinymce.models.HTMLField(verbose_name='Содержание'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='contact_text',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Контактный текст'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='email',
            field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Электронная почта'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='footer_desc',
            field=models.TextField(blank=True, null=True, verbose_name='Описание в футере'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='footer_email',
            field=models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email в футере'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='footer_phone',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Телефон в футере'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='logo',
            field=models.ImageField(upload_to='logo', verbose_name='Логотип'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='name',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Наименование'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='phone',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Телефон'),
        ),
        migrations.AlterField(
            model_name='organization',
            name='phone_prefix',
            field=models.CharField(blank=True, max_length=10, null=True, verbose_name='Код города'),
        ),
    ]
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
prim.py
=========
Loads a list of primitives from a GParts persisted directory.
Used for debugging things such as missing transforms.
See: notes/issues/OKX4Test_partBuffer_difference.rst
TODO: consolidate ana/prim.py with dev/csg/GParts.py
"""
import sys, os, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.sysrap.OpticksCSG import CSG_
from opticks.ana.blib import BLib
from opticks.ana.mesh import Mesh
from opticks.ana.shape import Shape
# Thin named Shape subclasses — one per solid / boolean node type — so that
# dumps and plots carry the concrete type name. They add no behavior beyond
# the Shape base class.
class Ellipsoid(Shape):pass
class Tubs(Shape):pass
class Torus(Shape):pass
class Cons(Shape):pass
class Hype(Shape):pass
class Box(Shape):pass
class UnionSolid(Shape):pass
class SubtractionSolid(Shape):pass
class IntersectionSolid(Shape):pass
class Part(object):
    """
    Parts are CSG constituents, aka nodes of the CSG trees that make up each solid.

    Each part decodes one (4,4) node record: typecode, boundary index,
    complement flag and a 1-based gtransform pointer into the solid's
    global-transform array.
    """
    # class-wide running index assigned to each Part as it is constructed
    part_idx = 0
    def __init__(self, part, trans, d):
        """
        Part instances are created within the parent Prim instance
        by mapping this Part ctor over elements of the parts (nodes)
        and global transforms arrays.
        :param part: single csg node of shape (4,4)
        :param trans: 1 or more transforms, shape (ntran,4,4)
        :param d: Dir instance
        """
        assert part.shape == (4,4), part
        assert trans.shape[1:] == (4,4), trans
        assert trans.shape[0] < 16, trans
        assert d.__class__.__name__ == 'Dir'
        ntran = trans.shape[0]
        assert ntran > 0
        log.debug( "Part : trans.shape %s " % repr(trans.shape))
        #print("trans", trans)
        # view the same 16 words as floats and as uints; which interpretation
        # applies depends on the field
        f = part.view(np.float32)
        u = part.view(np.uint32)
        fc = part.copy().view(np.float32)
        fc[1][2] = 0 # scrub boundary in copy, as it is known discrepant : due to lack of surfaces
        tc = u[2][3] ## typecode eg CSG_.UNION
        tcn = CSG_.desc(tc) ## typename
        bnd = u[1][2] ## boundary index
        # check the complements, viewing as float otherwise lost "-0. ok but not -0"
        comp = np.signbit(f[3,3])
        # shift everything away, leaving just the signbit
        comp2 = u[3,3] >> 31
        assert comp == comp2
        # recover the gtransform index by getting rid of the complement signbit
        gt = u[3][3] & 0x7fffffff
        if tc in [CSG_.DIFFERENCE, CSG_.INTERSECTION, CSG_.UNION]:
            assert gt == 0, "operators are expected to not have a gtransform"
        elif tc == CSG_.ZERO:
            assert gt == 0, "zeros are not expected to have a gtransform"
        else:
            assert gt > 0, "primitives are expected to have a gtransform "
            assert gt <= ntran, ( "1-based gt expected to be <= ntran (local index, not global) ", gt, ntran )
            if ntran > 5:
                pass
                #log.info(" part_idx:%5d ntran:%2d gt:%2d tcn:%s " % ( self.__class__.part_idx, ntran, gt, tcn ))
            pass
        pass
        self.f = f
        self.fc = fc
        self.tc = tc
        self.tcn = tcn
        self.comp = comp
        self.gt = gt # 1-based gtransform pointer into trans
        self.bnd = bnd
        self.bname = d.blib.bname(bnd)
        try:
            # fall back to identity for operators/zeros (gt == 0)
            tran = trans[gt-1] if gt > 0 else np.eye(4)
        except IndexError:
            log.error("trans issue gt %s trans.shape %s " % ( gt, repr(trans.shape)))
            tran = np.eye(4)
        pass
        self.tran = tran
        self.d = d
        self.idx = self.__class__.part_idx
        self.__class__.part_idx += 1
    def __repr__(self):
        return " Part %1s%2s %2s %15s %3d %25s tz:%10.3f %s " % ( "!" if self.comp else " ", self.tc, self.gt, self.tcn, self.bnd, self.bname, self.tz, self.detail() )
    def maxdiff(self, other):
        """
        :param other: Part instance
        :return float: max absolute difference between float param values of the CSG constituent part
        (actually the boundary index is excluded by comparing a copy and scrubbing that)
        """
        return np.max( np.abs(self.fc - other.fc) )
    # Per-typecode aliases into the (4,4) float param block; which alias is
    # meaningful depends on self.tc (see detail() for the mapping per type).
    r = property(lambda self:self.f[0][3])
    tz = property(lambda self:self.tran[3][2])
    xbox = property(lambda self:self.f[0][0])
    ybox = property(lambda self:self.f[0][1])
    zbox = property(lambda self:self.f[0][2])
    r1co = property(lambda self:self.f[0][0])
    z1co = property(lambda self:self.f[0][1])
    r2co = property(lambda self:self.f[0][2])
    z2co = property(lambda self:self.f[0][3])
    z1 = property(lambda self:self.f[1][0]) # cy or zsphere
    z2 = property(lambda self:self.f[1][1])
    r1 = property(lambda self:self.f[0][2])
    r2 = property(lambda self:self.f[0][3])
    dz = property(lambda self:self.z2 - self.z1)
    dr = property(lambda self:self.r2 - self.r1)
    def as_shape(self, name, sc):
        """Return a scaled 2d Shape for plotting (or None for unsupported
        typecodes), with the transform z-offset applied as ltransform."""
        if self.tc == CSG_.BOX3:
            sh = Box(name, [self.xbox/sc, self.zbox/sc ] )
        elif self.tc == CSG_.CYLINDER:
            sh = Tubs(name, [self.r/sc, abs(self.z1/sc) ] )
        elif self.tc == CSG_.TORUS:
            # NOTE(review): approximates the torus with an Ellipsoid of the
            # same radius — confirm this is the intended visualisation.
            sh = Ellipsoid(name, [self.r/sc, self.r/sc ] )
        else:
            sh = None
        pass
        if not sh is None:
            sh.ltransform = [0, self.tz/sc ]
        pass
        return sh
    def detail(self):
        """Return a per-typecode one-line parameter summary used by __repr__."""
        tz = self.tz
        if self.tc == CSG_.ZSPHERE:
            msg = " r: %10.3f z1:%10.3f z2:%10.3f " % ( self.r, self.z1, self.z2 )
        elif self.tc == CSG_.SPHERE:
            msg = " r: %10.3f " % ( self.f[0][3] )
        elif self.tc == CSG_.CYLINDER:
            msg = " z1:%10.3f z2:%10.3f r :%10.3f z1+tz:%10.3f z2+tz:%10.3f" % ( self.z1, self.z2, self.r, self.z1 + tz, self.z2 + tz)
        elif self.tc == CSG_.CONE:
            msg = " z1:%10.3f z2:%10.3f r1:%10.3f r2:%10.3f z1+tz:%10.3f z2+tz:%10.3f" % ( self.z1co, self.z2co, self.r1co, self.r2co, self.z1co+tz, self.z2co+tz )
        elif self.tc == CSG_.BOX3:
            msg = " x:%10.3f y:%10.3f z:%10.3f " % ( self.xbox, self.ybox, self.zbox )
        else:
            msg = ""
        pass
        return msg
class Prim(object):
    """
    Wrapper for one row of the primBuffer: a "solid" holding slices of the
    part and transform buffers, deserialized into Part instances.
    """
    def __init__(self, primIdx, prim, d):
        """
        :param primIdx: index of this prim within the buffer
        :param prim: (4,) array (partOffset, numParts, tranOffset, planOffset)
        :param d: Dir instance holding the part/tran/idx buffers
        """
        assert primIdx > -1 and primIdx < 10000, primIdx
        assert prim.shape == (4,), "unexpected prim.shape %s " % repr(prim.shape)
        self.primIdx = primIdx
        self.prim = prim
        idx = d.idx[primIdx] if d.idx is not None else -np.ones(4,dtype=np.uint32)
        lvIdx = idx[2]
        self.idx = idx
        self.lvIdx = lvIdx
        self.lvName = d.ma.idx2name.get(lvIdx, "-") if d.ma is not None else "--"
        partOffset, numParts, tranOffset, planOffset = prim
        numTran = d.ntran[primIdx]
        parts_ = d.part[partOffset:partOffset+numParts]
        trans_ = d.tran[tranOffset:tranOffset+numTran,0] # eg shape (2, 4, 4) plucking the first from the t,v,q triplet of transforms
        self.parts_ = parts_
        self.trans_ = trans_ ## without the python class wrapping
        # list comprehension (not bare map) so len()/concatenation keep working
        # under py3; every Part gets all trans_ and selects its own via gt
        self.parts = [Part(p_, trans_, d) for p_ in parts_]
        self.partOffset = partOffset
        self.numParts = numParts
        self.numTran = numTran
        self.tranOffset = tranOffset
        self.planOffset = planOffset
        self.d = d
    def maxdiff(self, other):
        """
        :param other: Prim instance to compare self with
        :return float: max difference over the constituent parts, from Part.maxdiff
        """
        assert len(self.parts) == len(other.parts)
        return max(a.maxdiff(b) for a, b in zip(self.parts, other.parts))
    def tr_maxdiff(self, other):
        """
        :param other: Prim instance to compare self with
        :return value: max absolute difference between the (numtran,4,4) elements
        """
        return np.max(np.abs(self.trans_ - other.trans_))
    def __repr__(self):
        return "primIdx %3s idx %30s lvIdx %3d lvName %30s partOffset %3s numParts %3s tranOffset %3s numTran %3s planOffset %3s " % (self.primIdx, str(self.idx), self.lvIdx, self.lvName, self.partOffset, self.numParts, self.tranOffset, self.numTran, self.planOffset )
    def __str__(self):
        # parts with tc == 0 are padding and are suppressed from the dump
        return "\n".join(["", repr(self)] + [str(pt) for pt in self.parts if pt.tc > 0] + [repr(self.trans_)])
class Dir(object):
    """
    Deserializes primBuffer/partBuffer/tranBuffer .npy files from a GParts
    directory into Prim instances, with helpers to diff two directories.
    """
    def __init__(self, base, kd):
        """
        :param base: directory containing primBuffer.npy etc..
        :param kd: keydir passed to Mesh for solid/mesh name lookup

        TODO: reworking following deferred GParts creation, so these buffers are not persisted anymore
        """
        self.base = base
        self.blib = BLib.make(base) # auto finds the idpath
        prim = np.load(os.path.join(base, "primBuffer.npy")) # "solid" tree level index into part and tran buffers
        part = np.load(os.path.join(base, "partBuffer.npy"))
        tran = np.load(os.path.join(base, "tranBuffer.npy"))
        idxpath = os.path.join(base,"idxBuffer.npy")
        # idxBuffer is optional; Prim falls back to sentinel indices when absent
        idx = np.load(idxpath) if os.path.exists(idxpath) else None
        ma = Mesh(kd) # uses IDPATH envvar , used to lookup solid/mesh names from lvIdx
        ntran = np.zeros( len(prim), dtype=np.uint32)
        ntran[0:len(prim)-1] = prim[1:,2] - prim[:-1,2] ## differencing the tranOffsets to give numtran
        ntran[len(prim)-1] = 1 # arbitrary guess for the number of transforms of the last prim
        self.prim = prim
        self.part = part
        self.tran = tran
        self.ntran = ntran
        self.idx = idx
        self.ma = ma
        self.prims = self.get_prims()
    def get_prims(self):
        """
        :return: list of Prim instances deserialized from self.prim array
        """
        # comprehension (not py2 map) so callers can len()/index on py3 too
        return [Prim(primIdx, prim, self) for primIdx, prim in enumerate(self.prim)]
    def enumerate_prim_zip(self, other):
        """
        :param other: Dir instance with the same number of prims
        :return: enumeration of (idx, (this_prim, other_prim)) pairs
        """
        assert len(self.prims) == len(other.prims)
        return enumerate(zip(self.prims, other.prims))
    def where_discrepant_tr(self, other, cut=0.1):
        """
        :return: list of prim indices whose transforms differ by more than cut
        """
        return [i for i, (a, b) in self.enumerate_prim_zip(other) if a.tr_maxdiff(b) > cut]
    def where_discrepant_prims(self, other, cut=0.1):
        """
        :return: list of prim indices whose part params differ by more than cut
        """
        return [i for i, (a, b) in self.enumerate_prim_zip(other) if a.maxdiff(b) > cut]
    def get_discrepant_prims(self, other, cut=0.1):
        """
        :return: list of (idx, (this_prim, other_prim)) pairs differing by more than cut
        """
        return [(i, ab) for i, ab in self.enumerate_prim_zip(other) if ab[0].maxdiff(ab[1]) > cut]
    def __repr__(self):
        return "\n".join([self.base,"prim %s part %s tran %s " % ( repr(self.prim.shape), repr(self.part.shape), repr(self.tran.shape))])
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # fallback geocache directory used when no argument is supplied
    ddir = "/usr/local/opticks/geocache/DayaBay_VGDX_20140414-1300/g4_00.dae/96ff965744a2f6b78c24e33c80d3a4cd/103/GPartsAnalytic/5"
    dir_ = sys.argv[1] if len(sys.argv) > 1 else ddir
    # second argument is a "start:stop" slice spec selecting prims to dump
    slispec = sys.argv[2] if len(sys.argv) > 2 else "0:10"
    sli = slice(*[int(field) for field in slispec.split(":")])
    if dir_ == ddir:
        log.warning("using hardcoded dir" ) ;
    pass
    from opticks.ana.key import keydir
    kd = keydir(os.environ["OPTICKS_KEY"])
    d = Dir(dir_, kd)
    print("Dir(dir_)", d)
    pp = d.prims
    print("dump sliced prims from the dir slice %s " % repr(sli))
    for p in pp[sli]:
        print(p)
    pass
    #print(d.tran)
|
from flask import Flask, render_template, request, redirect, url_for, session
from flask_mysqldb import MySQL
import MySQLdb.cursors
import re
db = MySQL()
def createApp():
    """Application factory: configure Flask + MySQL and register all blueprints.

    :return: configured Flask application instance
    """
    import os

    app = Flask(__name__)
    # SECURITY: secrets/credentials were hardcoded. The original values are
    # kept as fallbacks for backward compatibility, but can now be overridden
    # via environment variables -- do so in any real deployment.
    app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'thisisaverysecurekeylmaoplsdontstealthisthx')
    app.config['MYSQL_HOST'] = os.environ.get('MYSQL_HOST', 'localhost')
    app.config['MYSQL_USER'] = os.environ.get('MYSQL_USER', 'root')
    app.config['MYSQL_PASSWORD'] = os.environ.get('MYSQL_PASSWORD', '')
    app.config['MYSQL_DB'] = os.environ.get('MYSQL_DB', 'popdogdb')
    db.init_app(app)
    app.static_folder = 'static'

    # Register each feature blueprint; imports are local to avoid circular
    # imports at module load time.
    from .auth import auth as authBlueprint
    app.register_blueprint(authBlueprint)
    from .main import main as mainBlueprint
    app.register_blueprint(mainBlueprint)
    from .userhandle import userhandle as userhandleBlueprint
    app.register_blueprint(userhandleBlueprint)
    from .dashboard import dashboard as dashboardBlueprint
    app.register_blueprint(dashboardBlueprint)
    from .roles import roles as rolesBlueprint
    app.register_blueprint(rolesBlueprint)
    from .search import search as searchBlueprint
    app.register_blueprint(searchBlueprint)
    from .location import location as locationBlueprint
    app.register_blueprint(locationBlueprint)
    from .inventory import inventory as inventoryBlueprint
    app.register_blueprint(inventoryBlueprint)
    from .maintenance import maintenance as maintenanceBlueprint
    app.register_blueprint(maintenanceBlueprint)
    from .patients import patients as patientsBlueprint
    app.register_blueprint(patientsBlueprint)
    from .information import information as informationBlueprint
    app.register_blueprint(informationBlueprint)
    return app
|
from user_registration import UserRegistration
from user_registration_input import UserRegistrationInput
# Shared helpers for the interactive prompts below: one holds the raw input
# values, the other performs the field validations.
obj_user_input = UserRegistrationInput()
obj_user_check = UserRegistration()
if __name__ == '__main__':
    # Interactive registration flow: for each field, prompt -> store on the
    # input holder -> read back -> validate.
    first_name = input("Enter your first name:")
    obj_user_input.set_first_name_input(first_name)
    first_name_input = obj_user_input.get_first_name_input()
    obj_user_check.first_name_validation(first_name_input)

    last_name = input("Enter Your Last Name:")
    obj_user_input.set_last_name_input(last_name)
    last_name_input = obj_user_input.get_last_name_input()
    obj_user_check.last_name_validation(last_name_input)

    email_input = input("Enter Your Email Address:")
    obj_user_input.set_email_input(email_input)
    email_input = obj_user_input.get_email_input()
    obj_user_check.email_validation(email_input)

    phone_number_input = input("Enter Your Phone Number:")
    # BUG FIX: the original re-called set_email_input(email_input) here, so the
    # phone number was never stored and get_phone_number_input() read a stale
    # (or unset) value.
    obj_user_input.set_phone_number_input(phone_number_input)
    phone_number = obj_user_input.get_phone_number_input()
    obj_user_check.phone_number_validation(phone_number)

    password_input = input("Enter Your Password:")
    obj_user_input.set_password_input(password_input)
    password_input = obj_user_input.get_password_input()
    obj_user_check.password_validation(password_input)
|
"""
This example demonstrates how to upload a video.
"""
import pyyoutube.models as mds
from pyyoutube import Client
from pyyoutube.media import Media
# Access token with scope:
# https://www.googleapis.com/auth/youtube.upload
# https://www.googleapis.com/auth/youtube
# https://www.googleapis.com/auth/youtube.force-ssl
# Placeholder -- replace with a real OAuth2 access token before running.
ACCESS_TOKEN = "xxx"
def upload_video():
    """Upload target_video.mp4 in resumable chunks and print the new video id."""
    cli = Client(access_token=ACCESS_TOKEN)
    body = mds.Video(
        snippet=mds.VideoSnippet(title="video title", description="video description")
    )
    media = Media(filename="target_video.mp4")
    upload = cli.videos.insert(
        body=body, media=media, parts=["snippet"], notify_subscribers=True
    )
    # next_chunk() returns (status, response); response stays None until the
    # final chunk has been accepted by the API.
    response = None
    while response is None:
        print("Uploading video...")  # fixed: was an f-string with no placeholders
        status, response = upload.next_chunk()
        if status is not None:
            print(f"Uploading video progress: {status.progress()}...")
    # Use video class to representing the video resource.
    video = mds.Video.from_dict(response)
    print(f"Video id {video.id} was successfully uploaded.")
if __name__ == "__main__":
upload_video()
|
import random
import smtplib
import datetime
import imghdr
import email.message
# Mailing List
# FIX: da_bros was referenced below but never defined (NameError at runtime);
# define the recipient placeholder alongside the other credential placeholders.
da_bros = "{Enter Recipient}"

# Credentials
my_email = "{Enter E-Mail}"
password = "{Enter Password}"

# Creating current date object
current_date = datetime.datetime.now()

# Initializing connection and starting TLS
connection = smtplib.SMTP("smtp.gmail.com")
connection.starttls()
connection.login(user=my_email, password=password)

MESSAGE = email.message.EmailMessage()
MESSAGE['Subject'] = 'CRITICAL ALERT'
MESSAGE['From'] = 'gfapsnmpblasterbussy@gmail.com'
MESSAGE['To'] = da_bros
# MESSAGE.preamble = 'You will not see this in a MIME-aware mail reader.\n'

# Pick a random quote line. The original indexed randint(0, 100) blindly,
# which raises IndexError if quotes.txt has fewer than 101 lines -- clamp to
# the actual line count.
with open("quotes.txt", mode="r") as quotes:
    lines = quotes.readlines()
    quote = lines[random.randint(0, min(100, len(lines) - 1))]
content = str(f"IT IS HUMP DAYYY!!!!!!!!!!!!\nHere is your hump day quote:\n{quote}")
MESSAGE.set_content(content)

with open("its-wednesday-camel-.jpg", mode="rb") as file:
    read_file = file.read()
MESSAGE.add_attachment(read_file, maintype='image', subtype=imghdr.what(None, read_file))

# FIX: the original wrapped the send in `while True:`, re-sending the message
# forever and making connection.close() unreachable -- send once, then close.
print("cute dog")
connection.send_message(MESSAGE)
connection.close()
|
import random
import time
class Formula:
    """Linear interpolation of the coefficient table n over breakpoints h1."""
    def __init__(self):
        # coefficient values, ordered to correspond with self.h1
        self.n = [0.5492, 0.5492, 0.5719, 0.5694, 0.5675, 0.5569, 0.5552, 0.5447, 0.5380, 0.5361, 0.5295, 0.5288, 0.5281, 0.5245, 0.5238, 0.5215]
        # ascending breakpoint table
        self.h1 = [8.45,8.64, 9.52, 10.67, 11.82, 13.98, 17.82, 24.25, 28.56, 32.64, 37.76, 41.64, 45.34, 47.34, 50.07, 53.61]
        self.hr = 30.32 #round(random.uniform(8,45), 2)
    def search(self):
        """Locate the pair of h1 entries bracketing self.hr.

        :return: (upper_value, lower_value, upper_index, lower_index) for the
            first breakpoint strictly greater than self.hr; implicitly None
            when self.hr is >= every breakpoint (unchanged from original).
        """
        for upper_idx, breakpoint_ in enumerate(self.h1):
            if breakpoint_ > self.hr:
                lower_idx = upper_idx - 1
                return self.h1[upper_idx], self.h1[lower_idx], upper_idx, lower_idx
    def formula(self, b_value, s_value, b_index, s_index):
        """Linearly interpolate self.n at self.hr between the bracketing points."""
        span = b_value - s_value
        weight_hi = (self.hr - s_value) / span
        weight_lo = (b_value - self.hr) / span
        return weight_hi * self.n[b_index] + weight_lo * self.n[s_index]
if __name__ == '__main__':
    # Crude benchmark: repeat the bracket search + interpolation a million
    # times, then report wall-clock runtime and the last computed Hr.
    start = time.time()
    fm = Formula()
    for _ in range(1000000):
        b_value, s_value, b_index, s_index = fm.search()
        num = fm.formula(b_value, s_value, b_index, s_index)
        Hr = fm.hr*(1/0.25)**(1-num)
    print('runtime='+str(time.time()-start))
    print(Hr)
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/26 11:44
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('./picture/wave.png')
"""本文档学习了图像识别基础,主要包含:
0. 模式和模式类(基础知识)
1. 模板匹配
2. Bayes分类器
3. 应用举例
"""
"""0. 模式和模式类(模式类(Class):一类事物的代表;模式(Pattern):某一事物的具体表现)
0. 定义:模式类(Class):一类事物的代表;模式(Pattern):某一事物的具体表现;
如:数字0,1,2,3,4,5,6,7,8,9是模式类,用户任意手写的一个数字则是模式,是数字的具体化。
1. 假设有N个样本,每个样本n个特征,针对这些特诊,有一些描述:均值,方差,协方差,协方差矩阵、
"""
"""1. 模板匹配
0.
"""
|
str_no_string = 0
str_empty_string = 1
str_yes = 2
str_no = 3
str_blank_string = 4
str_error_string = 5
str_noone = 6
str_s0 = 7
str_blank_s1 = 8
str_reg1 = 9
str_s50_comma_s51 = 10
str_s50_and_s51 = 11
str_s52_comma_s51 = 12
str_s52_and_s51 = 13
str_s5_s_party = 14
str_given_by_s1_at_s2 = 15
str_given_by_s1_in_wilderness = 16
str_s7_raiders = 17
str_bandits_eliminated_by_another = 18
str_msg_battle_won = 19
str_tutorial_map1 = 20
str_change_color_1 = 21
str_change_color_2 = 22
str_change_background = 23
str_change_flag_type = 24
str_change_map_flag_type = 25
str_randomize = 26
str_sample_banner = 27
str_sample_map_banner = 28
str_number_of_charges = 29
str_change_charge_1 = 30
str_change_charge_1_color = 31
str_change_charge_2 = 32
str_change_charge_2_color = 33
str_change_charge_3 = 34
str_change_charge_3_color = 35
str_change_charge_4 = 36
str_change_charge_4_color = 37
str_change_charge_position = 38
str_choose_position = 39
str_choose_charge = 40
str_choose_background = 41
str_choose_flag_type = 42
str_choose_map_flag_type = 43
str_choose_color = 44
str_accept = 45
str_charge_no_1 = 46
str_charge_no_2 = 47
str_charge_no_3 = 48
str_charge_no_4 = 49
str_change = 50
str_color_no_1 = 51
str_color_no_2 = 52
str_charge = 53
str_color = 54
str_flip_horizontal = 55
str_flip_vertical = 56
str_hold_fire = 57
str_blunt_hold_fire = 58
str_tutorial_ammo_refilled = 59
str_tutorial_failed = 60
str_tutorial_1_msg_1 = 61
str_tutorial_1_msg_2 = 62
str_tutorial_1_msg_3 = 63
str_tutorial_1_msg_4 = 64
str_tutorial_1_msg_5 = 65
str_tutorial_1_msg_6 = 66
str_tutorial_2_msg_1 = 67
str_tutorial_2_msg_2 = 68
str_tutorial_2_msg_3 = 69
str_tutorial_2_msg_4 = 70
str_tutorial_2_msg_5 = 71
str_tutorial_2_msg_6 = 72
str_tutorial_2_msg_7 = 73
str_tutorial_2_msg_8 = 74
str_tutorial_2_msg_9 = 75
str_tutorial_3_msg_1 = 76
str_tutorial_3_msg_2 = 77
str_tutorial_3_msg_3 = 78
str_tutorial_3_msg_4 = 79
str_tutorial_3_msg_5 = 80
str_tutorial_3_msg_6 = 81
str_tutorial_3_2_msg_1 = 82
str_tutorial_3_2_msg_2 = 83
str_tutorial_3_2_msg_3 = 84
str_tutorial_3_2_msg_4 = 85
str_tutorial_3_2_msg_5 = 86
str_tutorial_4_msg_1 = 87
str_tutorial_4_msg_2 = 88
str_tutorial_4_msg_3 = 89
str_tutorial_4_msg_4 = 90
str_tutorial_4_msg_5 = 91
str_tutorial_4_msg_6 = 92
str_tutorial_4_msg_7 = 93
str_tutorial_5_msg_1 = 94
str_tutorial_5_msg_2 = 95
str_tutorial_5_msg_3 = 96
str_tutorial_5_msg_4 = 97
str_tutorial_5_msg_5 = 98
str_tutorial_5_msg_6 = 99
str_trainer_help_1 = 100
str_trainer_help_2 = 101
str_custom_battle_1 = 102
str_custom_battle_2 = 103
str_custom_battle_3 = 104
str_custom_battle_4 = 105
str_custom_battle_5 = 106
str_finished = 107
str_delivered_damage = 108
str_archery_target_hit = 109
str_use_baggage_for_inventory = 110
str_cant_use_inventory_now = 111
str_cant_use_inventory_arena = 112
str_cant_use_inventory_disguised = 113
str_cant_use_inventory_tutorial = 114
str_1_denar = 115
str_reg1_denars = 116
str_january_reg1_reg2 = 117
str_february_reg1_reg2 = 118
str_march_reg1_reg2 = 119
str_april_reg1_reg2 = 120
str_may_reg1_reg2 = 121
str_june_reg1_reg2 = 122
str_july_reg1_reg2 = 123
str_august_reg1_reg2 = 124
str_september_reg1_reg2 = 125
str_october_reg1_reg2 = 126
str_november_reg1_reg2 = 127
str_december_reg1_reg2 = 128
str_town_nighttime = 129
str_door_locked = 130
str_castle_is_abondened = 131
str_town_is_abondened = 132
str_place_is_occupied_by_player = 133
str_place_is_occupied_by_enemy = 134
str_place_is_occupied_by_friendly = 135
str_do_you_want_to_retreat = 136
str_give_up_fight = 137
str_do_you_wish_to_leave_tutorial = 138
str_do_you_wish_to_surrender = 139
str_can_not_retreat = 140
str_s1_joined_battle_enemy = 141
str_s1_joined_battle_friend = 142
str_entrance_to_town_forbidden = 143
str_sneaking_to_town_impossible = 144
str_battle_won = 145
str_battle_lost = 146
str_attack_walls_success = 147
str_attack_walls_failure = 148
str_attack_walls_continue = 149
str_order_attack_success = 150
str_order_attack_failure = 151
str_order_attack_continue = 152
str_join_order_attack_success = 153
str_join_order_attack_failure = 154
str_join_order_attack_continue = 155
str_siege_defender_order_attack_success = 156
str_siege_defender_order_attack_failure = 157
str_siege_defender_order_attack_continue = 158
str_hero_taken_prisoner = 159
str_hero_freed = 160
str_center_captured = 161
str_troop_relation_increased = 162
str_troop_relation_detoriated = 163
str_faction_relation_increased = 164
str_faction_relation_detoriated = 165
str_party_gained_morale = 166
str_party_lost_morale = 167
str_other_party_gained_morale = 168
str_other_party_lost_morale = 169
str_qst_follow_spy_noticed_you = 170
str_father = 171
str_husband = 172
str_wife = 173
str_daughter = 174
str_mother = 175
str_son = 176
str_brother = 177
str_sister = 178
str_he = 179
str_she = 180
str_s3s_s2 = 181
str_s5_is_s51 = 182
str_s5_is_the_ruler_of_s51 = 183
str_s5_is_a_nobleman_of_s6 = 184
str_relation_mnus_100 = 185
str_relation_mnus_90 = 186
str_relation_mnus_80 = 187
str_relation_mnus_70 = 188
str_relation_mnus_60 = 189
str_relation_mnus_50 = 190
str_relation_mnus_40 = 191
str_relation_mnus_30 = 192
str_relation_mnus_20 = 193
str_relation_mnus_10 = 194
str_relation_plus_0 = 195
str_relation_plus_10 = 196
str_relation_plus_20 = 197
str_relation_plus_30 = 198
str_relation_plus_40 = 199
str_relation_plus_50 = 200
str_relation_plus_60 = 201
str_relation_plus_70 = 202
str_relation_plus_80 = 203
str_relation_plus_90 = 204
str_relation_mnus_100_ns = 205
str_relation_mnus_90_ns = 206
str_relation_mnus_80_ns = 207
str_relation_mnus_70_ns = 208
str_relation_mnus_60_ns = 209
str_relation_mnus_50_ns = 210
str_relation_mnus_40_ns = 211
str_relation_mnus_30_ns = 212
str_relation_mnus_20_ns = 213
str_relation_mnus_10_ns = 214
str_relation_plus_0_ns = 215
str_relation_plus_10_ns = 216
str_relation_plus_20_ns = 217
str_relation_plus_30_ns = 218
str_relation_plus_40_ns = 219
str_relation_plus_50_ns = 220
str_relation_plus_60_ns = 221
str_relation_plus_70_ns = 222
str_relation_plus_80_ns = 223
str_relation_plus_90_ns = 224
str_relation_reg1 = 225
str_center_relation_mnus_100 = 226
str_center_relation_mnus_90 = 227
str_center_relation_mnus_80 = 228
str_center_relation_mnus_70 = 229
str_center_relation_mnus_60 = 230
str_center_relation_mnus_50 = 231
str_center_relation_mnus_40 = 232
str_center_relation_mnus_30 = 233
str_center_relation_mnus_20 = 234
str_center_relation_mnus_10 = 235
str_center_relation_plus_0 = 236
str_center_relation_plus_10 = 237
str_center_relation_plus_20 = 238
str_center_relation_plus_30 = 239
str_center_relation_plus_40 = 240
str_center_relation_plus_50 = 241
str_center_relation_plus_60 = 242
str_center_relation_plus_70 = 243
str_center_relation_plus_80 = 244
str_center_relation_plus_90 = 245
str_town_prosperity_0 = 246
str_town_prosperity_10 = 247
str_town_prosperity_20 = 248
str_town_prosperity_30 = 249
str_town_prosperity_40 = 250
str_town_prosperity_50 = 251
str_town_prosperity_60 = 252
str_town_prosperity_70 = 253
str_town_prosperity_80 = 254
str_town_prosperity_90 = 255
str_town_prosperity_100 = 256
str_village_prosperity_0 = 257
str_village_prosperity_10 = 258
str_village_prosperity_20 = 259
str_village_prosperity_30 = 260
str_village_prosperity_40 = 261
str_village_prosperity_50 = 262
str_village_prosperity_60 = 263
str_village_prosperity_70 = 264
str_village_prosperity_80 = 265
str_village_prosperity_90 = 266
str_village_prosperity_100 = 267
str_town_alt_prosperity_0 = 268
str_town_alt_prosperity_20 = 269
str_town_alt_prosperity_40 = 270
str_town_alt_prosperity_60 = 271
str_town_alt_prosperity_80 = 272
str_village_alt_prosperity_0 = 273
str_village_alt_prosperity_20 = 274
str_village_alt_prosperity_40 = 275
str_village_alt_prosperity_60 = 276
str_village_alt_prosperity_80 = 277
str_oasis_village_alt_prosperity_0 = 278
str_oasis_village_alt_prosperity_20 = 279
str_oasis_village_alt_prosperity_40 = 280
str_oasis_village_alt_prosperity_60 = 281
str_oasis_village_alt_prosperity_80 = 282
str_acres_grain = 283
str_acres_orchard = 284
str_acres_oasis = 285
str_looms = 286
str_boats = 287
str_head_cattle = 288
str_head_sheep = 289
str_mills = 290
str_kilns = 291
str_pans = 292
str_deposits = 293
str_hives = 294
str_breweries = 295
str_presses = 296
str_smithies = 297
str_caravans = 298
str_traps = 299
str_gardens = 300
str_tanneries = 301
str_master_miller = 302
str_master_brewer = 303
str_master_presser = 304
str_master_smith = 305
str_master_tanner = 306
str_master_weaver = 307
str_master_dyer = 308
str_war_report_minus_4 = 309
str_war_report_minus_3 = 310
str_war_report_minus_2 = 311
str_war_report_minus_1 = 312
str_war_report_0 = 313
str_war_report_plus_1 = 314
str_war_report_plus_2 = 315
str_war_report_plus_3 = 316
str_war_report_plus_4 = 317
str_persuasion_summary_very_bad = 318
str_persuasion_summary_bad = 319
str_persuasion_summary_average = 320
str_persuasion_summary_good = 321
str_persuasion_summary_very_good = 322
str_secret_sign_1 = 323
str_secret_sign_2 = 324
str_secret_sign_3 = 325
str_secret_sign_4 = 326
str_countersign_1 = 327
str_countersign_2 = 328
str_countersign_3 = 329
str_countersign_4 = 330
str_name_1 = 331
str_name_2 = 332
str_name_3 = 333
str_name_4 = 334
str_name_5 = 335
str_name_6 = 336
str_name_7 = 337
str_name_8 = 338
str_name_9 = 339
str_name_10 = 340
str_name_11 = 341
str_name_12 = 342
str_name_13 = 343
str_name_14 = 344
str_name_15 = 345
str_name_16 = 346
str_name_17 = 347
str_name_18 = 348
str_name_19 = 349
str_name_20 = 350
str_name_21 = 351
str_name_22 = 352
str_name_23 = 353
str_name_24 = 354
str_name_25 = 355
str_surname_1 = 356
str_surname_2 = 357
str_surname_3 = 358
str_surname_4 = 359
str_surname_5 = 360
str_surname_6 = 361
str_surname_7 = 362
str_surname_8 = 363
str_surname_9 = 364
str_surname_10 = 365
str_surname_11 = 366
str_surname_12 = 367
str_surname_13 = 368
str_surname_14 = 369
str_surname_15 = 370
str_surname_16 = 371
str_surname_17 = 372
str_surname_18 = 373
str_surname_19 = 374
str_surname_20 = 375
str_surname_21 = 376
str_surname_22 = 377
str_surname_23 = 378
str_surname_24 = 379
str_surname_25 = 380
str_surname_26 = 381
str_surname_27 = 382
str_surname_28 = 383
str_surname_29 = 384
str_surname_30 = 385
str_surname_31 = 386
str_surname_32 = 387
str_surname_33 = 388
str_surname_34 = 389
str_surname_35 = 390
str_surname_36 = 391
str_surname_37 = 392
str_surname_38 = 393
str_surname_39 = 394
str_surname_40 = 395
str_surname_41 = 396
str_surnames_end = 397
str_number_of_troops_killed_reg1 = 398
str_number_of_troops_wounded_reg1 = 399
str_number_of_own_troops_killed_reg1 = 400
str_number_of_own_troops_wounded_reg1 = 401
str_retreat = 402
str_siege_continues = 403
str_casualty_display = 404
str_casualty_display_hp = 405
str_quest_log_updated = 406
str_banner_selection_text = 407
str_retirement_text_1 = 408
str_retirement_text_2 = 409
str_retirement_text_3 = 410
str_retirement_text_4 = 411
str_retirement_text_5 = 412
str_retirement_text_6 = 413
str_retirement_text_7 = 414
str_retirement_text_8 = 415
str_retirement_text_9 = 416
str_retirement_text_10 = 417
str_loot_village = 418
str_steal_from_villagers = 419
str_rob_caravan = 420
str_sell_slavery = 421
str_men_hungry = 422
str_men_unpaid = 423
str_excessive_casualties = 424
str_surrender = 425
str_flee_battle = 426
str_pay_bandits = 427
str_fail_quest = 428
str_squander_money = 429
str_murder_merchant = 430
str_round_up_serfs = 431
str_battle_fate_1 = 432
str_battle_fate_2 = 433
str_battle_fate_3 = 434
str_battle_fate_4 = 435
str_battle_fate_5 = 436
str_npc_morale_report = 437
str_happy = 438
str_content = 439
str_concerned = 440
str_not_happy = 441
str_miserable = 442
str_morale_reg1 = 443
str_bar_enthusiastic = 444
str_bar_content = 445
str_bar_weary = 446
str_bar_disgruntled = 447
str_bar_miserable = 448
str_here_plus_space = 449
str_npc1_intro = 450
str_npc2_intro = 451
str_npc3_intro = 452
str_npc4_intro = 453
str_npc5_intro = 454
str_npc6_intro = 455
str_npc7_intro = 456
str_npc8_intro = 457
str_npc9_intro = 458
str_npc10_intro = 459
str_npc11_intro = 460
str_npc12_intro = 461
str_npc13_intro = 462
str_npc14_intro = 463
str_npc15_intro = 464
str_npc16_intro = 465
str_npc17_intro = 466
str_npc1_intro_response_1 = 467
str_npc2_intro_response_1 = 468
str_npc3_intro_response_1 = 469
str_npc4_intro_response_1 = 470
str_npc5_intro_response_1 = 471
str_npc6_intro_response_1 = 472
str_npc7_intro_response_1 = 473
str_npc8_intro_response_1 = 474
str_npc9_intro_response_1 = 475
str_npc10_intro_response_1 = 476
str_npc11_intro_response_1 = 477
str_npc12_intro_response_1 = 478
str_npc13_intro_response_1 = 479
str_npc14_intro_response_1 = 480
str_npc15_intro_response_1 = 481
str_npc16_intro_response_1 = 482
str_npc17_intro_response_1 = 483
str_npc1_intro_response_2 = 484
str_npc2_intro_response_2 = 485
str_npc3_intro_response_2 = 486
str_npc4_intro_response_2 = 487
str_npc5_intro_response_2 = 488
str_npc6_intro_response_2 = 489
str_npc7_intro_response_2 = 490
str_npc8_intro_response_2 = 491
str_npc9_intro_response_2 = 492
str_npc10_intro_response_2 = 493
str_npc11_intro_response_2 = 494
str_npc12_intro_response_2 = 495
str_npc13_intro_response_2 = 496
str_npc14_intro_response_2 = 497
str_npc15_intro_response_2 = 498
str_npc16_intro_response_2 = 499
str_npc17_intro_response_2 = 500
str_npc1_backstory_a = 501
str_npc2_backstory_a = 502
str_npc3_backstory_a = 503
str_npc4_backstory_a = 504
str_npc5_backstory_a = 505
str_npc6_backstory_a = 506
str_npc7_backstory_a = 507
str_npc8_backstory_a = 508
str_npc9_backstory_a = 509
str_npc10_backstory_a = 510
str_npc11_backstory_a = 511
str_npc12_backstory_a = 512
str_npc13_backstory_a = 513
str_npc14_backstory_a = 514
str_npc15_backstory_a = 515
str_npc16_backstory_a = 516
str_npc17_backstory_a = 517
str_npc1_backstory_b = 518
str_npc2_backstory_b = 519
str_npc3_backstory_b = 520
str_npc4_backstory_b = 521
str_npc5_backstory_b = 522
str_npc6_backstory_b = 523
str_npc7_backstory_b = 524
str_npc8_backstory_b = 525
str_npc9_backstory_b = 526
str_npc10_backstory_b = 527
str_npc11_backstory_b = 528
str_npc12_backstory_b = 529
str_npc13_backstory_b = 530
str_npc14_backstory_b = 531
str_npc15_backstory_b = 532
str_npc16_backstory_b = 533
str_npc1_backstory_c = 534
str_npc2_backstory_c = 535
str_npc3_backstory_c = 536
str_npc4_backstory_c = 537
str_npc5_backstory_c = 538
str_npc6_backstory_c = 539
str_npc7_backstory_c = 540
str_npc8_backstory_c = 541
str_npc9_backstory_c = 542
str_npc10_backstory_c = 543
str_npc11_backstory_c = 544
str_npc12_backstory_c = 545
str_npc13_backstory_c = 546
str_npc14_backstory_c = 547
str_npc15_backstory_c = 548
str_npc16_backstory_c = 549
str_npc1_backstory_later = 550
str_npc2_backstory_later = 551
str_npc3_backstory_later = 552
str_npc4_backstory_later = 553
str_npc5_backstory_later = 554
str_npc6_backstory_later = 555
str_npc7_backstory_later = 556
str_npc8_backstory_later = 557
str_npc9_backstory_later = 558
str_npc10_backstory_later = 559
str_npc11_backstory_later = 560
str_npc12_backstory_later = 561
str_npc13_backstory_later = 562
str_npc14_backstory_later = 563
str_npc15_backstory_later = 564
str_npc16_backstory_later = 565
str_npc1_backstory_response_1 = 566
str_npc2_backstory_response_1 = 567
str_npc3_backstory_response_1 = 568
str_npc4_backstory_response_1 = 569
str_npc5_backstory_response_1 = 570
str_npc6_backstory_response_1 = 571
str_npc7_backstory_response_1 = 572
str_npc8_backstory_response_1 = 573
str_npc9_backstory_response_1 = 574
str_npc10_backstory_response_1 = 575
str_npc11_backstory_response_1 = 576
str_npc12_backstory_response_1 = 577
str_npc13_backstory_response_1 = 578
str_npc14_backstory_response_1 = 579
str_npc15_backstory_response_1 = 580
str_npc16_backstory_response_1 = 581
str_npc17_backstory_response_1 = 582
str_npc1_backstory_response_2 = 583
str_npc2_backstory_response_2 = 584
str_npc3_backstory_response_2 = 585
str_npc4_backstory_response_2 = 586
str_npc5_backstory_response_2 = 587
str_npc6_backstory_response_2 = 588
str_npc7_backstory_response_2 = 589
str_npc8_backstory_response_2 = 590
str_npc9_backstory_response_2 = 591
str_npc10_backstory_response_2 = 592
str_npc11_backstory_response_2 = 593
str_npc12_backstory_response_2 = 594
str_npc13_backstory_response_2 = 595
str_npc14_backstory_response_2 = 596
str_npc15_backstory_response_2 = 597
str_npc16_backstory_response_2 = 598
str_npc1_signup = 599
str_npc2_signup = 600
str_npc3_signup = 601
str_npc4_signup = 602
str_npc5_signup = 603
str_npc6_signup = 604
str_npc7_signup = 605
str_npc8_signup = 606
str_npc9_signup = 607
str_npc10_signup = 608
str_npc11_signup = 609
str_npc12_signup = 610
str_npc13_signup = 611
str_npc14_signup = 612
str_npc15_signup = 613
str_npc16_signup = 614
str_npc17_signup = 615
str_npc1_signup_2 = 616
str_npc2_signup_2 = 617
str_npc3_signup_2 = 618
str_npc4_signup_2 = 619
str_npc5_signup_2 = 620
str_npc6_signup_2 = 621
str_npc7_signup_2 = 622
str_npc8_signup_2 = 623
str_npc9_signup_2 = 624
str_npc10_signup_2 = 625
str_npc11_signup_2 = 626
str_npc12_signup_2 = 627
str_npc13_signup_2 = 628
str_npc14_signup_2 = 629
str_npc15_signup_2 = 630
str_npc16_signup_2 = 631
str_npc17_signup_2 = 632
str_npc1_signup_response_1 = 633
str_npc2_signup_response_1 = 634
str_npc3_signup_response_1 = 635
str_npc4_signup_response_1 = 636
str_npc5_signup_response_1 = 637
str_npc6_signup_response_1 = 638
str_npc7_signup_response_1 = 639
str_npc8_signup_response_1 = 640
str_npc9_signup_response_1 = 641
str_npc10_signup_response_1 = 642
str_npc11_signup_response_1 = 643
str_npc12_signup_response_1 = 644
str_npc13_signup_response_1 = 645
str_npc14_signup_response_1 = 646
str_npc15_signup_response_1 = 647
str_npc16_signup_response_1 = 648
str_npc17_signup_response_1 = 649
str_npc1_signup_response_2 = 650
str_npc2_signup_response_2 = 651
str_npc3_signup_response_2 = 652
str_npc4_signup_response_2 = 653
str_npc5_signup_response_2 = 654
str_npc6_signup_response_2 = 655
str_npc7_signup_response_2 = 656
str_npc8_signup_response_2 = 657
str_npc9_signup_response_2 = 658
str_npc10_signup_response_2 = 659
str_npc11_signup_response_2 = 660
str_npc12_signup_response_2 = 661
str_npc13_signup_response_2 = 662
str_npc14_signup_response_2 = 663
str_npc15_signup_response_2 = 664
str_npc16_signup_response_2 = 665
str_npc17_signup_response_2 = 666
# String IDs for the npc payment dialogs; one entry per companion, npc1..npc17.
str_npc1_payment = 667
str_npc2_payment = 668
str_npc3_payment = 669
str_npc4_payment = 670
str_npc5_payment = 671
str_npc6_payment = 672
str_npc7_payment = 673
str_npc8_payment = 674
str_npc9_payment = 675
str_npc10_payment = 676
str_npc11_payment = 677
str_npc12_payment = 678
str_npc13_payment = 679
str_npc14_payment = 680
str_npc15_payment = 681
str_npc16_payment = 682
# BUG FIX: the original assigned str_npc16_payment twice (682 then 683),
# leaving str_npc17_payment undefined even though every neighbouring group
# (signup, signup_2, signup_response, payment_response) includes npc17.
str_npc17_payment = 683
# String-table indices: per-companion dialog lines, one regular block of
# 16 (or 17) consecutive indices per topic.
str_npc1_payment_response = 684
str_npc2_payment_response = 685
str_npc3_payment_response = 686
str_npc4_payment_response = 687
str_npc5_payment_response = 688
str_npc6_payment_response = 689
str_npc7_payment_response = 690
str_npc8_payment_response = 691
str_npc9_payment_response = 692
str_npc10_payment_response = 693
str_npc11_payment_response = 694
str_npc12_payment_response = 695
str_npc13_payment_response = 696
str_npc14_payment_response = 697
str_npc15_payment_response = 698
str_npc16_payment_response = 699
str_npc17_payment_response = 700
# Morality complaints (primary and secondary).
str_npc1_morality_speech = 701
str_npc2_morality_speech = 702
str_npc3_morality_speech = 703
str_npc4_morality_speech = 704
str_npc5_morality_speech = 705
str_npc6_morality_speech = 706
str_npc7_morality_speech = 707
str_npc8_morality_speech = 708
str_npc9_morality_speech = 709
str_npc10_morality_speech = 710
str_npc11_morality_speech = 711
str_npc12_morality_speech = 712
str_npc13_morality_speech = 713
str_npc14_morality_speech = 714
str_npc15_morality_speech = 715
str_npc16_morality_speech = 716
str_npc1_2ary_morality_speech = 717
str_npc2_2ary_morality_speech = 718
str_npc3_2ary_morality_speech = 719
str_npc4_2ary_morality_speech = 720
str_npc5_2ary_morality_speech = 721
str_npc6_2ary_morality_speech = 722
str_npc7_2ary_morality_speech = 723
str_npc8_2ary_morality_speech = 724
str_npc9_2ary_morality_speech = 725
str_npc10_2ary_morality_speech = 726
str_npc11_2ary_morality_speech = 727
str_npc12_2ary_morality_speech = 728
str_npc13_2ary_morality_speech = 729
str_npc14_2ary_morality_speech = 730
str_npc15_2ary_morality_speech = 731
str_npc16_2ary_morality_speech = 732
# Personality clashes/matches between companions (two rounds, two parts each).
str_npc1_personalityclash_speech = 733
str_npc2_personalityclash_speech = 734
str_npc3_personalityclash_speech = 735
str_npc4_personalityclash_speech = 736
str_npc5_personalityclash_speech = 737
str_npc6_personalityclash_speech = 738
str_npc7_personalityclash_speech = 739
str_npc8_personalityclash_speech = 740
str_npc9_personalityclash_speech = 741
str_npc10_personalityclash_speech = 742
str_npc11_personalityclash_speech = 743
str_npc12_personalityclash_speech = 744
str_npc13_personalityclash_speech = 745
str_npc14_personalityclash_speech = 746
str_npc15_personalityclash_speech = 747
str_npc16_personalityclash_speech = 748
str_npc1_personalityclash_speech_b = 749
str_npc2_personalityclash_speech_b = 750
str_npc3_personalityclash_speech_b = 751
str_npc4_personalityclash_speech_b = 752
str_npc5_personalityclash_speech_b = 753
str_npc6_personalityclash_speech_b = 754
str_npc7_personalityclash_speech_b = 755
str_npc8_personalityclash_speech_b = 756
str_npc9_personalityclash_speech_b = 757
str_npc10_personalityclash_speech_b = 758
str_npc11_personalityclash_speech_b = 759
str_npc12_personalityclash_speech_b = 760
str_npc13_personalityclash_speech_b = 761
str_npc14_personalityclash_speech_b = 762
str_npc15_personalityclash_speech_b = 763
str_npc16_personalityclash_speech_b = 764
str_npc1_personalityclash2_speech = 765
str_npc2_personalityclash2_speech = 766
str_npc3_personalityclash2_speech = 767
str_npc4_personalityclash2_speech = 768
str_npc5_personalityclash2_speech = 769
str_npc6_personalityclash2_speech = 770
str_npc7_personalityclash2_speech = 771
str_npc8_personalityclash2_speech = 772
str_npc9_personalityclash2_speech = 773
str_npc10_personalityclash2_speech = 774
str_npc11_personalityclash2_speech = 775
str_npc12_personalityclash2_speech = 776
str_npc13_personalityclash2_speech = 777
str_npc14_personalityclash2_speech = 778
str_npc15_personalityclash2_speech = 779
str_npc16_personalityclash2_speech = 780
str_npc1_personalityclash2_speech_b = 781
str_npc2_personalityclash2_speech_b = 782
str_npc3_personalityclash2_speech_b = 783
str_npc4_personalityclash2_speech_b = 784
str_npc5_personalityclash2_speech_b = 785
str_npc6_personalityclash2_speech_b = 786
str_npc7_personalityclash2_speech_b = 787
str_npc8_personalityclash2_speech_b = 788
str_npc9_personalityclash2_speech_b = 789
str_npc10_personalityclash2_speech_b = 790
str_npc11_personalityclash2_speech_b = 791
str_npc12_personalityclash2_speech_b = 792
str_npc13_personalityclash2_speech_b = 793
str_npc14_personalityclash2_speech_b = 794
str_npc15_personalityclash2_speech_b = 795
str_npc16_personalityclash2_speech_b = 796
str_npc1_personalitymatch_speech = 797
str_npc2_personalitymatch_speech = 798
str_npc3_personalitymatch_speech = 799
str_npc4_personalitymatch_speech = 800
str_npc5_personalitymatch_speech = 801
str_npc6_personalitymatch_speech = 802
str_npc7_personalitymatch_speech = 803
str_npc8_personalitymatch_speech = 804
str_npc9_personalitymatch_speech = 805
str_npc10_personalitymatch_speech = 806
str_npc11_personalitymatch_speech = 807
str_npc12_personalitymatch_speech = 808
str_npc13_personalitymatch_speech = 809
str_npc14_personalitymatch_speech = 810
str_npc15_personalitymatch_speech = 811
str_npc16_personalitymatch_speech = 812
str_npc1_personalitymatch_speech_b = 813
str_npc2_personalitymatch_speech_b = 814
str_npc3_personalitymatch_speech_b = 815
str_npc4_personalitymatch_speech_b = 816
str_npc5_personalitymatch_speech_b = 817
str_npc6_personalitymatch_speech_b = 818
str_npc7_personalitymatch_speech_b = 819
str_npc8_personalitymatch_speech_b = 820
str_npc9_personalitymatch_speech_b = 821
str_npc10_personalitymatch_speech_b = 822
str_npc11_personalitymatch_speech_b = 823
str_npc12_personalitymatch_speech_b = 824
str_npc13_personalitymatch_speech_b = 825
str_npc14_personalitymatch_speech_b = 826
str_npc15_personalitymatch_speech_b = 827
str_npc16_personalitymatch_speech_b = 828
# Leaving / rejoining the party.
str_npc1_retirement_speech = 829
str_npc2_retirement_speech = 830
str_npc3_retirement_speech = 831
str_npc4_retirement_speech = 832
str_npc5_retirement_speech = 833
str_npc6_retirement_speech = 834
str_npc7_retirement_speech = 835
str_npc8_retirement_speech = 836
str_npc9_retirement_speech = 837
str_npc10_retirement_speech = 838
str_npc11_retirement_speech = 839
str_npc12_retirement_speech = 840
str_npc13_retirement_speech = 841
str_npc14_retirement_speech = 842
str_npc15_retirement_speech = 843
str_npc16_retirement_speech = 844
str_npc1_rehire_speech = 845
str_npc2_rehire_speech = 846
str_npc3_rehire_speech = 847
str_npc4_rehire_speech = 848
str_npc5_rehire_speech = 849
str_npc6_rehire_speech = 850
str_npc7_rehire_speech = 851
str_npc8_rehire_speech = 852
str_npc9_rehire_speech = 853
str_npc10_rehire_speech = 854
str_npc11_rehire_speech = 855
str_npc12_rehire_speech = 856
str_npc13_rehire_speech = 857
str_npc14_rehire_speech = 858
str_npc15_rehire_speech = 859
str_npc16_rehire_speech = 860
# Companion home-town conversation pieces.
str_npc1_home_intro = 861
str_npc2_home_intro = 862
str_npc3_home_intro = 863
str_npc4_home_intro = 864
str_npc5_home_intro = 865
str_npc6_home_intro = 866
str_npc7_home_intro = 867
str_npc8_home_intro = 868
str_npc9_home_intro = 869
str_npc10_home_intro = 870
str_npc11_home_intro = 871
str_npc12_home_intro = 872
str_npc13_home_intro = 873
str_npc14_home_intro = 874
str_npc15_home_intro = 875
str_npc16_home_intro = 876
str_npc1_home_description = 877
str_npc2_home_description = 878
str_npc3_home_description = 879
str_npc4_home_description = 880
str_npc5_home_description = 881
str_npc6_home_description = 882
str_npc7_home_description = 883
str_npc8_home_description = 884
str_npc9_home_description = 885
str_npc10_home_description = 886
str_npc11_home_description = 887
str_npc12_home_description = 888
str_npc13_home_description = 889
str_npc14_home_description = 890
str_npc15_home_description = 891
str_npc16_home_description = 892
str_npc1_home_description_2 = 893
str_npc2_home_description_2 = 894
str_npc3_home_description_2 = 895
str_npc4_home_description_2 = 896
str_npc5_home_description_2 = 897
str_npc6_home_description_2 = 898
str_npc7_home_description_2 = 899
str_npc8_home_description_2 = 900
str_npc9_home_description_2 = 901
str_npc10_home_description_2 = 902
str_npc11_home_description_2 = 903
str_npc12_home_description_2 = 904
str_npc13_home_description_2 = 905
str_npc14_home_description_2 = 906
str_npc15_home_description_2 = 907
str_npc16_home_description_2 = 908
str_npc1_home_recap = 909
str_npc2_home_recap = 910
str_npc3_home_recap = 911
str_npc4_home_recap = 912
str_npc5_home_recap = 913
str_npc6_home_recap = 914
str_npc7_home_recap = 915
str_npc8_home_recap = 916
str_npc9_home_recap = 917
str_npc10_home_recap = 918
str_npc11_home_recap = 919
str_npc12_home_recap = 920
str_npc13_home_recap = 921
str_npc14_home_recap = 922
str_npc15_home_recap = 923
str_npc16_home_recap = 924
str_npc1_honorific = 925
str_npc2_honorific = 926
str_npc3_honorific = 927
str_npc4_honorific = 928
str_npc5_honorific = 929
str_npc6_honorific = 930
str_npc7_honorific = 931
str_npc8_honorific = 932
str_npc9_honorific = 933
str_npc10_honorific = 934
str_npc11_honorific = 935
str_npc12_honorific = 936
str_npc13_honorific = 937
str_npc14_honorific = 938
str_npc15_honorific = 939
str_npc16_honorific = 940
# Support-the-claimant dialog chain (stages 1, 2, 2a, 2b).
str_npc1_kingsupport_1 = 941
str_npc2_kingsupport_1 = 942
str_npc3_kingsupport_1 = 943
str_npc4_kingsupport_1 = 944
str_npc5_kingsupport_1 = 945
str_npc6_kingsupport_1 = 946
str_npc7_kingsupport_1 = 947
str_npc8_kingsupport_1 = 948
str_npc9_kingsupport_1 = 949
str_npc10_kingsupport_1 = 950
str_npc11_kingsupport_1 = 951
str_npc12_kingsupport_1 = 952
str_npc13_kingsupport_1 = 953
str_npc14_kingsupport_1 = 954
str_npc15_kingsupport_1 = 955
str_npc16_kingsupport_1 = 956
str_npc1_kingsupport_2 = 957
str_npc2_kingsupport_2 = 958
str_npc3_kingsupport_2 = 959
str_npc4_kingsupport_2 = 960
str_npc5_kingsupport_2 = 961
str_npc6_kingsupport_2 = 962
str_npc7_kingsupport_2 = 963
str_npc8_kingsupport_2 = 964
str_npc9_kingsupport_2 = 965
str_npc10_kingsupport_2 = 966
str_npc11_kingsupport_2 = 967
str_npc12_kingsupport_2 = 968
str_npc13_kingsupport_2 = 969
str_npc14_kingsupport_2 = 970
str_npc15_kingsupport_2 = 971
str_npc16_kingsupport_2 = 972
str_npc1_kingsupport_2a = 973
str_npc2_kingsupport_2a = 974
str_npc3_kingsupport_2a = 975
str_npc4_kingsupport_2a = 976
str_npc5_kingsupport_2a = 977
str_npc6_kingsupport_2a = 978
str_npc7_kingsupport_2a = 979
str_npc8_kingsupport_2a = 980
str_npc9_kingsupport_2a = 981
str_npc10_kingsupport_2a = 982
str_npc11_kingsupport_2a = 983
str_npc12_kingsupport_2a = 984
str_npc13_kingsupport_2a = 985
str_npc14_kingsupport_2a = 986
str_npc15_kingsupport_2a = 987
str_npc16_kingsupport_2a = 988
str_npc1_kingsupport_2b = 989
str_npc2_kingsupport_2b = 990
str_npc3_kingsupport_2b = 991
str_npc4_kingsupport_2b = 992
str_npc5_kingsupport_2b = 993
str_npc6_kingsupport_2b = 994
str_npc7_kingsupport_2b = 995
str_npc8_kingsupport_2b = 996
str_npc9_kingsupport_2b = 997
str_npc10_kingsupport_2b = 998
str_npc11_kingsupport_2b = 999
str_npc12_kingsupport_2b = 1000
str_npc13_kingsupport_2b = 1001
str_npc14_kingsupport_2b = 1002
str_npc15_kingsupport_2b = 1003
str_npc16_kingsupport_2b = 1004
# String-table indices: kingsupport stage-3 lines, npc1-npc16 = 1005-1020.
str_npc1_kingsupport_3 = 1005
# BUGFIX: this line originally read `str_npc2_kingsupport_2 = 1006`, which
# silently overwrote the real str_npc2_kingsupport_2 (index 958) declared
# earlier; index 1006 is the npc2 slot of the kingsupport_3 series.
str_npc2_kingsupport_3 = 1006
str_npc3_kingsupport_3 = 1007
str_npc4_kingsupport_3 = 1008
str_npc5_kingsupport_3 = 1009
str_npc6_kingsupport_3 = 1010
str_npc7_kingsupport_3 = 1011
str_npc8_kingsupport_3 = 1012
str_npc9_kingsupport_3 = 1013
str_npc10_kingsupport_3 = 1014
str_npc11_kingsupport_3 = 1015
str_npc12_kingsupport_3 = 1016
str_npc13_kingsupport_3 = 1017
str_npc14_kingsupport_3 = 1018
str_npc15_kingsupport_3 = 1019
str_npc16_kingsupport_3 = 1020
# String-table indices: kingsupport objections, intel missions, fief
# acceptance, and woman-to-woman lines — regular blocks of 16.
str_npc1_kingsupport_objection = 1021
str_npc2_kingsupport_objection = 1022
str_npc3_kingsupport_objection = 1023
str_npc4_kingsupport_objection = 1024
str_npc5_kingsupport_objection = 1025
str_npc6_kingsupport_objection = 1026
str_npc7_kingsupport_objection = 1027
str_npc8_kingsupport_objection = 1028
str_npc9_kingsupport_objection = 1029
str_npc10_kingsupport_objection = 1030
str_npc11_kingsupport_objection = 1031
str_npc12_kingsupport_objection = 1032
str_npc13_kingsupport_objection = 1033
str_npc14_kingsupport_objection = 1034
str_npc15_kingsupport_objection = 1035
str_npc16_kingsupport_objection = 1036
str_npc1_intel_mission = 1037
str_npc2_intel_mission = 1038
str_npc3_intel_mission = 1039
str_npc4_intel_mission = 1040
str_npc5_intel_mission = 1041
str_npc6_intel_mission = 1042
str_npc7_intel_mission = 1043
str_npc8_intel_mission = 1044
str_npc9_intel_mission = 1045
str_npc10_intel_mission = 1046
str_npc11_intel_mission = 1047
str_npc12_intel_mission = 1048
str_npc13_intel_mission = 1049
str_npc14_intel_mission = 1050
str_npc15_intel_mission = 1051
str_npc16_intel_mission = 1052
str_npc1_fief_acceptance = 1053
str_npc2_fief_acceptance = 1054
str_npc3_fief_acceptance = 1055
str_npc4_fief_acceptance = 1056
str_npc5_fief_acceptance = 1057
str_npc6_fief_acceptance = 1058
str_npc7_fief_acceptance = 1059
str_npc8_fief_acceptance = 1060
str_npc9_fief_acceptance = 1061
str_npc10_fief_acceptance = 1062
str_npc11_fief_acceptance = 1063
str_npc12_fief_acceptance = 1064
str_npc13_fief_acceptance = 1065
str_npc14_fief_acceptance = 1066
str_npc15_fief_acceptance = 1067
str_npc16_fief_acceptance = 1068
str_npc1_woman_to_woman = 1069
str_npc2_woman_to_woman = 1070
str_npc3_woman_to_woman = 1071
str_npc4_woman_to_woman = 1072
str_npc5_woman_to_woman = 1073
str_npc6_woman_to_woman = 1074
str_npc7_woman_to_woman = 1075
str_npc8_woman_to_woman = 1076
str_npc9_woman_to_woman = 1077
str_npc10_woman_to_woman = 1078
str_npc11_woman_to_woman = 1079
str_npc12_woman_to_woman = 1080
str_npc13_woman_to_woman = 1081
str_npc14_woman_to_woman = 1082
str_npc15_woman_to_woman = 1083
str_npc16_woman_to_woman = 1084
# String-table indices: companion turn-against lines, npc1-npc17 = 1085-1101.
str_npc1_turn_against = 1085
str_npc2_turn_against = 1086
str_npc3_turn_against = 1087
str_npc4_turn_against = 1088
# BUGFIX: this line originally re-declared str_npc4_turn_against, silently
# shadowing index 1088 above; index 1089 is the (otherwise missing) npc5 slot.
str_npc5_turn_against = 1089
str_npc6_turn_against = 1090
str_npc7_turn_against = 1091
str_npc8_turn_against = 1092
str_npc9_turn_against = 1093
str_npc10_turn_against = 1094
str_npc11_turn_against = 1095
str_npc12_turn_against = 1096
str_npc13_turn_against = 1097
str_npc14_turn_against = 1098
str_npc15_turn_against = 1099
str_npc16_turn_against = 1100
str_npc17_turn_against = 1101
# String-table indices: lord introduction comments, keyed by fame/rank and
# by the lord's personality archetype.
str_comment_intro_liege_affiliated = 1102
str_comment_intro_famous_liege = 1103
str_comment_intro_famous_martial = 1104
str_comment_intro_famous_badtempered = 1105
str_comment_intro_famous_pitiless = 1106
str_comment_intro_famous_cunning = 1107
str_comment_intro_famous_sadistic = 1108
str_comment_intro_famous_goodnatured = 1109
str_comment_intro_famous_upstanding = 1110
str_comment_intro_noble_liege = 1111
str_comment_intro_noble_martial = 1112
str_comment_intro_noble_badtempered = 1113
str_comment_intro_noble_pitiless = 1114
str_comment_intro_noble_cunning = 1115
str_comment_intro_noble_sadistic = 1116
str_comment_intro_noble_goodnatured = 1117
str_comment_intro_noble_upstanding = 1118
str_comment_intro_common_liege = 1119
str_comment_intro_common_martial = 1120
str_comment_intro_common_badtempered = 1121
str_comment_intro_common_pitiless = 1122
str_comment_intro_common_cunning = 1123
str_comment_intro_common_sadistic = 1124
str_comment_intro_common_goodnatured = 1125
str_comment_intro_common_upstanding = 1126
str_comment_intro_female_famous_liege = 1127
str_comment_intro_female_famous_martial = 1128
str_comment_intro_female_famous_badtempered = 1129
str_comment_intro_female_famous_pitiless = 1130
str_comment_intro_female_famous_cunning = 1131
str_comment_intro_female_famous_sadistic = 1132
str_comment_intro_female_famous_goodnatured = 1133
str_comment_intro_female_famous_upstanding = 1134
str_comment_intro_female_noble_liege = 1135
str_comment_intro_female_noble_martial = 1136
str_comment_intro_female_noble_badtempered = 1137
str_comment_intro_female_noble_pitiless = 1138
str_comment_intro_female_noble_cunning = 1139
str_comment_intro_female_noble_sadistic = 1140
str_comment_intro_female_noble_goodnatured = 1141
str_comment_intro_female_noble_upstanding = 1142
# NOTE(review): the word order of the "_admiring" names below is
# inconsistent (trait-first vs admiring-first) but matches the generated
# string ids, so the names are kept as-is.
str_comment_intro_female_admiring_liege = 1143
str_comment_intro_female_admiring_martial = 1144
str_comment_intro_female_badtempered_admiring = 1145
str_comment_intro_female_pitiless_admiring = 1146
str_comment_intro_female_cunning_admiring = 1147
str_comment_intro_female_sadistic_admiring = 1148
str_comment_intro_female_admiring_goodnatured = 1149
str_comment_intro_female_admiring_upstanding = 1150
str_comment_intro_female_common_liege = 1151
str_comment_intro_female_common_martial = 1152
str_comment_intro_female_common_badtempered = 1153
str_comment_intro_female_common_pitiless = 1154
str_comment_intro_female_common_cunning = 1155
str_comment_intro_female_common_sadistic = 1156
str_comment_intro_female_common_goodnatured = 1157
str_comment_intro_female_common_upstanding = 1158
# Player rejoinders to the intro comments above.
str_rejoinder_intro_female_common_badtempered = 1159
str_rejoinder_intro_female_noble_sadistic = 1160
str_rejoinder_intro_female_common_sadistic = 1161
str_rejoinder_intro_female_noble_pitiless = 1162
str_rejoinder_intro_female_common_pitiless = 1163
str_rejoinder_intro_noble_sadistic = 1164
str_rejoinder_intro_female_pitiless_admiring = 1165
str_rejoinder_intro_female_common_upstanding = 1166
str_rejoinder_intro_female_noble_upstanding = 1167
str_rejoinder_intro_female_common_martial = 1168
str_rejoinder_intro_female_sadistic_admiring = 1169
str_rejoinder_intro_female_badtempered_admiring = 1170
# Lord reaction comments to player deeds, varied by relation and archetype.
str_comment_you_raided_my_village_enemy_benevolent = 1171
str_comment_you_raided_my_village_enemy_spiteful = 1172
str_comment_you_raided_my_village_enemy_coldblooded = 1173
str_comment_you_raided_my_village_enemy = 1174
str_comment_you_raided_my_village_unfriendly_spiteful = 1175
str_comment_you_raided_my_village_friendly = 1176
str_comment_you_raided_my_village_default = 1177
str_comment_you_stole_cattles_from_my_village_enemy_benevolent = 1178
str_comment_you_stole_cattles_from_my_village_enemy_spiteful = 1179
str_comment_you_stole_cattles_from_my_village_enemy_coldblooded = 1180
str_comment_you_stole_cattles_from_my_village_enemy = 1181
str_comment_you_stole_cattles_from_my_village_unfriendly_spiteful = 1182
str_comment_you_stole_cattles_from_my_village_friendly = 1183
str_comment_you_stole_cattles_from_my_village_default = 1184
str_comment_you_robbed_my_village_enemy_coldblooded = 1185
str_comment_you_robbed_my_village_enemy = 1186
str_comment_you_robbed_my_village_friendly_spiteful = 1187
str_comment_you_robbed_my_village_friendly = 1188
str_comment_you_robbed_my_village_default = 1189
str_comment_you_accosted_my_caravan_enemy = 1190
str_comment_you_accosted_my_caravan_default = 1191
str_comment_you_helped_villagers_benevolent = 1192
str_comment_you_helped_villagers_friendly_cruel = 1193
str_comment_you_helped_villagers_friendly = 1194
str_comment_you_helped_villagers_unfriendly_spiteful = 1195
str_comment_you_helped_villagers_cruel = 1196
str_comment_you_helped_villagers_default = 1197
str_comment_you_give_castle_in_my_control = 1198
str_comment_you_captured_a_castle_allied_friendly = 1199
str_comment_you_captured_a_castle_allied_spiteful = 1200
str_comment_you_captured_a_castle_allied_unfriendly_spiteful = 1201
str_comment_you_captured_a_castle_allied_unfriendly = 1202
str_comment_you_captured_a_castle_allied = 1203
str_comment_you_captured_my_castle_enemy_spiteful = 1204
str_comment_you_captured_my_castle_enemy_chivalrous = 1205
str_comment_you_captured_my_castle_enemy = 1206
str_comment_we_defeated_a_lord_unfriendly_spiteful = 1207
str_comment_we_defeated_a_lord_unfriendly = 1208
str_comment_we_defeated_a_lord_cruel = 1209
str_comment_we_defeated_a_lord_quarrelsome = 1210
str_comment_we_defeated_a_lord_upstanding = 1211
str_comment_we_defeated_a_lord_default = 1212
str_comment_we_fought_in_siege_unfriendly_spiteful = 1213
str_comment_we_fought_in_siege_unfriendly = 1214
str_comment_we_fought_in_siege_cruel = 1215
str_comment_we_fought_in_siege_quarrelsome = 1216
str_comment_we_fought_in_siege_upstanding = 1217
str_comment_we_fought_in_siege_default = 1218
str_comment_we_fought_in_major_battle_unfriendly_spiteful = 1219
str_comment_we_fought_in_major_battle_unfriendly = 1220
str_comment_we_fought_in_major_battle_cruel = 1221
str_comment_we_fought_in_major_battle_quarrelsome = 1222
str_comment_we_fought_in_major_battle_upstanding = 1223
str_comment_we_fought_in_major_battle_default = 1224
str_comment_you_defeated_a_lord_allied_liege = 1225
str_comment_you_defeated_a_lord_allied_unfriendly_spiteful = 1226
str_comment_you_defeated_a_lord_allied_spiteful = 1227
str_comment_you_defeated_a_lord_allied_unfriendly_chivalrous = 1228
str_comment_you_defeated_a_lord_allied = 1229
str_comment_you_defeated_me_enemy_chivalrous = 1230
str_comment_you_defeated_me_enemy_spiteful = 1231
str_comment_you_defeated_me_enemy = 1232
str_comment_i_defeated_you_enemy_spiteful = 1233
str_comment_i_defeated_you_enemy_chivalrous = 1234
str_comment_i_defeated_you_enemy_benevolent = 1235
str_comment_i_defeated_you_enemy_coldblooded = 1236
str_comment_i_defeated_you_enemy = 1237
str_comment_we_were_defeated_unfriendly_spiteful = 1238
str_comment_we_were_defeated_unfriendly = 1239
str_comment_we_were_defeated_cruel = 1240
str_comment_we_were_defeated_default = 1241
str_comment_you_were_defeated_allied_friendly_spiteful = 1242
str_comment_you_were_defeated_allied_unfriendly_cruel = 1243
str_comment_you_were_defeated_allied_spiteful = 1244
str_comment_you_were_defeated_allied_pitiless = 1245
str_comment_you_were_defeated_allied_unfriendly_upstanding = 1246
str_comment_you_were_defeated_allied_unfriendly = 1247
str_comment_you_were_defeated_allied = 1248
str_comment_you_helped_my_ally_unfriendly_chivalrous = 1249
str_comment_you_helped_my_ally_unfriendly = 1250
str_comment_you_helped_my_ally_liege = 1251
str_comment_you_helped_my_ally_unfriendly_spiteful = 1252
str_comment_you_helped_my_ally_spiteful = 1253
str_comment_you_helped_my_ally_chivalrous = 1254
str_comment_you_helped_my_ally_default = 1255
# NOTE(review): the next two names duplicate indices 1247/1248 above and,
# because later module-level assignments win, references resolve to
# 1256/1257. The intended distinct names for these two strings cannot be
# determined from this file alone — verify against the source
# module_strings entry before renaming.
str_comment_you_were_defeated_allied_unfriendly = 1256
str_comment_you_were_defeated_allied = 1257
str_comment_you_abandoned_us_unfriendly_spiteful = 1258
str_comment_you_abandoned_us_unfriendly_pitiless = 1259
str_comment_you_abandoned_us_spiteful = 1260
str_comment_you_abandoned_us_chivalrous = 1261
str_comment_you_abandoned_us_benefitofdoubt = 1262
str_comment_you_abandoned_us_default = 1263
str_comment_you_ran_from_me_enemy_spiteful = 1264
str_comment_you_ran_from_me_enemy_chivalrous = 1265
str_comment_you_ran_from_me_enemy_benevolent = 1266
str_comment_you_ran_from_me_enemy_coldblooded = 1267
str_comment_you_ran_from_me_enemy = 1268
str_comment_you_ran_from_foe_allied_chivalrous = 1269
str_comment_you_ran_from_foe_allied_upstanding = 1270
str_comment_you_ran_from_foe_allied_spiteful = 1271
str_comment_you_defeated_my_friend_enemy_pragmatic = 1272
str_comment_you_defeated_my_friend_enemy_chivalrous = 1273
str_comment_you_defeated_my_friend_enemy_spiteful = 1274
str_comment_you_defeated_my_friend_enemy = 1275
str_comment_you_captured_a_lord_allied_friendly_spiteful = 1276
str_comment_you_captured_a_lord_allied_unfriendly_spiteful = 1277
str_comment_you_captured_a_lord_allied_chivalrous = 1278
str_comment_you_captured_a_lord_allied = 1279
str_comment_you_let_go_a_lord_allied_chivalrous = 1280
str_comment_you_let_go_a_lord_allied_upstanding = 1281
str_comment_you_let_go_a_lord_allied_coldblooded = 1282
str_comment_you_let_go_a_lord_allied_unfriendly_spiteful = 1283
str_comment_you_let_go_a_lord_allied = 1284
str_comment_you_let_me_go_spiteful = 1285
str_comment_you_let_me_go_enemy_chivalrous = 1286
str_comment_you_let_me_go_enemy_coldblooded = 1287
str_comment_you_let_me_go_enemy = 1288
str_comment_you_let_me_go_default = 1289
# Allegiance / political comments.
str_comment_pledged_allegiance_allied_martial_unfriendly = 1290
str_comment_pledged_allegiance_allied_martial = 1291
str_comment_pledged_allegiance_allied_quarrelsome_unfriendly = 1292
str_comment_pledged_allegiance_allied_quarrelsome = 1293
str_comment_pledged_allegiance_allied_selfrighteous_unfriendly = 1294
str_comment_pledged_allegiance_allied_selfrighteous = 1295
str_comment_pledged_allegiance_allied_cunning_unfriendly = 1296
str_comment_pledged_allegiance_allied_cunning = 1297
str_comment_pledged_allegiance_allied_debauched_unfriendly = 1298
str_comment_pledged_allegiance_allied_debauched = 1299
str_comment_pledged_allegiance_allied_goodnatured_unfriendly = 1300
str_comment_pledged_allegiance_allied_goodnatured = 1301
str_comment_pledged_allegiance_allied_upstanding_unfriendly = 1302
str_comment_pledged_allegiance_allied_upstanding = 1303
str_comment_our_king_granted_you_a_fief_allied_friendly_cruel = 1304
str_comment_our_king_granted_you_a_fief_allied_friendly_cynical = 1305
str_comment_our_king_granted_you_a_fief_allied_friendly = 1306
str_comment_our_king_granted_you_a_fief_allied_unfriendly_upstanding = 1307
str_comment_our_king_granted_you_a_fief_allied_unfriendly_spiteful = 1308
str_comment_our_king_granted_you_a_fief_allied_spiteful = 1309
str_comment_our_king_granted_you_a_fief_allied = 1310
str_comment_you_renounced_your_alliegance_enemy_friendly = 1311
str_comment_you_renounced_your_alliegance_friendly = 1312
str_comment_you_renounced_your_alliegance_unfriendly_spiteful = 1313
str_comment_you_renounced_your_alliegance_unfriendly_moralizing = 1314
str_comment_you_renounced_your_alliegance_enemy = 1315
str_comment_you_renounced_your_alliegance_default = 1316
str_comment_you_claimed_the_throne_1_player_liege = 1317
str_comment_you_claimed_the_throne_2_player_liege = 1318
str_comment_lord_intervened_against_me = 1319
str_comment_i_protested_marshall_appointment = 1320
str_comment_i_blamed_defeat = 1321
str_comment_i_was_entitled_to_fief = 1322
str_comment_i_quarreled_with_troop_over_woman = 1323
str_comment_i_quarreled_with_you_over_woman_default = 1324
str_comment_i_quarreled_with_you_over_woman_derisive = 1325
str_comment_player_suggestion_succeeded = 1326
str_comment_player_suggestion_failed = 1327
str_comment_you_enfiefed_a_commoner_hesitant = 1328
str_comment_you_enfiefed_a_commoner_derisive = 1329
str_comment_you_enfiefed_a_commoner_nasty = 1330
str_comment_marriage_normal_family = 1331
str_comment_marriage_normal = 1332
str_comment_marriage_normal_nasty = 1333
str_comment_marriage_elopement_family = 1334
str_comment_marriage_elopement_liege = 1335
str_comment_you_broke_truce_as_my_vassal = 1336
str_comment_you_attacked_neutral_as_my_vassal = 1337
# Personality archetype labels.
str_personality_archetypes = 1338
str_martial = 1339
str_quarrelsome = 1340
str_selfrighteous = 1341
str_cunning = 1342
str_debauched = 1343
str_goodnatured = 1344
str_upstanding = 1345
str_roguish = 1346
str_benevolent = 1347
str_mercantile = 1348
# Situation speeches, one variant per archetype
# (default/martial/quarrelsome/pitiless/cunning/sadistic/goodnatured/upstanding).
str_surrender_demand_default = 1349
str_surrender_demand_martial = 1350
str_surrender_demand_quarrelsome = 1351
str_surrender_demand_pitiless = 1352
str_surrender_demand_cunning = 1353
str_surrender_demand_sadistic = 1354
str_surrender_demand_goodnatured = 1355
str_surrender_demand_upstanding = 1356
str_surrender_offer_default = 1357
str_surrender_offer_martial = 1358
str_surrender_offer_quarrelsome = 1359
str_surrender_offer_pitiless = 1360
str_surrender_offer_cunning = 1361
str_surrender_offer_sadistic = 1362
str_surrender_offer_goodnatured = 1363
str_surrender_offer_upstanding = 1364
str_lord_declines_negotiation_offer_default = 1365
str_lord_declines_negotiation_offer_martial = 1366
str_lord_declines_negotiation_offer_quarrelsome = 1367
str_lord_declines_negotiation_offer_pitiless = 1368
str_lord_declines_negotiation_offer_cunning = 1369
str_lord_declines_negotiation_offer_sadistic = 1370
str_lord_declines_negotiation_offer_goodnatured = 1371
str_lord_declines_negotiation_offer_upstanding = 1372
str_prisoner_released_default = 1373
str_prisoner_released_martial = 1374
str_prisoner_released_quarrelsome = 1375
str_prisoner_released_pitiless = 1376
str_prisoner_released_cunning = 1377
str_prisoner_released_sadistic = 1378
str_prisoner_released_goodnatured = 1379
str_prisoner_released_upstanding = 1380
str_enemy_meet_default = 1381
str_enemy_meet_martial = 1382
str_enemy_meet_quarrelsome = 1383
str_enemy_meet_pitiless = 1384
str_enemy_meet_cunning = 1385
str_enemy_meet_sadistic = 1386
str_enemy_meet_goodnatured = 1387
str_enemy_meet_upstanding = 1388
str_battle_won_default = 1389
str_battle_won_martial = 1390
str_battle_won_quarrelsome = 1391
str_battle_won_pitiless = 1392
str_battle_won_cunning = 1393
str_battle_won_sadistic = 1394
str_battle_won_goodnatured = 1395
str_battle_won_upstanding = 1396
str_battle_won_grudging_default = 1397
str_battle_won_grudging_martial = 1398
str_battle_won_grudging_quarrelsome = 1399
str_battle_won_grudging_pitiless = 1400
str_battle_won_grudging_cunning = 1401
str_battle_won_grudging_sadistic = 1402
str_battle_won_grudging_goodnatured = 1403
str_battle_won_grudging_upstanding = 1404
str_battle_won_unfriendly_default = 1405
str_battle_won_unfriendly_martial = 1406
str_battle_won_unfriendly_quarrelsome = 1407
str_battle_won_unfriendly_pitiless = 1408
str_battle_won_unfriendly_cunning = 1409
str_battle_won_unfriendly_sadistic = 1410
str_battle_won_unfriendly_goodnatured = 1411
str_battle_won_unfriendly_upstanding = 1412
str_troop_train_request_default = 1413
str_troop_train_request_martial = 1414
str_troop_train_request_quarrelsome = 1415
str_troop_train_request_pitiless = 1416
str_troop_train_request_cunning = 1417
str_troop_train_request_sadistic = 1418
str_troop_train_request_goodnatured = 1419
str_troop_train_request_upstanding = 1420
str_unprovoked_attack_default = 1421
str_unprovoked_attack_martial = 1422
str_unprovoked_attack_quarrelsome = 1423
str_unprovoked_attack_pitiless = 1424
str_unprovoked_attack_cunning = 1425
str_unprovoked_attack_sadistic = 1426
str_unprovoked_attack_goodnatured = 1427
str_unprovoked_attack_upstanding = 1428
str_unnecessary_attack_default = 1429
str_unnecessary_attack_martial = 1430
str_unnecessary_attack_quarrelsome = 1431
str_unnecessary_attack_pitiless = 1432
str_unnecessary_attack_cunning = 1433
str_unnecessary_attack_sadistic = 1434
str_unnecessary_attack_goodnatured = 1435
str_unnecessary_attack_upstanding = 1436
str_lord_challenged_default = 1437
str_lord_challenged_martial = 1438
str_lord_challenged_quarrelsome = 1439
str_lord_challenged_pitiless = 1440
str_lord_challenged_cunning = 1441
str_lord_challenged_sadistic = 1442
str_lord_challenged_goodnatured = 1443
str_lord_challenged_upstanding = 1444
str_lord_mission_failed_default = 1445
str_lord_mission_failed_martial = 1446
str_lord_mission_failed_quarrelsome = 1447
str_lord_mission_failed_pitiless = 1448
str_lord_mission_failed_cunning = 1449
str_lord_mission_failed_sadistic = 1450
str_lord_mission_failed_goodnatured = 1451
str_lord_mission_failed_upstanding = 1452
str_lord_follow_refusal_default = 1453
str_lord_follow_refusal_martial = 1454
str_lord_follow_refusal_quarrelsome = 1455
str_lord_follow_refusal_pitiless = 1456
str_lord_follow_refusal_cunning = 1457
str_lord_follow_refusal_sadistic = 1458
str_lord_follow_refusal_goodnatured = 1459
str_lord_follow_refusal_upstanding = 1460
str_lord_insult_default = 1461
str_lord_insult_martial = 1462
str_lord_insult_quarrelsome = 1463
str_lord_insult_pitiless = 1464
str_lord_insult_cunning = 1465
str_lord_insult_sadistic = 1466
str_lord_insult_goodnatured = 1467
str_lord_insult_upstanding = 1468
str_lord_derogatory_default = 1469
str_lord_derogatory_martial = 1470
str_lord_derogatory_quarrelsome = 1471
str_lord_derogatory_pitiless = 1472
str_lord_derogatory_cunning = 1473
str_lord_derogatory_sadistic = 1474
str_lord_derogatory_goodnatured = 1475
str_lord_derogatory_upstanding = 1476
str_lord_derogatory_result = 1477
str_lord_derogatory_martial_action = 1478
str_lord_derogatory_quarrelsome_action = 1479
# NOTE(review): "pitiles" (single l) is a typo inherited from the source
# string id; the name must match the generator's output, so it is kept.
str_lord_derogatory_pitiles_action = 1480
str_lord_derogatory_cunning_action = 1481
str_lord_derogatory_sadistic_action = 1482
str_lord_derogatory_goodnatured_action = 1483
str_lord_derogatory_upstanding_action = 1484
# Rebellion-plot conversation pieces.
str_rebellion_dilemma_default = 1485
str_rebellion_dilemma_martial = 1486
str_rebellion_dilemma_quarrelsome = 1487
str_rebellion_dilemma_pitiless = 1488
str_rebellion_dilemma_cunning = 1489
str_rebellion_dilemma_sadistic = 1490
str_rebellion_dilemma_goodnatured = 1491
str_rebellion_dilemma_upstanding = 1492
str_rebellion_dilemma_2_default = 1493
str_rebellion_dilemma_2_martial = 1494
str_rebellion_dilemma_2_quarrelsome = 1495
str_rebellion_dilemma_2_pitiless = 1496
str_rebellion_dilemma_2_cunning = 1497
str_rebellion_dilemma_2_sadistic = 1498
str_rebellion_dilemma_2_goodnatured = 1499
str_rebellion_dilemma_2_upstanding = 1500
str_political_philosophy_default = 1501
str_political_philosophy_martial = 1502
str_political_philosophy_quarrelsome = 1503
str_political_philosophy_pitiless = 1504
str_political_philosophy_cunning = 1505
str_political_philosophy_sadistic = 1506
str_political_philosophy_goodnatured = 1507
str_political_philosophy_upstanding = 1508
str_political_philosophy_roguish = 1509
str_political_philosophy_benefactor = 1510
str_political_philosophy_custodian = 1511
str_rebellion_prior_argument_very_favorable = 1512
str_rebellion_prior_argument_favorable = 1513
str_rebellion_prior_argument_unfavorable = 1514
str_rebellion_prior_argument_very_unfavorable = 1515
str_rebellion_rival_default = 1516
str_rebellion_rival_martial = 1517
str_rebellion_rival_quarrelsome = 1518
str_rebellion_rival_pitiless = 1519
str_rebellion_rival_cunning = 1520
str_rebellion_rival_sadistic = 1521
str_rebellion_rival_goodnatured = 1522
str_rebellion_rival_upstanding = 1523
str_rebellion_argument_favorable = 1524
str_rebellion_argument_neutral = 1525
str_rebellion_argument_unfavorable = 1526
str_rebellion_persuasion_favorable = 1527
str_rebellion_persuasion_neutral = 1528
str_rebellion_persuasion_unfavorable = 1529
str_rebellion_relation_very_favorable = 1530
str_rebellion_relation_favorable = 1531
str_rebellion_relation_neutral = 1532
str_rebellion_relation_unfavorable = 1533
# Sentence connectors used when composing multi-clause responses.
str_and_comma_3 = 1534
str_but_comma_3 = 1535
str_and_comma_1 = 1536
str_but_comma_1 = 1537
str_and_comma_2 = 1538
str_but_comma_2 = 1539
# Personality-flavored lord responses when agreeing to / refusing a rebellion.
# BUG FIX: in the generated original, the last five "refuse" entries were
# mislabeled str_rebellion_agree_* (copy-paste slip), which rebound the agree
# constants to the refuse-text indices 1551-1555 and left the refuse ids
# undefined.  Indices 1551-1555 sit between str_rebellion_refuse_quarrelsome
# (1550) and str_talk_later_default (1556), so they belong to the refuse
# group; renamed to follow the default/martial/quarrelsome/pitiless/cunning/
# sadistic/goodnatured/upstanding pattern used throughout this file.
# NOTE(review): apply the same id rename in module_strings.py (the generation
# source) so a rebuild does not reintroduce the shadowing.
str_rebellion_agree_default = 1540
str_rebellion_agree_martial = 1541
str_rebellion_agree_quarrelsome = 1542
str_rebellion_agree_pitiless = 1543
str_rebellion_agree_cunning = 1544
str_rebellion_agree_sadistic = 1545
str_rebellion_agree_goodnatured = 1546
str_rebellion_agree_upstanding = 1547
str_rebellion_refuse_default = 1548
str_rebellion_refuse_martial = 1549
str_rebellion_refuse_quarrelsome = 1550
str_rebellion_refuse_pitiless = 1551
str_rebellion_refuse_cunning = 1552
str_rebellion_refuse_sadistic = 1553
str_rebellion_refuse_goodnatured = 1554
str_rebellion_refuse_upstanding = 1555
str_talk_later_default = 1556
str_talk_later_martial = 1557
str_talk_later_quarrelsome = 1558
str_talk_later_pitiless = 1559
str_talk_later_cunning = 1560
str_talk_later_sadistic = 1561
str_talk_later_goodnatured = 1562
str_talk_later_upstanding = 1563
str_npc_claim_throne_liege = 1564
str_npc_claim_throne_liege_martial = 1565
str_npc_claim_throne_liege_quarrelsome = 1566
str_npc_claim_throne_liege_pitiless = 1567
str_npc_claim_throne_liege_cunning = 1568
str_npc_claim_throne_liege_sadistic = 1569
str_npc_claim_throne_liege_goodnatured = 1570
str_npc_claim_throne_liege_upstanding = 1571
str_gossip_about_character_default = 1572
str_gossip_about_character_martial = 1573
str_gossip_about_character_quarrelsome = 1574
str_gossip_about_character_selfrighteous = 1575
str_gossip_about_character_cunning = 1576
str_gossip_about_character_sadistic = 1577
str_gossip_about_character_goodnatured = 1578
str_gossip_about_character_upstanding = 1579
str_latest_rumor = 1580
str_changed_my_mind_default = 1581
str_changed_my_mind_martial = 1582
str_changed_my_mind_quarrelsome = 1583
str_changed_my_mind_pitiless = 1584
str_changed_my_mind_cunning = 1585
str_changed_my_mind_sadistic = 1586
str_changed_my_mind_goodnatured = 1587
str_changed_my_mind_upstanding = 1588
str_swadian_rebellion_pretender_intro = 1589
str_vaegir_rebellion_pretender_intro = 1590
str_khergit_rebellion_pretender_intro = 1591
str_nord_rebellion_pretender_intro = 1592
str_rhodok_rebellion_pretender_intro = 1593
str_sarranid_rebellion_pretender_intro = 1594
str_swadian_rebellion_pretender_story_1 = 1595
str_vaegir_rebellion_pretender_story_1 = 1596
str_khergit_rebellion_pretender_story_1 = 1597
str_nord_rebellion_pretender_story_1 = 1598
str_rhodok_rebellion_pretender_story_1 = 1599
str_sarranid_rebellion_pretender_story_1 = 1600
str_swadian_rebellion_pretender_story_2 = 1601
str_vaegir_rebellion_pretender_story_2 = 1602
str_khergit_rebellion_pretender_story_2 = 1603
str_nord_rebellion_pretender_story_2 = 1604
str_rhodok_rebellion_pretender_story_2 = 1605
str_sarranid_rebellion_pretender_story_2 = 1606
str_swadian_rebellion_pretender_story_3 = 1607
str_vaegir_rebellion_pretender_story_3 = 1608
str_khergit_rebellion_pretender_story_3 = 1609
str_nord_rebellion_pretender_story_3 = 1610
str_rhodok_rebellion_pretender_story_3 = 1611
str_sarranid_rebellion_pretender_story_3 = 1612
str_swadian_rebellion_monarch_response_1 = 1613
str_vaegir_rebellion_monarch_response_1 = 1614
str_khergit_rebellion_monarch_response_1 = 1615
str_nord_rebellion_monarch_response_1 = 1616
str_rhodok_rebellion_monarch_response_1 = 1617
str_sarranid_rebellion_monarch_response_1 = 1618
str_swadian_rebellion_monarch_response_2 = 1619
str_vaegir_rebellion_monarch_response_2 = 1620
str_khergit_rebellion_monarch_response_2 = 1621
str_nord_rebellion_monarch_response_2 = 1622
str_rhodok_rebellion_monarch_response_2 = 1623
str_sarranid_rebellion_monarch_response_2 = 1624
str_courtship_comment_conventional_generic = 1625
str_courtship_comment_adventurous_generic = 1626
str_courtship_comment_otherworldly_generic = 1627
str_courtship_comment_ambitious_generic = 1628
str_courtship_comment_moralist_generic = 1629
str_feast_description = 1630
str_feast_description_2 = 1631
str_feast_description_3 = 1632
str_feast_description_4 = 1633
str_feast_description_5 = 1634
str_feast_description_6 = 1635
str_feast_lengthy_description_1 = 1636
str_feast_lengthy_description_2 = 1637
str_feast_lengthy_description_3 = 1638
str_feast_lengthy_description_4 = 1639
str_feast_lengthy_description_5 = 1640
str_feast_lengthy_description_6 = 1641
str_kingdom_1_adjective = 1642
str_kingdom_2_adjective = 1643
str_kingdom_3_adjective = 1644
str_kingdom_4_adjective = 1645
str_kingdom_5_adjective = 1646
str_kingdom_6_adjective = 1647
str_credits_1 = 1648
str_credits_2 = 1649
str_credits_3 = 1650
str_credits_4 = 1651
str_credits_5 = 1652
str_credits_6 = 1653
str_credits_7 = 1654
str_credits_8 = 1655
str_credits_9 = 1656
str_credits_10 = 1657
str_credits_11 = 1658
str_credits_12 = 1659
str_multi_scene_1 = 1660
str_multi_scene_2 = 1661
str_multi_scene_3 = 1662
str_multi_scene_4 = 1663
str_multi_scene_5 = 1664
str_multi_scene_6 = 1665
str_multi_scene_7 = 1666
str_multi_scene_8 = 1667
str_multi_scene_9 = 1668
str_multi_scene_10 = 1669
str_multi_scene_11 = 1670
str_multi_scene_16 = 1671
str_multi_scene_17 = 1672
str_multi_scene_18 = 1673
str_multi_scene_19 = 1674
str_multi_scene_20 = 1675
str_multi_scene_21 = 1676
str_multi_scene_22 = 1677
str_multi_scene_23 = 1678
str_multi_scene_24 = 1679
str_multi_scene_12 = 1680
str_multi_scene_13 = 1681
str_multi_scene_14 = 1682
str_multi_scene_15 = 1683
str_multi_scene_end = 1684
str_multi_game_type_1 = 1685
str_multi_game_type_2 = 1686
str_multi_game_type_3 = 1687
str_multi_game_type_4 = 1688
str_multi_game_type_5 = 1689
str_multi_game_type_6 = 1690
str_multi_game_type_7 = 1691
str_multi_game_type_8 = 1692
str_multi_game_types_end = 1693
str_poll_kick_player_s1_by_s0 = 1694
str_poll_ban_player_s1_by_s0 = 1695
str_poll_change_map_to_s1_by_s0 = 1696
str_poll_change_map_to_s1_and_factions_to_s2_and_s3_by_s0 = 1697
str_poll_change_number_of_bots_to_reg0_and_reg1_by_s0 = 1698
str_poll_kick_player = 1699
str_poll_ban_player = 1700
str_poll_change_map = 1701
str_poll_change_map_with_faction = 1702
str_poll_change_number_of_bots = 1703
str_poll_time_left = 1704
str_poll_result_yes = 1705
str_poll_result_no = 1706
str_total_item_cost_reg0 = 1707
str_server_name = 1708
str_game_password = 1709
str_map = 1710
str_game_type = 1711
str_max_number_of_players = 1712
str_number_of_bots_in_team_reg1 = 1713
str_team_reg1_faction = 1714
str_enable_valve_anti_cheat = 1715
str_allow_friendly_fire = 1716
str_allow_melee_friendly_fire = 1717
str_friendly_fire_damage_self_ratio = 1718
str_friendly_fire_damage_friend_ratio = 1719
str_spectator_camera = 1720
str_control_block_direction = 1721
str_map_time_limit = 1722
str_round_time_limit = 1723
str_players_take_control_of_a_bot_after_death = 1724
str_team_points_limit = 1725
str_point_gained_from_flags = 1726
str_point_gained_from_capturing_flag = 1727
str_respawn_period = 1728
str_add_to_official_game_servers_list = 1729
str_combat_speed = 1730
str_combat_speed_0 = 1731
str_combat_speed_1 = 1732
str_combat_speed_2 = 1733
str_combat_speed_3 = 1734
str_combat_speed_4 = 1735
str_off = 1736
str_on = 1737
str_defender_spawn_count_limit = 1738
str_unlimited = 1739
str_automatic = 1740
str_by_mouse_movement = 1741
str_free = 1742
str_stick_to_any_player = 1743
str_stick_to_team_members = 1744
str_stick_to_team_members_view = 1745
str_make_factions_voteable = 1746
str_make_kick_voteable = 1747
str_make_ban_voteable = 1748
str_bots_upper_limit_for_votes = 1749
str_make_maps_voteable = 1750
str_valid_vote_ratio = 1751
str_auto_team_balance_limit = 1752
str_welcome_message = 1753
str_initial_gold_multiplier = 1754
str_battle_earnings_multiplier = 1755
str_round_earnings_multiplier = 1756
str_allow_player_banners = 1757
str_force_default_armor = 1758
str_reg0 = 1759
str_s0_reg0 = 1760
str_s0_s1 = 1761
str_reg0_dd_reg1reg2 = 1762
str_s0_dd_reg0 = 1763
str_respawning_in_reg0_seconds = 1764
str_no_more_respawns_remained_this_round = 1765
str_reg0_respawns_remained = 1766
str_this_is_your_last_respawn = 1767
str_wait_next_round = 1768
str_yes_wo_dot = 1769
str_no_wo_dot = 1770
str_we_resign = 1771
str_i_resign = 1772
str_s1_returned_flag = 1773
str_s1_auto_returned_flag = 1774
str_s1_captured_flag = 1775
str_s1_taken_flag = 1776
str_s1_neutralized_flag_reg0 = 1777
str_s1_captured_flag_reg0 = 1778
str_s1_pulling_flag_reg0 = 1779
str_s1_destroyed_target_0 = 1780
str_s1_destroyed_target_1 = 1781
str_s1_destroyed_catapult = 1782
str_s1_destroyed_trebuchet = 1783
str_s1_destroyed_all_targets = 1784
str_s1_saved_1_target = 1785
str_s1_saved_2_targets = 1786
str_s1_defended_castle = 1787
str_s1_captured_castle = 1788
str_auto_team_balance_in_20_seconds = 1789
str_auto_team_balance_next_round = 1790
str_auto_team_balance_done = 1791
str_s1_won_round = 1792
str_round_draw = 1793
str_round_draw_no_one_remained = 1794
str_death_mode_started = 1795
str_reset_to_default = 1796
str_done = 1797
str_player_name = 1798
str_kills = 1799
str_deaths = 1800
str_ping = 1801
str_dead = 1802
str_reg0_dead = 1803
str_bots_reg0_agents = 1804
str_bot_1_agent = 1805
str_score = 1806
str_score_reg0 = 1807
str_flags_reg0 = 1808
str_reg0_players = 1809
str_reg0_player = 1810
str_open_gate = 1811
str_close_gate = 1812
str_open_door = 1813
str_close_door = 1814
str_raise_ladder = 1815
str_drop_ladder = 1816
str_back = 1817
str_start_map = 1818
str_choose_an_option = 1819
str_choose_a_poll_type = 1820
str_choose_faction = 1821
str_choose_a_faction = 1822
str_choose_troop = 1823
str_choose_a_troop = 1824
str_choose_items = 1825
str_options = 1826
str_redefine_keys = 1827
str_submit_a_poll = 1828
str_administrator_panel = 1829
str_kick_player = 1830
str_ban_player = 1831
str_mute_player = 1832
str_unmute_player = 1833
str_quit = 1834
str_poll_for_changing_the_map = 1835
str_poll_for_changing_the_map_and_factions = 1836
str_poll_for_changing_number_of_bots = 1837
str_poll_for_kicking_a_player = 1838
str_poll_for_banning_a_player = 1839
str_choose_a_player = 1840
str_choose_a_map = 1841
str_choose_a_faction_for_team_reg0 = 1842
str_choose_number_of_bots_for_team_reg0 = 1843
str_spectator = 1844
str_spectators = 1845
str_score = 1846
str_command = 1847
str_profile_banner_selection_text = 1848
str_use_default_banner = 1849
# Single-player party/report messages.
str_party_morale_is_low = 1850
str_weekly_report = 1851
str_has_deserted_the_party = 1852
str_have_deserted_the_party = 1853
str_space = 1854
str_us_ = 1855
str_allies_ = 1856
str_enemies_ = 1857
str_routed = 1858
# Weekly-budget report line items.
str_weekly_budget = 1859
str_income_from_s0 = 1860
str_mercenary_payment_from_s0 = 1861
str_s0s_party = 1862
str_loss_due_to_tax_inefficiency = 1863
str_wages_for_s0 = 1864
str_earlier_debts = 1865
str_net_change = 1866
str_earlier_wealth = 1867
str_new_wealth = 1868
str_new_debts = 1869
# Cheat-mode / debug diagnostics emitted by game-start and AI scripts.
str_completed_faction_troop_assignments_cheat_mode_reg3 = 1870
str_completed_political_events_cheat_mode_reg3 = 1871
str_assigned_love_interests_attraction_seed_reg3 = 1872
str_located_kingdom_ladies_cheat_mode_reg3 = 1873
str_team_reg0_bot_count_is_reg1 = 1874
# Server-console command feedback.
str_input_is_not_correct_for_the_command_type_help_for_more_information = 1875
str_maximum_seconds_for_round_is_reg0 = 1876
str_respawn_period_is_reg0_seconds = 1877
str_bots_upper_limit_for_votes_is_reg0 = 1878
str_map_is_voteable = 1879
str_map_is_not_voteable = 1880
str_factions_are_voteable = 1881
str_factions_are_not_voteable = 1882
str_players_respawn_as_bot = 1883
str_players_do_not_respawn_as_bot = 1884
str_kicking_a_player_is_voteable = 1885
str_kicking_a_player_is_not_voteable = 1886
str_banning_a_player_is_voteable = 1887
str_banning_a_player_is_not_voteable = 1888
str_player_banners_are_allowed = 1889
str_player_banners_are_not_allowed = 1890
str_default_armor_is_forced = 1891
str_default_armor_is_not_forced = 1892
str_percentage_of_yes_votes_required_for_a_poll_to_get_accepted_is_reg0 = 1893
str_auto_team_balance_threshold_is_reg0 = 1894
str_starting_gold_ratio_is_reg0 = 1895
str_combat_gold_bonus_ratio_is_reg0 = 1896
str_round_gold_bonus_ratio_is_reg0 = 1897
str_point_gained_from_flags_is_reg0 = 1898
str_point_gained_from_capturing_flag_is_reg0 = 1899
str_map_time_limit_is_reg0 = 1900
str_team_points_limit_is_reg0 = 1901
str_defender_spawn_count_limit_is_s1 = 1902
str_system_error = 1903
str_prisoner_granted_parole = 1904
str_prisoner_not_offered_parole = 1905
# Lord/faction note-update strings.
str__age_reg1_family_ = 1906
str_s49_s12_s11_rel_reg0 = 1907
str_s49_s12_s11 = 1908
str_lord_info_string = 1909
str_updating_faction_notes_for_s14_temp_=_reg4 = 1910
str_foreign_relations__ = 1911
str_s21__the_s5_is_at_war_with_the_s14 = 1912
str_s21_the_s5_has_had_the_upper_hand_in_the_fighting = 1913
str_s21_the_s5_has_gotten_the_worst_of_the_fighting = 1914
str_s21_the_fighting_has_gone_on_for_some_time_and_the_war_may_end_soon_with_a_truce = 1915
str_s21_the_fighting_has_begun_relatively_recently_and_the_war_may_continue_for_some_time = 1916
str_s21_reg4reg5 = 1917
str__however_the_truce_is_no_longer_binding_on_the_s14 = 1918
str_s21__the_s5_is_bound_by_truce_not_to_attack_the_s14s18_the_truce_will_expire_in_reg1_days = 1919
str_s21__the_s5_has_recently_suffered_provocation_by_subjects_of_the_s14_and_there_is_a_risk_of_war = 1920
str_s21__the_s5_has_no_outstanding_issues_with_the_s14 = 1921
str_s21_the_s14_was_recently_provoked_by_subjects_of_the_s5_and_there_is_a_risk_of_war_ = 1922
str_s21_cheat_mode_assessment_s14_ = 1923
str_the_s5_is_ruled_by_s6_it_occupies_s8_its_vassals_are_s10__s21 = 1924
str_assigned_lord_reputation_and_relations_cheat_mode_reg3 = 1925
str_caravan_trades_in_s5_originally_from_s4_ = 1926
str_your_hero_prisoned_at_s1 = 1927
# Battle-simulation debug output.
str_old_morale_is_reg0_new_morale_is_reg1 = 1928
str_our_per_person__reg0_num_people__reg1_total_gain__reg2 = 1929
str_ene_per_person__reg0_num_people__reg1_total_gain__reg2 = 1930
str_all_per_person__reg0_num_people__reg1_total_gain__reg2 = 1931
str_loss_ratio_is_reg1 = 1932
str_total_enemy_morale_gain__reg6_last_total_enemy_morale_gain__reg7_remaining_enemy_population__reg5 = 1933
str_reg4_killed_reg5_wounded_reg6_routed = 1934
str_reg4_killed_reg5_routed = 1935
str_reg4_killed_reg5_wounded = 1936
str_reg4_wounded_reg5_routed = 1937
# NOTE(review): str_routed is defined twice in this generated file (1858
# above, re-bound to 1938 here) — likely a duplicate id in module_strings.py;
# fix at the generation source rather than renaming here.
str_routed = 1938
# Caravan/quest/AI decision diagnostics (mostly cheat-mode only).
str_caravan_in_s10_considers_s11_total_price_dif_=_reg3 = 1939
str_test__caravan_in_s3_selects_for_s4_trade_score_reg3 = 1940
str_prisoner_relative_is_reg0 = 1941
str_test_diagnosis__traveller_attacks_for_s4 = 1942
str_traveller_attack_found = 1943
str_s42 = 1944
str_test_diagnostic_quest_found_for_s4 = 1945
str_s4_changing_sides_aborts_quest = 1946
str_s4_awarded_to_s5 = 1947
str_s11_reacts_to_granting_of_s12_to_s10 = 1948
str_debug__hiring_men_to_s7_ideal_size__reg6_ideal_top_size__reg7_hiring_budget__reg8 = 1949
str_debug__hiring_men_to_party_for_s0 = 1950
str_calculating_sortie_for_s4_strength_of_reg3_vs_reg4_enemies = 1951
str_s4_sorties = 1952
str_current_wealth_reg1_taxes_last_collected_from_s4 = 1953
str_s4_considers_going_to_s5_to_pay_court_to_s6 = 1954
str_relation_with_1_bug_found_here__probably_because_s5_has_just_been_captured = 1955
str_s4_has_reg4_chance_of_going_to_home_center = 1956
str_s4_has_reg4_chance_of_recruiting_troops = 1957
str_s4_has_reg4_chance_of_going_to_s5 = 1958
str_s4_has_reg5_chance_of_patrolling_s6 = 1959
str_s4_has_reg5_chance_of_raiding_s6 = 1960
str_s4_has_reg5_chance_of_besieging_s6 = 1961
str_sum_chances_reg6 = 1962
str_deciding_faction_ai_for_s3 = 1963
str_s5_decides_s14 = 1964
str_lords_of_the_s1_gather_for_a_feast_at_s2 = 1965
str_s5_begins_offensive = 1966
str_renown_change_of_reg4_reduced_to_reg5_because_of_high_existing_renown = 1967
str_s14 = 1968
str_players_kingdom_has_had_reg3_days_of_peace = 1969
# Feast-attendance status strings for lords/ladies at a center.
str_s4_is_present_at_the_center_and_in_place_of_honor = 1970
str_s4_is_present_at_the_center_as_a_refugee = 1971
str_s4_is_present_at_the_center_and_not_attending_the_feast = 1972
str_s4_is_present_at_the_center_and_is_married = 1973
str_s4_is_present_at_the_center_and_is_attending_the_feast = 1974
str_s4_is_present_at_the_center_and_is_awaiting_the_player_in_private = 1975
str_s4_is_present_at_the_center_and_is_allowed_to_meet_the_player = 1976
str_s4_is_present_at_the_center_and_is_not_allowed_to_meet_the_player = 1977
# Family-relation nouns used when describing kinship between troops.
str_no_relation = 1978
str_wife = 1979
str_husband = 1980
str_father = 1981
str_mother = 1982
str_daughter = 1983
str_son = 1984
str_sister = 1985
str_brother = 1986
str_niece = 1987
str_nephew = 1988
str_aunt = 1989
str_uncle = 1990
str_cousin = 1991
str_daughterinlaw = 1992
str_soninlaw = 1993
str_motherinlaw = 1994
str_fatherinlaw = 1995
str_sisterinlaw = 1996
str_brotherinlaw = 1997
str_print_party_members_entered = 1998
str_num_companion_stacks_=_reg10 = 1999
str_someone = 2000
# Village/town walker dialog about local occupations and produce.
str_i_take_what_work_i_can_sirmadame_i_carry_water_or_help_the_merchants_with_their_loads_or_help_build_things_if_theres_things_to_be_built = 2001
str_dna_reg4_total_production_reg5_modula_reg7 = 2002
str_agent_produces_s9 = 2003
str_im_not_doing_anything_sirmadame_theres_no_work_to_be_had_around_here_these_days = 2004
str_im_not_doing_anything_sirmadame_i_have_no_land_of_my_own_and_theres_no_work_to_be_had_around_here_these_days = 2005
str_why_im_still_living_off_of_your_kindness_and_goodness_sirmadame_hopefully_there_will_be_work_shortly = 2006
str_i_work_in_the_fields_just_outside_the_walls_where_they_grow_grain_we_dont_quite_grow_enough_to_meet_our_needs_though_and_have_to_import_grain_from_the_surrounding_countryside = 2007
str_i_work_mostly_in_the_fields_growing_grain_in_the_town_they_grind_it_to_make_bread_or_ale_and_we_can_also_boil_it_as_a_porridge = 2008
str_i_work_in_the_breweries_making_ale_the_poor_folk_drink_a_lot_of_it_as_its_cheaper_than_wine_we_make_it_with_grain_brought_in_from_the_countryside = 2009
str_i_work_in_a_mill_grinding_flour_to_make_bread_bread_is_cheap_keeps_well_and_fills_the_stomach = 2010
str_i_tend_cattle_we_dry_and_salt_meat_to_preserve_it_and_make_cheese_from_the_milk = 2011
str_i_tend_cattle_we_dry_and_salt_meat_to_preserve_it_and_make_cheese_from_the_milk_so_it_doesnt_spoil = 2012
str_i_tend_sheep_we_send_the_wool_to_the_cities_to_be_woven_into_cloth_and_make_mutton_sausage_when_we_cull_the_herds = 2013
str_i_work_at_a_loom_spinning_cloth_from_wool_wool_is_some_of_the_cheapest_cloth_you_can_buy_but_it_will_still_keep_you_warm = 2014
str_i_crew_a_fishing_boat_we_salt_and_smoke_the_flesh_to_sell_it_far_inland = 2015
str_i_sift_salt_from_a_nearby_flat_they_need_salt_everywhere_to_preserve_meat_and_fish = 2016
str_i_mine_iron_from_a_vein_in_a_nearby_cliffside_they_use_it_to_make_tools_arms_and_other_goods = 2017
str_i_make_pottery_which_people_use_to_store_grain_and_carry_water = 2018
# Trade-good explanation strings.
str_trade_explanation_tools = 2019
str_trade_explanation_oil = 2020
str_trade_explanation_linen = 2021
str_trade_explanation_velvet = 2022
str_trade_explanation_spice = 2023
str_trade_explanation_apples = 2024
str_trade_explanation_grapes = 2025
str_trade_explanation_dyes = 2026
str_trade_explanation_leatherwork = 2027
str_trade_explanation_flax = 2028
str_trade_explanation_dates = 2029
# NOTE(review): str_trade_explanation_dates is defined twice (2029 above,
# re-bound to 2030 here), so string 2029 is unreachable by name — likely a
# duplicate id in module_strings.py; fix at the generation source.
str_trade_explanation_dates = 2030
str_trade_explanation_olives = 2031
str_s10_has_reg4_needs_reg5 = 2032
str_s14_i_hear_that_you_can_find_a_good_price_for_it_in_s15 = 2033
# Generic sN/regN display format strings.
str_s1_reg1 = 2034
str_s1_reg2 = 2035
str_s1_reg3 = 2036
str_s1_reg4 = 2037
str_s1_reg5 = 2038
str_s1_reg6 = 2039
str_s1_reg7 = 2040
str_s1_reg8 = 2041
str_s1_reg9 = 2042
str_reg13 = 2043
str_reg14 = 2044
str_reg15 = 2045
str_reg16 = 2046
str_reg17 = 2047
str_reg18 = 2048
str_reg19 = 2049
str_reg20 = 2050
str_reg21 = 2051
# Fief-assignment and political-event diagnostics.
str_assigning_lords_to_empty_centers = 2052
str_assign_lords_to_empty_centers_just_happened = 2053
str_s4_of_the_s5_is_unassigned = 2054
str_s4_of_the_s5_is_reserved_for_player = 2055
str_s4_of_the_s5_has_no_fiefs = 2056
str_s4_unassigned_centers_plus_landless_lords_=_reg4 = 2057
str_s4_holds_s5_in_reserve = 2058
str_s2s_rebellion = 2059
str_political_suggestion = 2060
str_updating_volunteers_for_s4_faction_is_s5 = 2061
str_shuffling_companion_locations = 2062
str_s4_is_at_s5 = 2063
str_instability_reg0_of_lords_are_disgruntled_reg1_are_restless = 2064
str_reg1shehe_is_prisoner_of_s1 = 2065
str_s39_rival = 2066
str_s40 = 2067
str_s41_s39_rival = 2068
# Lord-reputation labels (cheat-mode notes display).
str_reputation_cheat_mode_only_martial_ = 2069
str_reputation_cheat_mode_only_debauched_ = 2070
str_reputation_cheat_mode_only_pitiless_ = 2071
str_reputation_cheat_mode_only_calculating_ = 2072
str_reputation_cheat_mode_only_quarrelsome_ = 2073
str_reputation_cheat_mode_only_goodnatured_ = 2074
str_reputation_cheat_mode_only_upstanding_ = 2075
str_reputation_cheat_mode_only_conventional_ = 2076
str_reputation_cheat_mode_only_adventurous_ = 2077
str_reputation_cheat_mode_only_romantic_ = 2078
str_reputation_cheat_mode_only_moralist_ = 2079
str_reputation_cheat_mode_only_ambitious_ = 2080
str_reputation_cheat_mode_only_reg11_ = 2081
str_love_interest = 2082
str_betrothed = 2083
str_s40_s39_s2_reg0 = 2084
str_other_relations_s40_ = 2085
str_relation_with_liege_reg0_ = 2086
str_sense_of_security_military_reg1_court_position_reg3_ = 2087
str_s46s45s44s48 = 2088
str_political_details_s47_ = 2089
# Volunteer-recruitment availability diagnostics.
str_checking_volunteer_availability_script = 2090
str_center_relation_at_least_zero = 2091
str_relationfaction_conditions_met = 2092
str_troops_available = 2093
str_party_has_capacity = 2094
str_personality_clash_conversation_begins = 2095
str_personality_match_conversation_begins = 2096
str_the_s55 = 2097
str_travellers_on_the_road = 2098
# Tavern-keeper trade-and-rumor report strings.
str_attack_on_travellers_found_reg3_hours_ago = 2099
str_trade_event_found_reg3_hours_ago = 2100
str_a_short_while_ago = 2101
str_one_day_ago = 2102
str_two_days_day_ago = 2103
str_earlier_this_week = 2104
str_about_a_week_ago = 2105
str_about_two_weeks_ago = 2106
str_several_weeks_ago = 2107
str_unknown_assailants = 2108
str_swadians = 2109
str_vaegirs = 2110
str_khergits = 2111
str_nords = 2112
str_rhodoks = 2113
str_sarranids = 2114
str_bandits = 2115
str_deserters = 2116
str_your_followers = 2117
str_we_have_heard_that_travellers_heading_to_s40_were_attacked_on_the_road_s46_by_s39 = 2118
str_s43_s44 = 2119
str_we_have_heard_that_travellers_coming_from_s40_were_attacked_on_the_road_s46_by_s39 = 2120
str_travellers_coming_from_s40_traded_here_s46 = 2121
str_s44 = 2122
str_it_is_still_early_in_the_caravan_season_so_we_have_seen_little_tradings42 = 2123
str_there_has_been_very_little_trading_activity_here_recentlys42 = 2124
str_there_has_some_trading_activity_here_recently_but_not_enoughs42 = 2125
str_there_has_some_trading_activity_here_recently_but_the_roads_are_dangerouss42 = 2126
str_the_roads_around_here_are_very_dangerouss42 = 2127
str_we_have_received_many_traders_in_town_here_although_there_is_some_danger_on_the_roadss42 = 2128
str_we_have_received_many_traders_in_town_heres42 = 2129
str_s44_s41 = 2130
str_s41 = 2131
str_there_is_little_news_about_the_caravan_routes_to_the_towns_of_s44_and_nearby_parts_but_no_news_is_good_news_and_those_are_therefore_considered_safe = 2132
str_s47_also_the_roads_to_the_villages_of_s44_and_other_outlying_hamlets_are_considered_safe = 2133
str_however_the_roads_to_the_villages_of_s44_and_other_outlying_hamlets_are_considered_safe = 2134
str_we_have_shortages_of = 2135
str_s33_s34_reg1 = 2136
str_we_have_adequate_stores_of_all_commodities = 2137
str_s33_and_some_other_commodities = 2138
# Fugitive-hunt rumor strings.
str_the_roads_are_full_of_brigands_friend_but_that_name_in_particular_does_not_sound_familiar_good_hunting_to_you_nonetheless = 2139
str_less_than_an_hour_ago = 2140
str_maybe_reg3_hours_ago = 2141
str_reg3_days_ago = 2142
str_youre_in_luck_we_sighted_those_bastards_s16_near_s17_hurry_and_you_might_be_able_to_pick_up_their_trail_while_its_still_hot = 2143
# Lord responses to the player's claim-to-throne persuasion arguments,
# grouped by lord personality; favorable/unfavorable variants alternate.
str_you_speak_of_claims_to_the_throne_good_there_is_nothing_id_rather_do_than_fight_for_a_good_cause = 2144
str_you_speak_of_claims_to_the_throne_well_there_is_nothing_id_rather_do_than_fight_for_a_good_cause_but_the_claim_you_make_seems_somewhat_weak = 2145
str_i_am_pleased_that_you_speak_of_upholding_my_ancient_rights_which_are_sometimes_trod_upon_in_these_sorry_days = 2146
str_i_am_pleased_that_you_speak_of_upholding_my_ancient_rights_but_sometimes_men_make_pledges_before_they_are_king_which_they_cannot_keep_once_they_take_the_throne = 2147
str_you_speak_of_protecting_the_commons_well_i_supposed_thats_good_but_sometimes_the_commons_overstep_their_boundaries_im_more_concerned_that_your_claim_be_legal_so_i_can_swing_my_sword_with_a_good_conscience = 2148
str_you_speak_of_giving_me_land_good_i_ask_for_no_more_than_my_due = 2149
str_you_speak_of_giving_me_land_unfortunately_you_are_not_wellknown_for_rewarding_those_to_whom_you_have_made_such_offers = 2150
str_you_speak_of_unifying_calradia_well_i_believe_that_well_always_be_fighting__its_important_that_we_fight_for_a_rightful_cause = 2151
str_you_talk_of_claims_to_the_throne_but_i_leave_bickering_about_legalities_to_the_lawyers_and_clerks = 2152
str_you_speak_of_ruling_justly_hah_ill_believe_theres_such_a_thing_as_a_just_king_when_i_see_one = 2153
str_you_spoke_of_protecting_the_rights_of_the_nobles_if_you_did_youd_be_the_first_king_to_do_so_in_a_very_long_time = 2154
str_you_speak_of_giving_me_land_ay_well_lets_see_if_you_deliver = 2155
str_you_speak_of_giving_me_land_bah_youre_not_known_for_delivering_on_your_pledges = 2156
str_you_speak_of_unifying_calradia_well_youve_done_a_good_job_at_making_calradia_bend_its_knee_to_you_so_maybe_thats_not_just_talk = 2157
str_you_speak_of_unifying_calradia_id_be_impressed_if_i_thought_you_could_do_it_but_unfortunately_you_dont = 2158
str_you_speak_of_claims_to_the_throne_well_any_peasant_can_claim_to_be_a_kings_bastard = 2159
str_well_its_a_fine_thing_to_court_the_commons_with_promises_but_what_do_you_have_to_offer_me = 2160
str_you_speak_of_protecting_the_rights_of_lords_that_would_make_a_fine_change_if_my_rights_as_lord_would_be_respected = 2161
str_you_speak_of_protecting_the_rights_of_lords_that_would_make_a_fine_change_if_my_rights_as_lord_would_be_respected_however_it_is_easy_for_you_to_make_promises_while_you_are_weak_that_you_have_no_intention_of_keeping_when_you_are_strong = 2162
str_you_speak_of_giving_me_land_well_my_family_is_of_ancient_and_noble_lineage_so_you_promise_me_no_more_than_my_due_still_your_gesture_is_appreciated = 2163
str_you_speak_of_giving_me_land_well_you_make_that_pledge_but_i_am_not_impressed = 2164
str_you_speak_of_unifying_calradia_well_much_of_this_land_now_bends_its_knee_to_you_so_perhaps_that_is_not_just_talk = 2165
str_you_speak_of_unifying_calradia_but_right_now_yours_is_just_one_squabbling_faction_among_many = 2166
str_you_speak_of_claims_well_no_offense_but_a_claim_unsupported_by_might_rarely_prospers = 2167
str_you_speak_of_protecting_the_commons_well_i_suppose_that_will_make_for_a_more_prosperous_realm_ive_always_tried_to_treat_my_peasants_decently_saves_going_to_bed_worrying_about_whether_youll_wake_up_with_the_roof_on_fire = 2168
str_you_speak_of_protecting_the_commons_very_well_but_remember_that_peasants_are_more_likely_to_cause_trouble_if_you_make_promises_then_dont_deliver_than_if_you_never_made_the_promise_in_the_first_place = 2169
str_you_speak_of_protecting_the_rights_of_lords_good_youd_be_well_advised_to_do_that__men_fight_better_for_a_king_wholl_respect_their_rights = 2170
str_you_speak_of_protecting_the_rights_of_lords_very_well_but_remember__failing_to_keep_promises_which_you_made_while_scrambling_up_the_throne_is_the_quickest_way_to_topple_off_of_it_once_you_get_there = 2171
str_you_speak_of_giving_me_land_very_good_but_often_i_find_that_when_a_man_makes_too_many_promises_trying_to_get_to_the_top_he_has_trouble_keeping_them_once_he_reaches_it = 2172
str_you_speak_of_unifying_calradia_well_many_have_said_that_you_might_very_well_be_the_one_to_do_it = 2173
str_you_speak_of_unifying_calradia_well_all_the_kings_say_that_im_not_sure_that_you_will_succeed_while_they_fail = 2174
str_you_speak_of_claims_do_you_think_i_care_for_the_nattering_of_lawyers = 2175
str_you_speak_of_protecting_the_commons_how_kind_of_you_i_shall_tell_my_swineherd_all_about_your_sweet_promises_no_doubt_he_will_become_your_most_faithful_vassal = 2176
str_you_speak_of_protecing_the_rights_of_lords_such_sweet_words_but_ill_tell_you_this__the_only_rights_that_are_respected_in_this_world_are_the_rights_to_dominate_whoever_is_weaker_and_to_submit_to_whoever_is_stronger = 2177
str_you_speak_of_giving_me_land_yes_very_good__but_you_had_best_deliver = 2178
str_you_speak_of_giving_me_land_hah_perhaps_all_those_others_to_whom_you_promised_lands_will_simply_step_aside = 2179
str_you_speak_of_unifying_calradia_you_may_indeed_humble_the_other_kings_of_this_land_and_in_that_case_i_would_hope_that_you_would_remember_me_as_your_faithful_servant = 2180
str_you_speak_of_unifying_calradia_but_you_are_weak_and_i_think_that_you_will_remain_weak = 2181
str_you_speak_of_claims_its_good_for_a_king_to_have_a_strong_claim_although_admittedly_im_more_concerned_that_he_rules_just_ly_than_with_legalities_anyway_your_claim_seems_wellfounded_to_me = 2182
str_you_speak_of_claims_but_your_claim_seems_a_bit_weak_to_me = 2183
str_you_speak_of_protecting_the_commons_i_like_that_my_tenants_are_a_happy_lot_i_think_but_i_hear_of_others_in_other_estates_that_arent_so_fortunate = 2184
str_you_speak_of_protecting_the_commons_im_glad_to_hear_you_say_that_but_do_me_a_favor__dont_promise_the_commons_anything_you_cant_deliver_thats_a_sure_way_to_get_them_to_rebel_and_it_breaks_my_heart_to_have_to_put_them_down = 2185
str_you_speak_of_protecting_the_rights_of_lords_well_very_good_i_suppose_but_you_know__we_lords_can_take_of_ourselves_its_the_common_folk_who_need_a_strong_king_to_look_out_for_them_to_my_mind = 2186
str_you_speak_of_giving_me_land_its_kind_of_you_really_though_that_is_not_necessary = 2187
str_you_speak_of_unifying_calradia_well_maybe_you_can_unite_this_land_by_the_sword_but_im_not_sure_that_this_will_make_you_a_good_ruler = 2188
str_you_speak_of_claims_a_king_must_have_a_strong_legal_claim_for_there_not_to_be_chaos_in_the_realm_and_yours_is_wellestablished = 2189
str_you_speak_of_claims_a_king_must_have_a_strong_legal_claim_for_there_not_to_be_chaos_in_the_realm_but_your_claim_is_not_so_strong = 2190
str_you_speak_of_protecting_the_rights_of_lords_it_is_of_course_important_that_a_king_respect_the_rights_of_his_vassals_although_i_worry_that_a_king_who_took_a_throne_without_proper_cause_would_not_rule_with_justice = 2191
str_you_speak_of_protecting_the_rights_of_lords_it_is_of_course_important_that_a_king_respect_the_rights_of_his_vassals_however_i_would_like_to_know_that_you_would_indeed_deliver_on_your_promises = 2192
str_you_speak_of_protecting_the_commons_i_would_be_pleased_to_serve_a_king_who_respected_the_rights_of_his_subjects_although_i_worry_that_a_king_who_took_a_throne_without_proper_cause_would_not_rule_with_justice = 2193
str_you_speak_of_protecting_the_commons_i_would_be_pleased_to_serve_a_king_who_respected_the_rights_of_his_subjects_however_i_would_like_to_know_that_you_would_indeed_deliver_on_your_promises = 2194
str_i_am_not_swayed_by_promises_of_reward = 2195
str_you_speak_of_unifying_calradia_it_would_be_good_to_bring_peace_to_the_realm_and_i_believe_that_you_are_strong_enough_to_do_so = 2196
str_you_speak_of_unifying_calradia_it_would_be_good_to_bring_peace_the_realm_but_with_your_kingdom_in_its_current_state_i_worry_that_you_are_just_bringing_more_discord = 2197
# Rivalry-description strings used in lord relations notes.
str_s15 = 2198
str_my_s11_s15 = 2199
str_stop_gap__s15_is_the_rival_of_s16 = 2200
str_my_s11_s18 = 2201
str_the_socalled_s11_s18 = 2202
str_s18_would_cheat_me_of_my_inheritance_by_heaven_i_know_my_rights_and_im_not_going_to_back_down = 2203
str_s18_once_questioned_my_honour_and_my_bravery_i_long_for_the_day_when_i_can_meet_him_in_battle_and_make_him_retract_his_statement = 2204
str_s18_once_questioned_my_judgment_in_battle_by_heaven_would_he_have_us_shirk_our_duty_to_smite_our_sovereigns_foes = 2205
str_s18_seems_to_think_he_has_the_right_to_some_of_my_property_well_he_does_not = 2206
str_s18_once_took_something_i_said_amiss_stubborn_bastard_wont_give_it_up_and_keeps_trying_to_get_me_to_recant_my_words = 2207
str_s18_is_a_crafty_weasel_and_i_dont_trust_him_one_bit = 2208
str_s18_i_despite_him_he_puts_on_such_a_nauseating_display_of_virtue_and_thinks_nothing_of_insulting_his_betters = 2209
str_s18_entered_into_a_little_deal_with_me_and_is_now_trying_to_wriggle_out_of_it = 2210
str_s18_once_ran_an_errand_for_me_and_now_thinks_i_owe_him_something_i_owe_his_ilk_nothing = 2211
str_s18_is_soft_and_weak_and_not_fit_to_govern_a_fief_and_i_have_always_detested_him = 2212
str_s18_is_a_quarrelsome_oaf_and_a_liability_in_my_opinion_and_ive_let_him_know_as_much = 2213
str_s18_i_am_sorry_to_say_is_far_too_softhearted_a_man_to_be_given_any_kind_of_responsibility_his_chivalry_will_allow_the_enemy_to_flee_to_fight_another_day_and_will_cost_the_lives_of_my_own_faithful_men = 2214
str_s18_seems_to_have_something_against_me_for_some_reason_i_dont_like_to_talk_ill_of_people_but_i_think_hes_can_be_a_bit_of_a_cad_sometimes = 2215
str_s18_has_always_treated_me_contemptuously_although_i_have_done_him_no_wrong = 2216
str_s18_is_thoroughly_dishonorable_and_a_compulsive_spinner_of_intrigues_which_i_fear_will_drag_us_into_wars_or_incite_rebellions = 2217
str_s18_disappoints_me_i_once_scolded_for_his_rashness_in_battle_and_he_took_offense_i_do_not_care_to_apologize_for_my_efforts_to_save_his_life_and_the_lives_of_his_men = 2218
str_s18_squanders_money_and_carouses_in_a_way_most_unbefitting_a_noble_by_doing_so_he_disgraces_us_all = 2219
str_s18_has_been_speaking_ill_of_me_behind_my_back_or_so_they_say = 2220
str_s18_is_a_disgrace_reg3shehe_consorts_with_merchants_lends_money_at_interest_uses_coarse_language_and_shows_no_attempt_to_uphold_the_dignity_of_the_honor_bestowed_upon_reg3herhim = 2221
str_s18_has_condemned_me_for_engaging_in_commerce_what_could_possibly_be_wrong_with_that = 2222
str_s18_i_have_heard_has_been_encouraging_seditious_ideas_among_the_peasantry__a_foolish_move_which_endangers_us_all = 2223
str_s18_has_called_me_out_for_the_way_i_deal_with_my_tenants_well_so_be_it_if_i_teach_them_that_they_are_the_equal_of_anyone_with_socalled_gentle_blood_what_is_it_to_reg3herhim = 2224
str_a_most_gallant_gentleman_who_knows_how_to_treat_a_lady = 2225
str_a_base_cad = 2226
str_a_man_who_treats_me_as_his_equal_which_is_rare = 2227
str_appears_to_value_me_with_his_estate_and_his_horse_as_prizes_worth_having = 2228
str_a_bit_dull_but_what_can_you_expect = 2229
str_the_man_whom_destiny_intends_for_me = 2230
str_is_not_right_for_me__i_cannot_say_why_but_he_makes_my_skin_crawl = 2231
str_is_a_man_who_clearly_intends_to_make_his_mark_in_the_world = 2232
str_is_a_layabout_a_naif_prey_for_others_who_are_cleverer_than_he = 2233
str_is_a_man_of_stalwart_character = 2234
str_appears_to_be_a_man_of_low_morals = 2235
str_appears_to_be_a_man_who_lacks_selfdiscipline = 2236
str_check_reg8_s4_reconciles_s5_and_s6_ = 2237
str_diagnostic__player_should_receive_consultation_quest_here_if_not_already_active = 2238
str_check_reg8_s4_rules_in_s5s_favor_in_quarrel_with_s6_ = 2239
str_check_reg8_new_rivalry_generated_between_s5_and_s6 = 2240
str_check_reg8_s5_attempts_to_win_over_s6 = 2241
str_s1_has_no_lords = 2242
str_do_political_consequences_for_s4_victory_over_s5 = 2243
str_bandits_attacked_a_party_on_the_roads_so_a_bounty_is_probably_available = 2244
str_travellers_attacked_on_road_from_s15_to_s16 = 2245
str_s15_shares_joy_of_victory_with_s16 = 2246
str_faction_marshall_s15_involved_in_defeat = 2247
str_player_faction_marshall_involved_in_defeat = 2248
str_s14_of_s15_defeated_in_battle_loses_one_point_relation_with_liege = 2249
str_s14_defeated_in_battle_by_s15_loses_one_point_relation = 2250
str_s14_blames_s15_for_defeat = 2251
str_s32_is_undeclared_rebel = 2252
str_small_bonus_for_no_base = 2253
str_s15_considered_member_of_faction_s20_weight_of_reg15 = 2254
str_checking_for_recruitment_argument_reg24 = 2255
str_g_talk_troop_s20_evaluates_being_vassal_to_s22_of_s21 = 2256
str_base_result_for_security_reg1 = 2257
str_result_for_security_weighted_by_personality_reg2 = 2258
str_base_result_for_political_connections_reg3 = 2259
str_result_for_political_connections_weighted_by_personality_reg4 = 2260
str_result_for_argument_strength_reg7 = 2261
str_result_for_argument_appeal_reg17 = 2262
str_combined_result_for_argument_modified_by_persuasion_reg8 = 2263
str_base_changing_sides_penalty_reg9 = 2264
str_changing_sides_penalty_weighted_by_personality_reg10 = 2265
str_combined_bonuses_and_penalties_=_reg0 = 2266
str_intrigue_test_troop_party_is_active = 2267
str_intrigue_test_troop_party_is_not_in_battle = 2268
str_intrigue_test_troop_is_not_prisoner = 2269
str_intrigue_test_troop_is_nearby = 2270
str_s20_relation_with_s15_changed_by_reg4_to_reg14 = 2271
str_total_additions_reg4 = 2272
str_total_subtractions_reg4 = 2273
str_checking_lord_reactions_in_s15 = 2274
str_s14_protests_the_appointment_of_s15_as_marshall = 2275
str_s11_relocates_to_s10 = 2276
str_checking_s3_at_s5_with_s11_relationship_with_s4_score_reg0 = 2277
str_s3_feast_concludes_at_s4 = 2278
str_attendance_reg3_nobles_out_of_reg4 = 2279
str_romantic_chemistry_reg0 = 2280
str_personality_modifier_reg2 = 2281
str_renown_modifier_reg3 = 2282
str_final_score_reg0 = 2283
str_s4_pursues_suit_with_s5_in_s7 = 2284
str_note__favor_event_logged = 2285
str_result_lady_forced_to_agree_to_engagement = 2286
str_result_lady_rejects_suitor = 2287
str_result_happy_engagement_between_s4_and_s5 = 2288
str_result_s4_elopes_with_s5 = 2289
str_result_s4_reluctantly_agrees_to_engagement_with_s5 = 2290
str_result_stalemate_patience_roll_=_reg3 = 2291
str_s3_marries_s4_at_s5 = 2292
str__i_must_attend_to_this_matter_before_i_worry_about_the_affairs_of_the_realm = 2293
str_the_other_matter_took_precedence = 2294
str_i_cannot_leave_this_fortress_now_as_it_is_under_siege = 2295
str_after_all_we_are_under_siege = 2296
str_we_are_not_strong_enough_to_face_the_enemy_out_in_the_open = 2297
str_i_should_probably_seek_shelter_behind_some_stout_walls = 2298
str_enemies_are_reported_to_be_nearby_and_we_should_stand_ready_to_either_man_the_walls_or_sortie_out_to_do_battle = 2299
str_the_enemy_is_nearby = 2300
str_as_the_marshall_i_am_assembling_the_army_of_the_realm = 2301
str_as_the_marshall_i_am_assembling_the_army_of_the_realm_and_travel_to_lands_near_s10_to_inform_more_vassals = 2302
str_i_intend_to_assemble_the_army_of_the_realm = 2303
str_as_the_marshall_i_am_leading_the_siege = 2304
str_i_intend_to_begin_the_siege = 2305
str_as_the_marshall_i_am_leading_our_raid = 2306
str_i_intend_to_start_our_raid = 2307
str_as_the_marshall_i_am_leading_our_forces_in_search_of_the_enemy = 2308
str_i_intend_to_lead_our_forces_out_to_find_the_enemy = 2309
str_as_the_marshall_i_am_leading_our_forces_to_engage_the_enemy_in_battle = 2310
str_i_intend_to_lead_our_forces_out_to_engage_the_enemy = 2311
str_i_dont_have_enough_troops_and_i_need_to_get_some_more = 2312
str_i_am_running_low_on_troops = 2313
str_we_are_following_your_direction = 2314
str_i_need_to_make_preparations_for_your_wedding = 2315
str_after_all_i_need_to_make_preparations_for_your_wedding = 2316
str_i_am_heading_to_the_site_of_our_wedding = 2317
str_after_all_we_are_soon_to_be_wed = 2318
str_i_am_hosting_a_feast_there = 2319
str_i_have_a_feast_to_host = 2320
str_i_am_to_be_the_bridegroom_there = 2321
str_my_wedding_day_draws_near = 2322
str_i_have_too_much_loot_and_too_many_prisoners_and_need_to_secure_them = 2323
str_i_should_think_of_dropping_off_some_of_my_prisoners = 2324
str_i_need_to_reinforce_it_as_it_is_poorly_garrisoned = 2325
str_there_is_a_hole_in_our_defenses = 2326
str_i_am_following_the_marshals_orders = 2327
str_the_marshal_has_given_me_this_command = 2328
str_i_am_answering_the_marshals_summons = 2329
str_our_realm_needs_my_support_there_is_enemy_raiding_one_of_our_villages_which_is_not_to_far_from_here_i_am_going_there = 2330
str_the_marshal_has_issued_a_summons = 2331
str_comradeinarms = 2332
str_i_am_supporting_my_s11_s10 = 2333
str_i_believe_that_one_of_my_comrades_is_in_need = 2334
str_s20_decided_to_attack_s21 = 2335
str_a_fortress_is_vulnerable = 2336
str_i_believe_that_the_enemy_may_be_vulnerable = 2337
str_i_need_to_inspect_my_properties_and_collect_my_dues = 2338
str_it_has_been_too_long_since_i_have_inspected_my_estates = 2339
str_my_men_are_weary_so_we_are_returning_home = 2340
str_my_men_are_becoming_weary = 2341
str_i_have_a_score_to_settle_with_the_lord_there = 2342
str_i_am_thinking_of_settling_an_old_score = 2343
str_i_am_short_of_money_and_i_hear_that_there_is_much_wealth_there = 2344
str_i_need_to_refill_my_purse_preferably_with_the_enemys_money = 2345
str_by_striking_at_the_enemys_richest_lands_perhaps_i_can_draw_them_out_to_battle = 2346
str_i_am_thinking_of_going_on_the_attack = 2347
str_perhaps_if_i_strike_one_more_blow_we_may_end_this_war_on_our_terms_ = 2348
str_we_may_be_able_to_bring_this_war_to_a_close_with_a_few_more_blows = 2349
str_i_wish_to_attend_the_feast_there = 2350
str_there_is_a_feast_which_i_wish_to_attend = 2351
str_there_is_a_fair_lady_there_whom_i_wish_to_court = 2352
str_i_have_the_inclination_to_pay_court_to_a_fair_lady = 2353
str_we_have_heard_reports_that_the_enemy_is_in_the_area = 2354
str_i_have_heard_reports_of_enemy_incursions_into_our_territory = 2355
str_i_need_to_spend_some_time_with_my_household = 2356
str_it_has_been_a_long_time_since_i_have_been_able_to_spend_time_with_my_household = 2357
str_i_am_watching_the_borders = 2358
str_i_may_be_needed_to_watch_the_borders = 2359
str_i_will_guard_the_areas_near_my_home = 2360
str_i_am_perhaps_needed_most_at_home = 2361
str_i_cant_think_of_anything_better_to_do = 2362
str_i_am_completing_what_i_have_already_begun = 2363
str_i_dont_even_have_a_home_to_which_to_return = 2364
str_debug__s10_decides_s14_faction_ai_s15 = 2365
str__i_am_acting_independently_because_no_marshal_is_appointed = 2366
str__i_am_acting_independently_because_our_marshal_is_currently_indisposed = 2367
str__i_am_acting_independently_because_our_realm_is_currently_not_on_campaign = 2368
str__i_am_not_accompanying_the_marshal_because_i_fear_that_he_may_lead_us_into_disaster = 2369
str_i_am_not_accompanying_the_marshal_because_i_question_his_judgment = 2370
str_i_am_not_accompanying_the_marshal_because_i_can_do_greater_deeds = 2371
str__s16_has_kept_us_on_campaign_on_far_too_long_and_there_are_other_pressing_matters_to_which_i_must_attend = 2372
str__i_am_not_participating_in_the_marshals_campaign_because_i_do_not_know_where_to_find_our_main_army = 2373
str__i_am_acting_independently_although_some_enemies_have_been_spotted_within_our_borders_they_havent_come_in_force_and_the_local_troops_should_be_able_to_dispatch_them = 2374
str__the_needs_of_the_realm_must_come_first = 2375
str_we_are_likely_to_be_overwhelmed_by_the_s9_let_each_defend_their_own = 2376
str_we_should_see_this_siege_through = 2377
str_we_should_prepare_to_defend_s21_but_we_should_gather_our_forces_until_we_are_strong_enough_to_engage_them = 2378
str_we_should_prepare_to_defend_s21_but_first_we_have_to_gather = 2379
str_we_should_ride_to_break_the_siege_of_s21 = 2380
str_we_should_ride_to_defeat_the_enemy_gathered_near_s21 = 2381
str_we_have_located_s21s_army_and_we_should_engage_it = 2382
str_this_offensive_needs_to_wind_down_soon_so_the_vassals_can_attend_to_their_own_business = 2383
str_the_vassals_are_tired_we_let_them_rest_for_some_time = 2384
str_the_vassals_still_need_time_to_attend_to_their_own_business = 2385
str_it_is_time_to_go_on_the_offensive_and_we_must_first_assemble_the_army = 2386
str_we_must_continue_to_gather_the_army_before_we_ride_forth_on_an_offensive_operation = 2387
str_there_is_no_need_to_beat_around_the_borders__we_can_take_one_of_their_important_towns = 2388
str_we_should_exploit_our_success_over_s21_by_seizing_one_of_their_fortresses = 2389
str_we_shall_leave_a_fiery_trail_through_the_heart_of_the_enemys_lands_targeting_the_wealthy_settlements_if_we_can = 2390
str_the_army_will_be_disbanded_because_we_have_been_waiting_too_long_without_a_target = 2391
str_it_is_time_for_the_feast_to_conclude = 2392
str_we_should_continue_the_feast_unless_there_is_an_emergency = 2393
str_you_had_wished_to_hold_a_feast = 2394
str_your_wedding_day_approaches_my_lady = 2395
str_your_wedding_day_approaches = 2396
str_s22_and_s23_wish_to_marry = 2397
str_it_has_been_a_long_time_since_the_lords_of_the_realm_gathered_for_a_feast = 2398
str_the_circumstances_which_led_to_this_decision_no_longer_apply_so_we_should_stop_and_reconsider_shortly = 2399
str_we_cant_think_of_anything_to_do = 2400
str_s15_is_at_war_with_s16_ = 2401
str_in_the_short_term_s15_has_a_truce_with_s16_as_a_matter_of_general_policy_ = 2402
str_in_the_short_term_s15_was_recently_provoked_by_s16_and_is_under_pressure_to_declare_war_as_a_matter_of_general_policy_ = 2403
str_envoymodified_diplomacy_score_honor_plus_relation_plus_envoy_persuasion_=_reg4 = 2404
str_s12s15_cannot_negotiate_with_s16_as_to_do_so_would_undermine_reg4herhis_own_claim_to_the_throne_this_civil_war_must_almost_certainly_end_with_the_defeat_of_one_side_or_another = 2405
str_s12s15_considers_s16_to_be_dangerous_and_untrustworthy_and_shehe_wants_to_bring_s16_down = 2406
str_s12s15_is_anxious_to_reclaim_old_lands_such_as_s18_now_held_by_s16 = 2407
str_s12s15_feels_that_reg4shehe_is_winning_the_war_against_s16_and_sees_no_reason_not_to_continue = 2408
str_s12s15_faces_too_much_internal_discontent_to_feel_comfortable_ignoring_recent_provocations_by_s16s_subjects = 2409
str_s12even_though_reg4shehe_is_fighting_on_two_fronts_s15_is_inclined_to_continue_the_war_against_s16_for_a_little_while_longer_for_the_sake_of_honor = 2410
str_s12s15_feels_that_reg4shehe_must_pursue_the_war_against_s16_for_a_little_while_longer_for_the_sake_of_honor = 2411
str_s12s15_is_currently_on_the_offensive_against_s17_now_held_by_s16_and_reluctant_to_negotiate = 2412
str_s12s15_is_alarmed_by_the_growing_power_of_s16 = 2413
str_s12s15_distrusts_s16_and_fears_that_any_deals_struck_between_the_two_realms_will_not_be_kept = 2414
str_s12s15_is_at_war_on_too_many_fronts_and_eager_to_make_peace_with_s16 = 2415
str_s12s15_seems_to_think_that_s16_and_reg4shehe_have_a_common_enemy_in_the_s17 = 2416
str_s12s15_feels_frustrated_by_reg4herhis_inability_to_strike_a_decisive_blow_against_s16 = 2417
str_s12s15_has_suffered_enough_in_the_war_with_s16_for_too_little_gain_and_is_ready_to_pursue_a_peace = 2418
str_s12s15_would_like_to_firm_up_a_truce_with_s16_to_respond_to_the_threat_from_the_s17 = 2419
str_s12s15_wishes_to_be_at_peace_with_s16_so_as_to_pursue_the_war_against_the_s17 = 2420
str_s12s15_seems_to_be_intimidated_by_s16_and_would_like_to_avoid_hostilities = 2421
str_s12s15_has_no_particular_reason_to_continue_the_war_with_s16_and_would_probably_make_peace_if_given_the_opportunity = 2422
str_s12s15_seems_to_be_willing_to_improve_relations_with_s16 = 2423
str_excuse_me_how_can_you_possibly_imagine_yourself_worthy_to_marry_into_our_family = 2424
str_em_with_regard_to_her_ladyship_we_were_looking_specifically_for_a_groom_of_some_distinction_fight_hard_count_your_dinars_and_perhaps_some_day_in_the_future_we_may_speak_of_such_things_my_good_man = 2425
str_em_with_regard_to_her_ladyship_we_were_looking_specifically_for_a_groom_of_some_distinction = 2426
str_it_is_too_early_for_you_to_be_speaking_of_such_things_you_are_still_making_your_mark_in_the_world = 2427
str_you_dont_serve_the_s4_so_id_say_no_one_day_we_may_be_at_war_and_i_prefer_not_to_have_to_kill_my_inlaws_if_at_all_possible = 2428
str_as_you_are_not_a_vassal_of_the_s4_i_must_decline_your_request_the_twists_of_fate_may_mean_that_we_will_one_day_cross_swords_and_i_would_hope_not_to_make_a_widow_of_a_lady_whom_i_am_obligated_to_protect = 2429
str_as_you_are_not_a_pledged_vassal_of_our_liege_with_the_right_to_hold_land_i_must_refuse_your_request_to_marry_into_our_family = 2430
str_look_here_lad__the_young_s14_has_been_paying_court_to_s16_and_youll_have_to_admit__hes_a_finer_catch_for_her_than_you_so_lets_have_no_more_of_this_talk_shall_we = 2431
str_i_do_not_care_for_you_sir_and_i_consider_it_my_duty_to_protect_the_ladies_of_my_household_from_undesirable_suitors = 2432
str_hmm_young_girls_may_easily_be_led_astray_so_out_of_a_sense_of_duty_to_the_ladies_of_my_household_i_think_i_would_like_to_get_to_know_you_a_bit_better_we_may_speak_of_this_at_a_later_date = 2433
str_you_may_indeed_make_a_fine_match_for_the_young_mistress = 2434
str_madame__given_our_relations_in_the_past_this_proposal_is_most_surprising_i_do_not_think_that_you_are_the_kind_of_woman_who_can_be_bent_to_a_hushands_will_and_i_would_prefer_not_to_have_our_married_life_be_a_source_of_constant_acrimony = 2435
str_i_would_prefer_to_marry_a_proper_maiden_who_will_obey_her_husband_and_is_not_likely_to_split_his_head_with_a_sword = 2436
str_my_lady_while_i_admire_your_valor_you_will_forgive_me_if_i_tell_you_that_a_woman_like_you_does_not_uphold_to_my_ideal_of_the_feminine_of_the_delicate_and_of_the_pure = 2437
str_nah_i_want_a_woman_wholl_keep_quiet_and_do_what_shes_told_i_dont_think_thats_you = 2438
str_my_lady_you_are_possessed_of_great_charms_but_no_properties_until_you_obtain_some_to_marry_you_would_be_an_act_of_ingratitude_towards_my_ancestors_and_my_lineage = 2439
str_my_lady_you_are_a_woman_of_no_known_family_of_no_possessions__in_short_a_nobody_do_you_think_that_you_are_fit_to_marry_into_may_family = 2440
str_my_lady__forgive_me__the_quality_of_our_bond_is_not_of_the_sort_which_the_poets_tell_us_is_necessary_to_sustain_a_happy_marriage = 2441
str_um_i_think_that_if_i_want_to_stay_on_s4s_good_side_id_best_not_marry_you = 2442
str_you_serve_another_realm_i_dont_see_s4_granting_reg4herhis_blessing_to_our_union = 2443
str_madame_my_heart_currently_belongs_to_s4 = 2444
str_my_lady_you_are_a_woman_of_great_spirit_and_bravery_possessed_of_beauty_grace_and_wit_i_shall_give_your_proposal_consideration = 2445
str_my_lady_you_are_a_woman_of_great_spirit_and_bravery_possessed_of_beauty_grace_and_wit_i_would_be_most_honored_were_you_to_become_my_wife = 2446
str_poem_choice_reg4_lady_rep_reg5 = 2447
str_ah__kais_and_layali__such_a_sad_tale_many_a_time_has_it_been_recounted_for_my_family_by_the_wandering_poets_who_come_to_our_home_and_it_has_never_failed_to_bring_tears_to_our_eyes = 2448
str_kais_and_layali_three_hundred_stanzas_of_pathetic_sniveling_if_you_ask_me_if_kais_wanted_to_escape_heartbreak_he_should_have_learned_to_live_within_his_station_and_not_yearn_for_what_he_cannot_have = 2449
str_kais_and_layali_no_one_should_ever_have_written_such_a_sad_poem_if_it_was_the_destiny_of_kais_and_layali_to_be_together_than_their_love_should_have_conquered_all_obstacles = 2450
str_ah_kais_and_layali_a_very_old_standby_but_moving_in_its_way = 2451
str_the_saga_of_helgered_and_kara_such_happy_times_in_which_our_ancestors_lived_women_like_kara_could_venture_out_into_the_world_like_men_win_a_name_for_themselves_and_not_linger_in_their_husbands_shadow = 2452
str_ah_the_saga_of_helgered_and_kara_now_there_was_a_lady_who_knew_what_she_wanted_and_was_not_afraid_to_obtain_it = 2453
str_the_saga_of_helgered_and_kara_a_terrible_tale__but_it_speaks_of_a_very_great_love_if_she_were_willing_to_make_war_on_her_own_family = 2454
str_the_saga_of_helgered_and_kara_as_i_recall_kara_valued_her_own_base_passions_over_duty_to_her_family_that_she_made_war_on_her_own_father_i_have_no_time_for_a_poem_which_praises_such_a_woman = 2455
str_the_saga_of_helgered_and_kara_how_could_a_woman_don_armor_and_carry_a_sword_how_could_a_man_love_so_ungentle_a_creature = 2456
str_a_conversation_in_the_garden_i_cannot_understand_the_lady_in_that_poem_if_she_loves_the_man_why_does_she_tease_him_so = 2457
str_a_conversation_in_the_garden_let_us_see__it_is_morally_unedifying_it_exalts_deception_it_ends_with_a_maiden_surrendering_to_her_base_passions_and_yet_i_cannot_help_but_find_it_charming_perhaps_because_it_tells_us_that_love_need_not_be_tragic_to_be_memorable = 2458
str_a_conversation_in_the_garden_now_that_is_a_tale_every_lady_should_know_by_heart_to_learn_the_subtleties_of_the_politics_she_must_practice = 2459
str_a_conversation_in_the_garden_it_is_droll_i_suppose__although_there_is_nothing_there_that_truly_stirs_my_soul = 2460
str_storming_the_fortress_of_love_ah_yes_the_lady_sits_within_doing_nothing_while_the_man_is_the_one_who_strives_and_achieves_i_have_enough_of_that_in_my_daily_life_why_listen_to_poems_about_it = 2461
str_storming_the_fortress_of_love_ah_yes_an_uplifting_tribute_to_the_separate_virtues_of_man_and_woman = 2462
str_storming_the_fortress_of_love_ah_yes_but_although_it_is_a_fine_tale_of_virtues_it_speaks_nothing_of_passion = 2463
str_storming_the_fortress_of_love_ah_a_sermon_dressed_up_as_a_love_poem_if_you_ask_me = 2464
str_a_hearts_desire_ah_such_a_beautiful_account_of_the_perfect_perfect_love_to_love_like_that_must_be_to_truly_know_rapture = 2465
str_a_hearts_desire_silly_if_you_ask_me_if_the_poet_desires_a_lady_then_he_should_endeavor_to_win_her__and_not_dress_up_his_desire_with_a_pretense_of_piety = 2466
str_a_hearts_desire_hmm__it_is_an_interesting_exploration_of_earthly_and_divine_love_it_does_speak_of_the_spiritual_quest_which_brings_out_the_best_in_man_but_i_wonder_if_the_poet_has_not_confused_his_yearning_for_higher_things_with_his_baser_passions = 2467
str_a_hearts_desire_oh_yes__it_is_very_worthy_and_philosophical_but_if_i_am_to_listen_to_a_bard_strum_a_lute_for_three_hours_i_personally_prefer_there_to_be_a_bit_of_a_story = 2468
str_result_reg4_string_s11 = 2469
str_calculating_effect_for_policy_for_s3 = 2470
str_reg3_units_of_s4_for_reg5_guests_and_retinue = 2471
str_reg3_units_of_spice_of_reg5_to_be_consumed = 2472
str_reg3_units_of_oil_of_reg5_to_be_consumed = 2473
str_of_food_which_must_come_before_everything_else_the_amount_is_s8 = 2474
str_s9_and_the_variety_is_s8_ = 2475
str_s9_of_drink_which_guests_will_expect_in_great_abundance_the_amount_is_s8 = 2476
str_s9_of_spice_which_is_essential_to_demonstrate_that_we_spare_no_expense_as_hosts_the_amount_is_s8_ = 2477
str_s9_of_oil_which_we_shall_require_to_light_the_lamps_the_amount_is_s8 = 2478
str_s9_overall_our_table_will_be_considered_s8 = 2479
str_rebel = 2480
str_bandit = 2481
str_relation_of_prisoner_with_captor_is_reg0 = 2482
str_s5_suffers_attrition_reg3_x_s4 = 2483
str_s65 = 2484
str_s10_said_on_s1_s11__ = 2485
str_rumor_note_to_s3s_slot_reg4_s5 = 2486
str_totalling_casualties_caused_during_mission = 2487
str_removing_s4_from_s5 = 2488
str_s4_joins_prison_break = 2489
str_helper_is_spawned = 2490
str_leaving_area_during_prison_break = 2491
str_talk_to_the_trainer = 2492
str_woman = 2493
str_man = 2494
str_noble = 2495
str_common = 2496
str_may_find_that_you_are_able_to_take_your_place_among_calradias_great_lords_relatively_quickly = 2497
str_may_face_some_difficulties_establishing_yourself_as_an_equal_among_calradias_great_lords = 2498
str_may_face_great_difficulties_establishing_yourself_as_an_equal_among_calradias_great_lords = 2499
str_current_party_morale_is_reg5_current_party_morale_modifiers_are__base_morale__50_party_size_s2reg1_leadership_s3reg2_food_variety_s4reg3s5s6_recent_events_s7reg4_total__reg5___ = 2500
str_s1extra_morale_for_s9_troops__reg6_ = 2501
str_courtships_in_progress_ = 2502
str_s1_s2_relation_reg3_last_visit_reg4_days_ago = 2503
str_s1__poems_known = 2504
str_s1_storming_the_castle_of_love_allegoric = 2505
str_s1_kais_and_layali_tragic = 2506
str_s1_a_conversation_in_the_garden_comic = 2507
str_s1_helgered_and_kara_epic = 2508
str_s1_a_hearts_desire_mystic = 2509
str_no_companions_in_service = 2510
str_gathering_support = 2511
str_expected_back_imminently = 2512
str_expected_back_in_approximately_reg3_days = 2513
str_gathering_intelligence = 2514
str_diplomatic_embassy_to_s9 = 2515
str_serving_as_minister = 2516
str_in_your_court_at_s9 = 2517
str_under_arms = 2518
str_in_your_party = 2519
str_s4_s8_s5 = 2520
str_s2_s3 = 2521
str_attacking_enemy_army_near_s11 = 2522
str_holding_feast_at_s11 = 2523
str_sfai_reg4 = 2524
str_s9s10_current_state_s11_hours_at_current_state_reg3_current_strategic_thinking_s14_marshall_s12_since_the_last_offensive_ended_reg4_hours_since_the_decisive_event_reg10_hours_since_the_last_rest_reg9_hours_since_the_last_feast_ended_reg5_hours_percent_disgruntled_lords_reg7_percent_restless_lords_reg8__ = 2525
str__right_to_rule_reg12 = 2526
str_political_arguments_made_legality_reg3_rights_of_lords_reg4_unificationpeace_reg5_rights_of_commons_reg6_fief_pledges_reg7 = 2527
str_renown_reg2_honour_rating_reg3s12_friends_s8_enemies_s6_s9 = 2528
str_you_lie_stunned_for_several_minutes_then_stagger_to_your_feet_to_find_your_s10_standing_over_you_you_have_lost_the_duel = 2529
str_s10_lies_in_the_arenas_dust_for_several_minutes_then_staggers_to_his_feet_you_have_won_the_duel = 2530
str_debug__you_fought_with_a_center_so_no_change_in_morale = 2531
str__this_castle_is_temporarily_under_royal_control = 2532
str__this_castle_does_not_seem_to_be_under_anyones_control = 2533
str__this_town_is_temporarily_under_royal_control = 2534
str__the_townspeople_seem_to_have_declared_their_independence = 2535
str_to_your_husband_s11 = 2536
str__you_see_the_banner_of_your_wifehusband_s7_over_the_castle_gate = 2537
str__the_banner_of_your_wifehusband_s7_flies_over_the_town_gates = 2538
str__the_lord_is_currently_holding_a_feast_in_his_hall = 2539
str__join_the_feast = 2540
str_belligerent_drunk_in_s4 = 2541
str_belligerent_drunk_not_found = 2542
str_roughlooking_character_in_s4 = 2543
str_roughlooking_character_not_found = 2544
str__however_you_have_sufficiently_distinguished_yourself_to_be_invited_to_attend_the_ongoing_feast_in_the_lords_castle = 2545
str_s8_you_are_also_invited_to_attend_the_ongoing_feast_in_the_castle = 2546
str___hardship_index_reg0_avg_towns_reg1_avg_villages_reg2__ = 2547
str___s3_price_=_reg4_calradian_average_reg6_capital_reg11_s4_base_reg1modified_by_raw_material_reg2modified_by_prosperity_reg3_calradian_average_production_base_reg5_total_reg12_consumed_reg7used_as_raw_material_reg8modified_total_reg9_calradian_consumption_base_reg10_total_reg13s1_ = 2548
str_s11_unfortunately_s12_was_wounded_and_had_to_be_left_behind = 2549
str_s11_also_s12_was_wounded_and_had_to_be_left_behind = 2550
str_trial_influences_s17s_relation_with_s18_by_reg3 = 2551
str_with_the_s10 = 2552
str_outside_calradia = 2553
str_you_have_been_indicted_for_treason_to_s7_your_properties_have_been_confiscated_and_you_would_be_well_advised_to_flee_for_your_life = 2554
str_by_order_of_s6_s4_of_the_s5_has_been_indicted_for_treason_the_lord_has_been_stripped_of_all_reg4herhis_properties_and_has_fled_for_reg4herhis_life_he_is_rumored_to_have_gone_into_exile_s11 = 2555
str_local_notables_from_s1_a_village_claimed_by_the_s4_have_been_mistreated_by_their_overlords_from_the_s3_and_petition_s5_for_protection = 2556
str_villagers_from_s1_stole_some_cattle_from_s2 = 2557
str_villagers_from_s1_abducted_a_woman_from_a_prominent_family_in_s2_to_marry_one_of_their_boys = 2558
str_villagers_from_s1_killed_some_farmers_from_s2_in_a_fight_over_the_diversion_of_a_stream = 2559
str_your_new_minister_ = 2560
str_s10_is_your_new_minister_and_ = 2561
str_due_to_the_fall_of_s10_your_court_has_been_relocated_to_s12 = 2562
str_after_to_the_fall_of_s10_your_faithful_vassal_s9_has_invited_your_court_to_s11_ = 2563
str_after_to_the_fall_of_s11_your_court_has_nowhere_to_go = 2564
str_s8_wishes_to_inform_you_that_the_lords_of_s9_will_be_gathering_for_a_feast_at_his_great_hall_in_s10_and_invites_you_to_be_part_of_this_august_assembly = 2565
str_consult_with_s11_at_your_court_in_s12 = 2566
str_as_brief_as_our_separation_has_been_the_longing_in_my_heart_to_see_you_has_made_it_seem_as_many_years = 2567
str_although_it_has_only_been_a_short_time_since_your_departure_but_i_would_be_most_pleased_to_see_you_again = 2568
str_although_i_have_received_no_word_from_you_for_quite_some_time_i_am_sure_that_you_must_have_been_very_busy_and_that_your_failure_to_come_see_me_in_no_way_indicates_that_your_attentions_to_me_were_insincere_ = 2569
str_i_trust_that_you_have_comported_yourself_in_a_manner_becoming_a_gentleman_during_our_long_separation_ = 2570
str_it_has_been_many_days_since_you_came_and_i_would_very_much_like_to_see_you_again = 2571
str__you_should_ask_my_s11_s16s_permission_but_i_have_no_reason_to_believe_that_he_will_prevent_you_from_coming_to_see_me = 2572
str__you_should_first_ask_her_s11_s16s_permission = 2573
str__alas_as_we_know_my_s11_s16_will_not_permit_me_to_see_you_however_i_believe_that_i_can_arrange_away_for_you_to_enter_undetected = 2574
str__as_my_s11_s16_has_already_granted_permission_for_you_to_see_me_i_shall_expect_your_imminent_arrival = 2575
str_visit_s3_who_was_last_at_s4s18 = 2576
str_giver_troop_=_s2 = 2577
str_the_guards_at_the_gate_have_been_ordered_to_allow_you_through_you_might_be_imagining_things_but_you_think_one_of_them_may_have_given_you_a_wink = 2578
str_the_guards_glare_at_you_and_you_know_better_than_to_ask_permission_to_enter_however_as_you_walk_back_towards_your_lodgings_an_elderly_lady_dressed_in_black_approaches_you_i_am_s11s_nurse_she_whispers_urgently_don_this_dress_and_throw_the_hood_over_your_face_i_will_smuggle_you_inside_the_castle_to_meet_her_in_the_guise_of_a_skullery_maid__the_guards_will_not_look_too_carefully_but_i_beg_you_for_all_of_our_sakes_be_discrete = 2579
str_the_guards_glare_at_you_and_you_know_better_than_to_ask_permission_to_enter_however_as_you_walk_back_towards_your_lodgings_an_elderly_lady_dressed_in_black_approaches_you_i_am_s11s_nurse_she_whispers_urgently_wait_for_a_while_by_the_spring_outside_the_walls_i_will_smuggle_her_ladyship_out_to_meet_you_dressed_in_the_guise_of_a_shepherdess_but_i_beg_you_for_all_of_our_sakes_be_discrete = 2580
str_the_guards_glare_at_you_and_you_know_better_than_to_ask_permission_to_enter_however_as_you_walk_back_towards_your_lodgings_an_elderly_lady_dressed_in_black_approaches_you_i_am_s11s_nurse_she_whispers_urgently_her_ladyship_asks_me_to_say_that_yearns_to_see_you_but_that_you_should_bide_your_time_a_bit_her_ladyship_says_that_to_arrange_a_clandestine_meeting_so_soon_after_your_last_encounter_would_be_too_dangerous = 2581
str_the_guards_glare_at_you_and_you_know_better_than_to_ask_permission_to_enter = 2582
str_s3_commander_of_party_reg4_which_is_not_his_troop_leaded_party_reg5 = 2583
str_party_with_commander_mismatch__check_log_for_details_ = 2584
str_s4_adds_wealth_has_reg4_wealth_accumulated = 2585
str_doing_political_calculations_for_s9 = 2586
str_s9_does_not_have_a_fief = 2587
str_current_wealth_reg1 = 2588
str_debug__attempting_to_spawn_s4 = 2589
str_debug__s0_is_spawning_around_party__s7 = 2590
str_no_trigger_noted = 2591
str_triggered_by_npc_is_quitting = 2592
str_triggered_by_npc_has_grievance = 2593
str_triggered_by_npc_has_personality_clash = 2594
str_triggered_by_npc_has_political_grievance = 2595
str_triggered_by_npc_to_rejoin_party = 2596
str_triggered_by_npc_has_sisterly_advice = 2597
str_triggered_by_local_histories = 2598
str_s1_reg0_s2 = 2599
str_s3_reg0_s2 = 2600
str_s1_s2 = 2601
str_wanted_bandits_spotted_by_s4 = 2602
str_s4_ready_to_voice_objection_to_s3s_mission_if_in_party = 2603
str_test_effective_relation_=_reg3 = 2604
str_g_talk_troop_=_reg0__g_encountered_party_=_reg1__slot_value_=_reg2 = 2605
str_strange_that_one_didnt_seem_like_your_ordenary_troublemaker_he_didnt_drink_all_that_much__he_just_stood_there_quietly_and_watched_the_door_you_may_wish_to_consider_whether_you_have_any_enemies_who_know_you_are_in_town_a_pity_that_blood_had_to_be_spilled_in_my_establishment = 2606
str_wielded_item_reg3 = 2607
str_you_never_let_him_draw_his_weapon_still_it_looked_like_he_was_going_to_kill_you_take_his_sword_and_purse_i_suppose_he_was_trouble_but_its_not_good_for_an_establishment_to_get_a_name_as_a_place_where_men_are_killed = 2608
str_well_id_say_that_he_started_it_that_entitles_you_to_his_sword_and_purse_i_suppose_have_a_drink_on_the_house_as_i_daresay_youve_saved_a_patron_or_two_a_broken_skull_still_i_hope_he_still_has_a_pulse_its_not_good_for_an_establishment_to_get_a_name_as_a_place_where_men_are_killed = 2609
str_stop_no_shooting_no_shooting = 2610
str_em_ill_stay_out_of_this = 2611
str_the_s12_is_a_labyrinth_of_rivalries_and_grudges_lords_ignore_their_lieges_summons_and_many_are_ripe_to_defect = 2612
str_the_s12_is_shaky_many_lords_do_not_cooperate_with_each_other_and_some_might_be_tempted_to_defect_to_a_liege_that_they_consider_more_worthy = 2613
str_the_s12_is_fairly_solid_some_lords_bear_enmities_for_each_other_but_they_tend_to_stand_together_against_outside_enemies = 2614
str_the_s12_is_a_rock_of_stability_politically_speaking_whatever_the_lords_may_think_of_each_other_they_fight_as_one_against_the_common_foe = 2615
str_tribune_s12 = 2616
str_lady_s12 = 2617
str_lord_s12 = 2618
str_resolve_the_dispute_between_s11_and_s12 = 2619
str_in_doing_so_you_will_be_in_violation_of_your_truce_is_that_what_you_want = 2620
str_if_you_attack_without_provocation_some_of_your_vassals_may_consider_you_to_be_too_warlike_is_that_what_you_want = 2621
str_our_men_are_ready_to_ride_forth_at_your_bidding_are_you_sure_this_is_what_you_want = 2622
str_seek_recognition = 2623
str_seek_vassalhood = 2624
str_seek_a_truce = 2625
str__promised_fief = 2626
str_no_fiefss12 = 2627
str_fiefs_s0s12 = 2628
str_please_s65_ = 2629
str__s15_is_also_being_held_here_and_you_may_wish_to_see_if_reg4shehe_will_join_us = 2630
str_one_thing_in_our_favor_is_that_s12s_grip_is_very_shaky_he_rules_over_a_labyrinth_of_rivalries_and_grudges_lords_often_fail_to_cooperate_and_many_would_happily_seek_a_better_liege = 2631
str_thankfully_s12s_grip_is_fairly_shaky_many_lords_do_not_cooperate_with_each_other_and_some_might_be_tempted_to_seek_a_better_liege = 2632
str_unfortunately_s12s_grip_is_fairly_strong_until_we_can_shake_it_we_may_have_to_look_long_and_hard_for_allies = 2633
str_unfortunately_s12s_grip_is_very_strong_unless_we_can_loosen_it_it_may_be_difficult_to_find_allies = 2634
str_playername_come_to_plague_me_some_more_have_you = 2635
str_ah_it_is_you_again = 2636
str_well_playername = 2637
str_comment_found_s1 = 2638
str_rejoinder_noted = 2639
str_s11 = 2640
str_flagon_of_mead = 2641
str_skin_of_kumis = 2642
str_mug_of_kvass = 2643
str_cup_of_wine = 2644
str_you_intend_to_challenge_s13_to_force_him_to_retract_an_insult = 2645
str_intrigue_impatience=_reg3_must_be_less_than_100 = 2646
str_youll_have_to_speak_to_me_at_some_other_time_then = 2647
str_this_is_no_time_for_words = 2648
str_lord_not_alone = 2649
str_of_course_my_wife = 2650
str_perhaps_not_our_marriage_has_become_a_bit_strained_dont_you_think = 2651
str_why_is_that_my_wife_actually_our_marriage_has_become_such_that_i_prefer_to_have_a_witness_for_all_of_our_converations = 2652
str_all_right_then_what_do_you_have_to_say_out_with_it = 2653
str_bah__im_in_no_mood_for_whispering_in_the_corner = 2654
str_bah_i_dont_like_you_that_much_im_not_going_to_go_plot_with_you_in_some_corner = 2655
str_well__now_what_do_you_have_to_propose = 2656
str_trying_our_hand_at_intrigue_are_we_i_think_not = 2657
str_hah_i_trust_you_as_a_i_would_a_serpent_i_think_not = 2658
str_i_do_not_like_to_conduct_my_business_in_the_shadows_but_sometimes_it_must_be_done_what_do_you_have_to_say = 2659
str_i_would_prefer_to_conduct_our_affairs_out_in_the_open = 2660
str_do_not_take_this_amiss_but_with_you_i_would_prefer_to_conduct_our_affairs_out_in_the_open = 2661
str_hmm_you_have_piqued_my_interest_what_do_you_have_to_say = 2662
str_em_lets_keep_our_affairs_out_in_the_open_for_the_time_being = 2663
str_thats_sensible__the_world_is_full_of_churls_who_poke_their_noses_into_their_betters_business_now_tell_me_what_it_is_that_you_have_to_say = 2664
str_what_do_you_take_me_for_a_plotter = 2665
str_well_i_normally_like_to_keep_things_out_in_the_open_but_im_sure_someone_like_you_would_not_want_to_talk_in_private_unless_heshe_had_a_good_reason_what_is_it = 2666
str_surely_we_can_discuss_whatever_you_want_to_discuss_out_here_in_the_open_cant_we = 2667
str_im_a_simple__man_not_one_for_intrigue_but_id_guess_that_you_have_something_worthwhile_to_say_what_is_it = 2668
str_forgive_me_but_im_not_one_for_going_off_in_corners_to_plot = 2669
str_please_do_not_take_this_amiss_but_i_do_not_trust_you = 2670
str_certainly_playername_what_is_it = 2671
str_forgive_me_but_id_prefer_to_keep_our_conversations_in_the_open = 2672
str_please_do_not_take_this_amiss_but_im_not_sure_you_and_i_are_still_on_those_terms = 2673
str_persuasion__relation_less_than_5 = 2674
str_s15 = 2675
str_persuasion__2__lord_reputation_modifier__relation_less_than_10 = 2676
str_s13 = 2677
str_placeholder = 2678
str_really_well_this_is_the_first_i_have_heard_of_it_unless_you_build_up_support_for_that_claim_you_may_find_it_difficult_to_find_allies_however_whenever_you_see_fit_to_declare_yourself_publically_as_queen_i_should_be_honored_to_be_your_consort = 2679
str_yes_i_have_heard_such_talk_while_it_is_good_that_you_are_building_up_your_support_i_do_not_think_that_you_are_quite_ready_to_proclaim_yourself_yet_however_i_will_let_you_be_the_judge_of_that_and_when_you_decide_i_should_be_honored_to_be_your_consort = 2680
str_yes_and_many_others_in_calradia_think_so_as_well_perhaps_it_is_time_that_you_declared_yourself_and_we_shall_ride_forth_together_to_claim_your_throne_i_should_be_honored_to_be_your_consort = 2681
str_i_am_disturbed_about_my_lord_s15s_choice_of_companions = 2682
str_well_ill_be_honest_i_feel_that_sometimes_s15_overlooks_my_rights_and_extends_his_protection_to_the_unworthy = 2683
str_heh_one_thing_that_ill_say_about_s15_is_that_he_has_a_ripe_batch_of_bastards_in_his_court = 2684
str_well_sometimes_i_have_to_say_that_i_question_s15s_judgment_regarding_those_who_he_keeps_in_his_court = 2685
str_s15_is_a_weak_man_who_too_easily_lends_his_ear_to_evil_council_and_gives_his_protection_to_some_who_have_done_me_wrong = 2686
str_i_will_confess_that_sometimes_i_worry_about_s15s_judgment_particularly_in_the_matter_of_the_counsel_that_he_keeps = 2687
str_what_do_i_think_i_think_that_s15_is_a_vile_pretender_a_friend_to_the_flatterer_and_the_hypocrite = 2688
str_well_s15_is_not_like_you_ill_say_that_much = 2689
str_s15_long_may_he_live = 2690
str_he_is_my_liege_that_is_all_that_i_will_say_on_this_matter = 2691
str_that_you_are_the_rightful_heir_to_the_throne_of_calradia = 2692
str_that_s14_is_the_rightful_ruler_of_calradia = 2693
str_that_s14_will_rule_this_land_justly = 2694
str_that_s14_will_protect_our_rights_as_nobles = 2695
str_that_s14_will_unify_this_land_and_end_this_war = 2696
str_that_s14_will_reward_me_with_a_fief = 2697
str_he = 2698
str_king = 2699
str_she = 2700
str_queen = 2701
str_khan = 2702
str_i = 2703
str_according_to_the_ancient_law_and_custom_of_the_calradians_s45_should_be_s47 = 2704
str_because_s44_is_the_rightful_s47_of_the_s46 = 2705
str_you_speak_of_claims_and_legalities_yet_to_others_you_talk_of_bringing_peace_by_force = 2706
str_you_speak_of_bringing_peace_by_force_yet_to_others_you_make_legal_claims = 2707
str_you_speak_to_some_of_upholding_the_rights_of_the_commons_yet_you_speak_to_others_of_uphold_the_rights_of_nobles_what_if_those_rights_are_in_conflict = 2708
str_you_speak_to_me_of_upholding_my_rights_as_a_lord_but_to_others_you_talk_of_upholding_the_rights_of_all_commons_what_if_those_rights_come_into_conflict = 2709
str_a_claim_should_be_strong_indeed_before_one_starts_talking_about_it = 2710
str_a_king_should_prove_his_valor_beyond_any_doubt_before_he_starts_talking_about_a_claim_to_the_throne = 2711
str_you_must_prove_yourself_a_great_warrior_before_men_will_follow_you_as_king = 2712
str_a_claim_to_the_throne_should_be_strong_indeed_before_one_presses_it = 2713
str_indeed_but_a_man_must_also_prove_himself_a_great_warrior_before_men_will_follow_you_as_king = 2714
str_my_pigherd_can_declare_himself_king_if_he_takes_he_fancy_i_think_you_need_to_break_a_few_more_heads_on_tbe_battlefield_before_men_will_follow_you = 2715
str_if_you_do_not_wish_to_die_on_a_scaffold_like_so_many_failed_pretenders_before_you_i_would_suggest_that_you_to_build_your_claim_on_stronger_foundations_so_that_men_will_follow_you = 2716
str_if_you_do_not_wish_to_die_on_a_scaffold_like_so_many_failed_pretenders_before_you_i_would_advise_you_prove_yourself_on_the_field_of_battle_so_that_men_will_follow_you = 2717
str_talk_is_for_heralds_and_lawyers_real_kings_prove_themselves_with_their_swords = 2718
str_i_were_you_i_would_try_to_prove_myself_a_bit_more_before_i_went_about_pressing_my_claim = 2719
str_trump_check_random_reg4_skill_reg3 = 2720
str_s12_s43 = 2721
str_indeed_please_continue = 2722
str_me = 2723
str_preliminary_result_for_political_=_reg4 = 2724
str_i_worry_about_those_with_whom_you_have_chosen_to_surround_yourself = 2725
str_there_are_some_outstanding_matters_between_me_and_some_of_your_vassals_ = 2726
str_result_for_political_=_reg41 = 2727
str_my_liege_has_his_faults_but_i_dont_care_for_your_toadies = 2728
str_i_think_youre_a_good_man_but_im_worried_that_you_might_be_pushed_in_the_wrong_direction_by_some_of_those_around_you = 2729
str_i_am_loathe_to_fight_alongside_you_so_long_as_you_take_under_your_wing_varlots_and_base_men = 2730
str_ill_be_honest__with_some_of_those_who_follow_you_i_think_id_be_more_comfortable_fighting_against_you_than_with_you = 2731
str_i_say_that_you_can_judge_a_man_by_the_company_he_keeps_and_you_have_surrounded_yourself_with_vipers_and_vultures = 2732
str_you_know_that_i_have_always_had_a_problem_with_some_of_our_companions = 2733
str_politically_i_would_be_a_better_position_in_the_court_of_my_current_liege_than_in_yours = 2734
str_i_am_more_comfortable_with_you_and_your_companions_than_with_my_current_liege = 2735
str_militarily_youre_in_no_position_to_protect_me_should_i_be_attacked_id_be_reluctant_to_join_you_until_you_could = 2736
str_militarily_when_i_consider_the_lay_of_the_land_i_realize_that_to_pledge_myself_to_you_now_would_endanger_my_faithful_retainers_and_my_family = 2737
str_militarily_youre_in_no_position_to_come_to_my_help_if_someone_attacked_me_i_dont_mind_a_good_fight_but_i_like_to_have_a_chance_of_winning = 2738
str_militarily_you_would_have_me_join_you_only_to_find_myself_isolated_amid_a_sea_of_enemies = 2739
str_militarily_youre_in_no_position_to_come_to_my_help_if_someone_attacked_me_youd_let_me_be_cut_down_like_a_dog_id_bet = 2740
str_militarily_i_wouldnt_be_any_safer_if_i_joined_you = 2741
str_militarily_i_might_be_safer_if_i_joined_you = 2742
str_finally_there_is_a_cost_to_ones_reputation_to_change_sides_in_this_case_the_cost_would_be_very_high = 2743
str_finally_there_is_a_cost_to_ones_reputation_to_change_sides_in_this_case_the_cost_would_be_significant = 2744
str_finally_there_is_a_cost_to_ones_reputation_to_change_sides_in_this_case_however_many_men_would_understand = 2745
str_chance_of_success_=_reg1 = 2746
str_random_=_reg3 = 2747
str_i_will_not_have_it_be_said_about_me_that_i_am_a_traitor_that_is_my_final_decision_i_have_nothing_more_to_say_on_this_matter = 2748
str_i_am_pledged_to_defend_s14_i_am_sorry_though_we_may_meet_on_the_battlefield_i_hope_that_we_will_still_be_friends = 2749
str_i_really_cannot_bring_myself_to_renounce_s14_i_am_sorry_please_lets_not_talk_about_this_any_more = 2750
str_however_i_have_decided_that_i_must_remain_loyal_to_s14_i_am_sorry = 2751
str_however_i_will_not_let_you_lead_me_into_treason_do_not_talk_to_me_of_this_again = 2752
str_its_not_good_to_get_a_reputation_as_one_who_abandons_his_liege_that_is_my_decision_let_us_speak_no_more_of_this_matter = 2753
str_ive_decided_to_stick_with_s14_i_dont_want_to_talk_about_this_matter_any_more = 2754
str_lord_pledges_to_s4 = 2755
str_lord_recruitment_provokes_home_faction = 2756
str_error__wrong_quest_type = 2757
str_you_are_challenging_me_to_a_duel_how_droll_as_you_wish_playername_it_will_be_good_sport_to_bash_your_head_in = 2758
str_call_me_coward_very_well_you_leave_me_no_choice = 2759
str_reg3_hours = 2760
str_hour = 2761
str_however_circumstances_have_changed_since_we_made_that_decision_and_i_may_reconsider_shortly_s16 = 2762
str_i_wish_to_marry_your_s11_s10_i_ask_for_your_blessing = 2763
str_i_wish_to_marry_your_s11_s10_i_ask_for_your_help = 2764
str_you_plan_to_marry_s3_at_a_feast_hosted_by_s4_in_s5_you_should_be_notifed_of_the_feast_as_soon_as_it_is_held = 2765
str_your_s11_ = 2766
str_i_ask_again_may = 2767
str_may = 2768
str_very_well_as_far_as_im_concerned_i_suppose_she_can_see_most_anyone_she_likes__within_reason_of_course = 2769
str_very_well_an_alliance_with_you_could_be_valuable_go_chat_with_her_and_see_if_you_can_get_her_to_take_a_fancy_to_you_if_she_doesnt_and_if_we_still_want_to_conclude_this_business_then_i_can_make_her_see_reason = 2770
str_you_have_my_blessing_to_pay_suit_to_her__so_long_as_your_intentions_are_honorable_of_course_depending_on_how_things_proceed_between_you_two_we_may_have_more_to_discuss_at_a_later_date = 2771
str_war_damage_inflicted_reg3_suffered_reg4_ratio_reg5 = 2772
str_error__did_not_calculate_war_progress_string_properly = 2773
str_the_war_has_barely_begun_so_and_it_is_too_early_to_say_who_is_winning_and_who_is_losing = 2774
str_we_have_been_hitting_them_very_hard_and_giving_them_little_chance_to_recover = 2775
str_the_fighting_has_been_hard_but_we_have_definitely_been_getting_the_better_of_them = 2776
str_they_have_been_hitting_us_very_hard_and_causing_great_suffering = 2777
str_the_fighting_has_been_hard_and_i_am_afraid_that_we_have_been_having_the_worst_of_it = 2778
str_both_sides_have_suffered_in_the_fighting = 2779
str_no_clear_winner_has_yet_emerged_in_the_fighting_but_i_think_we_are_getting_the_better_of_them = 2780
str_no_clear_winner_has_yet_emerged_in_the_fighting_but_i_fear_they_may_be_getting_the_better_of_us = 2781
str_no_clear_winner_has_yet_emerged_in_the_fighting = 2782
str_s9_s14 = 2783
str_there_is_no_campaign_currently_in_progress = 2784
str_we_are_assembling_the_army = 2785
str_we_aim_to_take_the_fortress_of_s8 = 2786
str_we_are_on_a_raid_and_are_now_targeting_s8 = 2787
str_we_are_trying_to_seek_out_and_defeat_s8 = 2788
str_we_are_riding_to_the_defense_of_s8 = 2789
str_we_are_gathering_for_a_feast_at_s8 = 2790
str__however_that_may_change_shortly_s14 = 2791
str_it_is_our_custom_to_seal_any_such_alliances_with_marriage_and_in_fact_we_have_been_looking_for_a_suitable_groom_for_my_s11_s14 = 2792
str_once_again_ = 2793
str_cheat__marriage_proposal = 2794
str_you_plan_to_marry_s4_as_you_have_no_family_in_calradia_he_will_organize_the_wedding_feast = 2795
str_s43_just_so_you_know_if_you_attack_me_you_will_be_in_violation_of_the_truce_you_signed_with_the_s34 = 2796
str_very_well__you_are_now_my_liege_as_well_as_my_husband = 2797
str_i_thank_you_reg39my_ladylord = 2798
str_now_some_might_say_that_women_have_no_business_leading_mercenary_companies_but_i_suspect_that_you_would_prove_them_wrong_what_do_you_say = 2799
str_what_do_you_say_to_entering_the_service_of_s9_as_a_mercenary_captain_i_have_no_doubt_that_you_would_be_up_to_the_task = 2800
str_s9_asked_you_to_rescue_s13_who_is_prisoner_at_s24 = 2801
str_s9_asked_you_to_attack_a_village_or_some_caravans_as_to_provoke_a_war_with_s13 = 2802
str_s9_asked_you_to_catch_the_three_groups_of_runaway_serfs_and_bring_them_back_to_s4_alive_and_breathing_he_said_that_all_three_groups_are_heading_towards_s3 = 2803
str_error__player_not_logged_as_groom = 2804
str_you_intend_to_bring_goods_to_s9_in_preparation_for_the_feast_which_will_be_held_as_soon_as_conditions_permit = 2805
str_hello_playername = 2806
str_ah_my_gentle_playername_how_much_good_it_does_my_heart_to_see_you_again = 2807
str_playername__i_am_so_glad_to_see_you_again_i_must_say_i_do_envy_your_freedom_to_ride_out_and_experience_the_world = 2808
str_playername__i_am_so_glad_to_see_you_i_trust_that_you_have_been_behaving_honorably_since_last_we_met = 2809
str_playername__i_am_so_glad_that_you_were_able_to_come = 2810
str_i_do_enjoy_speaking_to_you_but_i_am_sure_you_understand_that_our_people_cluck_their_tongues_at_a_woman_to_spend_too_long_conversing_with_a_man_outside_her_family__although_the_heavens_know_its_never_the_man_who_is_held_to_blame_ = 2811
str_as_much_as_i_enjoy_speaking_to_you_i_do_not_care_to_be_gossiped_about_by_others_who_might_lack_my_grace_and_beauty_ = 2812
str_i_do_so_enjoy_speaking_to_you_but_as_a_daughter_of_one_of_the_great_families_of_this_land_i_must_set_an_example_of_propriety_ = 2813
str_i_do_so_enjoy_speaking_to_you_but_as_a_daughter_of_good_family_i_must_protect_my_reputation_ = 2814
str_although_it_is_kind_of_you_to_pay_me_such_attentions_i_suspect_that_you_might_find_other_ladies_who_may_be_more_inclined_to_return_your_affection = 2815
str_as_flattered_as_i_am_by_your_attentions_perhaps_you_should_seek_out_another_lady_of_somewhat_shall_we_say_different_tastes = 2816
str_as_flattered_as_i_am_by_your_attentions_i_am_a_daughter_of_good_family_and_must_be_aware_of_my_reputation_it_is_not_seemly_that_i_converse_too_much_at_one_time_with_one_man_i_am_sure_you_understand_now_if_you_will_excuse_me = 2817
str_very_well__i_will_let_you_choose_the_time = 2818
str_good_i_am_glad_that_you_have_abandoned_the_notion_of_pushing_me_into_marriage_before_i_was_ready = 2819
str_rival_found_s4_reg0_relation = 2820
str_i_am = 2821
str_s12 = 2822
str_s12_s11_to_s14 = 2823
str_s12 = 2824
str_s12_i_am_here_for_the_feast = 2825
str_another_tournament_dedication_oh_i_suppose_it_is_always_flattering = 2826
str_do_you_why_what_a_most_gallant_thing_to_say = 2827
str_hmm_i_cannot_say_that_i_altogether_approve_of_such_frivolity_but_i_must_confess_myself_a_bit_flattered = 2828
str_why_thank_you_you_are_most_kind_to_do_so = 2829
str_you_are_most_courteous_and_courtesy_is_a_fine_virtue_ = 2830
str_hmm_youre_a_bold_one_but_i_like_that_ = 2831
str_ah_well_they_all_say_that_but_no_matter_a_compliment_well_delivered_is_at_least_a_good_start_ = 2832
str_oh_do_you_mean_that_such_a_kind_thing_to_say = 2833
str_you_are_a_most_gallant_young_man_ = 2834
str__do_come_and_see_me_again_soon = 2835
str_you_intend_to_ask_s12_for_permission_to_marry_s15 = 2836
str_you_intend_to_ask_s12_to_pressure_s10_to_marry_you = 2837
str_do_be_careful_i_am_so_much_endebted_to_you_for_this = 2838
str_go_then__we_shall_see_which_of_you_triumphs = 2839
str_sigh_i_will_never_truly_understand_men_and_their_rash_actions = 2840
str_you_intend_to_challenge_s13_to_force_him_to_relinquish_his_suit_of_s11 = 2841
str_farewell = 2842
str_farewell_playername = 2843
str___we_believe_that_there_is_money_to_be_made_selling_ = 2844
str_s14s15_ = 2845
str__we_carry_a_selection_of_goods_although_the_difference_in_prices_for_each_is_not_so_great_we_hope_to_make_a_profit_off_of_the_whole = 2846
str_s14and_other_goods = 2847
str__have_you_not_signed_a_truce_with_our_lord = 2848
str_parole = 2849
str_normal = 2850
str_s51 = 2851
str__meanwhile_s51_reg2areis_being_held_in_the_castle_but_reg2havehas_made_pledges_not_to_escape_and_reg2areis_being_held_in_more_comfortable_quarters = 2852
str_you_may_be_aware_my_lord_of_the_quarrel_between_s4_and_s5_which_is_damaging_the_unity_of_this_realm_and_sapping_your_authority_if_you_could_persuade_the_lords_to_reconcile_it_would_boost_your_own_standing_however_in_taking_this_on_you_run_the_risk_of_one_the_lords_deciding_that_you_have_taken_the_rivals_side = 2853
str_you_may_be_aware_my_lord_of_the_quarrel_between_s4_and_s5_which_is_damaging_the_unity_of_this_realm_and_sapping_your_authority_if_you_could_persuade_the_lords_to_reconcile_i_imagine_that_s7_would_be_most_pleased_however_in_taking_this_on_you_run_the_risk_of_one_the_lords_deciding_that_you_have_taken_the_rivals_side = 2854
str__of_course_the_land_is_currently_at_peace_so_you_may_have_better_luck_in_other_realms = 2855
str_here = 2856
str_over = 2857
str_s8_in_s12 = 2858
str__has_put_together_a_bounty_on_some_bandits_who_have_been_attacking_travellers_in_the_area = 2859
str__is_looking_for_a_way_to_avoid_an_impending_war = 2860
str__may_need_help_rescuing_an_imprisoned_family_member = 2861
str__has_been_asking_around_for_someone_who_might_want_work_id_watch_yourself_with_him_though = 2862
str_town = 2863
str_castle = 2864
str__but_he_is_holding_there_as_a_prisoner_at_dungeon_of_s13 = 2865
str_log_entry_type_reg4_for_s4_total_entries_reg5 = 2866
str_error__reputation_type_for_s9_not_within_range = 2867
str_they_say_that_s9_is_a_most_conventional_maiden__devoted_to_her_family_of_a_kind_and_gentle_temperament_a_lady_in_all_her_way = 2868
str_they_say_that_s9_is_a_bit_of_a_romantic_a_dreamer__of_a_gentle_temperament_yet_unpredictable_she_is_likely_to_be_led_by_her_passions_and_will_be_trouble_for_her_family_ill_wager = 2869
str_they_say_that_s9_is_determined_to_marry_well_and_make_her_mark_in_the_world_she_may_be_a_tremendous_asset_for_her_husband__provided_he_can_satisfy_her_ambition = 2870
str_they_say_that_s9_loves_to_hunt_and_ride_maybe_she_wishes_she_were_a_man_whoever_she_marries_will_have_a_tough_job_keeping_the_upper_hand_i_would_say = 2871
str_they_say_that_s9_is_a_lady_of_the_highest_moral_standards_very_admirable_very_admirable__and_very_hard_to_please_ill_warrant = 2872
str_s9_is_now_betrothed_to_s11_soon_we_believe_there_shall_be_a_wedding = 2873
str_i_have_not_heard_any_news_about_her = 2874
str_searching_for_rumors_for_s9 = 2875
str_they_say_that_s9_has_shown_favor_to_s11_perhaps_it_will_not_be_long_until_they_are_betrothed__if_her_family_permits = 2876
str_they_say_that_s9_has_been_forced_by_her_family_into_betrothal_with_s11 = 2877
str_they_say_that_s9_has_agreed_to_s11s_suit_and_the_two_are_now_betrothed = 2878
str_they_say_that_s9_under_pressure_from_her_family_has_agreed_to_betrothal_with_s11 = 2879
str_they_say_that_s9_has_refused_s11s_suit = 2880
str_they_say_that_s11_has_tired_of_pursuing_s9 = 2881
str_they_say_that_s9s_family_has_forced_her_to_renounce_s11_whom_she_much_loved = 2882
str_they_say_that_s9_has_run_away_with_s11_causing_her_family_much_grievance = 2883
str_they_say_that_s9_and_s11_have_wed = 2884
str_they_say_that_s9_was_recently_visited_by_s11_who_knows_where_that_might_lead = 2885
str_there_is_not_much_to_tell_but_it_is_still_early_in_the_season = 2886
str_error_lady_selected_=_s9 = 2887
str_s12there_is_a_feast_of_the_s3_in_progress_at_s4_but_it_has_been_going_on_for_a_couple_of_days_and_is_about_to_end_ = 2888
str_s12there_is_a_feast_of_the_s3_in_progress_at_s4_which_should_last_for_at_least_another_day_ = 2889
str_s12there_is_a_feast_of_the_s3_in_progress_at_s4_which_has_only_just_begun_ = 2890
str_not_at_this_time_no = 2891
str_s12the_great_lords_bring_their_daughters_and_sisters_to_these_occasions_to_see_and_be_seen_so_they_represent_an_excellent_opportunity_to_make_a_ladys_acquaintance = 2892
str_you_will_not_be_disappointed_sirmadam_you_will_not_find_better_warriors_in_all_calradia = 2893
str_your_excellency = 2894
str_s10_and_s11 = 2895
str_your_loyal_subjects = 2896
str_loyal_subjects_of_s10 = 2897
str_the = 2898
str_we = 2899
str_track_down_s7_and_defeat_him_defusing_calls_for_war_within_the_s11 = 2900
str_track_down_the_s9_who_attacked_travellers_near_s8_then_report_back_to_the_town = 2901
str_fire_time__reg0_cur_time__reg1 = 2902
str_fire_set_up_time_at_city_reg0_is_reg1 = 2903
str_our_power__reg3__enemy_power__reg4 = 2904
str_do_you_wish_to_award_it_to_one_of_your_vassals = 2905
str_who_do_you_wish_to_give_it_to = 2906
str_sire_my_lady_we_have_taken_s1_s2 = 2907
str_s12i_want_to_have_s1_for_myself = 2908
str_fiefs_s0 = 2909
str_reserved_001 = 2910
str_production_setting_buy_from_market = 2911
str_production_setting_buy_from_inventory = 2912
str_production_setting_produce_to_inventory = 2913
str_production_setting_produce_to_market = 2914
str_feast_quest_expired = 2915
str_whereabouts_unknown = 2916
str_mulberry_groves = 2917
str_olive_groves = 2918
str_acres_flax = 2919
str_enterprise_enemy_realm = 2920
str_intrigue_success_chance = 2921
str_you_intend_to_denounce_s14_to_s13_on_behalf_of_s12 = 2922
str_you_intend_to_denounce_s14_to_his_face_on_behalf_of_s14 = 2923
str_you_intend_to_bring_gift_for_s14 = 2924
str_we_will_gather_the_army_but_not_ride_until_we_have_an_objective = 2925
str_we_shall_lay_siege_to_an_easy_fortress_to_capture = 2926
str_we_shall_strike_at_the_heart_of_our_foe_and_seize_the_fortress_of_s14 = 2927
str_we_shall_take_the_fortress_of_s14_which_appears_easily_defensible = 2928
str_we_shall_cut_a_fiery_trail_through_their_richest_lands_to_draw_them_out_to_battle = 2929
str_strategy_criticism_rash = 2930
str_strategy_criticism_cautious = 2931
str_tavernkeeper_invalid_quest = 2932
str_faction_title_male_player = 2933
str_faction_title_male_1 = 2934
str_faction_title_male_2 = 2935
str_faction_title_male_3 = 2936
str_faction_title_male_4 = 2937
str_faction_title_male_5 = 2938
str_faction_title_male_6 = 2939
str_faction_title_female_player = 2940
str_faction_title_female_1 = 2941
str_faction_title_female_2 = 2942
str_faction_title_female_3 = 2943
str_faction_title_female_4 = 2944
str_faction_title_female_5 = 2945
str_faction_title_female_6 = 2946
str_name_kingdom_text = 2947
str_default_kingdom_name = 2948
str_lord_defects_ordinary = 2949
str_lord_defects_player = 2950
str_lord_defects_player_faction = 2951
str_lord_indicted_player_faction = 2952
str_lord_indicted_dialog_approach = 2953
str_lord_indicted_dialog_approach_yes = 2954
str_lord_indicted_dialog_approach_no = 2955
str_lord_indicted_dialog_rejected = 2956
str__has_been_worried_about_bandits_establishing_a_hideout_near_his_home = 2957
str_bandit_lair_quest_description = 2958
str_bandit_hideout_preattack = 2959
str_bandit_hideout_failure = 2960
str_bandit_hideout_success = 2961
str_bandit_approach_defile = 2962
str_bandit_approach_swamp = 2963
str_bandit_approach_thickets = 2964
str_bandit_approach_cliffs = 2965
str_bandit_approach_cove = 2966
str_political_explanation_lord_lacks_center = 2967
str_political_explanation_lord_took_center = 2968
str_political_explanation_most_deserving_friend = 2969
str_political_explanation_most_deserving_in_faction = 2970
str_political_explanation_self = 2971
str_political_explanation_marshal = 2972
str_prisoner_at_large = 2973
str_quick_battle_scene_1 = 2974
str_quick_battle_scene_2 = 2975
str_quick_battle_scene_3 = 2976
str_quick_battle_scene_4 = 2977
str_quick_battle_scene_5 = 2978
str_quick_battle_troop_1 = 2979
str_quick_battle_troop_2 = 2980
str_quick_battle_troop_3 = 2981
str_quick_battle_troop_4 = 2982
str_quick_battle_troop_5 = 2983
str_quick_battle_troop_6 = 2984
str_quick_battle_troop_7 = 2985
str_quick_battle_troop_8 = 2986
str_quick_battle_troop_9 = 2987
str_quick_battle_troop_10 = 2988
str_quick_battle_troop_11 = 2989
str_quick_battle_troops_end = 2990
str_tutorial_training_ground_intro_message = 2991
str_map_basic = 2992
str_game_type_basic = 2993
str_battle = 2994
str_siege_offense = 2995
str_siege_defense = 2996
str_character = 2997
str_biography = 2998
str_player = 2999
str_enemy = 3000
str_faction = 3001
str_army_composition = 3002
str_army_size = 3003
str_reg0_percent = 3004
str_reg0_men = 3005
str_start = 3006
str_i_need_to_raise_some_men_before_attempting_anything_else = 3007
str_we_are_currently_at_peace = 3008
str_the_marshalship = 3009
str_you = 3010
str_myself = 3011
str_my_friend_s15 = 3012
str_custom_battle = 3013
str_comment_intro_liege_affiliated_to_player = 3014
str_s21_the_s8_declared_war_out_of_personal_enmity = 3015
str_s21_the_s8_declared_war_in_response_to_border_provocations = 3016
str_s21_the_s8_declared_war_to_curb_the_other_realms_power = 3017
str_s21_the_s8_declared_war_to_regain_lost_territory = 3018
str__family_ = 3019
str_we_are_conducting_recce = 3020
str__family_ = 3021
str_s49_s12_s11_end = 3022
str_center_party_not_active = 3023
str_center_is_friendly = 3024
str_center_is_already_besieged = 3025
str_center_is_looted_or_raided_already = 3026
str_center_marshal_does_not_want_to_attack_innocents = 3027
str_center_we_have_already_committed_too_much_time_to_our_present_siege_to_move_elsewhere = 3028
str_center_we_are_already_here_we_should_at_least_loot_the_village = 3029
str_center_far_away_we_can_reconnoiter_but_will_delay_decision_until_we_get_close = 3030
str_center_far_away_our_cautious_marshal_does_not_wish_to_reconnoiter = 3031
str_center_far_away_even_for_our_aggressive_marshal_to_reconnoiter = 3032
str_center_far_away_reason = 3033
str_center_closer_but_this_is_not_enought = 3034
str_center_protected_by_enemy_army_aggressive = 3035
str_center_protected_by_enemy_army_cautious = 3036
str_center_cautious_marshal_believes_center_too_difficult_to_capture = 3037
str_center_even_aggressive_marshal_believes_center_too_difficult_to_capture = 3038
str_center_value_outweighed_by_difficulty_of_capture = 3039
str_center_value_justifies_the_difficulty_of_capture = 3040
str_center_is_indefensible = 3041
str_we_are_waiting_for_selection_of_marshal = 3042
str_best_to_attack_the_enemy_lands = 3043
str_we_believe_the_fortress_will_be_worth_the_effort_to_take_it = 3044
str_we_will_gather_to_defend_the_beleaguered_fortress = 3045
str_the_enemy_temporarily_has_the_field = 3046
str_center_has_not_been_scouted = 3047
str_we_have_assembled_some_vassals = 3048
str_we_are_waiting_here_for_vassals = 3049
str_we_are_travelling_to_s11_for_vassals = 3050
str_center_strength_not_scouted = 3051
str_center_strength_strongly_defended = 3052
str_center_strength_moderately_defended = 3053
str_center_strength_weakly_defended = 3054
str_center_distant_from_concentration = 3055
str_plus = 3056
str_minus = 3057
str_tutorial_training_ground_warning_no_weapon = 3058
str_tutorial_training_ground_warning_shield = 3059
str_tutorial_training_ground_warning_melee_with_parry = 3060
str_tutorial_training_ground_warning_melee = 3061
str_tutorial_training_ground_attack_training = 3062
str_tutorial_training_ground_attack_training_down = 3063
str_tutorial_training_ground_attack_training_up = 3064
str_tutorial_training_ground_attack_training_left = 3065
str_tutorial_training_ground_attack_training_right = 3066
str_tutorial_training_ground_parry_training = 3067
str_tutorial_training_ground_chamber_training = 3068
str_tutorial_training_ground_archer_training = 3069
str_tutorial_training_ground_ammo_refill = 3070
str_tutorial_training_ground_archer_text_1 = 3071
str_tutorial_training_ground_archer_text_2 = 3072
str_tutorial_training_ground_archer_text_3 = 3073
str_tutorial_training_ground_archer_text_4 = 3074
str_tutorial_training_ground_archer_text_5 = 3075
str_tutorial_training_ground_horseman_text_1 = 3076
str_tutorial_training_ground_horseman_text_2 = 3077
str_tutorial_training_ground_horseman_text_3 = 3078
str_tutorial_training_ground_horseman_text_4 = 3079
str_tutorial_training_ground_horseman_text_5 = 3080
str_tutorial_training_ground_horseman_text_6 = 3081
str_the_great_lords_of_your_kingdom_plan_to_gather_at_your_hall_in_s10_for_a_feast = 3082
str_your_previous_court_some_time_ago = 3083
str_awaiting_the_capture_of_a_fortress_which_can_serve_as_your_court = 3084
str_but_if_this_goes_badly = 3085
str_i_realize_that_you_are_on_good_terms_with_s4_but_we_ask_you_to_do_this_for_the_good_of_the_realm = 3086
str_i_realize_that_you_are_on_good_terms_with_s4_but_the_blow_will_hurt_him_more = 3087
str_killed_bandit_at_alley_fight = 3088
str_wounded_by_bandit_at_alley_fight = 3089
str_cannot_leave_now = 3090
str_press_tab_to_exit_from_town = 3091
str_find_the_lair_near_s9_and_free_the_brother_of_the_prominent_s10_merchant = 3092
str_please_sir_my_lady_go_find_some_volunteers_i_do_not_know_how_much_time_we_have = 3093
str_you_need_more_men_sir_my_lady = 3094
str_good_you_have_enough_men = 3095
str_do_not_waste_time_go_and_learn_where_my_brother_is = 3096
str_start_up_quest_message_1 = 3097
str_start_up_quest_message_2 = 3098
str_start_up_quest_message_3 = 3099
str_start_up_first_quest = 3100
str_reason_1 = 3101
str_reason_2 = 3102
str_reason_3 = 3103
str_reason_4 = 3104
str_reason_5 = 3105
str_reason_6 = 3106
str_reason_7 = 3107
str_reason_8 = 3108
str_reason_9 = 3109
str_has_decided_that_an_attack_on_ = 3110
str_this_would_be_better_worth_the_effort = 3111
str_has_decided_to_defend_ = 3112
str_before_going_offensive_we_should_protect_our_lands_if_there_is_any_threat_so_this_can_be_reason_marshall_choosed_defending_s4 = 3113
str_are_you_all_right = 3114
str_you_are_awake = 3115
str_save_town_from_bandits = 3116
str_you_fought_well_at_town_fight_survived = 3117
str_you_fought_normal_at_town_fight_survived = 3118
str_you_fought_bad_at_town_fight_survived = 3119
str_you_fought_well_at_town_fight = 3120
str_you_wounded_at_town_fight = 3121
str_you_fought_well_at_town_fight_survived_answer = 3122
str_you_fought_normal_at_town_fight_survived_answer = 3123
str_you_fought_bad_at_town_fight_survived_answer = 3124
str_you_fought_well_at_town_fight_answer = 3125
str_you_wounded_at_town_fight_answer = 3126
str_unfortunately_reg0_civilians_wounded_during_fight_more = 3127
str_unfortunately_reg0_civilians_wounded_during_fight = 3128
str_also_one_another_good_news_is_any_civilians_did_not_wounded_during_fight = 3129
str_merchant_and_you_call_some_townsmen_and_guards_to_get_ready_and_you_get_out_from_tavern = 3130
str_town_fight_ended_you_and_citizens_cleaned_town_from_bandits = 3131
str_town_fight_ended_you_and_citizens_cleaned_town_from_bandits_you_wounded = 3132
str_journey_to_reyvadin = 3133
str_journey_to_praven = 3134
str_journey_to_jelkala = 3135
str_journey_to_sargoth = 3136
str_journey_to_tulga = 3137
str_journey_to_shariz = 3138
str_lost_tavern_duel_ordinary = 3139
str_lost_tavern_duel_assassin = 3140
str_lost_startup_hideout_attack = 3141
str_reg1_blank_s3 = 3142
str_as_you_no_longer_maintain_an_independent_kingdom_you_no_longer_maintain_a_court = 3143
str_rents_from_s0 = 3144
str_tariffs_from_s0 = 3145
str_general_quarrel = 3146
str_the_steppes = 3147
str_the_deserts = 3148
str_the_tundra = 3149
str_the_forests = 3150
str_the_highlands = 3151
str_the_coast = 3152
str_my_lady_not_sufficient_chemistry = 3153
str_my_lady_engaged_to_another = 3154
str_attempting_to_rejoin_party = 3155
str_separated_from_party = 3156
str_whereabouts_unknown = 3157
str_none_yet_gathered = 3158
str_betrothed = 3159
str_leading_party = 3160
str_court_disbanded = 3161
str_i_am_not_accompanying_the_marshal_because_will_be_reappointment = 3162
str_persuasion_opportunity = 3163
str_marshal_warning = 3164
str_follow_army_quest_brief_2 = 3165
str_greetings_playername__it_is_good_to_see_you_i_hope_that_you_have_had_success_in_your_efforts_to_make_your_name_in_the_world = 3166
str_minister_advice_select_fief = 3167
str_minister_advice_select_fief_wait = 3168
str_minister_advice_fief_leading_vassal = 3169
str_unassigned_center = 3170
str_s43_also_you_should_know_that_an_unprovoked_assault_is_declaration_of_war = 3171
str_missing_after_battle = 3172
str_retrieve_garrison_warning = 3173
str_s12s15_declared_war_to_control_calradia = 3174
str_offer_gift_description = 3175
str_resolve_dispute_description = 3176
str_feast_wedding_opportunity = 3177
str_s21_the_s8_declared_war_as_part_of_a_bid_to_conquer_all_calradia = 3178
str_master_vinter = 3179
str_s54_has_left_the_realm = 3180
str_enterprise_s5_at_s0 = 3181
str_bread_site = 3182
str_ale_site = 3183
str_oil_site = 3184
str_wine_site = 3185
str_tool_site = 3186
str_leather_site = 3187
str_linen_site = 3188
str_wool_cloth_site = 3189
str_velvet_site = 3190
str_under_sequestration = 3191
str_describe_secondary_input = 3192
str_profit = 3193
str_loss = 3194
str_server_name_s0 = 3195
str_map_name_s0 = 3196
str_game_type_s0 = 3197
str_remaining_time_s0reg0_s1reg1 = 3198
str_you_are_a_lord_lady_of_s8_s9 = 3199
str_you_are_king_queen_of_s8_s9 = 3200
str_for_s4 = 3201
str_cancel_fiancee_quest = 3202
str_a_duel_request_is_sent_to_s0 = 3203
str_s0_offers_a_duel_with_you = 3204
str_your_duel_with_s0_is_cancelled = 3205
str_a_duel_between_you_and_s0_will_start_in_3_seconds = 3206
str_you_have_lost_a_duel = 3207
str_you_have_won_a_duel = 3208
str_server_s0 = 3209
str_disallow_ranged_weapons = 3210
str_ranged_weapons_are_disallowed = 3211
str_ranged_weapons_are_allowed = 3212
str_duel_starts_in_reg0_seconds = 3213
|
from django.urls import path
from . import views
# URL routes for the posts app.
# NOTE(review): the list view and the detail view both register name='post',
# so reverse('post') resolves only to the later registration — consider a
# distinct name such as 'post_detail' (renaming affects existing templates,
# so it is flagged here rather than changed).
urlpatterns = [
    path('', views.PostView.as_view(), name='post'),
    path('<int:pk>', views.PostDetailView.as_view(), name='post'),
    path('like/<int:pk>', views.TooglePostLikeView.as_view(), name='post_like'),
]
|
import requests as rq
from bs4 import BeautifulSoup
# SECURITY NOTE(review): a live API credential is hard-coded in source —
# move it to an environment variable or config file and rotate the key.
crtfc_key = '25320e73e479a8e8b9dcfa7a5a76faf0233652b5'
corp_code = '00126380'  # DART corporation code (stock tickers are not accepted)
bsns_year = '2015'      # business year to query
reprt_code = '11011'    # 11011 = annual report

# Query the OpenDART "alotMatter" (dividend matters) endpoint. Letting
# requests build the query string via `params` handles URL encoding and
# replaces the duplicated hand-concatenated URL lines.
dart_url = 'https://opendart.fss.or.kr/api/alotMatter.xml'
res = rq.get(dart_url, params={
    'crtfc_key': crtfc_key,
    'corp_code': corp_code,
    'bsns_year': bsns_year,
    'reprt_code': reprt_code,
})
res.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
res2 = res.content.decode('utf-8')
soup = BeautifulSoup(res2, "lxml")
print(type(res2))
print(type(soup))
print(res2)
|
import sys, os, platform
#This function is used to print runtime process and error messages to console and log file
#If the testcase is run on linux, it will only print the runtime process to console and will not create any log files
class Logger(object):
    """Tee-style replacement for sys.stdout.

    Every message is echoed to the real console; on Windows it is also
    written to ``logs/<scriptname>.log``.  The original opened the log file
    unconditionally (leaving empty log files on Linux, contrary to the
    stated intent) and silently dropped output on platforms other than
    Windows/Linux.
    """

    def __init__(self):
        self.terminal = sys.stdout
        # Use the script's base name so a path like "dir/test.py" does not
        # produce a broken "logs/dir/test.log" open (the original split on
        # '.' and kept any directory components).
        script = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        self.log = None
        if platform.system() == 'Windows':
            # Only Windows runs create a log file, matching the intent
            # described in the module comment.
            self.log = open("logs/" + script + '.log', "w")

    def write(self, message):
        # Always echo to the console (the original wrote nothing on e.g.
        # macOS); duplicate into the log file when one was opened.
        self.terminal.write(message)
        if self.log is not None:
            self.log.write(message)

    def flush(self):
        # sys.stdout replacements need flush() so print(..., flush=True)
        # and interpreter shutdown behave correctly.
        self.terminal.flush()
        if self.log is not None:
            self.log.flush()
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import ColorSensor
from pybricks.parameters import Port
import time
from sys import stderr
ev3 = EV3Brick()                   # EV3 brick handle (not used below)
colourLeft = ColorSensor(Port.S2)  # left colour sensor on input port 2
def RLI_testing2():
    """Count how many reflected-light readings fit into one second and
    report the total on stderr (a crude sensor sampling-rate benchmark)."""
    reads = 0
    deadline = time.time() + 1
    while time.time() < deadline:
        colourLeft.reflection()  # sample the sensor; the value is discarded
        reads += 1
    print(reads, file=stderr)
|
import pytest
from django.contrib.auth.models import User
def test_new_user(create_user):
    """Smoke-test the ``create_user`` fixture: it should yield one user."""
    fixture_user = create_user
    print(User.objects.count())   # expected: 1
    print(fixture_user.username)  # expected: django
    assert True
|
def calculate(a, o, b):
    """Apply the binary operator named by *o* to *a* and *b*.

    Supported operators: "+", "-", "*", and "/" (division only when the
    divisor is non-zero).  Any other operator — or division by zero —
    returns None explicitly instead of relying on Python's implicit
    fall-off-the-end return.
    """
    if o == "+":
        return a + b
    if o == "-":
        return a - b
    if o == "*":
        return a * b
    if o == "/" and b != 0:
        return a / b
    return None  # unsupported operator or division by zero
'''
Debug a function called calculate that takes 3 values.
The first and third values are numbers. The second value is a character.
If the character is "+" , "-", "*", or "/", the function will return the
result of the corresponding mathematical function on the two numbers.
If the string is not one of the specified characters, the function should return null.
calculate(2,"+", 4); //Should return 6
calculate(6,"-", 1.5); //Should return 4.5
calculate(-4,"*", 8); //Should return -32
calculate(49,"/", -7); //Should return -7
calculate(8,"m", 2); //Should return null
calculate(4,"/",0) //should return null
'''
|
from Cookie import SimpleCookie
from cgi import parse_qs, escape
import os
import sys
import base64
import traceback
import ConfigParser
from datetime import datetime
from BaseHTTPServer import HTTPServer
import json
# Make the parent directory importable so sharedlib can be found.
parDir = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
sys.path.append(parDir)
from sharedlib.testlink import TestlinkAPIClient, TestLinkHelper
# TestLink server URL and developer key are read from testlink_config.cfg.
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.join(parDir, "testlink_config.cfg")))
testlink_url = config.get('variables', 'url')
devkey = config.get('variables', 'devkey')
# Lazily-initialised TestLink client, shared across WSGI requests.
testlink_obj = ''
def application(environ, start_response):
    """WSGI entry point: proxy a TestLink API call described by the query
    string and return its result as JSON.

    Query parameters:
        function -- name of the TestlinkAPIClient method to invoke
        kwargs   -- optional Python-literal dict of keyword arguments

    Returns a one-element list holding the JSON body, per the WSGI
    iterable-of-bytestrings convention.
    """
    import ast  # local import: only needed to parse the kwargs literal
    global testlink_obj
    response_headers = []
    status = '200 OK'
    try:
        # Currently unused, but kept for parity with POST-body handling.
        req_length = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        req_length = 0
    # Build the TestLink client once and reuse it across requests.
    if not testlink_obj:
        tl_helper = TestLinkHelper(testlink_url, devkey, '')
        tl_helper.setParamsFromArgs()
        testlink_obj = tl_helper.connect(TestlinkAPIClient)
    request_body = parse_qs(environ['QUERY_STRING'])
    result = ""
    if request_body.get('kwargs'):
        # SECURITY: the original used eval() on request input, which
        # executes arbitrary code; literal_eval only accepts Python
        # literals and closes that hole.
        kwargs = ast.literal_eval(request_body.get('kwargs')[-1])
        result = getattr(testlink_obj, request_body.get('function')[-1])(kwargs)
    else:
        result = getattr(testlink_obj, request_body.get('function')[-1])()
    ret_data = json.dumps(result)
    response_headers.extend((('Content-type', 'application/json'),
                             ('Content-Length', str(len(ret_data)))))
    start_response(status, response_headers)
    # Return a list so the server emits the body in one write instead of
    # iterating the string character by character.
    return [ret_data]
|
def findmin(s, A):
    """Return the length of the shortest contiguous subarray of A whose
    sum is at least s, or 0 when no such subarray exists.

    Brute force with incrementally-extended window sums: B[i] always holds
    sum(A[i:i+l]), so each candidate length costs O(n) to check and extend,
    giving O(n^2) overall.  (The original also left a py2-only `print B`
    debug statement in the loop, which is removed here.)
    """
    n = len(A)
    B = list(A)  # window sums for the current length l
    for l in range(1, n + 1):
        if any(window >= s for window in B):
            return l
        # Grow every remaining window by one element on the right.
        B.pop()
        for i in range(len(B)):
            B[i] += A[i + l]
    return 0

A = [2, 3, 1, 4, 3]
s = 9
|
#!/urs/bin/env python3
import sys
import re
import io
import unittest
from unittest.mock import patch
try:
    from count_well_duplicates import output_writer, TALLY, LENGTH
except ImportError:
    # Catch only ImportError: the original bare `except:` also hid
    # unrelated failures (e.g. a syntax error in the module) behind this
    # "wrong working directory" hint.
    # If this fails, you are probably running the tests wrongly.
    print("****",
          "You want to run these tests from the top-level source folder by using:",
          " python3 -m unittest test.test_count_well_duplicates",
          "or even",
          " python3 -m unittest discover",
          "****",
          sep="\n")
    raise
# Ensure our assumptions about the tuple-index constants are right.
assert TALLY == 0
assert LENGTH == 1
# dups_found = TILE_DUPL[tile][target][level][TALLY]
# targets_inspected = TILE_DUPL[tile][target][level][LENGTH]
# A lane has ~100 tiles. A tile has maybe 2500 targets.
# A target has 5 levels (not including the centre).
# But here we have a lane with 1 tile, and the tile has 4 targets,
# and the target has 3 levels.
# See notebook sketch, which I will try to add as a PNG.
# Each col = 1 level of target. We're not recording lev 0!
# Test fixture: one tile ('1208') with 4 targets, each holding 3 levels of
# (dups_found, targets_inspected) pairs.
LANE_DUPL = {'1208' : [
  # Level 1 T/L 2 T/L 3 T/L
    [ ( 0, 6), ( 0,12), ( 0,18) ], #<-- 1 row = 1 target of this tile
    [ ( 2, 6), ( 1,10), ( 0,12) ],
    [ ( 3, 6), ( 1,10), ( 1,12) ],
    [ ( 0, 6), ( 1,12), ( 0,18) ] ] }
EXPECTED_OUT_1 = """
Lane: 1 Tile: 1208 Targets: 4/4
Level: 1 Wells: 24 Dups: 5 Hit: 2 AccO: 2 AccI: 3
Level: 2 Wells: 44 Dups: 3 Hit: 3 AccO: 3 AccI: 3
Level: 3 Wells: 60 Dups: 1 Hit: 1 AccO: 3 AccI: 1
LaneSummary: 1 Tiles: 1 Targets: 4/4
Level: 1 Wells: 24 Dups: 5 (0.20833) Hit: 2 (0.50000) AccO: 2 (0.50000) AccI: 3 (0.75000)
Level: 2 Wells: 44 Dups: 3 (0.06818) Hit: 3 (0.75000) AccO: 3 (0.75000) AccI: 3 (0.75000)
Level: 3 Wells: 60 Dups: 1 (0.01667) Hit: 1 (0.25000) AccO: 3 (0.75000) AccI: 1 (0.25000)
"""
# Same lane plus tile '1209', which has no valid targets at all.
BAD_TILE_LANE = {'1209': [ ] } #No valid targets.
BAD_TILE_LANE.update(LANE_DUPL)
# If we add the bad tile, what do we get?
EXPECTED_OUT_2 = """
Lane: 1 Tile: 1208 Targets: 4/4
Level: 1 Wells: 24 Dups: 5 Hit: 2 AccO: 2 AccI: 3
Level: 2 Wells: 44 Dups: 3 Hit: 3 AccO: 3 AccI: 3
Level: 3 Wells: 60 Dups: 1 Hit: 1 AccO: 3 AccI: 1
Lane: 1 Tile: 1209 Targets: 0/4
Level: 1 Wells: 0 Dups: 0 Hit: 0 AccO: 0 AccI: 0
Level: 2 Wells: 0 Dups: 0 Hit: 0 AccO: 0 AccI: 0
Level: 3 Wells: 0 Dups: 0 Hit: 0 AccO: 0 AccI: 0
LaneSummary: 1 Tiles: 2 Targets: 4/8
Level: 1 Wells: 24 Dups: 5 (0.20833) Hit: 2 (0.50000) AccO: 2 (0.50000) AccI: 3 (0.75000)
Level: 2 Wells: 44 Dups: 3 (0.06818) Hit: 3 (0.75000) AccO: 3 (0.75000) AccI: 3 (0.75000)
Level: 3 Wells: 60 Dups: 1 (0.01667) Hit: 1 (0.25000) AccO: 3 (0.75000) AccI: 1 (0.25000)
"""
# If we just ask for 2 levels?
# FIXME - my test dataset doesn't capture the case where AccI would be different at
# level 2 if there were a dupe at level 3 only.
EXPECTED_OUT_3 = """
Lane: 1 Tile: 1208 Targets: 4/4
Level: 1 Wells: 24 Dups: 5 Hit: 2 AccO: 2 AccI: 3
Level: 2 Wells: 44 Dups: 3 Hit: 3 AccO: 3 AccI: 3
LaneSummary: 1 Tiles: 1 Targets: 4/4
Level: 1 Wells: 24 Dups: 5 (0.20833) Hit: 2 (0.50000) AccO: 2 (0.50000) AccI: 3 (0.75000)
Level: 2 Wells: 44 Dups: 3 (0.06818) Hit: 3 (0.75000) AccO: 3 (0.75000) AccI: 3 (0.75000)
"""
# Empty output when the lane is totally bad and no targets are read at all.
EXPECTED_OUT_4 = """
Lane: 1 Tile: 1222 Targets: 0/4
LaneSummary: 1 Tiles: 1 Targets: 0/4
"""
class TestCountWellDuplicates(unittest.TestCase):
    """Exercises output_writer() against the small hand-built lane
    fixtures defined above, comparing its printed report with the
    EXPECTED_OUT_* templates."""

    # Standard Mock procedure: patch sys.stdout with a StringIO so the
    # report printed by output_writer() can be captured and inspected.
    @patch('sys.stdout', new_callable=io.StringIO)
    def test_output_writer_1lane_full(self, mock_stdout):
        output_writer(1, 4, LANE_DUPL, verbose=1)
        self._rescmp(mock_stdout, EXPECTED_OUT_1)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_output_writer_badlane_full(self, mock_stdout):
        output_writer(1, 4, BAD_TILE_LANE, verbose=1)
        self._rescmp(mock_stdout, EXPECTED_OUT_2)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_output_writer_badlane_brief(self, mock_stdout):
        output_writer(1, 4, BAD_TILE_LANE, verbose=0)
        # Non-verbose mode should emit only the last 4 (summary) lines.
        self._rescmp(mock_stdout, EXPECTED_OUT_2, -4)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_output_writer_limited_levels(self, mock_stdout):
        output_writer(1, 4, LANE_DUPL, verbose=1, levels=2)
        self._rescmp(mock_stdout, EXPECTED_OUT_3)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_output_writer_empty_data(self, mock_stdout):
        # Note if you try to specify levels you'll get a div by zero
        # error, but otherwise you'll just get a blank result.
        output_writer(1, 4, {'1222': [ ] }, verbose=1)
        self._rescmp(mock_stdout, EXPECTED_OUT_4)

    def _rescmp(self, ioobj, astring, start=0, end=None):
        """Compare the captured print-out against an expected-result
        template, normalising runs of 2+ spaces in the template to tabs
        and optionally slicing the expected lines with start/end."""
        printed = ioobj.getvalue().rstrip("\n").split("\n")
        template = astring.lstrip().rstrip("\n").split("\n")
        expected = [re.sub('\s\s+', '\t', line) for line in template]
        self.assertEqual(printed, expected[start:end])
|
#!/usr/bin/env python3
# coding: utf-8
#--------------------------------------------------------------------------------------------------------------------------------------------
# Reading the Bank Dataset
print('Reading the Bank Dataset...')
# importing pandas for reading the datasets
print('Importing pandas for reading the datasets...')
import pandas as pd
# reading the training dataset with a ';' delimiter
print('Reading the training dataset with a ; delimiter...')
bdata_train=pd.read_csv('BankData_train.csv',delimiter=';')
# reading the testing dataset with comma as delimiter
print('Reading the testing dataset with comma as delimiter...')
bdata_test=pd.read_csv('BankData_eval.csv')
print('Successfully read training and testing dataset')
# NOTE(review): `c` only pauses the console here; it is later reused as a
# loop variable by the dummy-variable loops below.
c=input('Press any key to continue...')
#--------------------------------------------------------------------------------------------------------------------------------------------
# Exploratory plots: cross-tabulate each categorical feature against the
# subscription outcome `y` to eyeball which features matter.
# importing matplotlib for plotting the graphs
print('Let us explore the feature variables and their relationship with the tendency to subscribe a term deposit')
print('Importing matplotlib for visualizing the relationship between the feature variables and subscription tendency')
import matplotlib.pyplot as plt
pd.crosstab(bdata_train.job,bdata_train.y).plot(kind='bar')
plt.title('Subscriptions based on Job')
plt.xlabel('Job')
plt.ylabel('No of Subscriptions')
plt.show()
# we observe that the people from the management industry are approached more
print('We observe that the people from the management industry are approached more')
pd.crosstab(bdata_train.marital,bdata_train.y).plot(kind='bar')
plt.title('Subscriptions based on Marital Status')
plt.xlabel('Marital Status')
plt.ylabel('No of Subscriptions')
plt.show()
# we observe that the married people are approached more
print('we observe that the married people are approached more')
pd.crosstab(bdata_train.education,bdata_train.y).plot(kind='bar')
plt.title('Subscriptions based on Education')
plt.xlabel('Education')
plt.ylabel('No of Subscriptions')
plt.show()
# we observe that the people with the secondary education are approached more
print('we observe that the people with the secondary education are approached more')
# stacked=True shows outcome proportions within each previous-campaign result
pd.crosstab(bdata_train.poutcome,bdata_train.y).plot(kind='bar',stacked=True)
plt.title('Subscriptions based on Outcome of Previous Campaign')
plt.xlabel('Outcome of Previous Campaign')
plt.ylabel('No of Subscriptions')
plt.show()
# we observe that the success of the previous campaign affect the subscription tendency
print('we observe that the success of the previous campaign affect the subscription tendency')
#--------------------------------------------------------------------------------------------------------------------------------------------
print('Performing data preprocessing before we actually fit our model...')
print('Creating dummy variables...')
# creating dummy variables for the training set
# creating a list of categorical variables to be transformed into dummy variables
category=['job','marital','education','default','housing','loan','contact','month','poutcome']
# creating a training set backup
# NOTE(review): this is an alias, not a copy; it works only because join()
# below returns new frames rather than mutating in place.
bdata_train_new = bdata_train
# creating dummy variables and joining it to the training set
for c in category:
    new_column = pd.get_dummies(bdata_train_new[c], prefix=c)
    bdata_train_dummy=bdata_train_new.join(new_column)
    bdata_train_new=bdata_train_dummy
# removing the dummy trap
# drop one dummy per category to avoid perfect multicollinearity
dummy_drop=['job_unknown','marital_divorced','education_unknown','default_no','housing_no','loan_no','contact_unknown','month_nov','poutcome_unknown']
# removing the unwanted columns by dropping it
bdata_train_final=bdata_train_new.drop(category+dummy_drop,axis=1)
# creating training set of features
x_train=bdata_train_final.drop(['y'],axis=1)
# creating training set of output variable
y_train=pd.DataFrame(bdata_train_final['y'])
# coding yes as '1' and no as '0'
y_train[y_train=='yes']='1'
y_train[y_train=='no']='0'
# converting it into integer categorical variable
y_train.y=y_train.y.astype('int64')
y_train.y=y_train.y.astype('category')
# creating dummy variables for the testing set
# creating a testing set backup (alias; see note above)
bdata_test_new = bdata_test
# creating dummy variables and joining it to the testing set
for c in category:
    new_column = pd.get_dummies(bdata_test_new[c], prefix=c)
    bdata_test_dummy=bdata_test_new.join(new_column)
    bdata_test_new=bdata_test_dummy
# removing the dummy trap
# NOTE(review): this list omits contact_unknown and month_nov, presumably
# because those dummies never occur in the evaluation file — confirm
# against BankData_eval.csv.
dummy_drop_test=['job_unknown','marital_divorced','education_unknown','default_no','housing_no','loan_no','poutcome_unknown']
# removing the unwanted columns by dropping it
bdata_test_final=bdata_test_new.drop(category+dummy_drop_test,axis=1)
# creating testing set of features
x_test=bdata_test_final.drop(['y'],axis=1)
# creating testing set of output variable
y_test=pd.DataFrame(bdata_test_final['y'])
# coding yes as '1' and no as '0'
y_test[y_test=='yes']='1'
y_test[y_test=='no']='0'
# converting it into integer categorical variable
y_test.y=y_test.y.astype('int64')
y_test.y=y_test.y.astype('category')
#--------------------------------------------------------------------------------------------------------------------------------------------
# Equalizing the number of features in training set and testing set
# finding the features which are in training set but not in testing set
print('finding the features which are in training set but not in testing set...')
missing_features = [col for col in x_train.columns.values.tolist()
                    if col not in x_test.columns.values.tolist()]
print(missing_features)
# importing numpy for creating zero matrices
import numpy as np
# creating missing feature columns with zero entries
# (the original hard-coded 100 rows and five hand-named columns; deriving
# both from the data keeps this working if the evaluation set changes)
print('creating missing feature columns with zero entries...')
for col in missing_features:
    x_test[col] = np.zeros(len(x_test))
# joining the missing feature columns to the testing feature set
print('joining the missing feature columns to the testing feature set...')
# Re-order (and restrict) the test columns to the training order: the
# StandardScaler below is positional, so a different column order would
# silently scale the wrong columns.
x_test = x_test[x_train.columns]
#--------------------------------------------------------------------------------------------------------------------------------------------
# Standardizing the training and testing feature set
print('Standardizing the training and testing feature set...')
# importing the Standard Scaler from sklearn
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# fit on the training data only, then apply the same transform to the test
# set so no information leaks from the evaluation data
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
print('Data preprocessing completed successfully')
c=input('Press any key to continue...')
#--------------------------------------------------------------------------------------------------------------------------------------------
# Creating the ANN model
print('Creating the Artificial Neural Network Model...')
# importing keras library
print('Importing the keras library')
import keras
from keras.models import Sequential
from keras.layers import Dense
# initializing the ANN
print('Initializing the ANN Classifier...')
classifier = Sequential()
# Adding the input layer and the hidden layer
print('Adding the input layer and the hidden layer...')
# NOTE(review): input_dim=35 must equal the post-preprocessing feature
# count of x_train — confirm if the dataset or dummy columns change.
classifier.add(Dense(units = 18 , kernel_initializer = 'uniform', activation = 'relu', input_dim = 35))
# Adding the output layer
print('Adding the output layer...')
# single sigmoid unit: binary subscribe / not-subscribe probability
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
print('Compiling ANN model...')
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
print('Fitting the ANN model to the training set...')
classifier.fit(x_train, y_train, batch_size = 10, epochs = 100)
#--------------------------------------------------------------------------------------------------------------------------------------------
# Plotting the ANN model (ann_visualizer is a third-party package)
print('Plotting the Artifical Neural Network...')
# importing ann_viz from ann_visualizer
from ann_visualizer.visualize import ann_viz
ann_viz(classifier, title = 'ANN model predicting Banking Subscriptions')
#--------------------------------------------------------------------------------------------------------------------------------------------
# Evaluating the performance of the ANN model
print('Let us evaluate the performance of the ANN model')
print('Summary - ANN Model :')
print('=====================')
classifier.summary()
print('=====================')
# evaluating the final loss and accuracy of the classifier
test_loss, test_accuracy = classifier.evaluate(x_test, y_test)
print('Final Loss and Accuracy of the Classifier on the Testing Set')
print('============================================================')
# NOTE(review): binary cross-entropy loss is not a percentage; the *100
# formatting below is misleading even though the code runs.
print('loss :',round(test_loss*100),'%')
print('accuracy :',round(test_accuracy*100),'%')
print('============================================================')
# predicting the testing set results
print('Obtaining the prediction probabilities...')
y_pred = classifier.predict(x_test)
print('Categorizing the cases with probabilities above 50% as yes and below as no')
# threshold the sigmoid outputs at 0.5 to get hard yes/no labels
y_pred = (y_pred > 0.50)
#--------------------------------------------------------------------------------------------------------------------------------------------
# importing confusion matrix and roc_auc_score from sklearn
print('Plotting the Confusion Matrix...')
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
# importing seaborn for plotting the heatmap
import seaborn as sn
cm = confusion_matrix(y_test, y_pred) # rows = truth, cols = prediction
df_cm = pd.DataFrame(cm, index = ('no', 'yes'), columns = ('predicted no', 'predicted yes'))
plt.figure(figsize = (5,4))
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('Actual')
sn.set(font_scale=1.4)
sn.heatmap(df_cm, annot=True, fmt='g')
plt.show()
# NOTE(review): this prints ROC AUC, not plain accuracy — the label is
# misleading but kept to avoid changing the program's output.
print("Test Data Accuracy: %0.4f" % roc_auc_score(y_test, y_pred))
#--------------------------------------------------------------------------------------------------------------------------------------------
# importing roc curve and metrics from sklearn
print('Plotting the ROC curve...')
from sklearn.metrics import roc_curve
import sklearn.metrics as metrics
fpr, tpr, threshold = metrics.roc_curve(y_test, y_pred)
roc_auc=roc_auc_score(y_test, y_pred)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')  # chance diagonal for reference
plt.xlim([-0.001, 1])
plt.ylim([0, 1.001])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
#--------------------------------------------------------------------------------------------------------------------------------------------
# Saving the ANN model
print('Saving the ANN model as predict_bank_subscription_ann.model...')
classifier.save('predict_bank_subscription_ann.model')
print('Saved ANN model Successfully')
# Syntax for loading the saved ANN model
# ann_model = keras.models.load_model('predict_bank_subscription_ann.model')
|
# #*********************************************************************
# content = Utility for reading and writing json files
# version = 0.1.0
# date = 2017-09-30
#
# license = MIT
# copyright = Copyright 2017 Thomas Moore
# author = Thomas Moore <moore.thomasj@gmail.com>
# #*********************************************************************
import json
# Py2/Py3 compatibility shim: `unicode` only exists on Python 2, so fall
# back to `str` on Python 3.
try:
    to_unicode = unicode
except NameError:
    to_unicode = str
def write_json_file(dataToWrite, filename):
    """Serialise *dataToWrite* as pretty-printed JSON to *filename*.

    A ".json" extension is appended when missing.  Returns the filename
    actually written.
    """
    # endswith() fixes the original substring test, which skipped appending
    # for names that merely contain ".json" (e.g. "my.json.backup").
    if not filename.endswith(".json"):
        filename += ".json"
    # Single-argument print() is valid on both Python 2 and 3; the old
    # `print "..."` statement form broke under Python 3.
    print("> write to json file is seeing: {0}".format(filename))
    with open(filename, "w") as jsonFile:
        json.dump(dataToWrite, jsonFile, indent=2)
    print("Data was successfully written to {0}".format(filename))
    return filename
def read_json_file(filename):
    """Load and return JSON data from *filename* (".json" is appended to
    the name when missing).

    Raises OSError('STOP PROCESS', ...) when the file cannot be opened or
    does not contain valid JSON, matching the original contract.
    """
    if not filename.endswith(".json"):
        filename += ".json"
    try:
        with open(filename, 'r') as jsonFile:
            return json.load(jsonFile)
    except (IOError, OSError, ValueError):
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer swallowed; JSONDecodeError is a
        # ValueError subclass.
        raise OSError('STOP PROCESS', "Could not read {0}".format(filename))
|
def rand5_recursive():
    """Rejection-sample rand7() down to the 1..5 range, retrying via
    recursion whenever the draw lands on 6 or 7."""
    draw = rand7()
    if draw <= 5:
        return draw
    return rand5_recursive()
# Worst case unbounded ("O(infinity)"): each attempt succeeds with p = 5/7.
def rand5():
    """Rejection-sample rand7() iteratively until the draw is in 1..5."""
    while True:
        draw = rand7()
        if draw <= 5:
            return draw
|
#!/usr/bin/env python
import sys
import pandas as pd
import argparse
import math
from os.path import basename
from scipy.stats import spearmanr
# Default minimum inter-subject correlation and outlier z-score thresholds
# for the --filter-subjects / --filter-deviations options.
DEFAULT_MIN_CORR = 0.5
DEFAULT_ZSCORE = 1.0
def na_spearmanr(a, b):
    """Spearman correlation of two Series, ignoring positions where either
    input is null.  Returns (-1.0, 1.0) when no correlation is defined
    (e.g. constant input produces NaN)."""
    keep = a.notnull().values & b.notnull().values
    rho, p = spearmanr(a[keep], b[keep])
    if isinstance(rho, float) and not math.isnan(rho):
        return rho, p
    return -1.0, 1.0
# Cache of per-DataFrame subject orderings, keyed by id(data).
# NOTE(review): id() keys are only valid while the object is alive; if a
# frame is garbage-collected, a later frame can reuse the same id and get
# a stale ordering — verify callers keep their frames alive.
deviant_subject_cache = {}

def remove_most_deviant_subjects(data, n):
    """Blank out (set to NaN) the *n* subjects who agree least with the
    other subjects' mean ratings.

    Columns 0-1 are assumed to be compound/const; the remaining columns
    are per-subject judgements.  Returns a modified copy of *data*.
    """
    if id(data) in deviant_subject_cache:
        by_rho = deviant_subject_cache[id(data)]
    else:
        judgement_columns = data.columns[2:]
        agreements = {}
        for j in judgement_columns:
            ratings = data[j]
            other_subjects = list(set(judgement_columns) - set([j]))
            # Mean rating per item over everyone except subject j.
            exclusive_means = data[other_subjects].transpose().mean()
            rho, p = na_spearmanr(ratings, exclusive_means)
            # sys.stderr.write("%s with others: %f\n" % (j, rho))
            agreements[j] = rho
        # Subjects ordered from least to most agreement.
        by_rho = sorted(agreements.keys(), key=agreements.__getitem__)
        deviant_subject_cache[id(data)] = by_rho
    out_data = data.copy()
    for j in by_rho[:n]:
        out_data[j] = float('nan')
    return out_data
def remove_percent_deviant_subjects(data, p):
    """Blank out the least-agreeing fraction *p* (0..1) of subjects."""
    subject_count = len(data.columns[2:])
    worst = int(math.ceil(p * subject_count))
    return remove_most_deviant_subjects(data, worst)
def calculate_subject_agreements(data):
    """Return {subject: spearman rho of that subject's ratings against the
    mean of all other subjects} for every judgement column (cols 2+)."""
    subjects = data.columns[2:]
    agreements = {}
    for subject in subjects:
        others = list(set(subjects) - set([subject]))
        mean_of_others = data[others].transpose().mean()
        rho, _p = na_spearmanr(data[subject], mean_of_others)
        agreements[subject] = rho
    return agreements
def remove_deviant_subjects(data, min_corr=DEFAULT_MIN_CORR):
    """Blank out (set to NaN) every subject whose agreement with the other
    subjects falls below *min_corr*; returns a modified copy of *data*."""
    # TODO: don't hardcode the 2-column metadata prefix
    subjects = data.columns[2:]
    dropped = []
    for subject in subjects:
        others = list(set(subjects) - set([subject]))
        mean_of_others = data[others].transpose().mean()
        rho, _p = na_spearmanr(data[subject], mean_of_others)
        if rho < min_corr:
            dropped.append(subject)
    cleaned = data.copy()
    for subject in dropped:
        cleaned[subject] = float('nan')
    return cleaned
# Cache of per-DataFrame z-score frames, keyed by id(data).
# NOTE(review): same id()-reuse caveat as deviant_subject_cache above.
deviant_ratings_cache = {}

def remove_deviant_ratings(data, outlier_zscore=DEFAULT_ZSCORE):
    """Blank out individual ratings more than *outlier_zscore* standard
    deviations from that item's mean rating; returns a modified copy."""
    judgement_columns = data.columns[2:]
    if id(data) in deviant_ratings_cache:
        zscores = deviant_ratings_cache[id(data)]
    else:
        rows = []
        for i, row in data.iterrows():
            new_row = row.copy()
            # Per-item mean and spread across subjects.
            mean = new_row[judgement_columns].transpose().mean()
            stddev = new_row[judgement_columns].transpose().std()
            for j in judgement_columns:
                zscore = (row[j] - mean) / stddev
                new_row[j] = abs(zscore)
            rows.append(new_row)
        zscores = pd.DataFrame(rows)[judgement_columns]
        deviant_ratings_cache[id(data)] = zscores
    data = data.copy()
    # where() keeps values whose |z| is within the threshold, NaNs the rest.
    data[judgement_columns] = data[judgement_columns].where(zscores <= outlier_zscore)
    return data
def aggregate_ratings(data):
    """Collapse per-subject ratings into per-item summary statistics.

    Returns a DataFrame with compound, const, mean, median, stddev and var
    columns; items with zero non-null ratings are skipped entirely.
    """
    output_data = []
    judgement_columns = data.columns[2:]
    for i, row in data.iterrows():
        ratings = row[judgement_columns].transpose()
        nonnull = sum(ratings.notnull())
        if nonnull == 0:
            # Skip items nobody rated (a single rating is still kept).
            continue
        mean = ratings.mean()
        # BUG FIX: the original `cond and x or y` idiom returned y whenever
        # x was falsy, so a legitimate median of 0.0 fell through to the
        # first (possibly NaN) positional value.  Series.median() already
        # skips NaNs and handles the single-rating case correctly.
        stddev = ratings.std() if nonnull > 1 else 0
        median = ratings.median()
        output_data.append({
            'compound': row['compound'],
            'const': row['const'],
            'mean': mean,
            'median': median,
            'stddev': stddev,
            'var': stddev * stddev
        })
    return pd.DataFrame(output_data)
def main():
    """CLI entry point: read rating CSVs, optionally filter deviant
    subjects/ratings, and write the (possibly aggregated) result to
    stdout; correlations against whole-compound ratings go to stderr."""
    parser = argparse.ArgumentParser(
        description='Filters out abnormal ratings from comp ratings.')
    parser.add_argument('--input', '-i', metavar="FILE", action="append", default=[],
                        help="Input comp ratings.")
    parser.add_argument('--filter-subjects', '-r', metavar="CORR", type=float,
                        help=("Filters subjects who do not correlate "
                              "with the others by at least CORR."))
    parser.add_argument('--filter-deviations', '-z', metavar="SIGMAs", type=float,
                        help=("Filters individual judgements that deviate "
                              "from the average by SIGMA std devs."))
    parser.add_argument('--output-aggregate', '-a', action='store_true',
                        help=("Output aggregate statistics only. (Default: "
                              "outputs new judgements with blanks)."))
    parser.add_argument('--compare-whole', '-w', metavar='FILE',
                        help='Compute correlations with the whole compound ratings.')
    args = parser.parse_args()
    # Renamed from `input`, which shadowed the builtin.
    ratings = pd.concat(map(pd.read_csv, args.input))
    output = ratings
    # Explicit None checks: truthiness tests silently ignored legitimate
    # zero thresholds (-r 0.0 / -z 0.0).
    if args.filter_subjects is not None:
        output = remove_deviant_subjects(output, args.filter_subjects)
    if args.filter_deviations is not None:
        output = remove_deviant_ratings(output, args.filter_deviations)
    if args.output_aggregate:
        aggregate_ratings(output).to_csv(sys.stdout, index=False)
    else:
        output.to_csv(sys.stdout, index=False)
    if args.compare_whole:
        agg = aggregate_ratings(output)
        whole_compounds = pd.read_csv(args.compare_whole)
        # we only want the intersection of the compounds
        keepers = set(output.compound).intersection(set(whole_compounds.compound))
        agg = agg[agg.compound.map(keepers.__contains__)]
        whole_compounds = whole_compounds[whole_compounds.compound.map(keepers.__contains__)]
        # DataFrame.sort() was removed in pandas 0.20; sort_values() is the
        # long-standing replacement (available since 0.17).
        whole_compounds = whole_compounds.sort_values('compound')
        by_sum = agg.groupby('compound').sum()
        by_sum['compound'] = by_sum.index
        by_sum = by_sum.sort_values('compound')
        rho_sum, p_sum = na_spearmanr(by_sum['mean'], whole_compounds['mean'])
        by_prod = agg.groupby('compound').prod()
        by_prod['compound'] = by_prod.index
        by_prod = by_prod.sort_values('compound')
        rho_prod, p_prod = na_spearmanr(by_prod['mean'], whole_compounds['mean'])
        sys.stderr.write("rho w/ whole compounds by sum: %f\n" % rho_sum)
        sys.stderr.write("rho w/ whole compounds by prod: %f\n" % rho_prod)

if __name__ == '__main__':
    main()
|
import cv2
import numpy as np
import sys
# Haar cascade model files for face, smile and eye detection.
facePath = "opencv-master/data/haarcascades/haarcascade_frontalface_default.xml"
smilePath = "opencv-master/data/haarcascades/haarcascade_smile.xml"
faceCascade = cv2.CascadeClassifier(facePath)
smileCascade = cv2.CascadeClassifier(smilePath)
eye_cascade = cv2.CascadeClassifier('opencv-master/data/haarcascades/haarcascade_eye.xml')

# Single capture handle: the original opened the camera twice (`cap` and
# `cam`), configured the resolution on one but read frames from the other,
# then released both.  One handle makes the 640x480 setting effective.
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # frame width
cap.set(4, 480)  # frame height

while True:
    ret, img = cap.read()  # Capture img-by-img
    if not ret:
        # Camera unavailable / stream ended: stop instead of crashing on None.
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.1, 5)
    # ---- Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        smile = smileCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.7,
            minNeighbors=22,
            minSize=(25, 25),
        )
        for (sx, sy, sw, sh) in smile:
            print("Found", len(smile), "smiles!")
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (255, 0, 0), 1)
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            print("Found", len(eyes), "eyes!")
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    cv2.imshow('my webcam', img)
    if cv2.waitKey(1) == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
|
################################################# -*- python -*-
#
# SConstruct makefile for Second Life viewer
# and servers.
#
# To build everything:
#
# scons ARCH=all BTARGET=all DISTCC=yes
#
# For help on options:
#
# scons -h
#
# Originally written by Tom Yedwab, 6/2006.
#
#################################################
import os
import sys
import glob
import re
# Normalise the interpreter's platform string: older Pythons report Linux
# as 'linux2'; collapse it to plain 'linux' for the build logic below.
platform = sys.platform
if platform == 'linux2':
    platform = 'linux'
######################
# GET VERSION #
######################
def grep(filestr, searchstr):
    """Return the first line of *filestr* matching regex *searchstr*, or
    None when nothing matches.  Exits the build (sys.exit(2)) when the
    file is missing, matching the original behaviour."""
    try:
        f = open(filestr, 'r')
    except IOError:
        # Single-argument print() works on both Python 2 and 3; the old
        # `print "..."` statement form broke under Python 3.
        print("No such file " + filestr)
        sys.exit(2)
    try:
        r = re.compile(searchstr)
        for line in f.read().split('\n'):
            if r.search(line):
                return line
        return None
    finally:
        # finally guarantees the handle closes on every path (the original
        # duplicated close() calls before each return).
        f.close()
def get_version(llver):
    """Return the numeric LL_VERSION_<llver> value found in llcommon/llversion.h.

    Aborts the build (exit status 2) when the header has no such line.
    """
    re_vernum = re.compile("[0-9]+")
    rstr = grep('llcommon/llversion.h', "LL_VERSION_" + llver)
    if rstr == None:
        print "No version information in llversion.h"
        sys.exit(2)
    # Index 1 skips the first number on the matched line — presumably the
    # '32' in a 'const S32 LL_VERSION_...' declaration; TODO confirm
    # against the actual header layout.
    version = re_vernum.findall( rstr )[1]
    return version
version_major = get_version("MAJOR")
version_minor = get_version("MINOR")
version_patch = get_version("PATCH")
version_build = get_version("BUILD")
version = version_major + '.' + version_minor + '.' + version_patch + '.' + version_build
###############
# SYSTEM INFO #
###############
# Debian Sarge has a broken glibc that leads to build failures on
# *non*-Sarge systems (because of prebuilt static libraries built on
# Sarge).
try:
debian_sarge = open('/etc/debian_version').read().strip() == '3.1'
except:
debian_sarge = False
#########################
# COMMAND LINE OPTIONS #
#########################
opts = Options()
opts.AddOptions(
EnumOption('BUILD', 'Set build type', 'releasefordownload',
allowed_values=('debug', 'release', 'releasenoopt', 'releasefordownload')),
EnumOption('ARCH', 'Set architecture', 'i686',
allowed_values=('i686', 'powerpc', 'x86_64')),
EnumOption('BTARGET', 'Set build target', 'server',
allowed_values=('client', 'server', 'all')),
BoolOption('DISTCC', 'Enabled distcc', True),
BoolOption('MOZLIB', 'Enabled llmozlib/mozilla support', True),
BoolOption('FMOD', 'Enabled FMOD audio support', True),
BoolOption('COLORGCC', 'Enabled colorgcc', True),
EnumOption('GRID', 'Client package\'s default grid', 'default',
allowed_values=('default', 'aditi', 'agni', 'dmz', 'durga', 'firstlook', 'ganga', 'shakti', 'siva', 'soma', 'uma', 'vaak')),
BoolOption('OPENSOURCE', 'Build using only non-proprietary dependencies', True) # OPENSOURCE: do not edit this line
)
optenv = Environment(options = opts)
Help(opts.GenerateHelpText(optenv))
build_param = optenv['BUILD']
arch = optenv['ARCH']
target_param = optenv['BTARGET']
enable_distcc = optenv['DISTCC']
enable_mozlib = optenv['MOZLIB']
enable_fmod = optenv['FMOD']
enable_colorgcc = optenv['COLORGCC']
grid = optenv['GRID']
opensource = optenv['OPENSOURCE']
targets = [ target_param ]
if target_param == 'all':
targets = [ 'client', 'server' ]
#####################
# ITERATE TARGETS #
#####################
for build_target in targets:
buildtype = build_param
if build_target == 'server' and buildtype == 'releasefordownload':
buildtype = 'release'
system_str = arch + '-' + platform
print 'Building ' + build_target + ' ' + version + ' on ' + system_str + ' (' + buildtype + ')'
system_lib_dir = '../libraries/' + system_str
if build_target == 'client':
system_lib_dir += '/lib_release_client'
elif buildtype == 'debug':
system_lib_dir += '/lib_debug'
else:
system_lib_dir += '/lib_release'
lib_dir = './lib_' + buildtype + '_' + build_target + '/' + system_str
try:
build_dir_prefix = os.environ['TEMP_BUILD_DIR']
except:
build_dir_prefix = '/tmp/' + os.environ['USER']
build_dir = build_dir_prefix + os.getcwd() + '/' + system_str + '-' + build_target + '-' + buildtype
### Base include directories ###
include_dirs = Split("""
./llcommon ./llmath ./llwindow ./llaudio ./llcharacter
./lldatabase ./llhavok ./llimage ./llinventory ./llmedia ./llmessage
./llprimitive ./llrender ./llscene ./llui ./llvfs ./llwindow
./llxml ./lscript
../libraries/include
../libraries/include/havok
""" +
'../libraries/' + system_str + '/include' )
client_external_libs = []
system_link_flags = ''
if platform != 'linux' and build_target == 'client' and enable_mozlib:
### Mozilla include directories ###
mozilla_dir = '../libraries/' + system_str + '/include/mozilla'
include_dirs += Split(
mozilla_dir + '/include/webbrwsr ' +
mozilla_dir + '/include/docshell ' +
mozilla_dir + '/include/dom ' +
mozilla_dir + '/include/xpcom ' +
mozilla_dir + '/include/widget ' +
mozilla_dir + '/include/gfx ' +
mozilla_dir + '/include/string ' +
mozilla_dir + '/include/uriloader ' +
mozilla_dir + '/include/view ' +
mozilla_dir + '/include/layout ' +
mozilla_dir + '/include/content ' +
mozilla_dir + '/include/locale ' +
mozilla_dir + '/include/profdirserviceprovider ' +
mozilla_dir + '/include/xulapp ' +
mozilla_dir + '/include/pref ' +
mozilla_dir + '/sdk/include')
##############
# CPP Flags #
##############
# Generic GCC flags
flags = '-g -pipe -Wall -Wno-trigraphs '
if opensource:
flags += '-DLL_USE_KDU=0 '
else:
flags += '-DLL_USE_KDU=1 '
if build_target == 'server':
# Server flags
flags += '-D_GNU_SOURCE -ftemplate-depth-60 -DLL_MESA_HEADLESS=1 -DLL_MESA=1 '
if arch == 'i686':
flags += '-march=pentiumpro '
if debian_sarge:
def_server_cppflags = ''
else:
def_server_cppflags = '-DCTYPE_WORKAROUND'
server_cppflags = os.environ.get('SERVER_CPPFLAGS',
def_server_cppflags)
flags += server_cppflags + ' '
else:
# Viewer flags
flags += '-falign-loops=16 -fno-math-errno -fexceptions -fsigned-char -fno-strict-aliasing -ffast-math '
flags += '-DLL_MESA_HEADLESS=0 -DLL_MESA=0 '
try:
client_cppflags = os.environ['CLIENT_CPPFLAGS']
except:
client_cppflags = ''
flags += client_cppflags + ' '
if platform == 'linux':
# Linux-only flags
flags += '-DLL_LINUX=1 '
if build_target == 'client':
flags += '-DAPPID=secondlife -DLL_SDL=1 '
if arch == 'x86_64' or arch == 'x86_64cross' or not enable_fmod:
flags += '-DLL_FMOD=0 '
flags += '-DLL_X11=1 -DLL_GTK=1 '
client_external_libs += [ 'gtk-x11-2.0', 'elfio' ]
include_dirs += [ '../libraries/' + system_str + '/include/gtk-2.0' ]
include_dirs += [ '../libraries/' + system_str + '/include/glib-2.0']
include_dirs += [ '../libraries/' + system_str + '/include/pango-1.0' ]
include_dirs += [ '../libraries/' + system_str + '/include/atk-1.0' ]
include_dirs += [ '../libraries/' + system_str + '/include/ELFIO' ]
include_dirs += [ '../libraries/' + system_str + '/include/llfreetype2' ]
# llmozlib stuff
if enable_mozlib:
flags += '-DLL_LIBXUL_ENABLED=1 '
client_external_libs += [ 'llmozlib' ]
client_external_libs += [ 'mozjs', 'nspr4', 'plc4', 'plds4', 'profdirserviceprovider_s', 'xpcom', 'xul' ]
else:
flags += '-DLL_LIBXUL_ENABLED=0 '
else:
# Mac-only flags
flags += '-x c++ -arch ppc -pipe -Wno-trigraphs -fpascal-strings -faltivec -fasm-blocks -g -fmessage-length=0 -mtune=G4 -Wno-deprecated-declarations -Wno-invalid-offsetof -mmacosx-version-min=10.3 -DLL_DARWIN=1 -Wmost -Wno-sign-compare -Wno-switch -fpch-preprocess -F./newview/build/Deployment -fconstant-cfstrings -ffor-scope -Wno-reorder -isysroot /Developer/SDKs/MacOSX10.3.9.sdk '
### Build type-specific flags ###
debug_opts = flags + '-fno-inline -O0 -D_DEBUG -DLL_DEBUG=1 '
release_opts = flags + '-O2 -DNDEBUG -DLL_RELEASE=1 '
releasenoopt_opts = flags + '-O0 -DNDEBUG -DLL_RELEASE=1 '
releasefordownload_opts = flags + '-O2 -DNDEBUG -DLL_RELEASE=1 -DLL_RELEASE_FOR_DOWNLOAD=1 '
################
# ENVIRONMENT #
################
gcc_bin = 'g++32'
# If you strip more aggressively than -S then the quality of crash-
# logger backtraces deteriorates.
strip_cmd = 'strip -S -o $TARGET $SOURCE'
# hidesyms_cmd is something which copies an executable while 'hiding'
# all of its exposed symbols except a very few desired ones. This is
# used mainly to hide the symbols of the many common libraries we
# static-link, which otherwise cause hard-to-trace fatal crashes due
# to clashes in the run-time symbol namespace.
if platform == 'linux':
exposed_symbols_file = 'newview/linux_tools/exposed-symbols.txt'
hidesyms_cmd = 'objcopy --keep-global-symbols ' + exposed_symbols_file + ' $SOURCE $TARGET'
else:
hidesyms_cmd = 'cp -f $SOURCE $TARGET'
if build_target != 'client':
gcc_bin = 'g++-3.3'
if arch == 'x86_64cross':
gcc_bin = '/opt/crosstool/gcc-4.0.2-glibc-2.3.6/x86_64-unknown-linux-gnu/bin/x86_64-unknown-linux-gnu-gcc'
strip_cmd = '/opt/crosstool/gcc-4.0.2-glibc-2.3.6/x86_64-unknown-linux-gnu/x86_64-unknown-linux-gnu/bin/strip -S -o $TARGET $SOURCE'
compiler = gcc_bin
compiler_no_distcc = compiler
if enable_distcc:
compiler = 'distcc ' + gcc_bin
lib_path = [lib_dir] + [system_lib_dir]
mysql_lib_dir = '/usr/lib/mysql4/mysql'
if os.path.isdir(mysql_lib_dir):
lib_path.append(mysql_lib_dir)
base_env = Environment(CXX = compiler,
CPPPATH = include_dirs,
LIBPATH = lib_path,
LINKFLAGS = system_link_flags + '--no-keep-memory --reduce-memory-overheads ' )
### Environments for various build types ###
env = base_env.Copy(CPPFLAGS = releasefordownload_opts)
if buildtype == 'debug':
env = base_env.Copy(CPPFLAGS = debug_opts)
if buildtype == 'release':
env = base_env.Copy(CPPFLAGS = release_opts)
if buildtype == 'releasenoopt':
env = base_env.Copy(CPPFLAGS = releasenoopt_opts)
# ccache needs this to be set
try:
env['ENV']['CCACHE_DIR'] = os.environ['CCACHE_DIR']
except:
print "No CCACHE_DIR set."
env_no_distcc = env.Copy(CXX = compiler_no_distcc)
### Distributed build hosts ###
if enable_distcc:
hosts = 'localhost/2 station9.lindenlab.com,lzo station7.lindenlab.com,lzo station6.lindenlab.com,lzo station11.lindenlab.com,lzo station5.lindenlab.com,lzo station15.lindenlab.com,lzo station10.lindenlab.com,lzo station13.lindenlab.com,lzo station12.lindenlab.com,lzo'
if arch == 'x86_64' or arch == 'x86_64cross':
hosts = 'localhost'
print "Distributing to hosts: " + hosts
env['ENV']['DISTCC_HOSTS'] = hosts
env['ENV']['USER'] = os.environ['USER']
env['ENV']['HOME'] = os.environ['HOME']
if enable_colorgcc:
env['ENV']['PATH'] = os.environ['PATH']
env['ENV']['TERM'] = os.environ['TERM']
env['ENV']['HOME'] = os.environ['HOME']
### Configure lex and yacc ###
env.Append(YACCFLAGS = ["-v", "-d"])
env.CFile(target=build_dir+'/lscript/lscript_compile/indra.l.cpp', source='lscript/lscript_compile/indra.l')
env.CFile(target=build_dir+'/lscript/lscript_compile/indra.y.c', source='lscript/lscript_compile/indra.y')
env.Command(build_dir+'/lscript/lscript_compile/indra.y.cpp',build_dir+'/lscript/lscript_compile/indra.y.c',
[Move('$TARGET','$SOURCE'),Delete(build_dir+'/lscript/lscript_compile/indra.y.output')])
#####################
# HELPER FUNCTIONS #
#####################
### Load a files.lst and files.PLATFORM.lst for each module ###
def load_files(module, source_fname):
    """Read `module`/`source_fname` (plus files.<platform>.lst if present)
    and return the corresponding build-dir source paths.

    Lines starting with '#' are skipped.  newsim/lltask.cpp is compiled
    through the non-distcc environment — presumably it does not build
    reliably under distcc; TODO confirm, the original gives no reason.
    Returns [] if the main list file cannot be read; a missing
    per-platform list is not an error.
    """
    new_list = []
    try:
        list_file = open('./' + module + '/' + source_fname, 'r')
        list = Split(list_file.read())
        for x in list:
            if not x.startswith('#'):
                file = os.path.join(build_dir, x)
                if x == 'newsim/lltask.cpp':
                    print 'Found lltask!'
                    obj = env_no_distcc.Object(file)
                    new_list.append(obj)
                else:
                    new_list.append(file)
        list_file.close()
    except IOError, val:
        print 'Error: unable to open file list',source_fname,
        print 'for module', module + ":", val
        return []
    try:
        platform_list_file = open('./' + module + '/files.' + platform + '.lst', 'r')
        list = Split(platform_list_file.read())
        for x in list:
            file = os.path.join(build_dir, x)
            new_list.append(file)
        platform_list_file.close()
    except IOError:
        # No platform-specific file list for this module; that is fine.
        return new_list
    return new_list
### Create a static library from the module ###
def create_static_module_from_dir(
    input_dir,
    mod_name,
    local_flags="",
    source_files = 'files.lst',
    extra_depends=None):
    """Build static library `mod_name` from the sources listed in
    `input_dir`/`source_files`, appending `local_flags` to CPPFLAGS.

    `extra_depends` (e.g. a generated header) is attached to every object
    file so it is produced before compilation starts.
    """
    files_list = load_files(input_dir, source_files)
    BuildDir(build_dir + '/' + input_dir, input_dir)
    local_env = env.Copy(CPPFLAGS = env['CPPFLAGS'] + ' ' + local_flags)
    if extra_depends:
        for x in files_list:
            Depends(local_env.Object(x), extra_depends)
    tgt = local_env.StaticLibrary(lib_dir + '/' + mod_name, files_list)
    Default(tgt)
# Convenience wrapper: the library name is the module directory name.
def create_static_module(module, local_flags="", source_files = 'files.lst', extra_depends=None):
    create_static_module_from_dir(module, module, local_flags, source_files, extra_depends)
def create_dynamic_module(
    module,
    local_flags="",
    module_libs = None,
    source_files = 'files.lst'):
    """Build a shared library from `module`, linking against `module_libs`,
    with `local_flags` appended to CPPFLAGS."""
    files_list = load_files(module, source_files)
    BuildDir(build_dir + '/' + module, module)
    local_env = env.Copy(CPPFLAGS = env['CPPFLAGS'] + ' ' + local_flags)
    tgt = local_env.SharedLibrary(lib_dir + '/' + module, files_list, LIBS = module_libs)
    Default(tgt)
### Create an executable from the module ###
def create_executable(
    exec_file, module, module_libs, source_files = 'files.lst'):
    """Build program `exec_file` from `module`'s listed sources, linked
    against `module_libs`, using the shared (possibly distcc) environment."""
    files_list = load_files(module, source_files)
    BuildDir(build_dir + '/' + module, module)
    tgt = env.Program(exec_file, files_list, LIBS = module_libs)
    Default(tgt)
####################
# BUILD LIBRARIES #
####################
create_static_module('llcommon')
create_static_module('llmath')
create_static_module('llmessage')
create_static_module('llvfs')
create_static_module('llimage')
create_static_module('llinventory')
create_static_module('llcharacter')
create_static_module('llprimitive')
create_static_module('llrender')
create_static_module('llwindow')
create_static_module('llxml')
create_static_module('lscript', extra_depends=build_dir + '/lscript/lscript_compile/indra.y.h')
net_external_libs = [ 'curl', 'cares', 'ssl', 'crypto', 'expat', 'aprutil-1', 'apr-1' ]
common_external_libs = net_external_libs + [ 'xmlrpc', 'z' ]
if build_target == 'client':
if platform == 'linux':
#############################
# BUILD LINUX_CRASH_LOGGER #
#############################
output_crashlogger_bin = 'linux_crash_logger/linux-crash-logger-' + arch + '-bin'
external_libs = net_external_libs + [ 'db-4.2', 'gtk-x11-2.0' ]
external_libs.remove('cares')
internal_libs = [ 'llvfs', 'llmath', 'llcommon' ]
create_executable(output_crashlogger_bin + '-globalsyms', 'linux_crash_logger', internal_libs + external_libs)
env.Command(output_crashlogger_bin, output_crashlogger_bin + '-globalsyms', hidesyms_cmd)
create_static_module('llaudio')
create_static_module('llmedia')
create_static_module('llui')
create_static_module('llimagej2coj')
if not opensource:
create_dynamic_module('llkdu', '', ['llimage', 'llvfs', 'llmath', 'llcommon', 'apr-1', 'kdu_v42R'])
##################
# BUILD NEWVIEW #
##################
output_bin = 'newview/secondlife-' + arch + '-bin'
external_libs = client_external_libs + common_external_libs + [ 'freetype', 'jpeg', 'SDL', 'GL', 'GLU', 'ogg', 'vorbisenc', 'vorbisfile', 'vorbis', 'db-4.2', 'openjpeg' ]
if arch != 'x86_64' and arch != 'x86_64cross':
if enable_fmod:
external_libs += [ 'fmod-3.75' ]
external_libs += ['tcmalloc', 'stacktrace']
external_libs.remove('cares')
internal_libs = [ 'lscript', 'llwindow', 'llrender', 'llprimitive',
'llmedia', 'llinventory',
'llimage', 'llimagej2coj',
'llcharacter', 'llaudio', 'llui', 'llxml',
'llmessage', 'llvfs', 'llmath', 'llcommon' ]
create_executable(output_bin + '-globalsyms', 'newview', internal_libs + external_libs)
env.Command(output_bin, output_bin + '-globalsyms', hidesyms_cmd)
Default(output_bin)
if buildtype == 'releasefordownload':
#######################
# PACKAGE THE CLIENT #
#######################
if platform == 'linux':
env.Command(output_bin + '-stripped', output_bin, strip_cmd)
env.Command(output_crashlogger_bin + '-stripped', output_crashlogger_bin, strip_cmd)
product_name = 'SecondLife_' + arch + '_' + version_major + "_" + version_minor + "_" + version_patch + "_" + version_build
if grid not in ['default', 'agni']:
product_name += "_" + grid.upper()
package_name = product_name + '.tar.bz2'
cmd = 'rm -rf newview/%(pn)s* && newview/viewer_manifest.py --grid=%(grid)s --installer_name=%(pn)s --arch=%(arch)s' % {
'pn': product_name,
'grid':grid,
'arch':arch}
env.Command('newview/' + package_name, 'newview/viewer_manifest.py', cmd)
Depends('newview/' + package_name, output_bin + '-stripped')
Depends('newview/' + package_name, output_crashlogger_bin + '-stripped')
Default('newview/' + package_name)
elif build_target == 'server':
create_static_module('lldatabase')
create_static_module('llscene')
create_static_module('llhavok', '-fno-exceptions -fno-rtti')
create_static_module_from_dir('llkdu', 'llkdustatic')
##################
# BUILD SERVERS #
##################
file_suffix = ''
if buildtype == 'debug':
file_suffix = '_debug'
common_external_libs += [ 'pthread' ]
# Chatter test application
external_libs = common_external_libs
internal_libs = [ 'llmessage', 'llvfs', 'llmath', 'llcommon' ]
create_executable('test_apps/chatter/chatter', 'test_apps/chatter',
internal_libs + external_libs)
# Tool to buffer all of standard input to memory.
create_executable('tools/simbin2xml/buffer_file/buffer_file',
'tools/simbin2xml/buffer_file', "")
# Simstate binary to XML utility.
external_libs = common_external_libs
internal_libs = [ 'llxml', 'llcommon', 'llmath' ]
create_executable('tools/simbin2xml/simbin2xml', 'tools/simbin2xml',
internal_libs + external_libs)
# Launcher
external_libs = common_external_libs
internal_libs = [ 'llmessage', 'llvfs', 'llmath', 'llcommon' ]
create_executable('launcher/launcher' + file_suffix, 'launcher',
internal_libs + external_libs)
# Dataserver
Depends('dataserver/dataserver', 'launcher/launcher' + file_suffix)
external_libs = common_external_libs + ['boost_regex-gcc-mt', 'mysqlclient', 'tcmalloc', 'stacktrace']
internal_libs = [ 'llcharacter', 'lldatabase', 'llimage', 'llinventory',
'llscene', 'llmessage', 'llvfs', 'llxml', 'llcommon', 'llmath' ]
create_executable('dataserver/dataserver' + file_suffix, 'dataserver',
internal_libs + external_libs)
# Spaceserver
Depends('newspace/spaceserver', 'dataserver/dataserver' + file_suffix)
external_libs = common_external_libs + ['mysqlclient']
internal_libs = ['llscene', 'lldatabase', 'llmessage', 'llvfs',
'llmath', 'llcommon']
create_executable('newspace/spaceserver' + file_suffix, 'newspace',
internal_libs + external_libs)
# Userserver
Depends('userserver/userserver', 'newspace/spaceserver' + file_suffix)
external_libs = common_external_libs
internal_libs = ['llinventory', 'llscene', 'llmessage', 'llvfs',
'llxml', 'llmath', 'llcommon']
create_executable('userserver/userserver' + file_suffix, 'userserver',
internal_libs + external_libs)
# Rpcserver
Depends('rpcserver/rpcserver', 'userserver/userserver' + file_suffix)
external_libs = common_external_libs + ['xmlrpc',
'mysqlclient']
internal_libs = ['llscene', 'llmessage', 'lldatabase', 'llvfs',
'llmath', 'llcommon']
create_executable('rpcserver/rpcserver' + file_suffix, 'rpcserver',
internal_libs + external_libs)
# Mapserver
Depends('mapserver/mapserver', 'rpcserver/rpcserver' + file_suffix)
external_libs = common_external_libs + ['OSMesa16', 'kdu',
'boost_regex-gcc-mt', 'iconv', 'jpeg', 'GL',
'mysqlclient', 'pthread', 'dl']
internal_libs = ['llrender', 'llwindow', 'llimage', 'lldatabase', 'llprimitive', 'llmessage', 'llkdustatic',
'llxml', 'llvfs', 'llmath', 'llcommon']
create_executable('mapserver/mapserver' + file_suffix, 'mapserver',
internal_libs + external_libs)
# Simulator
Depends('newsim/simulator' + file_suffix, 'mapserver/mapserver' + file_suffix)
external_libs = common_external_libs + ['hkdynamics', 'hkgeometry', 'hkmath', 'hkbase', 'hkcollide', 'hkactions', 'boost_regex-gcc-mt', 'dl', 'kdu', 'mysqlclient', 'iconv', 'tcmalloc', 'stacktrace']
internal_libs = [ 'lscript', 'llprimitive',
'llscene', 'llhavok', 'llinventory', 'llimage',
'llcharacter', 'llxml', 'lldatabase', 'llkdustatic',
'llmessage', 'llvfs', 'llmath', 'llcommon' ]
create_executable('newsim/simulator' + file_suffix, 'newsim',
internal_libs + external_libs)
# texture upload verifier
external_libs = common_external_libs + [ 'kdu', 'dl' ]
internal_libs = [
'llimage',
'llkdustatic',
'llinventory',
'llmessage',
'llvfs',
'llxml',
'llcommon',
'llmath' ]
create_executable(
'web/doc/asset-upload/plugins/verify-texture',
'web/doc/asset-upload/plugins',
internal_libs + external_libs,
'verify-texture.lst')
# notecard upload verifier
create_executable(
'web/doc/asset-upload/plugins/verify-notecard',
'web/doc/asset-upload/plugins',
internal_libs + external_libs,
'verify-notecard.lst')
# LSL compiler plugin for asset upload CGI.
external_libs = common_external_libs
internal_libs = ['lscript', 'llmath', 'llcommon']
create_executable('web/doc/asset-upload/plugins/lsl_compiler/lslc' + file_suffix, 'web/doc/asset-upload/plugins/lsl_compiler/', internal_libs + external_libs);
# Test
Depends('test/test', 'newsim/simulator' + file_suffix)
external_libs = common_external_libs + ['mysqlclient']
internal_libs = [ 'lldatabase', 'llinventory', 'llmessage', 'llxml',
'llvfs', 'llmath', 'llcommon' ]
test_executable = 'test/test' + file_suffix
create_executable(test_executable, 'test',
internal_libs + external_libs)
# Run tests
test_results_file = 'test/test_results' + file_suffix + '.txt'
env.Command(test_results_file,
test_executable,
"$SOURCE 2>&1 | tee $TARGET")
Depends(test_results_file, test_executable)
Default(test_results_file)
test_script = 'test/test.py'
script_test_results = 'test/script_test_result' + file_suffix + '.txt'
env.Command(script_test_results,
test_script,
"$SOURCE 2>&1 | tee $TARGET")
Depends(script_test_results, test_results_file)
Default(script_test_results)
#########
# DONE #
#########
|
# author: Chengtian Liu
from sqlalchemy import create_engine
import sqlite3
# define database engines
# NOTE(review): this SQLAlchemy engine is created but never used below —
# every query goes through sqlite3 directly (and against a different path,
# 'StockTracking/database.db'); confirm whether it can be removed.
sqlite_engine = create_engine(
    'sqlite:///database.db',
    echo=True
)
def add_favorite(id, ticker):
    """Add `ticker` to user `id`'s favorites, creating the table on demand.

    A duplicate (id, ticker) pair is ignored with a console notice.
    """
    conn = sqlite3.connect('StockTracking/database.db')
    try:
        cursor = conn.cursor()
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS favorite(
                id INTEGER,
                favorite_stock VARCHAR(5),
                PRIMARY KEY (id, favorite_stock),
                FOREIGN KEY (id) REFERENCES user(id)
            )
        """)
        try:
            # Parameterized query: the original interpolated caller input
            # with str.format, which allowed SQL injection and broke on
            # tickers containing quotes.
            cursor.execute(
                "INSERT INTO favorite (id, favorite_stock) VALUES (?, ?)",
                (id, ticker),
            )
            conn.commit()
        except sqlite3.IntegrityError:
            print('ticker', ticker, 'already in favorite.')
    finally:
        # The original leaked the connection; always close it.
        conn.close()
def read_favorite(id):
    """Return the list of favorite tickers stored for user `id`."""
    conn = sqlite3.connect('StockTracking/database.db')
    try:
        # Parameterized query (the original used str.format — injectable);
        # selecting the column directly replaces the original's res[1].
        rows = conn.execute(
            "SELECT favorite_stock FROM favorite WHERE id = ?", (id,)
        )
        return [row[0] for row in rows]
    finally:
        # The original leaked the connection; always close it.
        conn.close()
def delete_favorite(id, ticker):
    """Remove `ticker` from user `id`'s favorites (no-op if absent)."""
    conn = sqlite3.connect('StockTracking/database.db')
    try:
        # Parameterized query: the original built the DELETE with
        # str.format, which allowed SQL injection.
        conn.execute(
            "DELETE FROM favorite WHERE id = ? AND favorite_stock = ?",
            (id, ticker),
        )
        conn.commit()
    finally:
        # The original leaked the connection; always close it.
        conn.close()
    return
# if __name__ == '__main__':
# conn = sqlite3.connect('../../database.db')
# cursor = conn.cursor()
# add_favorite(11, 'MSFT')
# print(read_favorite(11))
|
"""
check out my youtube channel
link: https://www.youtube.com/channel/UCjPk9YDheKst1FlAf_KSpyA
"""
import pygame
import os
from Boundary import Boundary
from ray import Ray
from random import randint

os.environ["SDL_VIDEO_CENTERED"] = '1'

width, height = 1920, 1080
size = (width, height)

# colors
black = (0, 0, 0)
white = (255, 255, 255)
gray = (100, 100, 100)

# pygame configurations
pygame.init()
pygame.display.set_caption("2D Raymarching")
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
fps = 60

screen_offset = 50
object_count = 10
angle = 0

# Scatter random circular boundaries inside the visible area.
objects = []
for i in range(object_count):
    obstacle = Boundary(randint(screen_offset, width - screen_offset),
                        randint(screen_offset, height - screen_offset),
                        randint(20, 100))
    objects.append(obstacle)

run = True
while run:
    clock.tick(fps)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    # One ray from the screen centre, rotated a little each frame.
    ray = Ray(width//2, height//2, 0, screen, white)
    ray.angle = angle
    screen.fill(black)
    for obstacle in objects:  # renamed from `object` (shadowed the builtin)
        obstacle.display(screen, white)
    ray.March(objects)
    for pt in ray.collisions:
        pygame.draw.circle(screen, white, (int(pt[0]), int(pt[1])), 1)
    # BUG FIX: the original never presented the backbuffer, so nothing was
    # ever drawn to the visible window.
    pygame.display.flip()
    angle += 0.005
pygame.quit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
import pexpect
import os
import csv
import multiprocessing
from multiprocess import Pool, Manager
# from py2neo import Graph, Node
# from py2neo import authenticate
# from toolz import thread_last
from funcy import partial, compose, lmap, re_all, re_test, cat
from device.olt import Zte, Huawei
import time
config = configparser.ConfigParser()
config.read(os.path.expanduser('~/.weihu/config.ini'))
neo4j_username = config.get('neo4j', 'username')
neo4j_password = config.get('neo4j', 'password')
olts_file, log_file, result_file = ('olts.txt', 'result/olt_log.txt',
'result/olt_info.txt')
# authenticate('61.155.48.36:7474', neo4j_username, neo4j_password)
# graph = Graph("http://61.155.48.36:7474/db/data")
# def import_olt():
# cmd = """
# create (n:Olt)
# set n.hostname={hostname},n.ip={ip},n.company={company},n.area={area}
# """
# tx = graph.cypher.begin()
# with open('olt-iposs.csv', 'r') as folt:
# for x in folt:
# hostname, ip, company, area = x.strip().split(',')
# print('{0}:{1}:{2}:{3}'.format(hostname, ip, company, area))
# tx.append(
# cmd, hostname=hostname, ip=ip, company=company, area=area)
# tx.process()
# tx.commit()
#
#
# def clear_log():
# for f in [log_file, result_file]:
# if os.path.exists(f):
# os.remove(f)
# os.mknod(f)
#
#
# def _company(funcs, device):
# def unknow_company(**kw):
# return ('fail', None, kw['ip'])
#
# company = device.pop('company')
# return funcs.get(company, unknow_company)(**device)
#
#
# def _add_infs(lock, record):
# mark, infs, ip = record
# statement = """
# match(n:Olt {ip:{ip}})
# merge (n)-[:HAS]->(i:Inf {name:{name}})
# on create set i.desc={desc},i.state={state},i.bw={bw},
# i.inTraffic={inTraffic},i.outTraffic={outTraffic},i.updated=timestamp()
# on match set i.desc={desc},i.state={state},i.bw={bw},
# i.inTraffic={inTraffic},i.outTraffic={outTraffic},i.updated=timestamp()"""
# with lock:
# with open(log_file, 'a') as lf:
# lf.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
# if mark == 'success' and infs:
# tx = graph.cypher.begin()
# lmap(lambda x: tx.append(statement, ip=ip, **x), infs)
# tx.process()
# tx.commit()
#
#
# def add_infs():
# funcs = {'zte': Zte.get_infs, 'hw': Huawei.get_infs}
# get_infs = partial(_company, funcs)
#
# clear_log()
# nodes = graph.cypher.execute(
# 'match (n:Olt) return n.ip as ip,n.company as company')
# olts = [dict(ip=x['ip'], company=x['company']) for x in nodes]
# pool = Pool(128)
# lock = Manager().Lock()
# _add_infs_p = partial(_add_infs, lock)
# list(pool.map(compose(_add_infs_p, get_infs), olts))
# pool.close()
# pool.join()
#
#
# def _add_groups(lock, record):
# mark, groups, ip = record
# stmt1 = """
# match(n:Olt {ip:{ip}})
# merge (n)-[:HAS]->(g:Group {name:{name}})
# set g.desc={desc},g.mode={mode},g.updated=timestamp()"""
# stmt2 = """
# match (i:Inf {name:{infName}})<--(n:Olt {ip:{ip}})-->(g:Group {name:{name}})
# merge (g)-[r:OWNED]->(i)
# set r.updated=timestamp()"""
#
# with lock:
# with open(log_file, 'a') as lf:
# lf.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
# if mark == 'success' and groups:
# tx = graph.cypher.begin()
# for x in groups:
# tx.append(
# stmt1,
# ip=ip,
# name=x['name'],
# desc=x['desc'],
# mode=x['mode'])
# for infName in x['infs']:
# tx.append(stmt2, infName=infName, ip=ip, name=x['name'])
# tx.process()
# tx.commit()
#
#
# def add_groups():
# funcs = {'zte': Zte.get_groups, 'hw': Huawei.get_groups}
# get_groups = partial(_company, funcs)
#
# clear_log()
# nodes = graph.cypher.execute(
# 'match (n: Olt) return n.ip as ip, n.company as company')
# olts = [dict(ip=x['ip'], company=x['company']) for x in nodes]
# pool = Pool(128)
# lock = Manager().Lock()
# _add_groups_p = partial(_add_groups, lock)
# list(pool.map(compose(_add_groups_p, get_groups), olts))
# pool.close()
# pool.join()
#
#
# def _add_main_card(lock, record):
# mark, rslt, ip = record
# stmt = """
# match (n:Olt) where n.ip={ip}
# set n.mainCard={rslt}
# """
# with lock:
# with open(result_file, 'a') as frslt:
# frslt.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
# if mark == 'success':
# tx = graph.cypher.begin()
# tx.append(stmt, ip=ip, rslt=rslt)
# tx.process()
# tx.commit()
#
#
# def add_main_card():
# funcs = {'zte': Zte.get_main_card, 'hw': Huawei.get_main_card}
# get_main_card = partial(_company, funcs)
# clear_log()
#
# nodes = graph.cypher.execute(
# 'match (n: Olt) return n.ip as ip, n.company as company')
# olts = [dict(ip=x['ip'], company=x['company']) for x in nodes]
# pool = Pool(128)
# lock = Manager().Lock()
# _add_main_card_p = partial(_add_main_card, lock)
# list(pool.map(compose(_add_main_card_p, get_main_card), olts))
# pool.close()
# pool.join()
#
#
# def get_hw_epba():
# nodes = graph.cypher.execute(
# 'match (n:Olt) where n.company="hw" return n.ip as ip')
# with open('epba.txt', 'w') as fh:
# for olt in nodes:
# _, slots, ip = Huawei.get_epba_card(olt['ip'])
# if slots:
# fh.write('{ip}:{slots}\n'.format(ip=ip, slots=','.join(slots)))
#
#
# def _add_power_info(lock, record):
# mark, rslt, ip = record
# stmt = """
# match (n:Olt) where n.ip={ip}
# set n.powerInfo={rslt}
# """
# with lock:
# with open(log_file, 'a') as frslt:
# frslt.write('{ip}:{mark}\n'.format(ip=ip, mark=mark))
# if mark == 'success':
# tx = graph.cypher.begin()
# tx.append(stmt, ip=ip, rslt=rslt)
# tx.process()
# tx.commit()
#
#
# def add_power_info():
# funcs = {'zte': Zte.get_power_info, 'hw': Huawei.get_power_info}
# get_power_info = partial(_company, funcs)
# clear_log()
#
# nodes = graph.cypher.execute(
# 'match (n: Olt) return n.ip as ip, n.company as company')
# olts = [dict(ip=x['ip'], company=x['company']) for x in nodes]
# pool = Pool(128)
# lock = Manager().Lock()
# _add_power_info_p = partial(_add_power_info, lock)
# list(pool.map(compose(_add_power_info_p, get_power_info), olts))
# pool.close()
# pool.join()
#
#
# def del_old_data():
# cmd1 = """
# match (:Olt)-->(i:Inf)
# detach delete i
# """
# cmd2 = """
# match (:Olt)-->(g:Group)
# detach delete g
# """
# cmd3 = """
# match (:Olt)-->(:Group)-[r]->(:Inf)
# detach delete r
# """
# graph.cypher.execute(cmd1)
# graph.cypher.execute(cmd3)
# graph.cypher.execute(cmd2)
#
def get_svlan():
    """Collect svlan records from every OLT listed in deviceList.csv and
    write them to svlan.csv (one CSV row per svlan).

    Device rows are (ip, vendor); the vendor column selects the Zte or
    Huawei driver class.
    """
    devices = dict(ZTE=Zte, HW=Huawei)
    action = 'get_svlan'
    # Read the device list up front so the input handle is closed before
    # the (potentially long) parallel collection starts — the original
    # left both file handles unclosed.
    with open('deviceList.csv') as device_file:
        reader = csv.reader(device_file)
        next(reader)  # skip the header row
        olts = [line[:2] for line in reader]
    funcs = [partial(getattr(devices.get(company), action), ip)
             for ip, company in olts]
    with Pool(32) as p:
        rslt = p.map(lambda f: f(), funcs)
    rslt = cat(rslt)
    with open('svlan.csv', 'w') as fb:
        writer = csv.writer(fb)
        writer.writerows(rslt)
def get_active_port():
    """Collect the active (up) ports from every OLT listed in
    deviceList.csv and write them to up_port.csv.

    Device rows are (ip, vendor); the vendor column selects the Zte or
    Huawei driver class.
    """
    devices = dict(ZTE=Zte, HW=Huawei)
    action = 'get_active_port'
    # Read the device list up front so the input handle is closed before
    # the (potentially long) parallel collection starts — the original
    # left both file handles unclosed.
    with open('deviceList.csv') as device_file:
        reader = csv.reader(device_file)
        next(reader)  # skip the header row
        olts = [line[:2] for line in reader]
    funcs = [partial(getattr(devices.get(company), action), ip)
             for ip, company in olts]
    with Pool(64) as p:
        rslt = p.map(lambda f: f(), funcs)
    rslt = cat(rslt)
    with open('up_port.csv', 'w') as fb:
        writer = csv.writer(fb)
        writer.writerows(rslt)
def main():
    """Entry point: time the enabled collection tasks and print the elapsed
    seconds. All individual steps are currently commented out."""
    started = time.time()
    # Re-enable collection steps here as needed:
    # add_infs()
    # add_groups()
    # add_main_card()
    # add_power_info()
    # get_hw_epba()
    print(time.time() - started)
    # import_olt()
# Run the collection entry point only when executed as a script.
if __name__ == '__main__':
    main()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given an array of integers, every element appears three times except for one,
# which appears exactly once. Find that single one.
# Note:
# Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
# 11 / 11 test cases passed.
# Status: Accepted
# Runtime: 45 ms
# Your runtime beats 50.10 % of python submissions.
class Solution(object):
    """Sorting approach: after an in-place sort every value forms a run of
    three, so the lone value is wherever that stride-3 pattern breaks."""

    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        nums.sort()  # in-place, as in the original
        # Visit the middle element of each would-be triple.
        for mid in range(1, len(nums), 3):
            lo, hi = nums[mid - 1], nums[mid + 1]
            if lo != hi:
                # Triple broken here: the single value is whichever side
                # disagrees with the middle element.
                return lo if hi == nums[mid] else hi
        # Every triple intact, so the single value is the final element.
        return nums[-1]
# 11 / 11 test cases passed.
# Status: Accepted
# Runtime: 42 ms
# Your runtime beats 59.06 % of python submissions.
class Solution(object):
    """Bit-counter approach: two masks track, per bit position, whether the
    bit has been seen once or twice modulo three; a bit seen three times
    clears out of both masks."""

    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        once = 0
        twice = 0
        for value in nums:
            # Update order matters: `twice` uses the old `once`, then
            # `once` uses the new `twice` — together they count mod 3.
            twice = (twice ^ value) & ~once
            once = (once ^ value) & ~twice
        return twice
# 11 / 11 test cases passed.
# Status: Accepted
# Runtime: 45 ms
# Your runtime beats 50.10 % of python submissions.
class Solution(object):
    """Generalized k=3 bit counter: (x2, x1) hold each bit's occurrence
    count in binary; a mask resets both once a count reaches three."""

    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        x1 = x2 = 0
        for num in nums:
            x2 ^= x1 & num
            x1 ^= num
            # If x1 and x2 are both set for a bit, its count is '11' = 3;
            # the mask clears that bit from both counters. (For x1 = x2 = 1
            # the mask is -2, and -2 & 1 == 0.)
            mask = ~(x1 & x2)
            # BUG FIX: removed the leftover debug print(x1, x2, mask) that
            # ran on every iteration.
            x1 &= mask
            x2 &= mask
        return x1
if __name__ == '__main__':
    # Smoke test: 3 appears once, 1 and 2 three times each -> expect 3.
    print(Solution().singleNumber([1, 1, 1, 2, 3, 2, 2]))
    # print(Solution().singleNumber([1, 1, 1, 2, 2, 2, 3]))
# https://discuss.leetcode.com/topic/11877/detailed-explanation-and-generalization-of-the-bitwise-operation-method-for-single-numbers
# Here is a list of few quick examples to show how the algorithm works:
# k = 2, p = 1
# k is 2, then m = 1, we need only one 32-bit integer(x1) as the counter.
# And 2^m = k so we do not even need a mask! A complete java program will look like:
# public int singleNumber(int[] A) {
# int x1 = 0;
#
# for (int i : A) {
# x1 ^= i;
# }
#
# return x1;
# }
# k = 3, p = 1
# k is 3, then m = 2, we need two 32-bit integers(x2, x1) as the counter.
# And 2^m > k so we do need a mask. Write k in its binary form: k = '11',
# then k1 = 1, k2 = 1, so we have mask = ~(x1 & x2). A complete java program will look like:
# public int singleNumber(int[] A) {
# int x1 = 0, x2 = 0, mask = 0;
#
# for (int i : A) {
# x2 ^= x1 & i;
# x1 ^= i;
# mask = ~(x1 & x2);
# x2 &= mask;
# x1 &= mask;
# }
#
# return x1; // p = 1, in binary form p = '01', then p1 = 1, so we should return x1;
# // if p = 2, in binary form p = '10', then p2 = 1, so we should return x2.
# }
# k = 5, p = 3
# k is 5, then m = 3, we need three 32-bit integers(x3, x2, x1) as the counter.
# And 2^m > k so we need a mask. Write k in its binary form: k = '101',
# then k1 = 1, k2 = 0, k3 = 1, so we have mask = ~(x1 & ~x2 & x3). A complete java program will look like:
# public int singleNumber(int[] A) {
# int x1 = 0, x2 = 0, x3 = 0, mask = 0;
#
# for (int i : A) {
# x3 ^= x2 & x1 & i;
# x2 ^= x1 & i;
# x1 ^= i;
# mask = ~(x1 & ~x2 & x3);
# x3 &= mask;
# x2 &= mask;
# x1 &= mask;
# }
#
# return x1; // p = 3, in binary form p = '011', then p1 = p2 = 1, so we can
# // return either x1 or x2. But if p = 4, in binary form p = '100',
# // only p3 = 1, which implies we can only return x3.
# }
|
"""
---------------------------------------------------------------------------
pool2.py
09/2018
Kirk Evans, GIS Analyst\Programmer, TetraTech EC @ USDA Forest Service R5/Remote Sensing Lab
3237 Peacekeeper Way, Suite 201
McClellan, CA 95652
kdevans@fs.fed.us
script to: DoPool and supporting functions
result classes moved to poolResults.py
task classes moved to poolTasks.py
know limitations: python 3.x
---------------------------------------------------------------------------
"""
import traceback
import sys
import os
import time
import multiprocessing
import pickle
import subprocess
import py3_general.general as g
import py3_general.poolResults as poolR
# below import so that other scripts still find task objects
from py3_general.poolTasks import *
# ---------------------------------------------------------------------------
# worker and wrapper functions
def _worker(qInput, qOutput):
    """Worker loop: consume TaskSet objects from qInput until a 'STOP'
    sentinel arrives, posting each wrapped result set to qOutput."""
    while True:
        task_set = qInput.get()
        if task_set == 'STOP':
            break
        qOutput.put(_fWrap(task_set))
def _fWrap(TaskSet):
    """ Wrapper for passed functions.
    Organizes results, timing and exceptions into MP_ResultSet objects.

    Fixes over the previous version:
      * ``iResultSet`` is created before the ``try`` and ``iResult`` is reset
        after each successful add, so a failure occurring before or between
        tasks can no longer raise ``NameError`` (or re-add an already
        recorded result) inside the ``except`` block.
      * ``return`` moved out of ``finally`` (a return there silently swallows
        any exception raised while handling the error).
    """
    t0 = time.time()
    iResultSet = poolR.MP_ResultSet(TaskSet.comment)
    iResult = None  # the result currently being built, if any
    try:
        for task in TaskSet.tasks:
            t1 = time.time()
            iResult = poolR.MP_Result(task.comment, task.args)
            # call function
            r = task.func(*task.args)
            iResult.result = r
            iResult.time = time.time() - t1
            iResultSet.addResult(iResult)
            iResult = None  # recorded; nothing pending
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        strTrace = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        print(strTrace)
        if iResult is not None:
            # A task failed mid-flight: record its traceback.
            iResult.error = strTrace
            iResultSet.addResult(iResult)
        iResultSet.hasError = True
    finally:
        # record timing on both the success and failure paths.
        iResultSet.time = time.time() - t0
    return iResultSet
def Range(i):
    """Dummy test function: return the range 0..i-1."""
    return range(0, i)
def test_args(a, b= '2'):
""" Dummy *args test function """
time.sleep(a/2)
return str(a) + str(b)
def submit(cmd, output=None):
    """ Submit a command to the command prompt.
    Optional output will be checked for existance if given and returned if true.
    """
    if output:
        # Shortcut: if the expected output already exists, skip the command.
        if os.path.exists(output):
            print('Output already present.')
            return output
        os.system(cmd)
        if not os.path.exists(output):
            raise Exception('Output not created: ' + output)
        return output
    # No output expected: judge success by the exit status instead.
    exit_code = os.system(cmd)
    if exit_code:
        raise Exception('Nonzero exit status.')
    return 'submit'
def POpen(cmd, output=None):
    """ Submit a command via subprocess.Popen.
    Optional output will be checked for existance if given and returned if true.

    Fixes over the previous (self-described "NOT working") version:
      * the command string is executed through the shell, matching submit();
        a plain string without ``shell=True`` fails for any command that has
        arguments on POSIX;
      * captured stdout is bytes -- it is now decoded before being embedded
        in the error message, which previously raised ``TypeError``.
    """
    # if output exists, skip cmd and return output
    if output and os.path.exists(output):
        return output
    # shell=True: cmd is an internally-built command line, same trust level
    # as the os.system() call in submit().
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    if output:
        if not os.path.exists(output):
            raise Exception('Output not created: ' + output + '\nmessage:\n'
                            + out.decode(errors='replace'))
        return output
    else:
        return 'POpen'
# ---------------------------------------------------------------------------
def DoPool(lstTasks, intWorkers, txtPickle = None):
    ''' Run tasksets in lstTasks over intWorkers workers using multiprocessing.Process.
    Return PoolResults object.

    lstTasks:   list of TaskSet objects (see poolTasks).
    intWorkers: number of worker processes to start.
    txtPickle:  optional path; when given, the PoolResults object is pickled there.
    '''
    # Create queues and result object
    print('\n\tStart Pool:')
    print('\t\t' + str(len(lstTasks)) + ' task(s).')
    t0 = time.time()
    task_queue = multiprocessing.Queue()
    done_queue = multiprocessing.Queue()
    iPoolResult = poolR.PoolResults(intWorkers)
    # Submit tasks
    for task in lstTasks:
        task_queue.put(task)
    # Start worker processes
    print('\t\t' + str(intWorkers) + ' worker(s).')
    for i in range(intWorkers):
        multiprocessing.Process(target=_worker, args=(task_queue, done_queue)).start()
    # Get and print results
    # (exactly one result set is expected per submitted task; each get() blocks)
    print('\t\tUnordered results:')
    for i in range(len(lstTasks)):
        resultSet = done_queue.get()
        print('\t\t\t' + str(resultSet))
        iPoolResult.record(resultSet)
    # Tell child processes to stop
    # (each worker consumes exactly one 'STOP' sentinel and exits)
    for i in range(intWorkers):
        task_queue.put('STOP')
    iPoolResult.runtime = time.time() - t0
    if iPoolResult.ErrorCount:
        print('\n\t\tPool done: WITH ERRORS!.\n')
    else:
        print('\n\t\tPool done: ' + g.time_string(iPoolResult.runtime) + '\n')
    if txtPickle:
        iPoolResult.Pickle(txtPickle)
    return iPoolResult
|
from django import forms
from .models import People, Job, Eye
class Edit_employee(forms.ModelForm):
    """ModelForm exposing every field of the People model for editing."""

    class Meta:
        model = People
        fields = '__all__'
        # fields = ['person_name', 'birthday', 'telephone', 'job', 'eyes']

    def is_valid(self):
        # NOTE(review): pure pass-through to ModelForm.is_valid(); kept,
        # presumably as a breakpoint/debugging hook -- confirm before removing.
        res = super(Edit_employee, self).is_valid()
        return res
|
#!/usr/bin/env python
# Flag selecting which driver this script exercises: 1 = ADC, anything else = GPS.
adc_ngps=1  # to choose between the adc and gps testing script
if(adc_ngps==1):
    import spqradc
else:
    import spqrgps
def main():
    # Exercise either the ADC (adc_ngps == 1) or the GPS driver; both talk to
    # hardware on /dev/ttyUSB1.
    if(adc_ngps==1):
        # Pin numbers / baudrate are board-specific; see SpqrADC for semantics.
        adc1 = spqradc.SpqrADC(reset_pin=23, send_rdy_pin=21, port="/dev/ttyUSB1", baudrate=230400)
        #buff="a,b,c\n"
        #adc1.readBuff2(buff)
        #return
        if( adc1.isValid() ):
            adc1.start()
            # Stream 100 buffers of samples from the ADC.
            adc1.streamBuff(100)
    else:
        gps1 = spqrgps.SpqrGPS(gps_clk_pin=19, port="/dev/ttyUSB1", baudrate=19200)
        if( gps1.isValid() ):
            #gps1.readAll()
            # Continuously print GPGGA fix sentences.
            gps1.monitorGPGGA(True)
    #if( hasattr(adc1, 'r_p') ):
    # print 'cool\n'
    #print "Employee.__doc__:", mycl.SpqrADC.__doc__
    #print "Employee.__name__:", mycl.SpqrADC.__name__
    #print "Employee.__module__:", mycl.SpqrADC.__module__
    #print "Employee.__bases__:", mycl.SpqrADC.__bases__
    #print "Employee.__dict__:", mycl.SpqrADC.__dict__


if __name__ == '__main__':
    main()
|
"""
This python script contains the main Flask app.
"""
import tempfile
import yaml
import markdown
from werkzeug.utils import secure_filename
from flask import *
from flask_cors import cross_origin
from rq import Queue
from rq.job import Job
from rq.exceptions import NoSuchJobError
from misc.logger import *
from manage_redis import *
from judge_submission import judge_submission
app = Flask(__name__)
# Random per-process secret key: sessions are invalidated on every restart.
app.secret_key = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(24))
# Set max uploaded code file size (one KiB of slack over MAX_CODE_SIZE).
app.config['MAX_CONTENT_LENGTH'] = (MAX_CODE_SIZE + 1) * 1024
# Work queue shared with the judging workers.
q = Queue(connection=REDIS_CONN)
"""
Client webpage methods
"""
@app.route('/favicon.ico', methods=['GET'])
def favicon():
    # Serve the site icon from the media/ directory.
    return send_from_directory('media', 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/media/particles.mp4', methods=['GET'])
def media_particles():
    # Background video used by the landing page.
    return send_from_directory('media', 'particles.mp4', mimetype='video/mp4')
@app.route('/', methods=['GET'])
def show_index():
    """Render the landing page with problem/submission counts and the current time."""
    # Display the time in US/Pacific regardless of the server's timezone.
    timestamp = datetime.now(tz=pytz.utc).astimezone(pytz.timezone('US/Pacific'))
    timestamp = timestamp.strftime("%m/%d/%Y %I:%M %p")
    return render_template('index.html', num_problems=get_num_problems(), num_submissions=redis_get_num_submissions(),
                           curr_time=timestamp)
@app.route('/problem_list', methods=['GET'])
def show_problem_list():
    # Render the public list of active problems.
    return render_template('problem_list.html', problem_list=_get_problem_list())
@app.route('/submission_list', methods=['GET'])
def show_submission_list():
    """Render the paginated submission list (admin session required)."""
    if 'admin' not in session:
        return render_template('login.html', error='Please login to access this page.')
    page = 1
    if 'page' in request.args:
        page = int(request.args['page'])
    # num_pages: ceiling division of total submissions by PAGE_SIZE.
    return render_template('submission_list.html', submissions=_get_submissions(page), page=page,
                           num_pages=(redis_get_num_submissions() + PAGE_SIZE - 1) // PAGE_SIZE)
@app.route('/submission_details', methods=['GET'])
def show_submission_details():
    """Render the details page for the submission in ?job_id=..."""
    if 'job_id' not in request.args:
        return render_template('error.html', error_msg='No job id provided!')
    try:
        # Existence check only; the fetched job object itself is not needed here.
        Job.fetch(request.args['job_id'], connection=REDIS_CONN)
    except NoSuchJobError:
        return render_template('error.html', error_msg='Job not found!')
    job_id = request.args['job_id']
    return render_template('submission_details.html', submission_source=_get_submission_source(job_id),
                           submission=redis_get_submission(job_id), job_id=job_id)
@app.route('/view_problem/<problem_id>', methods=['GET'])
def view_problem(problem_id):
    # Render one problem's statement/bonus/hints page.
    return render_template('view_problem.html', problem=_get_problem_info(problem_id))
@app.route('/api', methods=['GET'])
def show_api_reference():
    # Static documentation page for the JSON API below.
    return render_template('api_reference.html')
@app.route('/status', methods=['GET'])
def show_status():
    """Render the live status page for the job in ?job_id=..."""
    if 'job_id' not in request.args:
        return render_template('error.html', error_msg='No job id provided!')
    try:
        # Existence check only; the page itself polls /api/get_status.
        Job.fetch(request.args['job_id'], connection=REDIS_CONN)
    except NoSuchJobError:
        return render_template('error.html', error_msg='Job not found!')
    return render_template('status.html', job_id=request.args['job_id'])
@app.route('/login', methods=['GET', 'POST'])
def login_form():
    """Admin login: GET renders the form; POST validates the secret key."""
    if request.method == 'GET':
        if 'admin' not in session:
            return render_template('login.html')
        else:
            # Already logged in: nothing to do here.
            return redirect('/')
    if not request.form or 'secret_key' not in request.form:
        return render_template('login.html')
    if request.form['secret_key'] != SECRET_KEY:
        return render_template('login.html', error='Incorrect secret key!')
    # Login successful
    session['admin'] = True
    return redirect('/submission_list')
@app.route('/logout', methods=['GET'])
def logout():
    # Drop the admin flag (no-op if not logged in) and go home.
    session.pop('admin', None)
    return redirect('/')
"""
API methods
"""
def json_error(msg):
    """Return a (body, http_status) pair for a 400 JSON API error response."""
    return dict(error=msg), 400
def is_valid_problem_id(problem_id):
    """Return True iff problem_id appears in any group of the active problem list."""
    return any(problem['id'] == problem_id
               for group in _get_problem_list()['groups']
               for problem in group['problems'])
def get_num_problems():
    """Return the total number of active problems across all groups."""
    groups = _get_problem_list()['groups']
    return sum(len(group['problems']) for group in groups)
def change_md_to_html(md_file, default):
    """Render the Markdown file at md_file to HTML.

    Returns ``default`` unchanged when the file does not exist.  Enables
    fenced code blocks, newline-to-<br>, and $-delimited math.
    """
    if not os.path.isfile(md_file):
        return default
    with open(md_file, 'r') as handle:
        text = handle.read()
    return markdown.markdown(
        text,
        extensions=['fenced_code', 'nl2br', 'mdx_math'],
        extension_configs={
            'mdx_math': {
                'enable_dollar_delimiter': True
            }
        })
@app.route('/api/get_submissions/<page>', methods=['GET'])
@cross_origin()
def get_submissions(page):
    """API: return one page of submissions as JSON (secret key required)."""
    if 'secret_key' not in request.args:
        return 'Missing secret key in GET parameters!', 400
    elif request.args['secret_key'] != SECRET_KEY:
        return 'Invalid secret key!', 400
    return json.dumps(_get_submissions(int(page)))
def _get_submissions(page=1):
    # Thin wrapper so the API route and the HTML views share one lookup path.
    return redis_get_submissions(page)
@app.route('/api/get_submission_source/<job_id>', methods=['GET'])
@cross_origin()
def get_submission_source(job_id):
    # API: raw source text of one submission (no auth required).
    return _get_submission_source(job_id)
def _get_submission_source(job_id):
    """Return the submission's source code text, or an (error, 400) pair."""
    try:
        # Existence check only.
        Job.fetch(job_id, connection=REDIS_CONN)
    except NoSuchJobError:
        return 'Invalid job id!', 400
    source_code = redis_get_submission_source(job_id)
    if source_code is None:
        return 'Invalid submission index!', 400
    else:
        # Stored as bytes in redis; decode for display.
        source_code = source_code.decode('utf-8')
    return source_code
@app.route('/api/get_problem_list', methods=['GET'])
@cross_origin()
def get_problem_list():
    # API: active problem groups/problems as JSON.
    return _get_problem_list()
def _get_problem_list():
    """Load problems.yml and return only groups/problems not marked 'down'.

    Returns {'groups': []} when the problem file is missing.
    """
    # Get problem list
    try:
        problem_file = open('{}/problems.yml'.format(PROBLEM_INFO_PATH), 'r')
    except OSError:
        return {'groups': []}
    problem_data = yaml.safe_load(problem_file)
    problem_file.close()
    # Only return problems that are active
    active_groups = []
    for group in problem_data['groups']:
        if group['status'] == 'down':
            continue
        # This group is active; add to active problem data
        current_group = {'id': group['id'], 'name': group['name'], 'problems': []}
        for problem in group['problems']:
            if problem['status'] == 'down':
                continue
            # Problem is not down; add to active group
            # ('blurb' and 'difficulty' are optional in the YAML).
            current_group['problems'].append({'id': problem['id'], 'name': problem['name'],
                                              'blurb': problem['blurb'] if 'blurb' in problem else '',
                                              'difficulty': problem['difficulty'] if 'difficulty' in problem else ''})
        active_groups.append(current_group)
    # Return the active groups / problems
    return {'groups': active_groups}
@app.route('/api/get_problem_info/<problem_id>', methods=['GET'])
@cross_origin()
def get_problem_info(problem_id):
    # API: client-facing metadata and rendered statement for one problem.
    return _get_problem_info(problem_id)
def _get_problem_info(problem_id):
    """Load info.yml for problem_id and return only its client-facing fields."""
    # Make sure there is a problem with the given id
    if not is_valid_problem_id(problem_id):
        return json_error('Invalid problem ID!')
    # Get problem info
    problem_info_file = open('{}/{}/info.yml'.format(PROBLEM_INFO_PATH, problem_id), 'r')
    pinfo = yaml.safe_load(problem_info_file)
    problem_info_file.close()
    # Get problem statement (if there is one); missing files render as ''.
    problem_statement = change_md_to_html('{}/{}/statement.md'.format(PROBLEM_INFO_PATH, problem_id), '')
    # Get bonus (if there is any)
    bonus = change_md_to_html('{}/{}/bonus.md'.format(PROBLEM_INFO_PATH, problem_id), '')
    # Get hints (if there are any)
    hints = change_md_to_html('{}/{}/hints.md'.format(PROBLEM_INFO_PATH, problem_id), '')
    # Return only the info that the client needs to know about the problem
    return {'id': pinfo['problem_id'], 'name': pinfo['problem_name'], 'time_limit': pinfo['time_limit'],
            'memory_limit': pinfo['memory_limit'], 'max_score': pinfo['max_score'],
            'statement': problem_statement, 'bonus': bonus, 'hints': hints,
            'difficulty': pinfo['difficulty'] if 'difficulty' in pinfo else ''}
@app.route('/api/submit', methods=['GET', 'POST'])
@cross_origin()
def handle_submission():
    """Validate a code submission and enqueue it for judging.

    Returns {'status': 'success', 'job_id': ...} on success, or a
    json_error (400) describing the first failed validation.
    """
    if request.method == 'GET':
        # Return a simple testing form
        return render_template('test_submit_form.html')
    # Validate request
    if not request.form:
        return json_error('Empty request form (maybe invalid code file?)')
    # Secret key needed if not admin
    if 'admin' not in session:
        if 'secret_key' not in request.form:
            return json_error('Missing secret key in POST parameters!')
        if request.form['secret_key'] != SECRET_KEY:
            return json_error('Invalid secret key!')
    if 'problem_id' not in request.form or not is_valid_problem_id(request.form['problem_id']):
        return json_error('Invalid problem ID!')
    if 'type' not in request.form:
        return json_error('No submission language!')
    if request.form['type'] not in ['java', 'cpp', 'python']:
        return json_error('Invalid submission language!')
    if 'code' not in request.files or not request.files['code']:
        return json_error('No code file submitted!')
    if 'username' not in request.form or request.form['username'] == '':
        return json_error('No username!')
    run_bonus = True
    if 'run_bonus' in request.form and (request.form['run_bonus'] == 'off' or not request.form['run_bonus']):
        run_bonus = False
    # Reject names that collide with files the judge itself creates.
    sec_filename = secure_filename(request.files['code'].filename)
    if sec_filename in ['', 'input.in.txt', 'output.out.txt', 'answer.ans.txt', 'code.new.py']:
        return json_error('Invalid code filename!')
    code_filename, code_extension = os.path.splitext(sec_filename)
    if request.form['type'] == 'java' and code_extension != '.java':
        return json_error('Missing .java file extension!')
    elif request.form['type'] == 'cpp' and code_extension != '.cpp':
        return json_error('Missing .cpp file extension!')
    elif request.form['type'] == 'python' and code_extension != '.py':
        return json_error('Missing .py file extension!')
    # Make a temporary directory / save files there
    tempdir = tempfile.mkdtemp(prefix='judge-')
    request.files['code'].save(tempdir + '/' + code_filename + code_extension)
    # Enqueue the job
    job = q.enqueue_call(func=judge_submission, timeout=60,
                         ttl=RESULT_TTL, result_ttl=RESULT_TTL, failure_ttl=RESULT_TTL,
                         args=(tempdir, request.form['problem_id'], sec_filename,
                               request.form['type'], request.form['username'], run_bonus))
    job.meta['status'] = 'queued'
    job.save_meta()
    if DEBUG_LOWEST:
        log('New job id: {}'.format(job.get_id()))
    # Return submitted, along with the job id
    return {'status': 'success', 'job_id': job.get_id()}
@app.route('/api/get_status/<job_id>', methods=['GET'])
@cross_origin()
def get_status(job_id):
    # API: judging progress for one job (polled by the status page).
    return _get_status(job_id)
def _get_status(job_id):
    """Return (payload, http_status) describing a judging job's progress."""
    # Make sure the job id is valid
    try:
        job = Job.fetch(job_id, connection=REDIS_CONN)
    except NoSuchJobError:
        return {'status': 'internal_error', 'error': 'NO_SUCH_JOB', 'job_id': job_id}, 200
    # Depending on the job's current status, return some info
    if job.is_queued:
        return job.meta, 202
    elif job.is_finished:
        return job.result, 200
    elif job.is_failed:
        return {'status': 'internal_error', 'error': 'JOB_FAILED', 'job_id': job_id}, 200
    else:
        # Still running: 202 until the worker marks its meta as 'done'.
        return job.meta, 202 if job.meta['status'] != 'done' else 200
if __name__ == "__main__":
    # Development entry point; run behind a production WSGI server otherwise.
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 80)))
|
#! /usr/bin/python
# -*- coding: iso-8859-1 -*-
class Mapeamento(object):
    """A source-to-destination mapping with include/exclude name patterns.

    Fix: the include/exclude lists were mutable default arguments, so every
    instance built with the defaults shared (and could corrupt) the same two
    list objects.  ``None`` sentinels restore per-instance lists.
    """

    def __init__(self, origem, destino, listaIncluidos=None, listaExcluidos=None):
        self.origem = origem    # source path
        self.destino = destino  # destination path
        # Defaults: include everything ('*'), exclude nothing.
        self.listaIncluidos = ['*'] if listaIncluidos is None else listaIncluidos
        self.listaExcluidos = [] if listaExcluidos is None else listaExcluidos
|
# -*- coding: utf-8 -*-
"""Tests for view serializers."""
from ..test_view_serializers import (
TestBaseViewFeatureViewSet, TestBaseViewFeatureUpdates)
from .base import NamespaceMixin
# Re-runs the shared read-operation tests under this package's URL namespace.
class TestViewFeatureViewSet(TestBaseViewFeatureViewSet, NamespaceMixin):
    """Test ViewFeaturesViewSet read operations."""
# NOTE(review): unlike the read tests above, this does not mix in
# NamespaceMixin -- confirm that is intentional.
class TestViewFeatureUpdates(TestBaseViewFeatureUpdates):
    """Test ViewFeaturesViewSet update operations."""
|
from .nbconvert import *
def _jupyter_nbextension_paths():
return [
dict(section="notebook", src="static", dest="nbsimplegrader", require="nbsimplegrader/authoring_tools"),
dict(section="tree", src="static", dest="nbsimplegrader", require="nbsimplegrader/tree")
]
|
import marshaltools
# Fetch the 'Cosmology' program from the Marshal and dump its source table.
prog = marshaltools.ProgramList('Cosmology')
t = prog.table
# Name of the table's second source row -- presumably row 0 is skipped on
# purpose; confirm.
name = t['name'][1]
print (t)
print (name)
# Download that source's light curve.
lc = prog.get_lightcurve(name)
|
'''
Usage:
test twoSum_1.py by using pytest
'''
import pytest
import os
import sys
# append parent path
sys.path.append(os.path.pardir)
from twoSum_1 import Solution
from twoSum_1 import Solution2
# (nums, target, expected index pair) cases shared by both implementations.
test_data = [
    ([2, 7, 11, 15], 13, [0, 2]),
    ([3, 2, 4], 6, [1, 2])
]


@pytest.mark.parametrize("nums,target,expected", test_data)
def test_solution(nums, target, expected):
    # Both Solution variants must agree on every parametrized case.
    cs = Solution()
    assert cs.twoSum(nums, target) == expected
    cs2 = Solution2()
    assert cs2.twoSum(nums, target) == expected
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import re
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
from pathlib import Path, PurePath
from typing import Collection
from pants.base.build_environment import get_buildroot
from pants.base.build_root import BuildRoot
from pants.core.util_rules.environments import EnvironmentTarget, LocalEnvironmentTarget
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.internals.selectors import Get
from pants.engine.rules import _uncacheable_rule, collect_rules
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
class AsdfPathString(str, Enum):
    """Sentinel strings users may place in search-path options to mean ASDF dirs."""

    STANDARD = "<ASDF>"
    LOCAL = "<ASDF_LOCAL>"

    @staticmethod
    def contains_strings(search_paths: Collection[str]) -> tuple[bool, bool]:
        """Return (has_standard, has_local) membership flags for search_paths."""
        return AsdfPathString.STANDARD in search_paths, AsdfPathString.LOCAL in search_paths

    def description(self, tool: str) -> str:
        """Human-readable help text for this sentinel, phrased for the given tool."""
        if self is self.STANDARD:
            return softwrap(
                f"""
                all {tool} versions currently configured by ASDF `(asdf shell, ${{HOME}}/.tool-versions)`,
                with a fallback to all installed versions
                """
            )
        if self is self.LOCAL:
            return f"the ASDF {tool} with the version in `BUILD_ROOT/.tool-versions`"
        raise NotImplementedError(f"{self} has no description.")
@dataclass(frozen=True)
class AsdfToolPathsRequest:
    """Request to resolve ASDF-managed bin paths for one tool."""

    env_tgt: EnvironmentTarget
    # e.g. "python"; also names the ASDF plugin and installs directories.
    tool_name: str
    # Human-readable description used in warning messages.
    tool_description: str
    # Which sentinels were present in the search-path option.
    resolve_standard: bool
    resolve_local: bool
    # Name of the option the sentinels came from (for warnings).
    paths_option_name: str
    # Subdirectory of each install that holds the executables.
    bin_relpath: str = "bin"
@dataclass(frozen=True)
class AsdfToolPathsResult:
    """Resolved ASDF bin directories for one tool (standard and/or local)."""

    tool_name: str
    standard_tool_paths: tuple[str, ...] = ()
    local_tool_paths: tuple[str, ...] = ()

    @classmethod
    async def get_un_cachable_search_paths(
        cls,
        search_paths: Collection[str],
        env_tgt: EnvironmentTarget,
        tool_name: str,
        tool_description: str,
        paths_option_name: str,
        bin_relpath: str = "bin",
    ) -> AsdfToolPathsResult:
        """Resolve ASDF paths only when a sentinel actually appears in search_paths."""
        resolve_standard, resolve_local = AsdfPathString.contains_strings(search_paths)
        if resolve_standard or resolve_local:
            # AsdfToolPathsResult is not cacheable, so only request it if absolutely necessary.
            return await Get(
                AsdfToolPathsResult,
                AsdfToolPathsRequest(
                    env_tgt=env_tgt,
                    tool_name=tool_name,
                    tool_description=tool_description,
                    resolve_standard=resolve_standard,
                    resolve_local=resolve_local,
                    paths_option_name=paths_option_name,
                    bin_relpath=bin_relpath,
                ),
            )
        # No sentinels: empty result, no filesystem access.
        return AsdfToolPathsResult(tool_name)
async def _resolve_asdf_tool_paths(
    env_tgt: EnvironmentTarget,
    tool_name: str,
    paths_option_name: str,
    tool_description: str,
    tool_env_name: str,
    bin_relpath: str,
    env: EnvironmentVars,
    local: bool,
) -> tuple[str, ...]:
    """Resolve the ASDF-managed bin directories for one tool.

    Versions are gathered from (in accumulation order) the ASDF_<TOOL>_VERSION
    shell variable and then a .tool-versions file: the build root's when
    ``local`` is True, else ``$HOME``'s.  Returns () whenever ASDF is absent,
    the tool's plugin is missing, or no versions are installed; for
    non-local resolution with no configured versions, falls back to every
    installed version.
    """
    # Only local environments can look at this machine's filesystem.
    if not (isinstance(env_tgt.val, LocalEnvironmentTarget) or env_tgt.val is None):
        return ()
    asdf_dir = get_asdf_data_dir(env)
    if not asdf_dir:
        return ()
    asdf_dir = Path(asdf_dir)
    # Ignore ASDF if the tool's plugin isn't installed.
    asdf_tool_plugin = asdf_dir / "plugins" / tool_name
    if not asdf_tool_plugin.exists():
        return ()
    # Ignore ASDF if no versions of the tool have ever been installed (the installs folder is
    # missing).
    asdf_installs_dir = asdf_dir / "installs" / tool_name
    if not asdf_installs_dir.exists():
        return ()
    # Find all installed versions.
    asdf_installed_paths: list[str] = []
    for child in asdf_installs_dir.iterdir():
        # Aliases, and non-cpython installs (for Python) may have odd names.
        # Make sure that the entry is a subdirectory of the installs directory.
        if child.is_dir():
            # Make sure that the subdirectory has a bin directory.
            bin_dir = child.joinpath(bin_relpath)
            if bin_dir.exists():
                asdf_installed_paths.append(str(bin_dir))
    # Ignore ASDF if there are no installed versions.
    if not asdf_installed_paths:
        return ()
    asdf_paths: list[str] = []
    # version -> where it was configured (for warning messages).
    asdf_versions: OrderedDict[str, str] = OrderedDict()
    tool_versions_file = None
    # Support "shell" based ASDF configuration
    tool_env_version = env.get(tool_env_name)
    if tool_env_version:
        asdf_versions.update([(v, tool_env_name) for v in re.split(r"\s+", tool_env_version)])
    # Target the local .tool-versions file.
    if local:
        tool_versions_file = Path(get_buildroot(), ".tool-versions")
        if not tool_versions_file.exists():
            logger.warning(
                softwrap(
                    f"""
                    No `.tool-versions` file found in the build root, but <ASDF_LOCAL> was set in
                    `{paths_option_name}`.
                    """
                )
            )
            tool_versions_file = None
    # Target the home directory tool-versions file.
    else:
        home = env.get("HOME")
        if home:
            tool_versions_file = Path(home) / ".tool-versions"
            if not tool_versions_file.exists():
                tool_versions_file = None
    if tool_versions_file:
        # Parse the tool-versions file.
        # A tool-versions file contains multiple lines, one or more per tool.
        # Standardize that the last line for each tool wins.
        #
        # The definition of a tool-versions file can be found here:
        # https://asdf-vm.com/#/core-configuration?id=tool-versions
        tool_versions_lines = tool_versions_file.read_text().splitlines()
        last_line_fields = None
        for line in tool_versions_lines:
            fields = re.split(r"\s+", line.strip())
            if not fields or fields[0] != tool_name:
                continue
            last_line_fields = fields
        if last_line_fields:
            # Each remaining field is a version spec: "path:DIR", "ref:SHA",
            # the unsupported "system", or a plain version string.
            for v in last_line_fields[1:]:
                if ":" in v:
                    key, _, value = v.partition(":")
                    if key.lower() == "path":
                        asdf_paths.append(value)
                    elif key.lower() == "ref":
                        asdf_versions[value] = str(tool_versions_file)
                    else:
                        logger.warning(
                            softwrap(
                                f"""
                                Unknown version format `{v}` from ASDF configured by
                                `{paths_option_name}`, ignoring. This
                                version will not be considered when determining which {tool_description}
                                to use. Please check that `{tool_versions_file}`
                                is accurate.
                                """
                            )
                        )
                elif v == "system":
                    logger.warning(
                        softwrap(
                            f"""
                            System path set by ASDF configured by `{paths_option_name}` is unsupported, ignoring.
                            This version will not be considered when determining which {tool_description} to use.
                            Please remove 'system' from `{tool_versions_file}` to disable this warning.
                            """
                        )
                    )
                else:
                    asdf_versions[v] = str(tool_versions_file)
    # Map each configured version to its installed bin directory, warning for
    # versions that are configured but not installed.
    for version, source in asdf_versions.items():
        install_dir = asdf_installs_dir / version / bin_relpath
        if install_dir.exists():
            asdf_paths.append(str(install_dir))
        else:
            logger.warning(
                softwrap(
                    f"""
                    Trying to use ASDF version `{version}` configured by
                    `{paths_option_name}` but `{install_dir}` does not
                    exist. This version will not be considered when determining which {tool_description}
                    to use. Please check that `{source}` is accurate.
                    """
                )
            )
    # For non-local, if no paths have been defined, fallback to every version installed
    if not local and len(asdf_paths) == 0:
        # This could be appended to asdf_paths, but there isn't any reason to
        return tuple(asdf_installed_paths)
    else:
        return tuple(asdf_paths)
# TODO: This rule is marked uncacheable because it directly accesses the filesystem to examine ASDF configuration.
# See https://github.com/pantsbuild/pants/issues/10842 for potential future support for capturing from absolute
# paths that could allow this rule to be cached.
@_uncacheable_rule
async def resolve_asdf_tool_paths(
    request: AsdfToolPathsRequest, build_root: BuildRoot
) -> AsdfToolPathsResult:
    """Rule entry point: resolve standard and/or local ASDF paths per the request."""
    # ASDF_<TOOL>_VERSION mirrors asdf's own shell-override variable.
    tool_env_name = f"ASDF_{request.tool_name.upper()}_VERSION"
    env_vars_to_request = [
        "ASDF_DIR",
        "ASDF_DATA_DIR",
        tool_env_name,
        "HOME",
    ]
    env = await Get(EnvironmentVars, EnvironmentVarsRequest(env_vars_to_request))
    standard_tool_paths: tuple[str, ...] = ()
    if request.resolve_standard:
        standard_tool_paths = await _resolve_asdf_tool_paths(
            env_tgt=request.env_tgt,
            tool_name=request.tool_name,
            paths_option_name=request.paths_option_name,
            tool_description=request.tool_description,
            tool_env_name=tool_env_name,
            bin_relpath=request.bin_relpath,
            env=env,
            local=False,
        )
    local_tool_paths: tuple[str, ...] = ()
    if request.resolve_local:
        local_tool_paths = await _resolve_asdf_tool_paths(
            env_tgt=request.env_tgt,
            tool_name=request.tool_name,
            paths_option_name=request.paths_option_name,
            tool_description=request.tool_description,
            tool_env_name=tool_env_name,
            bin_relpath=request.bin_relpath,
            env=env,
            local=True,
        )
    return AsdfToolPathsResult(
        tool_name=request.tool_name,
        standard_tool_paths=standard_tool_paths,
        local_tool_paths=local_tool_paths,
    )
def get_asdf_data_dir(env: EnvironmentVars) -> PurePath | None:
    """Returns the location of asdf's installed tool versions.

    See https://asdf-vm.com/manage/configuration.html#environment-variables.

    `ASDF_DATA_DIR` overrides the directory in which the plugins, installs,
    and shims are installed.  `ASDF_DIR` is deliberately ignored: it only
    locates the asdf tool itself, not the managed versions.  Per the
    documentation, when `ASDF_DATA_DIR` is unset the fallback is
    `$HOME/.asdf`.

    :param env: The environment to use to look up asdf.
    :return: Path to the data directory, or None if it couldn't be found in the environment.
    """
    configured = env.get("ASDF_DATA_DIR")
    if configured:
        return PurePath(configured)
    home = env.get("HOME")
    return PurePath(home) / ".asdf" if home else None
def rules():
    # Register this module's rules (the @_uncacheable_rule above) with the engine.
    return collect_rules()
|
import hashlib
from cache_backends import get_backend
def get_cache_key(environ):
    """Derive a stable SHA-1 cache key from the request path and query string.

    Fix: the old version passed a ``unicode`` object straight to
    ``hashlib.sha1``, which raises for any non-ASCII path on Python 2 (and
    ``unicode`` does not exist at all on Python 3).  The key text is now
    explicitly UTF-8 encoded before hashing.
    """
    raw_key = u'%s?%s' % (environ['PATH_INFO'], environ['QUERY_STRING'])
    return hashlib.sha1(raw_key.encode('utf-8')).hexdigest()
def cache_middleware(app, backend=None):
    """WSGI middleware that caches whole responses keyed on path + query string.

    backend: name passed to get_backend(); None selects the default backend.
    """
    cache = get_backend(backend)

    def caching_app(environ, start_response):
        # Captures the wrapped app's (status, headers) so they can be cached
        # alongside the body.
        mutable_headers = []
        key = get_cache_key(environ)

        def caching_start_response(status, headers):
            mutable_headers.append((status, headers))
            start_response(status, headers)

        if cache.exists(key):
            # Cache hit: replay the stored status/headers/body verbatim.
            (status, headers), body = cache.get(key)
            start_response(status, headers)
            return body
        else:
            result = app(environ, caching_start_response)
            # NOTE(review): every response is cached regardless of status
            # code, and ``result`` may be a one-shot iterator -- storing and
            # also returning it could serve an exhausted body.  Confirm the
            # backends materialize it.
            cache.set(key, (mutable_headers[0], result))
            return result

    return caching_app
|
# Toy branching demo: ask the user and print eating advice.
hungry=input("are you hungry")
if hungry=="yes":
    print("eat something")
    print("drink water")
    print("eat biriyani")
else:
    # Any answer other than exactly "yes" lands here.
    print("dont eat")
|
from math import ceil
print('Loja de tintas\n')
# Coverage assumptions: 1 litre paints 6 m²; cans hold 18 L, gallons 3.6 L.
area_a_ser_pintada = float(input('Informe o tamanho em metros quadrados da área a ser pintada: '))
um_litro_pinta = 6
quantidade_de_uma_lata = 18
preco_de_cada_lata = 80.00
quantidade_de_um_galao = 3.6
preco_de_cada_galao = 25.00
litros_necessarios = area_a_ser_pintada / um_litro_pinta
# Whole containers only: always round up.
latas_necessarias = int(ceil(litros_necessarios / quantidade_de_uma_lata))
valor_total_para_latas = latas_necessarias * preco_de_cada_lata
galoes_necessarios = int(ceil(litros_necessarios / quantidade_de_um_galao))
valor_total_para_galoes = galoes_necessarios * preco_de_cada_galao
print('')
print('=== SE VOCÊ COMPRAR LATAS ===')
print('Você vai precisar de {} latas de tinta a serem compradas'.format(latas_necessarias))
print('Preço total de latas: R$ {:.2f}'.format(valor_total_para_latas))
print('')
print('=== SE VOCÊ COMPRAR GALÕES ===')
print('Você vai precisar de {} galões de tinta a serem comprados'.format(galoes_necessarios))
print('Preço total de galões: R$ {:.2f}'.format(valor_total_para_galoes))
print('')
print('=== SE VOCÊ COMPRAR MISTO DE GALÕES E LATAS ===')
# NOTE(review): this "mixed" section actually prices a single container type
# (gallons for jobs under 3 gallons, cans otherwise) using fractional counts;
# confirm the intended mixed-purchase rule.
if litros_necessarios < (quantidade_de_um_galao * 3):
    galoes_necessarios = litros_necessarios / quantidade_de_um_galao
    valor_total_para_galoes = galoes_necessarios * preco_de_cada_galao
    print('Você vai precisar de {} galões de tinta a ser comprado'.format(galoes_necessarios))
    print('Preço total de galões: R$ {:.2f}'.format(valor_total_para_galoes))
else:
    latas_necessarias = litros_necessarios / quantidade_de_uma_lata
    valor_total_para_latas = latas_necessarias * preco_de_cada_lata
    print('Você vai precisar de {} latas de tinta a ser comprado'.format(latas_necessarias))
    # Fix: previously printed the gallon total (valor_total_para_galoes)
    # under the "latas" label.
    print('Preço total de latas: R$ {:.2f}'.format(valor_total_para_latas))
|
# Repeatedly apply the frequency changes in input.txt (one signed int per
# line) until some running total repeats, then print it.
with open('input.txt', 'r') as f:
    total = 0
    # Fix: the starting total (0) is itself a reached frequency, so seed the
    # seen-set with it; previously a sequence that returned exactly to 0 was
    # reported one duplicate too late.
    seen = {total}
    found = False
    while not found:
        for num in f:
            total += int(num)
            if total in seen:
                print("found duplicate total")
                print(total)
                found = True
                break
            else:
                seen.add(total)
        # Re-read the change list from the top for another pass.
        f.seek(0)
|
# -*- coding: utf-8 -*-
class Solution:
    """Classic FizzBuzz: numbers 1..n with multiples of 3/5/15 replaced."""

    def fizzBuzz(self, n):
        """Return the FizzBuzz sequence for 1..n as a list of strings."""
        words = []
        for value in range(1, n + 1):
            by_three = value % 3 == 0
            by_five = value % 5 == 0
            if by_three and by_five:
                words.append("FizzBuzz")
            elif by_five:
                words.append("Buzz")
            elif by_three:
                words.append("Fizz")
            else:
                words.append(str(value))
        return words
if __name__ == "__main__":
    # Smoke test: the first 15 FizzBuzz values must match the known sequence.
    expected = [
        "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz",
        "11", "Fizz", "13", "14", "FizzBuzz",
    ]
    assert Solution().fizzBuzz(15) == expected
|
##########################################################
# #
# General clustering abstract class with specific #
# implementations for k-means, DBSCAN and biclustering #
# (from Cheng and Church's "Biclustering Expression #
# Data") #
# #
# Authors: Amy Peerlinck and Neil Walton #
# #
##########################################################
from sklearn.cluster import KMeans, DBSCAN
from abc import ABC, abstractmethod
import numpy as np
import time, skfuzzy
class Cluster(ABC):
    """Abstract base class shared by the clustering implementations.

    Stores the data matrix and requires subclasses to provide
    ``assign_clusters``.
    """

    def __init__(self, data):
        # Data matrix to be clustered (rows = samples).
        self.data = data
        super().__init__()

    @abstractmethod
    def assign_clusters(self):
        """Assign each data point to a cluster and return the assignments."""
class Kmeans(Cluster):
    """k-means clustering backed by scikit-learn's ``KMeans``."""

    def __init__(self, data, k=3):
        # Number of clusters to fit.
        self.k = k
        super().__init__(data)

    def assign_clusters(self):
        """Fit k-means on the data and return the hard cluster labels."""
        model = KMeans(n_clusters=self.k)
        model.fit(self.data)
        return np.array(model.predict(self.data))

    def assign_fuzzy_clusters(self):
        """Fit fuzzy c-means (fuzzifier m=2) and return the predicted
        membership matrix from ``skfuzzy.cluster.cmeans_predict``."""
        fit_result = skfuzzy.cluster.cmeans(
            self.data.T, self.k, 2, error=0.005, maxiter=1000, init=None)
        centers = fit_result[0]
        predict_result = skfuzzy.cluster.cmeans_predict(
            self.data.T, centers, 2, error=0.005, maxiter=1000)
        return predict_result[0]
class Dbscan(Cluster):
    """Thin wrapper around scikit-learn's DBSCAN implementation."""

    def __init__(self, data, min_points=4, e=0.5):
        # Minimum neighbourhood size and epsilon radius for DBSCAN.
        self.min_points = min_points
        self.e = e
        super().__init__(data)

    def assign_clusters(self):
        """Run DBSCAN on the data and return the cluster label array
        (noise points are labelled -1 by scikit-learn)."""
        model = DBSCAN(eps=self.e, min_samples=self.min_points)
        return model.fit_predict(self.data)
class Bicluster(Cluster):
    '''
    Biclustering implementation according to the algorithm
    provided in Cheng and Church's "Biclustering Expression
    Data".  A bicluster is a subset of rows and columns whose
    mean squared residue (MSR) is below a threshold delta.
    '''
    def __init__(self, data):
        super().__init__(data)
        # Indices (into the full data matrix) of the rows/columns still
        # included in the bicluster currently under construction.
        self.remaining_rows = np.arange(data.shape[0])
        self.remaining_cols = np.arange(data.shape[1])
    def assign_clusters(self, delta=0.1, alpha=1.2, n=10):
        '''
        Using biclustering, cluster the data points into
        n various biclusters and return a list of (rows, cols)
        index tuples, one per bicluster.
        Steps:
        1) Remove multiple rows/columns until cluster converges
        2) Remove individual rows/columns until msr < delta
        3) Add rows/columns back in to get maximal cluster
        4) Randomize elements that have been assigned to cluster
        5) Repeat n times to create n biclusters
        '''
        self.alpha = alpha
        input_data = np.copy(self.data)
        clusters = []
        # Bounds for the random noise used to mask cells already clustered.
        min_val = np.min(self.data)
        max_val = np.max(self.data)
        for i in range(n):
            print ('Finding bicluster ', i+1, ' of ',n)
            # Reset the index bookkeeping for each new bicluster.
            self.remaining_rows = np.arange(self.data.shape[0])
            self.remaining_cols = np.arange(self.data.shape[1])
            cluster = self._get_bicluster(input_data, delta)
            clusters.append(cluster)
            # Randomize the data that has been assigned to a cluster so the
            # same bicluster is not found again.  (Loop variables renamed:
            # they previously shadowed the outer loop's `i`.)
            for r in cluster[0]:
                for c in cluster[1]:
                    input_data[r][c] = np.random.uniform(low=min_val, high=max_val)
        return clusters
    def _get_bicluster(self, input, delta):
        '''
        Build a single bicluster from *input* (working copy of the data)
        and return a (rows, cols) tuple of the surviving original indices.
        NOTE: the parameter name shadows the `input` builtin; kept for
        interface compatibility.
        '''
        bicluster = input
        msr = self._mean_squared_residual(bicluster)
        next_cluster = self._multiple_node_deletion(bicluster, delta, alpha=self.alpha)
        # Use _multiple_node_deletion until the cluster converges
        while (not np.array_equal(bicluster, next_cluster)):
            bicluster = next_cluster
            next_cluster = self._multiple_node_deletion(bicluster, delta, alpha=self.alpha)
        msr = self._mean_squared_residual(bicluster)
        # Fine tune the bicluster until the threshold msr (delta) is met
        while (msr > delta):
            bicluster = self._single_node_deletion(bicluster)
            msr = self._mean_squared_residual(bicluster)
        # Cheng and Church state they run the addition step only once
        bicluster = self._node_addition(bicluster)
        print (bicluster.shape)
        return (self.remaining_rows, self.remaining_cols)
    def _node_addition(self, submatrix):
        '''
        Add rows and columns back in that do
        not increase the MSR back to above
        the threshold
        '''
        deleted_rows = np.setdiff1d(np.arange(self.data.shape[0]),
            self.remaining_rows)
        deleted_cols = np.setdiff1d(np.arange(self.data.shape[1]),
            self.remaining_cols)
        msr = self._mean_squared_residual(submatrix)
        # Means of all the original columns, but only
        # the selected rows of the bicluster
        col_scores = self._add_col_scores(submatrix)
        # A column qualifies when its score is below the current MSR and it
        # was previously deleted.
        temp_cols = np.argwhere(col_scores < msr).flatten()
        cols_to_add = np.intersect1d(temp_cols,deleted_cols)
        # Add the columns selected above
        if len(cols_to_add) != 0:
            new_cols = np.take(self.data,
                cols_to_add, axis=1)[self.remaining_rows,:]
            submatrix = np.append(submatrix, new_cols, axis=1)
            self.remaining_cols = np.hstack((self.remaining_cols, cols_to_add))
        # Recalculate variables and add rows
        msr = self._mean_squared_residual(submatrix)
        row_scores = self._add_row_scores(submatrix)
        temp_rows = np.argwhere(row_scores < msr).flatten()
        rows_to_add = np.intersect1d(temp_rows,deleted_rows)
        # Add the rows selected above
        if len(rows_to_add) != 0:
            new_rows = np.take(self.data,
                rows_to_add, axis=0)[:, self.remaining_cols]
            submatrix = np.append(submatrix, new_rows, axis=0)
            self.remaining_rows = np.hstack((self.remaining_rows, rows_to_add))
        return submatrix
    def _single_node_deletion(self, submatrix):
        '''
        Single node deletion algorithm (Algorithm 1 from
        Cheng and Church's paper). Deletes the single row
        or column with the highest mean squared residue.
        '''
        row_scores = self._row_scores(submatrix)
        col_scores = self._col_scores(submatrix)
        if np.max(row_scores) >= np.max(col_scores):
            ind = np.argmax(row_scores)
            new_matrix = np.delete(submatrix, ind, 0)
            self.remaining_rows = np.delete(self.remaining_rows, ind, 0)
        else:
            # BUG FIX: this branch previously used np.argmax(row_scores),
            # deleting the wrong column index.
            ind = np.argmax(col_scores)
            new_matrix = np.delete(submatrix, ind, 1)
            self.remaining_cols = np.delete(self.remaining_cols, ind, 0)
        return new_matrix
    def _multiple_node_deletion(self, submatrix, delta, alpha=1.2):
        '''
        Delete all rows and columns whose mean squared residues
        are greater than alpha times the MSR of the entire
        submatrix
        '''
        msr = self._mean_squared_residual(submatrix)
        threshold = alpha * msr
        # MSR is already below the threshold amount delta
        if msr < delta:
            return submatrix
        row_scores = self._row_scores(submatrix)
        rows_to_delete = np.argwhere(row_scores > threshold).flatten()
        # Delete the rows selected above
        temp_matrix = np.delete(submatrix, rows_to_delete, 0)
        self.remaining_rows = np.delete(self.remaining_rows, rows_to_delete, 0)
        # Recalculate mean and msr for the new matrix
        new_msr = self._mean_squared_residual(temp_matrix)
        threshold = alpha * new_msr
        # Return if removing rows made msr < delta
        if new_msr < delta:
            return temp_matrix
        col_scores = self._col_scores(temp_matrix)
        cols_to_delete = np.argwhere(col_scores > threshold).flatten()
        # Delete the columns identified above
        final_matrix = np.delete(temp_matrix, cols_to_delete, 1)
        self.remaining_cols = np.delete(self.remaining_cols, cols_to_delete, 0)
        return final_matrix
    def _row_scores(self, submatrix):
        '''
        Return the mean squared residue score of each row
        of the submatrix (axis=1 mean of squared residues).
        '''
        submatrix_mean = np.mean(submatrix)
        col_means = np.mean(submatrix,axis=0).T
        row_means = np.mean(submatrix,axis=1)
        col_means = np.reshape(col_means, (1, len(col_means)))
        row_means = np.reshape(row_means, (len(row_means), 1))
        # residue(i,j) = a_ij - rowmean_i - colmean_j + overall mean
        residues = np.add(np.subtract(np.subtract(submatrix,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues, axis=1)
        return msr
    def _col_scores(self, submatrix):
        '''
        Return the mean squared residue score of each column
        of the submatrix (axis=0 mean of squared residues).
        '''
        submatrix_mean = np.mean(submatrix)
        col_means = np.mean(submatrix,axis=0).T
        row_means = np.mean(submatrix,axis=1)
        col_means = np.reshape(col_means, (1, len(col_means)))
        row_means = np.reshape(row_means, (len(row_means), 1))
        residues = np.add(np.subtract(np.subtract(submatrix,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues, axis=0)
        return msr
    def _add_col_scores(self, submatrix):
        '''
        Return the scores for each original column (restricted to
        the remaining rows) for the column addition step.
        '''
        submatrix_mean = np.mean(submatrix)
        row_subset = np.take(self.data, self.remaining_rows, axis=0)
        col_means = np.mean(row_subset,axis=0)
        row_means = np.mean(submatrix,axis=1).T
        col_means = np.reshape(col_means, (1, len(col_means)))
        row_means = np.reshape(row_means, (len(row_means), 1))
        residues = np.add(np.subtract(np.subtract(row_subset,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues, axis=0)
        return msr
    def _add_row_scores(self, submatrix):
        '''
        Return the scores for each original row (restricted to
        the remaining columns) for the row addition step.
        '''
        submatrix_mean = np.mean(submatrix)
        col_subset = np.take(self.data, self.remaining_cols, axis=1)
        col_means = np.mean(submatrix,axis=0)
        row_means = np.mean(col_subset,axis=1).T
        col_means = np.reshape(col_means, (1, len(col_means)))
        row_means = np.reshape(row_means, (len(row_means), 1))
        residues = np.add(np.subtract(np.subtract(col_subset,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues, axis=1)
        return msr
    def _node_score(self, ind, submatrix, row_or_column, row_means,
        column_means, submatrix_mean):
        '''
        Element-wise (loop-based) mean squared residue of a single row or
        column.  The row_or_column parameter ('row'/'column') selects which
        the index refers to.  Currently unused by the vectorized pipeline.
        '''
        score = 0
        if row_or_column == 'row':
            size = submatrix.shape[1]
            row_mean = row_means[ind]
            for j in range(size):
                column_mean = column_means[j]
                residue = submatrix[ind,j] - row_mean - column_mean + submatrix_mean
                squared_residue = np.power(residue, 2)
                score += (squared_residue/float(size))
        if row_or_column == 'column':
            size = submatrix.shape[0]
            column_mean = column_means[ind]
            for i in range(size):
                row_mean = row_means[i]
                residue = submatrix[i,ind] - row_mean - column_mean + submatrix_mean
                squared_residue = np.power(residue, 2)
                score += (squared_residue/float(size))
        return score
    def _inverse_node_score(self, ind, submatrix, row_means,
        column_means, submatrix_mean):
        '''
        Mean squared residue of a row with the sign of the data entry
        inverted (used for "inverted" rows in Cheng and Church's
        addition step).  Currently unused by the vectorized pipeline.
        '''
        score = 0
        size = submatrix.shape[1]
        row_mean = row_means[ind]
        for j in range(size):
            column_mean = column_means[j]
            residue = row_mean - submatrix[ind,j] - column_mean + submatrix_mean
            squared_residue = np.power(residue, 2)
            score += (squared_residue/float(size))
        return score
    def _mean_squared_residual(self, submatrix):
        '''
        Calculate the mean squared residual for the
        specified submatrix
        '''
        submatrix_mean = np.mean(submatrix)
        row_means = np.mean(submatrix,axis=0).T
        col_means = np.mean(submatrix,axis=1)
        row_means = np.reshape(row_means, (1, len(row_means)))
        col_means = np.reshape(col_means, (len(col_means), 1))
        residues = np.add(np.subtract(np.subtract(submatrix,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues)
        return msr
    def _msr(self, submatrix):
        '''
        Duplicate of _mean_squared_residual, kept for backward
        compatibility with any external callers.
        '''
        submatrix_mean = np.mean(submatrix)
        row_means = np.mean(submatrix,axis=0).T
        col_means = np.mean(submatrix,axis=1)
        row_means = np.reshape(row_means, (1, len(row_means)))
        col_means = np.reshape(col_means, (len(col_means), 1))
        residues = np.add(np.subtract(np.subtract(submatrix,
            row_means), col_means), submatrix_mean)
        squared_residues = np.power(residues, 2)
        msr = np.mean(squared_residues)
        return msr
def load_iris(path='../data/iris.txt'):
    '''
    Load the iris data set from a comma-separated text file.

    Each line holds four numeric features followed by the species name.
    The file handle is now closed deterministically (previously the file
    was opened and never closed), and the path is parameterized with the
    original location as a backward-compatible default.

    Returns:
        (data, labels) -- np.ndarray of shape (n, 4) and np.ndarray of
        int labels (setosa=0, versicolor=1, virginica=2).
    '''
    label_map = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    data_matrix = []
    labels = []
    with open(path, 'r') as iris_text:
        for line in iris_text:
            temp_list = line.strip().split(',')
            if not line.strip():
                continue  # skip blank lines (e.g. a trailing newline)
            features = np.array([float(x) for x in temp_list[:4]])
            data_matrix.append(features)
            if temp_list[-1] in label_map:
                labels.append(label_map[temp_list[-1]])
    return (np.array(data_matrix), np.array(labels))
if __name__ == '__main__':
    # Demo: load iris, build the three cluster objects, run DBSCAN.
    in_, out = load_iris()
    k = Kmeans(in_, k=3)
    d = Dbscan(in_, min_points=4, e=.5)
    b = Bicluster(in_)
    clusts = d.assign_clusters()
    print(clusts)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from itertools import zip
from openpyxl import load_workbook
from sklearn.decomposition import PCA
from scipy.stats.stats import pearsonr
from scipy.stats import linregress
#read from excel file
def read_excel(filename):
    """Read an OHLC spreadsheet and return (timestamps, typical prices).

    Column 1 holds the timestamp; columns 2-4 hold high/low/close.
    Row 1 is assumed to be a header.  The typical price per row is
    (high + low + close) / 3.
    """
    workbook = load_workbook(filename)
    sheet = workbook.active
    times = []
    means = []
    # Data starts on spreadsheet row 2 (row 1 is the header).
    for row in range(2, sheet.max_row + 1):
        times.append(sheet.cell(row=row, column=1).value)
        high = sheet.cell(row=row, column=2).value
        low = sheet.cell(row=row, column=3).value
        close = sheet.cell(row=row, column=4).value
        means.append((high + low + close) / 3)
    return times, means
# given two list of key values pairs and combine them into a dicionary
def transformData(keys1, values1, keys2, values2):
    """Zip each key list with its value list and return the two dicts."""
    return dict(zip(keys1, values1)), dict(zip(keys2, values2))
# given two dictionaries, combine them into a single dictionary in a way that
# only the key value pairs whose keys appear in both the dictionaries will be kept.
def combine(dictionary1, dictionary2):
    """Merge-join two dicts whose keys are in ascending order.

    Only keys present in BOTH inputs are kept; each surviving key maps
    to a two-element list [value1, value2].  Returns (values, dictionary)
    where *values* is the list of those pairs in key order.
    """
    keys1 = list(dictionary1.keys())
    keys2 = list(dictionary2.keys())
    values1 = list(dictionary1.values())
    values2 = list(dictionary2.values())
    values = []
    dictionary = {}
    i, j = 0, 0
    # Classic two-pointer merge: advance the side with the smaller key.
    while i < len(keys1) and j < len(keys2):
        k1, k2 = keys1[i], keys2[j]
        if k1 == k2:
            pair = [values1[i], values2[j]]
            values.append(pair)
            dictionary[k1] = pair
            i += 1
            j += 1
        elif k1 > k2:
            j += 1
        else:
            i += 1
    return values, dictionary
# read data
T_times, T_means = read_excel('T1.xlsx')
TF_times, TF_means = read_excel('T2.xlsx')
# transform to dictionary
T_dict, TF_dict = transformData(T_times, T_means, TF_times, TF_means)
# combine data: keep only timestamps present in both series
values, dictionary = combine(T_dict, TF_dict)
# Y[:,0] is the price of the ten year bond future
# Y[:.1] is the price of the five year bond future
Y = np.array(values)
Rti = [] # return of ten year stock for period i
Rtfi = [] # return of five year stock for period i
sigmaT = [] # standard derivation of ten year stock for period 20
sigmaTF = [] # standard derivation of five year stock for period 20
# NOTE(review): the loop starts at i=14, but the rolling window Y[i-19:i]
# needs i >= 19 to be well-formed; for i in 14..18 the negative start index
# wraps around and produces an unintended (possibly empty) slice, so np.std
# may return nan there.  Also the window holds 19 elements, not the 20
# mentioned in the comments — confirm the intended warm-up length.
for i in range(14, Y[:,0].size):
    # ten year return and standard derivation; calculate from the 20th element of the price array
    Treturn = (Y[i,0] - Y[i-1,0]) / Y[i-1,0]
    Rti.append(Treturn)
    a = np.asarray(Y[i-19:i,0])
    sigmaT.append(np.std(a))
    # five year return and standard derivation
    TFreturn = (Y[i,1] - Y[i-1,1]) / Y[i-1,1]
    Rtfi.append(TFreturn)
    a = np.asarray(Y[i-19:i,1])
    sigmaTF.append(np.std(a))
# create signal: volatility-normalized return spread between the two futures
signals = []
for i in range(len(Rti)):
    # signals.append(Rti[i])
    signals.append((Rti[i] / sigmaT[i]) - (Rtfi[i] / sigmaTF[i]))
plt.plot(signals)
|
# _*_ coding: utf-8 _*_
# @Time : 2017/8/22 10:55
# @Author : GanZiB
# @Site :
# @File : JieBaDemo.py
# @Software: PyCharm
import jieba
# Sample headlines used to exercise jieba's word segmentation.
titles = [
    '中印边界纠纷会影响12天后召开的金砖厦门峰会吗?',
    '历史上最著名的三个“女流氓”,连上海皇帝杜月笙都对她敬畏三分',
    '美国海底发现巨大断层,科学家:与日本大地震构造一样,无法预防',
    '带案督办:成吨鸡蛋壳去了哪里 省案件督办组组长、副组长到蒲江督办环境问题',
    '新提的汉兰达,上牌却被喷字,车主:为何连SUV都要喷?',
    '郭敬明事件公关太给力,朱梓骁当炮灰,陈学冬删评论拉黑网友!',
]
for title in titles:
    # Precise-mode segmentation (cut_all=False).
    seg_list = jieba.cut(title, cut_all=False)
    print(title + "\n")
    print(",".join(seg_list))
    print("\n")
|
'''
高楼扔鸡蛋问题
你面前有一栋从 1 到 N 共 N 层的楼,
然后给你 K 个鸡蛋(K 至少为 1)。
现在确定这栋楼存在楼层 0 <= F <= N,在这层楼将鸡蛋扔下去,
鸡蛋恰好没摔碎(高于 F 的楼层都会碎,低于 F 的楼层都不会碎)。
现在问你,最坏情况下,你至少要扔几次鸡蛋,才能确定这个楼层 F 呢?
'''
def drop_egg(egg_num, building_height):
    '''
    Egg-drop puzzle: with egg_num eggs and building_height floors,
    return the minimum number of drops that guarantees finding the
    critical floor F in the worst case.

    The recursion is memoized — the naive version recomputes the same
    (k, n) states exponentially many times; results are unchanged.
    '''
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def dp(k, n):
        # 当鸡蛋数 K 为 1 时,显然只能线性扫描所有楼层
        # (one egg left: must scan floors linearly, worst case n drops)
        if k == 1:
            return n
        # 当楼层数 N 等于 0 时,显然不需要扔鸡蛋 (no floors: no drops)
        if n == 0:
            return 0
        res = float('INF')
        for i in range(1, n + 1):
            # Drop at floor i: broken -> k-1 eggs on the i-1 floors below;
            # intact -> k eggs on the n-i floors above; +1 for this drop.
            res = min(res, max(dp(k - 1, i - 1), dp(k, n - i)) + 1)
        return res
    return dp(egg_num, building_height)
# Demo: 3 eggs, 14 floors -> worst-case minimum number of drops.
egg_num = 3
building_height = 14
r = drop_egg(egg_num, building_height)
print(r)
|
import sqlite3
from datetime import date
import page
import bcolor
def markAnswer(conn, curr, aid):
    '''
    Mark the selected answer post as accepted and update it into the database.
    Prompts the user whether to overwrite if an accepted answer already exists.
    inputs:
        conn -- sqlite3.Connection
        curr -- sqlite3.Cursor
        aid -- pid of answer post (str)
    '''
    print(bcolor.pink('\n< Mark as Accepted Answer >'))
    # Look up which question this answer belongs to.
    curr.execute("SELECT * FROM answers where pid=?;", (aid, ))
    qid = curr.fetchone()['qid']
    prompt = 'Do you want to mark this post as an accepted answer? [y/n] '
    uin = page.getValidInput(prompt, ['y','n'])
    if uin == 'y':
        # theaid IS NOT NULL => the question already has an accepted answer.
        curr.execute("SELECT * FROM questions where pid=? and theaid IS NOT NULL;", (qid, ))
        aaExists = True if curr.fetchone() else False # aa: accepted answer
        if aaExists:
            # Require explicit confirmation before replacing the current one.
            prompt = bcolor.warning("Warning: Accepted answer already exists. Proceed to change? [y/n] ")
            uin = page.getValidInput(prompt, ['y','n'])
            if uin == 'y':
                changeAA(conn, curr, qid, aid)
                conn.commit()
            else:
                print('\nMarking answer is cancelled.')
        else:
            # No accepted answer yet: set it directly.
            changeAA(conn, curr, qid, aid)
            conn.commit()
def giveBadge(conn, curr, uid):
    '''
    Gives a badge to the poster of the selected post.
    At most one badge per poster per day; the badge name must exist in the
    badges table (matched case-insensitively by getBadgeRow).
    Inputs:
        conn -- sqlite3.Connection
        curr -- sqlite3.Cursor
        uid -- poster of the selected post (str)
    '''
    bdate = str(date.today())  # badge date recorded as ISO yyyy-mm-dd
    if not badgeAvailable(curr):
        print(bcolor.errmsg("action failed: badge is not available now."))
    elif isBadgeGivenTdy(curr, uid, bdate):
        print(bcolor.errmsg("action failed: this poster has already received a badge today."))
    else:
        print(bcolor.pink('\n< Give a Badge >'))
        displayAvailBadges(curr)
        # Loop until a badge is awarded or the user gives up.
        valid = False
        while not valid:
            bname = getBadge()
            badgeRow = getBadgeRow(curr, bname)
            if badgeRow: # badge already exists
                prompt = 'Do you want to give badge: "{}" to the poster? [y/n] '.format(badgeRow['bname'])
                uin = page.getValidInput(prompt, ['y','n'])
                if uin == 'y':
                    # Use the canonical bname from the table, not the raw input.
                    curr.execute('INSERT INTO ubadges VALUES (?, ?, ?);',(uid, bdate, badgeRow['bname']))
                    conn.commit()
                    print(bcolor.green('\nBadge Awarded to the poster!'))
                    valid = True
            else:
                print(bcolor.errmsg('action failed: badge: "{}" is not available.'.format(bname)))
            if not valid:
                # Offer to retry; declining exits the loop.
                prompt = 'Do you still want to give a badge? [y/n] '
                valid = not page.continueAction(prompt)
def addTag(conn, curr, pid):
    '''
    Add tags to the selected post.
    Tags already attached (compared case-insensitively) are rejected;
    the remaining new tags are inserted after user confirmation.
    Inputs:
        conn -- sqlite3.Connection
        curr -- sqllite3.Cursor
        pid -- pid of the selected post (str)
    '''
    print(bcolor.pink('\n< Add Tags >'))
    currentTags = getCurrentTag(curr, pid)
    displayCurrentTag(currentTags)
    # Loop until tags are inserted or the user gives up.
    valid = False
    while not valid:
        newTags = getValidTag()
        numNewTags = len(newTags)
        # Split user input into already-attached vs genuinely new tags.
        duplicates, nonDuplicates = getDuplicateTag(currentTags, newTags)
        numDups = len(duplicates)
        dsuffix = genSuffix(duplicates)
        tagsToAdd = True
        if numDups > 0:
            print(bcolor.errmsg('error: post already has the following tag{}: {}'.format(dsuffix, ', '.join(duplicates))))
            if numNewTags == numDups: # user enters duplicates only
                tagsToAdd = False
                prompt = 'Do you want to add another tag to the post? [y/n] '
                valid = not page.continueAction(prompt)
            else:
                # Keep only the non-duplicate subset.
                newTags = nonDuplicates
        nsuffix = genSuffix(newTags)
        if tagsToAdd:
            prompt = 'Do you want to add: "{}" ? [y/n] '.format('", "'.join(newTags))
            uin = page.getValidInput(prompt, ['y','n'])
            if uin == 'y':
                valid = True
                insertTag(conn, curr, pid, newTags)
                print(bcolor.green("\nTag{} Added!".format(nsuffix)))
            else:
                prompt = 'Do you still want to add tags to the post? [y/n] '
                valid = not page.continueAction(prompt)
def editPost(conn, curr, pid):
    '''
    Edit the title and body of the selected post.
    Prompts repeatedly until the user confirms the new values, then
    writes them to the posts table in a single UPDATE.
    inputs:
        conn: sqlite3.Connection
        curr: sqlite3.Cursor
        pid: pid
    '''
    # Fetch the current values so empty input can keep them unchanged.
    curr.execute("SELECT title, body FROM posts WHERE pid=?;", (pid, ))
    currT, currB = curr.fetchone()
    print(bcolor.pink("\n< Edit Post >"))
    # Re-prompt until the user confirms the edited title/body.
    confirmed = False
    while not confirmed:
        nTitle, nBody = changeTitleAndBody(currT, currB)
        confirmed = isChangeValid(nTitle, nBody)
    curr.execute('''
        UPDATE
            posts
        SET
            title = ?,
            body = ?
        WHERE
            pid = ?;''', (nTitle, nBody, pid))
    conn.commit()
    print(bcolor.green("\nPost Edited!"))
def changeTitleAndBody(oldTitle, oldBody):
    '''
    Prompt the user for a new title and new body.
    Pressing enter with nothing typed keeps the existing value.
    Inputs:
        oldTitle -- str
        oldBody -- str
    Return:
        (newTitle, newBody) -- str pair
    '''
    print()
    print(bcolor.cyan("You are currently editing:"))
    print("\n   Title: {}".format(oldTitle))
    print("\n   Body: {}".format(oldBody))
    print()
    print("Press enter with nothing typed if you want to keep the content the same.")
    print()
    # Empty string is falsy, so `or` falls back to the old value.
    newTitle = input("Enter a new title: ") or oldTitle
    print()
    newBody = input("Enter a new body: ") or oldBody
    return newTitle, newBody
def isChangeValid(nTitle, nBody):
    '''
    Show the edited title and body for confirmation.
    Inputs:
        nTitle -- str
        nBody -- str
    Return:
        bool -- True when the user accepts the new values
    '''
    print("\nIs the following information correct?")
    print("\n   Title: {}".format(nTitle))
    print("\n   Body: {}".format(nBody))
    prompt = "\nType 'y' if it is correct. Type 'n' if you want to start over: "
    # getValidInput only returns 'y' or 'n'.
    return page.getValidInput(prompt, ['y', 'n']) != 'n'
def changeAA(conn, curr, pid, aid):
    '''
    Record *aid* as the accepted answer for question *pid* and commit.
    inputs:
        conn -- sqlite3.Connection
        curr -- sqlite3.Cursor
        pid -- str
        aid -- str
    '''
    params = {'aid': aid, 'pid': pid}
    curr.execute('''UPDATE
                        questions
                    SET
                        theaid = :aid
                    WHERE
                        pid = :pid;''', params)
    conn.commit()
    print(bcolor.green("\nAccepted Answer Updated!"))
def isBadgeGivenTdy(curr, uid, bdate):
    '''
    Return True when *uid* already received a badge on *bdate*.
    Inputs:
        curr -- sqlite3.Cursor
        uid --- str
        bdate -- str (ISO date)
    '''
    curr.execute("SELECT * FROM ubadges WHERE uid = ? and bdate = ?;",(uid, bdate))
    # fetchone() is None when no matching row exists.
    return curr.fetchone() is not None
def badgeAvailable(curr):
    '''
    Return True when the badges table contains at least one row.
    Input: curr -- sqlite3.Cursor
    '''
    curr.execute("SELECT * FROM badges;")
    return curr.fetchone() is not None
def getBadgeRow(curr, bname):
    '''
    Look up a badge by name, matched case-insensitively.
    Inputs:
        curr -- sqlite3.Cursor
        bname -- str
    Returns:
        sqlite3.Row, or None when no badge matches
    '''
    query = "SELECT * FROM badges WHERE bname = ? COLLATE NOCASE;"
    curr.execute(query, (bname,))
    return curr.fetchone()
def displayAvailBadges(curr):
    '''
    Print a small ASCII table of all badges, ordered by type.
    Input: curr -- sqlite3.Cursor
    '''
    print('\nAvailable badges:')
    curr.execute("SELECT type, bname FROM badges ORDER BY type;")
    frame = '+' + '-' * 10 + '+' + '-' * 25 + '+'
    row_fmt = '|{:^10}|{:^25}|'
    print(frame)
    print(row_fmt.format('type', 'badge name'))
    print(frame)
    for badge in curr.fetchall():
        print(row_fmt.format(badge['type'], badge['bname']))
    print(frame)
def getBadge():
    '''
    Prompt until a non-empty badge name is entered and return it.
    Return:
        bname -- str
    '''
    while True:
        bname = input('\nEnter a badge name to give from the list above: ').strip()
        if bname:
            return bname
        print(bcolor.errmsg('error: badge name cannot be empty.'))
def getCurrentTag(curr, pid):
    '''
    Return the list of tags currently attached to post *pid*.
    Inputs:
        curr -- sqllite3.Cursor
        pid -- pid of the selected post (str)
    Return:
        list of str (empty when the post has no tags)
    '''
    curr.execute("SELECT tag FROM tags WHERE pid = ?;", (pid,))
    rows = curr.fetchall()
    return [row['tag'] for row in rows]
def displayCurrentTag(currentTags):
    '''
    Print the tags currently attached to the post.
    Input: currentTags -- list
    '''
    if not currentTags:
        print('There is no tag attached to this post yet.')
        return
    suffix = genSuffix(currentTags)
    print("Current Tag{}: {}".format(suffix, ', '.join(currentTags)))
def getDuplicateTag(currentTags, newTags):
    '''
    Split the user's new tags into duplicates of already-attached tags
    (compared case-insensitively) and genuinely new ones.
    Inputs:
        currentTags -- list
        newTags -- list of tags entered
    Returns:
        (duplicates, nonDuplicates) -- two lists preserving input order
    '''
    existing = {tag.lower() for tag in currentTags}
    duplicates = []
    nonDuplicates = []
    for candidate in newTags:
        target = duplicates if candidate.lower() in existing else nonDuplicates
        target.append(candidate)
    return duplicates, nonDuplicates
def getValidTag():
    '''
    Prompt for a comma-separated tag list and return the cleaned tags.
    Blank entries are dropped and duplicates removed (order preserved);
    the prompt repeats until at least one tag survives.
    Return:
        newTags -- list
    '''
    while True:
        raw = input('\nEnter tags to add, each separated by a comma: ')
        newTags = []
        for piece in raw.split(','):
            piece = piece.strip()
            if piece and piece not in newTags:
                newTags.append(piece)
        if newTags:
            return newTags
        print(bcolor.errmsg('error: tag name cannot be empty'))
def insertTag(conn, curr, pid, newTags):
    '''
    Insert the new tags into the database, committing after each insert.
    Inputs:
        conn -- sqlite3.Connection
        curr -- sqlite3.Cursor
        pid -- str
        newTags -- list
    '''
    insert_sql = "INSERT INTO tags VALUES (?, ?);"
    for tag in newTags:
        curr.execute(insert_sql, (pid, tag))
        conn.commit()
def genSuffix(l):
    '''
    Return a plural suffix: "s" when the list has more than one
    element, otherwise the empty string.
    '''
    if len(l) > 1:
        return 's'
    return ''
|
import curses
import os
import subprocess
from curses.textpad import Textbox, rectangle
from time import sleep
def get_command(stdscr, win):
    """Read a command typed into the bordered curses window *win*.

    Echoes printable characters, supports backspace (DEL, chr(127)),
    wraps to the next window line when the column limit is reached, and
    returns on ENTER.  The trailing newline is stripped from the result.
    """
    win.border()
    win.overlay(stdscr)
    k = ">"
    win.addch(1, 1, k)
    col = 2
    row = 1
    command = ""
    # Keep one cell of margin inside the window border.
    MAX_ROW = win.getmaxyx()[0] - 2
    MAX_COL = win.getmaxyx()[1] - 2
    while k != "\n":
        win.refresh()
        k = chr(win.getch())
        if k == chr(127) and col > 1:
            # Backspace: erase the character on screen AND drop it from the
            # buffer (bug fix: previously the deleted character was still
            # returned as part of the command).  Note: because of the
            # `col > 1` guard, deletion cannot cross a wrapped line; the
            # old `if col < 1` branch here was unreachable and is removed.
            col -= 1
            win.delch(row, col)
            command = command[:-1]
        else:
            win.addch(row, col, k)
            command += k
            col += 1
            # Make sure we don't go out of the window bounds
            if col > MAX_COL:
                col = 1
                row += 1
                if row > MAX_ROW:
                    break
    win.clear()
    # Strip the terminating "\n" that was appended on ENTER.
    return command[:-1]
def main(stdscr):
    """Curses REPL: read a command in a bordered window, run it with
    subprocess, and display its captured stdout on screen."""
    curses.noecho()
    curses.cbreak()
    curses.curs_set(2) # Set the cursor to a default starting position
    stdscr.addstr(0, 0, "Enter command: (hit ENTER to send)")
    # Precompute fractional screen coordinates for window placement.
    y_max = stdscr.getmaxyx()[0]
    x_max = stdscr.getmaxyx()[1]
    y_2 = int(y_max/2)
    x_2 = int(x_max/2)
    y_4 = int(y_max/4)
    x_4 = int(x_max/4)
    y_8 = int(y_max/8)
    x_8 = int(x_max/8)
    # 3x30 input window centered horizontally near the top of the screen.
    command_win = curses.newwin(3, 30, y_8, x_2-15)
    command_win.border()
    command_win.overlay(stdscr)
    while True: # Main event loop
        stdscr.clear()
        cmd = get_command(stdscr, command_win)
        stdscr.addstr(y_max-10, 1, cmd)
        # NOTE(review): result.stdout is bytes here (no text=True); curses
        # addstr accepts bytes, but multi-line output may overflow — confirm.
        result = subprocess.run(cmd.split(" "), stdout=subprocess.PIPE)
        stdscr.addstr(y_max-11, 1, result.stdout)
        stdscr.refresh()
        sleep(1)
    # NOTE(review): unreachable — the `while True` loop above never breaks,
    # so these terminal-restore calls never run.
    curses.nocbreak()
    curses.echo()
if __name__ == "__main__":
    # curses.wrapper restores the terminal even if main() raises.
    curses.wrapper(main)
|
#-*-coding: utf-8-*-
from tkinter import *
from json import loads
# Electronegativity values keyed by element name (used by calculate()).
with open("atoms.json", "r") as f:
    ATOMS = loads(f.read())
# Maps element symbols to the names used as ATOMS keys (used by convert()).
with open("symbols.json", "r") as f:
    SYMBOLS = loads(f.read())
def convert(element):
    """Translate an element symbol via SYMBOLS.

    Returns the sentinel "-*-*-*-*-" for unknown symbols (a value that
    will never match an ATOMS key).
    """
    return SYMBOLS.get(element, "-*-*-*-*-")
def calculate(element1, element2, show_process=False):
    """Classify the bond between two elements by electronegativity difference.

    Accepts element names or symbols (symbols are resolved via convert()).
    Returns "Non-polar" (diff <= 0.4), "Polar" (diff <= 1.9) or "Ionic";
    returns "" for empty input and a spelling hint for unknown elements.
    Set show_process=True to print the intermediate values.
    """
    # Bug fix: removed a leftover debug statement (print("?")) that was
    # emitted on every call.
    element1 = element1.lower().strip()
    element2 = element2.lower().strip()
    if not len(element1) or not len(element2):
        return ""
    if element1 not in ATOMS or element2 not in ATOMS:
        # Maybe the user typed symbols (e.g. "Na"); try to resolve them.
        element1 = convert(element1.title())
        element2 = convert(element2.title())
        if element1 not in ATOMS or element2 not in ATOMS:
            return "Check the spelling of the elements."
    electro_negativity1 = ATOMS[element1]
    electro_negativity2 = ATOMS[element2]
    difference = electro_negativity1 - electro_negativity2
    if difference < 0:
        difference *= -1
    if show_process:
        print("{} -> {}".format(element1, electro_negativity1))
        print("{} -> {}".format(element2, electro_negativity2))
        print("Difference: {}".format(difference))
    # Standard electronegativity-difference thresholds for bond type.
    if difference <= 0.4:
        return "Non-polar"
    elif difference <= 1.9:
        return "Polar"
    else:
        return "Ionic"
def main():
    """Build and run the small tkinter GUI for the bond classifier."""
    root = Tk()
    root.geometry("500x500")
    root.title("By: Diego Perez")
    mframe = Frame(root, bg="#F2F2F2")
    mframe.place(relx=0, rely=0, relwidth=1, relheight=1)
    title = Label(mframe, bg="#F78181",text="Know your bond", font=("Arial", 18), fg="white")
    title.place(relx=0, rely=0.1, relwidth=1, relheight=0.1)
    # Colored backdrop sitting slightly behind/around the two entry boxes.
    entryshade = Label(mframe, bg="#F78181")
    entryshade.place(relx=0.15, rely=0.39, relwidth=0.7, relheight=0.07)
    firstelement = Entry(mframe)
    firstelement.place(relx=0.2, rely=0.4, relwidth=0.2, relheight=0.05)
    secondelement = Entry(mframe)
    secondelement.place(relx=0.6, rely=0.4, relwidth=0.2, relheight=0.05)
    # Result label; updated by the button/Return-key callbacks below.
    answare = Label(mframe, bg="#F2F2F2", text="", font=("Arial", 18))
    answare.place(relx=0, rely=0.6, relwidth=1,relheight=0.1)
    # Both the button and the Return key run calculate() on the two entries.
    check = Button(mframe, text="check", command=lambda: answare.config(text="{}".format(calculate(firstelement.get(), secondelement.get()))))
    check.place(relx=0.45, rely=0.405, relwidth=0.1, relheight=0.0375)
    root.bind("<Return>", lambda *args, **kwargs:answare.config(text="{}".format(calculate(firstelement.get(), secondelement.get()))))
    root.mainloop()
if __name__ == '__main__':
    main()
|
from configparser import ConfigParser
# Interactive driver for the insert break-test rig: collects an insert
# number/weight, then toggles ping/motor/loadcell flags in a shared config
# file that the test hardware process reads.
parser = ConfigParser()
CONF='/home/pi/Desktop/inserttest-master/inserttest.config'
parser.read(CONF)
print("PSNERGY BREAK TEST")
print("PSNERGY INSERT TESTING")
##print("Select the default insert size")
##print("1. 5 inches")
##print("2. 5.5 inches")
##print("3. 7 inches")
print("Enter Insert Number")
g = input()
while True:
    # NOTE(review): `int(g) != ''` compares an int to a string, which is
    # always True; and int(g) raises ValueError for empty/non-numeric
    # input before the check runs. Likely intent: validate g is a
    # non-empty, non-zero number — confirm and fix.
    if int(g) != 0 and int(g) != '':
        insert = str(g)
        # Persist the insert number so the rig process sees it.
        parser.read(CONF)
        parser['inserttest_config']['insert_number'] = g
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        print("Insert Number "+g+"")
        print("Enter the weight")
        weight = input()
        parser.read(CONF)
        parser['inserttest_config']['weight'] = weight
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
    print("Press 1 for ping test")
    print("Press 2 for break test")
    print("Press 3 for calibration")
    print("Press 4 to change the insert size")
    print("To test next Insert type next")
    c =input()
    if c == '1':
        # Ping test: only the ping flag on.
        parser['inserttest_config']['ping'] = 'on'
        parser['inserttest_config']['motor'] = 'off'
        parser['inserttest_config']['loadcell'] = 'off'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        print("Ping test Selected")
        print("press button to ping")
        print("2 will take you to break test")
        c = input()
    if c == '2':
        # Break test: motor + loadcell on, stepping through each wing.
        parser['inserttest_config']['ping'] = 'off'
        parser['inserttest_config']['motor'] = 'on'
        parser['inserttest_config']['loadcell'] = 'on'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        print('Break right wing')
        parser['inserttest_config']['insert_section'] = 'right'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        input('Press enter to continue: ')
        print('Break left wing')
        parser['inserttest_config']['insert_section'] = 'left'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        input('Press enter to continue: ')
        print('Break middle')
        parser['inserttest_config']['insert_section'] = 'middle'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        input('Press enter to continue: ')
        print('To test next part Type next')
        c = input()
    if c == '3':
        print("not done yet, come back later")
    if c == '4':
        # Update the default insert size in the config.
        print("1. 5 inches")
        print("2. 5.5 inches")
        print("3. 7 inches")
        insertsize = input()
        if insertsize =='1':
            parser['inserttest_config']['size'] = '5'
            with open(CONF, 'w') as updated_conf:
                parser.write(updated_conf)
        if insertsize =='2':
            parser['inserttest_config']['size'] = '5.5'
            with open(CONF, 'w') as updated_conf:
                parser.write(updated_conf)
        if insertsize =='3':
            parser['inserttest_config']['size'] = '7'
            with open(CONF, 'w') as updated_conf:
                parser.write(updated_conf)
        print("To test next insert type next")
        # NOTE(review): `input(c)` shows the stale value of c as the prompt
        # and DISCARDS what the user types, so the `if c == 'next'` branch
        # below only fires if c already happened to be 'next'. Likely
        # intent: `c = input()` — confirm.
        input(c)
    if c == 'next':
        # Reset all flags before starting the next insert.
        parser['inserttest_config']['ping'] = 'off'
        parser['inserttest_config']['motor'] = 'off'
        parser['inserttest_config']['loadcell'] = 'off'
        parser['inserttest_config']['insert_section'] = 'none'
        with open(CONF, 'w') as updated_conf:
            parser.write(updated_conf)
        print("Insert Number:")
        g = input()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.docker.utils import format_rename_suggestion, suggest_renames
# Each case: COPY-like tentative paths + the actual file tree -> expected
# (src, dst) suggestions. An empty string on one side marks a path with no
# counterpart on the other side (visible in the cases below).
@pytest.mark.parametrize(
    "tentative_paths, actual_files, actual_dirs, expected_renames",
    [
        (
            ("src/project/cmd.pex",),
            ("src.project/cmd.pex",),
            (),
            [("src/project/cmd.pex", "src.project/cmd.pex")],
        ),
        (
            ("src/project/cmd.pex",),
            ("src/unrelated/file.py",),
            ("src/unrelated",),
            [
                # "false" positive, this is not an expected "correct" rename suggestion, but it was
                # all we got here.
                ("src/project/cmd.pex", "src/unrelated/file.py"),
            ],
        ),
        pytest.param(
            ("files",),
            (
                "src/docker/files/a.txt",
                "src/docker/files/b.txt",
                "src/docker/files/sub/c.txt",
                "src/docker/config.ini",
            ),
            ("src", "src/docker", "src/docker/files"),
            [
                ("files", "src/docker/files"),
                ("", "src/docker/config.ini"),
            ],
            id="Copy'ing a folder, includes the entire tree below it",
        ),
        pytest.param(
            (
                "src.proj/bin_a.pex",
                "src.proj/binb.pex",
            ),
            ("src.proj/bin_a.pex",),
            ("src.proj",),
            [
                ("src.proj/binb.pex", ""),
            ],
            id="Should not suggest renaming to a file we already reference",
        ),
        pytest.param(
            (
                "src.proj/binb.pex",
                "src.proj/bin_a.pex",
            ),
            ("src.proj/bin_a.pex",),
            ("src.proj",),
            [
                ("src.proj/binb.pex", ""),
            ],
            id="Should not suggest renaming to a file we already reference, order should not matter",
        ),
        pytest.param(
            # I'm not entirely sure if `fnmatch` treats the ../*.pex the same as golangs
            # filepath.Match does. See notice comment in
            # pants.backend.docker.utils.suggest_renames().get_matches()
            (
                "src.proj/*.pex",
                "src.proj/config.ini",
            ),
            (
                "src.proj/bin_a.pex",
                "src.proj/bin_b.pex",
                "src.proj/other.txt",
                "src.proj/nested/file.txt",
                "src/proj/config.ini",
            ),
            (
                "src.proj",
                "src/proj",
                "src.proj/nested",
            ),
            [
                ("src.proj/config.ini", "src/proj/config.ini"),
                ("", "src.proj/nested/file.txt"),
                ("", "src.proj/other.txt"),
            ],
            id="Glob pattern captures matching files only",
        ),
        pytest.param(
            (
                "src/project/file",
                "sources",
            ),
            ("src/project/file",),
            (
                "src",
                "src/project",
            ),
            [
                ("sources", ""),
            ],
            id="Do not suggest renaming to an 'empty' directory",
        ),
        pytest.param(
            (
                "testprojects/src/python/docker/Dockerfile.test-example-synth",
                "testprojects.src.python.hello.main/mains.pez",
                "blarg",
                "baz",
            ),
            (
                "testprojects/src/python/docker/Dockerfile.test-example-synth",
                "testprojects.src.python.hello.main/main.pex",
            ),
            (
                "testprojects",
                "testprojects/src",
                "testprojects/src/python",
                "testprojects/src/python/docker",
                "testprojects.src.python.hello.main",
            ),
            [
                ("baz", ""),
                ("blarg", ""),
                (
                    "testprojects.src.python.hello.main/mains.pez",
                    "testprojects.src.python.hello.main/main.pex",
                ),
            ],
            id="Skip Dockerfile",
        ),
    ],
)
def test_suggest_renames(
    tentative_paths: tuple[str, ...],
    actual_files: tuple[str, ...],
    actual_dirs: tuple[str, ...],
    expected_renames: list[tuple[str, str]],
) -> None:
    """Check that suggest_renames yields exactly the expected (src, dst) pairs."""
    actual_renames = list(suggest_renames(tentative_paths, actual_files, actual_dirs))
    assert actual_renames == expected_renames
@pytest.mark.parametrize(
    "src, dst",
    [
        (
            "src/project/cmd.pex",
            "src.project/cmd.pex",
        ),
        (
            "srcs/projcet/cmd",
            "src/project/cmd.pex",
        ),
        (
            "src/bar-foo/file",
            "src/foo-bar/file",
        ),
    ],
)
def test_format_rename_suggestion(src: str, dst: str) -> None:
    """With colors disabled, the suggestion is the plain 'src => dst' string."""
    actual = format_rename_suggestion(src, dst, colors=False)
    assert actual == f"{src} => {dst}"
|
# Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from influxdb import InfluxDBClient
# TODO: We need to think in a better design solution
# for this
class InfluxConnector:
    """Thin query/write wrapper around an InfluxDB database.

    Exposes one ``get_*`` accessor per measurement plus a few convenience
    methods that merge several measurements into a dict keyed by timestamp.
    """
    # TODO: We need to think in a better design solution for this

    def __init__(self, database_url, database_port, database_name,
                 database_user='root', database_password='root'):
        self.database_url = database_url
        self.database_port = database_port
        self.database_name = database_name
        self.database_user = database_user
        self.database_password = database_password

    def get_measurements(self):
        """Merge progress-related measurements into {time: {name: value}}.

        NOTE(review): assumes every series has points at the same timestamps
        as ``job_progress``; otherwise ``out[i['time']]`` raises KeyError —
        confirm the producers always write all series together.
        """
        out = {}
        for i in self.get_job_progress():
            out[i['time']] = {'job_progress': i['value']}
        for i in self.get_time_progress():
            out[i['time']].update({'time_progress': i['value']})
        for i in self.get_replicas():
            out[i['time']].update({'replicas': i['value']})
        for i in self.get_error():
            out[i['time']].update({'error': i['value']})
        return out

    def get_cost_measurements(self):
        """Merge cost-related measurements into {time: {name: value}}."""
        out = {}
        for i in self.get_current_spent():
            out[i['time']] = {'current_spent': i['value']}
        for i in self.get_desired_cost():
            out[i['time']].update({'desired_cost': i['value']})
        for i in self.get_replicas():
            out[i['time']].update({'replicas': i['value']})
        for i in self.get_application_cost_error():
            out[i['time']].update({'application_cost_error': i['value']})
        return out

    def get_stream_measurements(self):
        """Merge stream-related measurements into {time: {name: value}}."""
        out = {}
        for i in self.get_runtime_output_flux():
            out[i['time']] = {'real_output_flux': i['value']}
        for i in self.get_estimated_output_flux():
            out[i['time']].update({'expected_output_flux': i['value']})
        for i in self.get_input_flux():
            out[i['time']].update({'input_flux': i['value']})
        for i in self.get_replicas():
            out[i['time']].update({'replicas': i['value']})
        for i in self.get_error():
            out[i['time']].update({'error': i['value']})
        for i in self.get_queue_size():
            out[i['time']].update({'queue_size': i['value']})
        for i in self.get_lease_expired_count():
            out[i['time']].update({'lease_expired_count': i['value']})
        return out

    def _query_values(self, measurement):
        """Return every point of *measurement* as a list of dicts.

        Measurement names are fixed class-internal strings, so the string
        interpolation below is not an injection risk.
        """
        result = self._get_influx_client().query(
            'select value from %s;' % measurement)
        return list(result.get_points(measurement=measurement))

    def get_queue_size(self):
        return self._query_values('queue_size')

    def get_current_spent(self):
        return self._query_values('current_spent')

    def get_desired_cost(self):
        return self._query_values('desired_cost')

    def get_application_cost_error(self):
        return self._query_values('application_cost_error')

    def get_job_progress(self):
        return self._query_values('job_progress')

    def get_time_progress(self):
        return self._query_values('time_progress')

    def get_replicas(self):
        # Replica counts are stored under the 'job_parallelism' measurement.
        return self._query_values('job_parallelism')

    def get_runtime_output_flux(self):
        # The measurement name should be changed later
        return self._query_values('real_output_flux')

    def get_estimated_output_flux(self):
        # The measurement name should be changed later
        return self._query_values('expected_output_flux')

    def get_input_flux(self):
        return self._query_values('input_flux')

    def get_lease_expired_count(self):
        return self._query_values('lease_expired_count')

    def get_error(self):
        return self._query_values('application_progress_error')

    def first_measurement(self, name, dimensions):
        # TODO: not implemented
        pass

    def last_measurement(self, name, dimensions):
        # TODO: not implemented
        pass

    def _get_influx_client(self):
        """Build a fresh InfluxDBClient from the stored connection settings."""
        return InfluxDBClient(self.database_url, self.database_port,
                              self.database_user, self.database_password,
                              self.database_name)

    def send_metrics(self, measurements):
        """Write the first measurement of *measurements* as an InfluxDB point.

        NOTE(review): only measurements[0] is written — confirm callers always
        send a single-element list.
        """
        measurements = measurements[0]
        metrics = {
            'measurement': measurements['name'],
            # epoch milliseconds -> formatted string; fromtimestamp() yields
            # naive local time even though a literal 'Z' (UTC) is appended.
            'time': datetime.fromtimestamp(
                measurements['timestamp'] / 1000).strftime('%Y-%m-%dT%H:%M:%SZ'),
            'tags': {"host": "server01", "region": "sa-east-1",
                     "job": measurements['dimensions']['application_id']},
            'fields': {'value': measurements['value']},
        }
        self._get_influx_client().write_points([metrics])
|
import sys
sys.path.append("../logorec")
from app import App
import unittest
class AppTestSuite(unittest.TestCase):
    """
    Test suite for the main app.

    NOTE(review): setUp/tearDown hit the real data store and expect sample
    images under data/features/banner/ — confirm fixtures exist before running.
    """
    def setUp(self):
        """
        Set up components for each test.
        :return: nothing
        """
        self.app = App()
        self.app.add_category("test")
        self.app.add_image("data/features/banner/logos_01.jpg", "test")
    def tearDown(self):
        """
        Clean up after each test.
        :return: nothing
        """
        self.app.delete_category("test")
    def test_feature_need_train(self):
        """
        Test if the need train works correctly.
        :return: nothing
        """
        self.assertTrue(self.app.feature_need_train("Bow"))
    def test_get_categories(self):
        """
        Test if the get categories works correctly.
        :return: nothing
        """
        self.assertTrue(len(self.app.get_categories()) == 1)
    def test_get_features(self):
        """
        Test if the get features works correctly.
        :return: nothing
        """
        self.assertTrue("Bow" in self.app.get_features())
    def test_get_classifiers(self):
        """
        Test if the get classifiers works correctly.
        :return: nothing
        """
        self.assertTrue("RandomForest" in self.app.get_classifiers())
    def test_get_default_classifier(self):
        """
        Test if the get default classifier works correctly.
        :return: nothing
        """
        self.assertTrue(self.app.get_default_classifier() in self.app.get_classifiers())
    def test_get_default_feature(self):
        """
        Test if the get default feature works correctly.
        :return: nothing
        """
        self.assertTrue(self.app.get_default_feature() in self.app.get_features())
    def test_error_get_probability(self):
        """
        Test error get probability when the classifier is not trained.
        :return: nothing
        """
        with self.assertRaises(ModuleNotFoundError):
            self.app.get_probability("website", [1], "RandomForest", "Bow")
    def test_error_get_services(self):
        """
        Test error get services when the classifier is not trained.
        :return: nothing
        """
        with self.assertRaises(ModuleNotFoundError):
            self.app.get_services("website", [1], "RandomForest", "Bow")
    def test_fail_train_feature(self):
        """
        Test fail train feature when a default feature's variation does not exist.
        :return: nothing
        """
        with self.assertRaises(ModuleNotFoundError):
            self.app.train_feature("Bow")
    def test_fail_train_classifier(self):
        """
        Test fail train classifier when a default classifier's variation does not exist.
        :return: nothing
        """
        with self.assertRaises(ModuleNotFoundError):
            self.app.train_classifier("RandomForest", "Bow")
    def test_add_category(self, ):
        """
        Test if add category works correctly.
        :return: nothing
        """
        self.app.add_category("testthree")
        categories = self.app.get_categories()
        # delete before asserting so a failed assert does not leak the category
        self.app.delete_category("testthree")
        self.assertTrue("testthree" in categories)
    def test_add_image(self):
        """
        Test if add image works correctly.
        :return: nothing
        """
        self.app.add_image("data/features/banner/logos_02.png", "test")
        self.assertTrue(any("logos_02" in i for i in self.app.show_images_by_category("test")))
    def test_add_classifier(self):
        """
        Test add classifier's variation works correctly.
        :return: nothing
        """
        self.app.add_classifier("RandomForest", ['2'])
        variations = self.app.show_classifier_variations("RandomForest")
        self.app.delete_classifier("RandomForest", ["2"])
        self.assertTrue(len(variations) == 1)
    def test_add_feature(self):
        """
        Test if add feature's variation works correctly.
        :return: nothing
        """
        self.app.add_feature("Bow", ['2', '2', '2', "True"])
        variations = self.app.show_feature_variations("Bow")
        self.app.delete_feature("Bow", ['2', '2', '2', "True"])
        self.assertTrue(len(variations) == 1)
    def test_show_images_by_category(self):
        """
        Test if show image by category return the right amount of images.
        :return: nothing
        """
        self.assertTrue(len(self.app.show_images_by_category("test")) == 1)
    def test_delete_image_by_category(self):
        """
        Test if delete image by category deletes correctly.
        :return: nothing
        """
        self.app.add_image("data/features/banner/logos_02.png", "test")
        self.app.delete_image_by_category("test", "logos_02.png")
        # back to the single image added in setUp
        self.assertTrue(len(self.app.show_images_by_category("test")) == 1)
    def test_delete_category(self):
        """
        Test if delete category work correctly.
        :return: nothing
        """
        self.app.add_category("testthree")
        self.app.delete_category("testthree")
        self.assertFalse("testthree" in self.app.get_categories())
    def test_set_default_classifier(self):
        """
        Test if set default classifier's variation work correctly.
        :return: nothing
        """
        self.app.add_classifier("RandomForest", ['2'])
        self.app.add_classifier("RandomForest", ['1'])
        self.app.delete_classifier("RandomForest", ['1'])
        self.app.set_default_classifier("RandomForest", ['2'])
        default = self.app.classifier.default_exist()
        self.app.delete_classifier("RandomForest", ['2'])
        self.assertTrue(default)
    def test_set_default_feature(self):
        """
        Test if set default feature's variation work correctly.
        :return: nothing
        """
        self.app.add_feature("Bow", ['10', '1', '1', 'True'])
        self.app.add_feature("Bow", ['1', '1', '1', 'True'])
        self.app.delete_feature("Bow", ['1', '1', '1', 'True'])
        self.app.set_default_feature("Bow", ['10', '1', '1', 'True'])
        default = self.app.feature.default_exist()
        self.app.delete_feature("Bow", ['10', '1', '1', 'True'])
        self.assertTrue(default)
|
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import CreateView, ListView
from django.views.generic.edit import UpdateView

from .forms import BookForm, PhotoForm, UserForm, UserProfileForm
from .models import Book, Photo, Students, UserProfile
# Create your views here.
def index(request):
    """Render the home page with every student record."""
    context = {'students': Students.objects.all()}
    return render(request, 'newfiles/home.html', context)
def upload(request):
    """Save an uploaded file to the default storage, then render the form.

    NOTE(review): the key 'doumentfile' (sic) must match the form input name
    in the template — confirm before fixing the spelling.
    """
    if request.method=="POST":
        uploaded_file = request.FILES['doumentfile']
        fs = FileSystemStorage()
        fs.save(uploaded_file.name, uploaded_file)
    # Rendered for both GET (empty form) and POST (after saving).
    return render(request , 'uploadfile.html')
'''def upload_book(request):
if request.method=="POST":
form = BookForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('book_list')
else:
form = BookForm()
return render(request, 'upload_book.html',{'form':form,})'''
'''def book_list(request):
book = Book.objects.all()
return render(request, 'book_list.html',{'book':book})'''
class BookListView(ListView):
    """List every Book; rendered by book_list.html with context name 'books'."""
    model = Book
    template_name = 'book_list.html'
    context_object_name = 'books'
class UploadBookView(CreateView):
    """Create a Book from BookForm; redirects to the book list on success."""
    model = Book
    form_class= BookForm
    template_name ='upload_book.html'
    success_url = reverse_lazy('class_book_list')
def delete_book(request, pk):
    """Delete the Book with primary key *pk* and return to the book list.

    Uses get_object_or_404 so a missing pk yields a 404 response instead of
    an unhandled Book.DoesNotExist (HTTP 500).
    """
    book = get_object_or_404(Book, pk=pk)
    book.delete()
    return redirect('/data/class/books/')
##############################################################################
class BookUpdateView(UpdateView):
    """Edit an existing Book; only the title field is editable."""
    model = Book
    fields = ["title",]
    template_name ='upload_book.html'
    success_url = reverse_lazy('class_book_list')
# Upload multiple files
class BasicUploadView(View):
    """AJAX photo upload: GET renders the gallery, POST saves one photo."""
    def get(self, request):
        photos_list = Photo.objects.all()
        return render(self.request, 'photos/basic_upload/index.html', {'photos': photos_list})
    def post(self, request):
        # Responds with JSON so the front-end can update the page in place.
        form = PhotoForm(self.request.POST, self.request.FILES)
        if form.is_valid():
            photo = form.save()
            data = {'is_valid': True, 'name': photo.file.name, 'url': photo.file.url}
        else:
            data = {'is_valid': False}
        return JsonResponse(data)
############################################## one-to-one field
def register(request):
    """Create a User plus its linked UserProfile from the posted forms.

    On GET (or invalid POST) renders the registration page; on success
    redirects to the book list.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        profile_form = UserProfileForm(request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Attach the profile to the freshly created user before saving.
            profile = profile_form.save(commit=False)
            profile.user = user
            profile.save()
            print("user regiser succsssulyyy")
            # BUG FIX: reverse_lazy() returns a (lazy) URL string, not an
            # HttpResponse — redirect() issues the proper 302.
            return redirect('class_book_list')
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Invalid POST falls through with the bound forms so errors are shown.
    return render(request, 'data_app/register.html', {'user_form': user_form, 'profile_form': profile_form})
|
# #!/usr/bin/python
from __future__ import division, print_function
from math import sin, cos, pi
def disc(x):
    """Return 'x' rounded to the nearest integer, as an int."""
    rounded = round(x)
    return int(rounded)
def to_signed(x, bits):
    """
    Returns a signed representation of 'x'.

    Non-negative values pass through unchanged; negative values are encoded
    as 'bits'-wide two's complement (MSB set, remainder from the binary
    expansion of 2**(bits-1) + x).
    """
    if x < 0:
        body = '{:0{}b}'.format(2 ** (bits - 1) + x, bits - 1)
        return int('1' + body, 2)
    return x
def hexlen(x):
    """
    Returns the string length of 'x' in hex format, plus two.

    Equivalent to len(hex(x)) + 2; the '0x' prefix counts toward the length.
    """
    return len('{:#x}'.format(x)) + 2
def freq_to_ctrl(freq, clk=100000000, phase_bits=32):
    """
    Returns a frequency control value from a frequency in Hertz.

    ctrl = round(freq * 2**phase_bits / clk) for a phase accumulator of
    'phase_bits' bits clocked at 'clk' Hz.
    """
    ctrl = freq * 2 ** phase_bits / clk
    return int(round(ctrl))
def ctrl_to_freq(ctrl, clk=100000000, phase_bits=32):
    """
    Returns a frequency in Hertz from a frequency control value.

    Inverse of freq_to_ctrl (up to rounding): freq = ctrl * clk / 2**phase_bits.
    """
    return clk * ctrl / 2 ** phase_bits
def create_lut(sbits, rbits, wave, padding=4, name='lut_value',
               unsigned=False, full=False):
    """
    Returns a string of Verilog code for a sinusoidal LUT.

    sbits:    address width (the full table has 2**sbits entries).
    rbits:    output sample width in bits.
    wave:     'sin' selects sine; any other value selects cosine.
    padding:  spaces of indentation before each generated case line.
    name:     Verilog identifier assigned in each case arm.
    unsigned: emit unsigned samples scaled into [0, 2**rbits - 1] instead of
              signed two's-complement samples.
    full:     emit all 2**sbits entries; otherwise only the first quarter
              wave (2**sbits // 4 entries, with sbits-2 address bits).
    """
    S = 2**sbits
    func = sin if wave == 'sin' else cos
    result = ''
    for s in range((S if full else S//4)):
        if unsigned:
            v = disc((2**(rbits)-1) * (func(2*pi*s/S)+1)/2)
        else:
            v = disc((2**(rbits-1)-1) * (func(2*pi*s/S)))
        # Each line is a Verilog case arm: <addr> : name <= <value>;
        if full:
            result += ' '*padding+'{:d}\'h{:0{}X} : {} <= {:d}\'h{:0{}X};\n' \
                .format(sbits, s, sbits//4, name, rbits,
                        to_signed(v, rbits), rbits//4)
        else:
            result += ' '*padding+'{:d}\'h{:0{}X} : {} <= {:d}\'h{:0{}X};\n' \
                .format(sbits-2, s, (sbits+1)//4, name, rbits,
                        to_signed(v, rbits), (rbits+3)//4)
    # Trailing newline stripped so the caller controls line termination.
    return result[:-1]
def chirp_constants(min_ctrl, max_ctrl, T, clk=100000000, max_error=0.000001):
    """
    Calculate the chirp constants.

    Searches for an integer fraction div_rate / inc_rate approximating
    ratio = T * clk / (max_ctrl - min_ctrl) to within max_error, and returns
    (disc(ratio), 1, div_rate, inc_rate).
    """
    ratio = (T * clk) / (max_ctrl - min_ctrl)
    # Start from the trivial approximation ratio ~= disc(ratio) / 1.
    div_rate, inc_rate = disc(ratio), 1
    error = abs(div_rate / inc_rate - ratio)
    d, i = div_rate, inc_rate
    # NOTE(review): the guard tests the best-so-far pair (div_rate, inc_rate),
    # not the current candidate (d, i) — confirm this bound is intended.
    while div_rate < 2**32 and inc_rate < 2**32 and error > max_error:
        i += 1
        d = disc(ratio * i)
        e = abs(d / i - ratio)
        if e < error:
            div_rate, inc_rate, error = d, i, e
    return disc(ratio), 1, div_rate, inc_rate
|
# https://stackoverflow.com/questions/64394768/how-calculate-the-area-of-irregular-object-in-an-image-opencv-python-3-8
import cv2
import numpy as np
import math
# input image
# NOTE(review): hard-coded absolute local path — parameterize before reuse.
path = "/home/pam/Desktop/streamlit_tcc/Mask_RCNN-Multi-Class-Detection/Leaf/test/30.jpg"
# 1 EUR coin diameter in cm
coinDiameter = 2.325
# real area for the coin in cm^2
coinArea = (coinDiameter/2)**2 * math.pi
# initializing the multiplying factor for real size
# NOTE(review): this module-level value is shadowed by the local variable in
# pixelToArea() and never read afterwards — kept only for compatibility.
realAreaPerPixel = 1
# pixel to cm^2
def pixelToArea(objectSizeInPixel, coinSizeInPixel):
    """Convert an object's pixel area to cm^2 using the coin as the scale
    reference (module-level coinArea holds the coin's real area in cm^2)."""
    cm2_per_pixel = coinArea / coinSizeInPixel
    print("realAreaPerPixel: ", cm2_per_pixel)
    return cm2_per_pixel * objectSizeInPixel
# finding coin and steak contours
def getContours(img, imgContour):
    """Find external contours larger than 5000 px^2, draw them on imgContour,
    and return (imgContour, finalContours) where each finalContours entry is
    [vertex_count, area, approx_polygon, contour], sorted by area ascending."""
    # find all the contours from the B&W image
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # needed to filter only our contours of interest
    finalContours = []
    # for each contour found
    for cnt in contours:
        # cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 2)
        # find its area in pixel
        area = cv2.contourArea(cnt)
        print("Detected Contour with Area: ", area)
        # minimum area value is to be fixed as the one that leaves the coin as the small object on the scene
        if (area > 5000):
            perimeter = cv2.arcLength(cnt, True)
            # smaller epsilon -> more vertices detected [= more precision]
            epsilon = 0.002*perimeter
            # check how many vertices
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            #print(len(approx))
            finalContours.append([len(approx), area, approx, cnt])
    # we want only two objects here: the coin and the meat slice
    print("---\nFinal number of External Contours: ", len(finalContours))
    # so at this point finalContours should have only two elements
    # sorting in ascending order depending on the area
    finalContours = sorted(finalContours, key = lambda x:x[1], reverse=False)
    # drawing contours for the final objects
    for con in finalContours:
        cv2.drawContours(imgContour, con[3], -1, (0, 0, 255), 3)
    return imgContour, finalContours
# sourcing the input image
img = cv2.imread(path)
cv2.imshow("Starting image", img)
cv2.waitKey()
# blurring
imgBlur = cv2.GaussianBlur(img, (7, 7), 1)
# graying
imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
# canny
imgCanny = cv2.Canny(imgGray, 255, 195)
# dilate then erode with the same kernel to close gaps in the edges
kernel = np.ones((2, 2))
imgDil = cv2.dilate(imgCanny, kernel, iterations = 3)
# cv2.imshow("Diluted", imgDil)
imgThre = cv2.erode(imgDil, kernel, iterations = 3)
imgFinalContours, finalContours = getContours(imgThre, img)
# first final contour has the area of the coin in pixel
# NOTE(review): assumes getContours found at least two contours, with the
# coin as the smallest — IndexError otherwise.
coinPixelArea = finalContours[0][1]
print("Coin Area in pixel", coinPixelArea)
# second final contour has the area of the meat slice in pixel
slicePixelArea = finalContours[1][1]
print("Entire Slice Area in pixel", slicePixelArea)
# let's go cm^2
print("Coin Area in cm^2:", coinArea)
print("Entire Slice Area in cm^2:", pixelToArea(slicePixelArea, coinPixelArea))
# show the contours on the unfiltered starting image
cv2.imshow("Final External Contours", imgFinalContours)
cv2.waitKey()
# now let's detect and quantify the lean part
# convert to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# HSV range (36,25,25)-(70,255,255) selects the green pixels (the leaf)
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))
# apply mask to original image - this shows the green with white blackground
final = cv2.bitwise_and(img, img, mask= mask)
# show selection
cv2.imshow("Leaf", final)
cv2.waitKey()
# convert it to grayscale because countNonZero() wants 1 channel images
gray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
# cv2.imshow("Gray", gray)
# cv2.waitKey()
meatyPixelArea = cv2.countNonZero(gray)
print("Leaf Area in pixel: ", meatyPixelArea)
print("Leaf Area in cm^2: ", pixelToArea(meatyPixelArea, coinPixelArea))
# finally the body-fat ratio calculation
print("Body-Fat Ratio: ", meatyPixelArea/slicePixelArea*100, "%")
cv2.destroyAllWindows()
|
# Count how many phone numbers (11 digits, each starting with '8') can be
# assembled from n digit cards: limited by n // 11 and by the number of '8's.
n = int(input())
a = input()
eight = a.count('8')  # str.count replaces the manual counting loop
ans = min(n // 11, eight)
print(ans)
|
# Author: Ziga Trojer, zt0006@student.uni-lj.si
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.preprocessing import StandardScaler
from cvxopt import solvers
from cvxopt import matrix
def scale_data(X):
    """
    :param X: Input data
    :return: Scaled data (zero mean, unit variance per feature)
    """
    return StandardScaler().fit(X).transform(X)
class Polynomial:
    """Polynomial kernel: K(x1, x2) = (1 + x1 . x2) ** M."""
    def __init__(self, M):
        self.M = M

    def __call__(self, x1, x2):
        # Promote 1-D inputs to single-row matrices so the dot product below
        # always operates on 2-D arrays (same effect as reshape(1, n)).
        a = np.atleast_2d(x1)
        b = np.atleast_2d(x2)
        return pow(1 + a.dot(b.T), self.M).squeeze()
class RBF:
    """Gaussian (RBF) kernel: K(x1, x2) = exp(-||x1 - x2||^2 / (2 sigma^2))."""
    def __init__(self, sigma):
        self.sigma = sigma

    @staticmethod
    def dist(a, b):
        # Row-wise dot products (squared norms when a is b).
        return np.sum(np.multiply(a, b), axis=1)

    def __call__(self, x1, x2):
        # Promote 1-D inputs to single-row matrices (same as reshape(1, n)).
        a = np.atleast_2d(x1)
        b = np.atleast_2d(x2)
        # ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 a_i . b_j, built by
        # broadcasting instead of explicit np.tile matrices.
        sq_norms_a = self.dist(a, a)[:, None]
        sq_norms_b = self.dist(b, b)[None, :]
        sq_dist = sq_norms_a + sq_norms_b - 2 * a.dot(b.T)
        return np.exp(-sq_dist / (2 * self.sigma ** 2)).squeeze()
class Model:
    """Fitted SVR model: holds the dual coefficients and predicts via the kernel."""
    def __init__(self, X, kernel):
        self.X = X
        self.kernel = kernel
        self.alpha = None
        self.b = None

    def update(self, alpha, all_alpha, b, support_vectors):
        """Store the QP solution: alpha - alpha*, raw alphas, bias, SV indexes."""
        self.alpha = alpha
        self.all_alpha = all_alpha
        self.b = b
        self.support_vectors = support_vectors

    def get_alpha(self):
        return self.all_alpha

    def get_b(self):
        return self.b

    def get_support_vectors(self):
        return self.support_vectors

    def predict(self, Y):
        # f(Y) = (alpha - alpha*) . K(Y, X)^T + b
        gram = self.kernel(Y, self.X)
        return (np.dot(self.alpha, gram.T) + self.b).squeeze()
class SVR:
    """Support vector regression solved as a QP (cvxopt) in the dual.

    The dual variables are interleaved as (alpha_1, alpha*_1, alpha_2, ...),
    which is why the sign matrix ``z`` and the [::2]/[1::2] slices appear
    throughout ``fit``.
    """
    def __init__(self, kernel, lambda_, epsilon):
        self.kernel = kernel          # kernel object, e.g. Polynomial or RBF
        self.lambda_ = lambda_        # regularization; C = 1 / lambda_
        self.z = np.array([[1, -1], [-1, 1]])  # sign pattern for (alpha, alpha*)
        self.eps = epsilon            # epsilon-insensitive tube width
        self.small_eps = 1e-06        # numerical tolerance
    def fit(self, X, y):
        """Solve the dual QP for data (X, y) and return a fitted Model."""
        model = Model(X, self.kernel)
        C = (1 / self.lambda_)
        krnl = self.kernel(X, X)
        # constructing matrix P - signs needs to be alternating.
        # also, matrix needs to be PD, so it is probably symmetric too
        p_dash = np.repeat(krnl, 2).reshape((krnl.shape[1], krnl.shape[0] * 2))
        p_dash = np.repeat(p_dash, 2, axis=0)
        z = np.tile(self.z, (krnl.shape[0], krnl.shape[1]))
        P = np.multiply(p_dash, z)
        # constructing vector q
        vec_ones = np.ones(P.shape[0])
        y_vec = np.repeat(y, 2).reshape(1, y.shape[0] * 2)
        y_vec = np.multiply(y_vec, np.tile(self.z[0], (1, y.shape[0])))
        q = + self.eps * vec_ones - y_vec
        # we need to construct other matrices & vectors too
        # box constraints 0 <= alpha, alpha* <= C expressed as G x <= h
        h = np.ones(krnl.shape[0] * 2) * C
        h = np.append(h, np.zeros(krnl.shape[0]*2))
        h = h.reshape((h.shape[0], 1))
        G = np.identity(krnl.shape[0] * 2)
        G = np.vstack([G, -G])
        # equality constraint: sum(alpha - alpha*) = 0
        A = np.tile(self.z[0], (1, y.shape[0]))
        # transforming all numpy objects into new matrix type
        # needed for optimization
        P = matrix(P, tc='d')
        q = matrix(q.T, tc='d')
        G = matrix(G, tc='d')
        h = matrix(h, tc='d')
        A = matrix(A, tc='d')
        b = matrix(np.zeros((1, 1)))
        solvers.options['show_progress'] = False
        # optimizing to get alphas
        solution = solvers.qp(P, q, G, h, A, b)
        alphas = np.array(solution['x'])
        # calculating alpha - alpha*
        alpha_diff = np.ediff1d(list(reversed(alphas)))
        alpha_diff = np.array(list(reversed(alpha_diff[::2])))
        alpha_diff = alpha_diff.reshape((1, alpha_diff.shape[0]))
        # checking conditions for calculating b
        ALPHA = alphas[::2]
        ALPHA_STAR = alphas[1::2]
        # only indexes that satisfy the condition
        lower_idx = np.array(list(set(np.union1d(np.where(ALPHA < C-self.small_eps)[0],
                                                 np.where(ALPHA_STAR > 0+self.small_eps)[0]))))
        upper_idx = np.array(list(set(np.union1d(np.where(ALPHA > 0 + self.small_eps)[0],
                                                 np.where(ALPHA_STAR < C - self.small_eps)[0]))))
        # calculating weights w
        weight = np.dot(alpha_diff, krnl).T
        # calculating lower and upper bound and filtering it by index, calculated before
        lower = y - self.eps - weight
        try:
            lower = lower[lower_idx]
        except IndexError:
            pass
        upper = y + self.eps - weight
        try:
            upper = upper[upper_idx]
        except IndexError:
            pass
        # max and min of lower and upper almost always coincide, but we handle the case when
        # they do not by calculating their mean.
        b = (np.min(upper) + np.max(lower)) / 2
        #alpha_diff[abs(alpha_diff) <= self.small_eps] = 0
        # saving which indexes are support vectors
        support_vectors = np.where(abs(alpha_diff) >= self.small_eps)[1]
        # updating the model
        # we like already calculated alphas, so we save all alphas for the unit tests
        model.update(np.array(alpha_diff), alphas.reshape((X.shape[0], 2)), np.array(b), support_vectors)
        return model
def split_index(x_data, k):
"""Splits data into k folds"""
folds = list()
indexes = list(range(len(x_data)))
for j in range(k):
fold = random.Random(42).sample(indexes, round(len(x_data) / k))
folds.append(fold)
for element in fold:
indexes.remove(element)
return folds, list(range(len(x_data)))
def get_cross_validation_data(x_data, y_data, k):
    """Return (train_x, train_y, test_x, test_y) lists, one entry per fold,
    for k-fold cross validation of x_data / y_data."""
    train_x, train_y = list(), list()
    test_x, test_y = list(), list()
    fold_indexes, all_index = split_index(x_data, k)
    y_arr = np.array(y_data)
    for test_index in fold_indexes:
        # Training indexes are everything outside the current test fold.
        train_index = [i for i in all_index if i not in test_index]
        test_x.append(x_data[test_index])
        test_y.append(list(y_arr[test_index]))
        train_x.append(x_data[train_index])
        train_y.append(list(y_arr[train_index]))
    return train_x, train_y, test_x, test_y
if __name__ == "__main__":
show_sine = False
show_house = True
show_polynomial = False
show_RBF = True
if show_sine:
sine = pd.read_csv('sine.csv', sep=',')
sine_x = sine['x'].values
sine_y = sine['y'].values
fig, ax = plt.subplots(1)
ax.plot(sine_x, sine_y, 'ko', alpha = 0.2, label='Original data')
new_data = np.arange(0, 20, step=0.1)
sine_x = sine_x.reshape((sine_x.shape[0], 1))
sine_y = sine_y.reshape((sine_y.shape[0], 1))
new_x = new_data.reshape((new_data.shape[0], 1))
# set the epsilon for sine
epsilon = 0.5
fitter = SVR(kernel=Polynomial(M=11), lambda_=0.1, epsilon=epsilon)
m = fitter.fit(scale_data(sine_x), sine_y)
pred = m.predict(scale_data(new_x))
ax.plot(sine_x[m.get_support_vectors()], (sine_y[m.get_support_vectors()]), '2r',
label='Support vectors Polynomial')
pred2 = pred
print(len(m.get_support_vectors()))
fitter = SVR(kernel=RBF(sigma=0.3), lambda_=0.1, epsilon=epsilon)
m = fitter.fit(scale_data(sine_x), sine_y)
pred = m.predict(scale_data(new_x))
ax.plot(sine_x[m.get_support_vectors()], (sine_y[m.get_support_vectors()]), '1',
label='Support vectors RBF')
print(len(m.get_support_vectors()))
ax.plot(new_data, pred2.reshape((len(new_data), 1)), '-', label='Polynomial M=11')
ax.plot(new_data, pred.reshape((len(new_data), 1)), '-', label='RBF sigma=0.3')
plt.legend()
plt.title('Fit sinus function using SVR')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
if show_house:
house = pd.read_csv('housing2r.csv', sep=',').values
X_train, X_test = house[:160, :5], house[160:, :5]
y_train, y_test = house[:160, 5], house[160:, 5]
fig, ax = plt.subplots(2)
# Here you set lambdas
lam = np.array([0.001, 0.01, 0.1, 1, 5, 10, 25, 50, 100])
train_x, train_y, test_x, test_y = get_cross_validation_data(X_train, y_train, 5)
# set the parameter epsilon
epsilon = 4
if show_polynomial:
ALL_RMSE = list()
for m in range(1, 11):
AVERAGE_RMSE = list()
for lamb in lam:
RMSE_CV = list()
for X, Y, Z, W in zip(train_x, train_y, test_x, test_y):
fitter = SVR(kernel=Polynomial(M=m), lambda_=lamb, epsilon=epsilon)
mod = fitter.fit(scale_data(X), np.array(Y))
pred = mod.predict(scale_data(Z))
print(f'Number of support vectors: {len(mod.get_support_vectors())}')
prediction_list = list()
for x, y in zip(W, pred):
prediction_list.append(pow(x - y, 2))
# scalar
RMSE_CV.append(np.sqrt(np.sum(np.array(prediction_list)) / len(pred)))
AVERAGE_RMSE.append((np.mean(RMSE_CV), lamb)) # skalarji za cross validation
ALL_RMSE.append(AVERAGE_RMSE) # list skalarjev za vsak m
    # --- Polynomial kernel: pick the best lambda per degree from the CV results.
    # ALL_RMSE[m-1] holds (mean CV RMSE, lambda) pairs for polynomial degree m.
    best_lambdas = list()
    for j in range(1, 11):
        current_m = ALL_RMSE[j - 1]
        # Lambda whose mean cross-validated RMSE is minimal for this degree.
        best_lambda = min(current_m, key=lambda t: t[0])[1]
        best_lambdas.append(best_lambda)
        print(best_lambdas)
    print(f'Those are best lambdas: {best_lambdas}')
    # Refit on the full training set with the per-degree best lambda and
    # evaluate on the held-out test set.
    RMSE_best = list()
    number_support_vectors_best = list()
    for m in range(1, 11):
        fitter = SVR(kernel=Polynomial(M=m), lambda_=best_lambdas[m - 1], epsilon=epsilon)
        # NOTE(review): rebinding the loop variable `m` to the fitted model is
        # harmless (the for statement reassigns it each iteration) but
        # confusing — consider a separate name.
        m = fitter.fit(scale_data(X_train), y_train)
        pred = m.predict(scale_data(X_test))
        number_support_vectors_best.append(len(m.get_support_vectors()))
        prediction_list = list()
        for x, y in zip(y_test, pred):
            prediction_list.append(pow(x - y, 2))
        # Parenthesised differently from the loop below, but sum(v/n) == sum(v)/n,
        # so this computes the same RMSE.
        RMSE_best.append(np.sqrt(np.sum(np.array(prediction_list) / len(pred))))
    ax[0].plot(list(range(1, 11)), RMSE_best, label='Polynomial lambda best')
    ax[1].plot(list(range(1, 11)), number_support_vectors_best, label='Polynomial lambda best')
    # Baseline: same degrees with a fixed lambda of 1.
    RMSE_fix = list()
    number_support_vectors_fix = list()
    for m in range(1, 11):
        fitter = SVR(kernel=Polynomial(M=m), lambda_=1, epsilon=epsilon)
        m = fitter.fit(scale_data(X_train), y_train)
        pred = m.predict(scale_data(X_test))
        number_support_vectors_fix.append(len(m.get_support_vectors()))
        prediction_list = list()
        for x, y in zip(y_test, pred):
            prediction_list.append(pow(x - y, 2))
        RMSE_fix.append(np.sqrt(np.sum(np.array(prediction_list)) / len(pred)))
    ax[0].plot(list(range(1, 11)), RMSE_fix, label='Polynomial lambda=1')
    plt.setp(ax[0], ylabel='RMSE')
    ax[1].plot(list(range(1, 11)), number_support_vectors_fix, label='Polynomial lambda=1')
    ax[0].set_title('RMSE depending on M')
    plt.legend()
    plt.title('Number of support vectors depending on M')
    #plt.title('RMSE depending on kernel parameter M')
    plt.ylabel('# support vectors')
    plt.xlabel('parameter M')
    # NOTE(review): handles/labels are fetched but never used below.
    handles, labels = ax[0].get_legend_handles_labels()
    plt.show()
    ALL_RMSE = list()
    if show_RBF:
        # Here you set the parameters for sigma
        sigmas = np.array([0.05, 0.5, 1, 2, 5, 10])
        # Cross-validate every (sigma, lambda) pair; ALL_RMSE gets one list of
        # (mean CV RMSE, lambda) pairs per sigma value.
        for m in sigmas:
            print(m)
            AVERAGE_RMSE = list()
            for lamb in lam:
                RMSE_CV = list()
                for X, Y, Z, W in zip(train_x, train_y, test_x, test_y):
                    fitter = SVR(kernel=RBF(sigma=m), lambda_=lamb, epsilon=epsilon)
                    mod = fitter.fit(scale_data(X), np.array(Y))
                    pred = mod.predict(scale_data(Z))
                    prediction_list = list()
                    for x, y in zip(W, pred):
                        prediction_list.append(pow(x - y, 2))
                    # scalar
                    RMSE_CV.append(np.sqrt(np.sum(np.array(prediction_list)) / len(pred)))
                AVERAGE_RMSE.append((np.mean(RMSE_CV), lamb))
            ALL_RMSE.append(AVERAGE_RMSE)
        best_lambdas = list()
        print(len(ALL_RMSE))
        for j in range(1, len(sigmas) + 1):
            current_m = ALL_RMSE[j - 1]
            best_lambda = min(current_m, key=lambda t: t[0])[1]
            best_lambdas.append(best_lambda)
        print(f'Those are best lambdas: {best_lambdas}')
RMSE_best = list()
number_support_vectors_best = list()
for m in range(1, len(sigmas) + 1):
fitter = SVR(kernel=RBF(sigma=m), lambda_=best_lambdas[m - 1], epsilon=epsilon)
mod = fitter.fit(scale_data(X_train), y_train)
pred = mod.predict(scale_data(X_test))
number_support_vectors_best.append(len(mod.get_support_vectors()))
prediction_list = list()
for x, y in zip(y_test, pred):
prediction_list.append(pow(x - y, 2))
RMSE_best.append(np.sqrt(np.sum(np.array(prediction_list)) / len(pred)))
ax[0].plot(list(sigmas), RMSE_best, label='RBF lambda best')
ax[1].plot(list(sigmas), number_support_vectors_best, label='RBF lambda best')
RMSE_fix = list()
number_support_vectors_fix = list()
for m in sigmas:
fitter = SVR(kernel=RBF(sigma=m), lambda_=1, epsilon=epsilon)
m = fitter.fit(scale_data(X_train), y_train)
pred = m.predict(scale_data(X_test))
number_support_vectors_fix.append(len(m.get_support_vectors()))
prediction_list = list()
for x, y in zip(y_test, pred):
prediction_list.append(pow(x - y, 2))
RMSE_fix.append(np.sqrt(np.sum(np.array(prediction_list)) / len(pred)))
ax[0].plot(list(sigmas), RMSE_fix, label='RBF sigma=1')
plt.setp(ax[0], ylabel = 'RMSE')
ax[0].set_title('RMSE depending on sigma')
plt.title('Number of support vectors depending on sigma')
ax[1].plot(list(sigmas), number_support_vectors_fix, label='RBF lambda=1')
plt.legend()
plt.xlabel('parameter sigma')
plt.ylabel('# support vectors')
plt.show()
|
import param_estimate as pe
import matplotlib.pyplot as plt
import numpy as np
import time
import pickle
# Post-processing: for each saved optimisation run, plot the fitted curves
# against the experimental data (top panel) and the MSE history (bottom panel).
filename = 'fitting_data'
save_data_series = [1,2,3]
for j in save_data_series:
    # Each run j was pickled to 'fitting_data_<j>.par'.
    pkl_file = open(filename+'_'+str(j)+'.par', 'rb')
    SAVE_DATA = None
    SAVE_DATA = pickle.load(pkl_file)
    pkl_file.close()
    fig = plt.figure(j)
    # NOTE(review): the None pre-assignments (SAVE_DATA/ax/ax2) are dead —
    # the variables are reassigned immediately below.
    ax = None
    ax2 = None
    ax = fig.add_subplot(211)
    # ------------------- EXPT DATA ---------------------------------=
    Xr, Yr = pe.load_data(plot_data=0)
    ax.scatter(Xr,Yr,3,marker="x",c="g")
    # ------------------- DATA FROM OPTIMIZATION ---------------------
    # Colour interpolates red -> blue from the first to the last iteration.
    color_scheme = np.linspace(0,1,len(SAVE_DATA))
    for i in range(len(SAVE_DATA)):
        # Each entry stores the curve (t_set, n_set), the parameter vector p
        # and the MSE history up to that iteration.
        data_store = SAVE_DATA[i]
        n_set = data_store["n_set"]
        t_set = data_store["t_set"]
        p = data_store["p"]
        mse_list = data_store["mse_list"]
        print(" i = ", i)
        print(" p = ", p )
        if i==0:
            this_label = "init curve"
        else:
            this_label = str(i)
        ax.plot(t_set,n_set, color=(1-color_scheme[i],0,color_scheme[i]), label=this_label)
        if i ==0:
            # Title shows the initial parameter guess for this run.
            this_title = "initial G,k1,k2,k3 = " + str(p)
            plt.title(this_title)
    ax2 = fig.add_subplot(212)
    ax2.plot(range(len(mse_list)),mse_list)
    ax.legend()
    ax.set_xlabel("t")
    ax.set_ylabel("n")
    ax2.set_xlabel("i")
    ax2.set_ylabel("MSE")
plt.show()
print("\nClosing Post-Processing Program...")
|
# _*_ coding:UTF-8_*_
from zhihu import *
# Entry point: run the page crawler defined in the zhihu module.
# NOTE: Python 2 syntax (print statement).
if __name__ == '__main__':
    GoDownloadPage().startCrawl()
    print 'done'
|
"""
for x in range (10000):
print(f"ループ変数:{x}")
"""
"""
i = input("繰り返し回数>")
count = int(i)
for x in range(count):
print(f"Hello,world{x + 1}")
"""
"""
count = int(input("繰り返し回数>"))
for x in range(count):
print("Hello")
"""
"""
for x in range(5):
print(f"残り{5-x}")
print("終了")
"""
"""
for x in range(5):
for y in range(5):
print(f"(x,y):({x},{y})")
"""
"""
values = [[1,2],[2,3]]
for x in values:
print(f"要素:{x}")
for y in x:
print(f"要素:{y}")
"""
"""
i = input("リスト>")
values = eval(i)
print(type(values))
for x in values:
print(f"要素:{x}")
"""
"""練習問題1
i = input("リスト>")
values = eval(i) #intとおなじようにevalはリストにする
for x in values:
print(x*2)
"""
"""
i = input("リスト>")
values = eval(i)
for x in values:
print(f"要素:{x}")
if x < 10:
print("OK")
else:
print("NG")
"""
"""
i = input("リスト>")
values = eval(i)
current = 0
for x in values:
print(f"ここまでの値(古い値):{current}+リスト要素:{x}")
current= current + x
print(f"これまでの値(新しい値):{current}")
print(f"合計:{current}")
"""
"""
i = input("リスト>")
values = eval(i)
current = values[0]
for x in values:
print(f"ここまでの値(古い値):{current}+リスト要素:{x}")
if x > current:
current = x
print(f"これまでの値(新しい値):{current}")
print(f"最大値:{current}")
"""
"""
i = input("リスト>")
values = eval(i)
for x in range(len(values)):
print(f"index:{x}")
print(f"要素数:{values[x]}")
"""
"""
i = input("リスト>")
values = eval(i)
count = 0
total = 0
for x in values:
count = count + 1
total = total + x
print(f"平均値:{total/count}")
"""
"""
i = input("リスト>")
values = eval(i)
current = values[0]
for x in values:
print(f"ここまでの最小値(古い値):{current}+リスト要素:{x}")
if x < current:
current = x
print(f"これまでの最小値(新しい値):{current}")
print(f"最小値:{current}")
"""
"""
values = eval(input("リスト>"))
current = len(values[0])
for x in values:
print(f"ここまでの最小値(古い値):{current}+リスト要素:{x}")
if len(x) < current:
current = len(x)
print(f"これまでの最小値(新しい値):{current}")
print(f"最小値:{current}")
"""
"""
#課題1のヒント
import turtle
t = turtle.Pen()
current =0
for x in range(10):
t.forward(50+current)
t.right(90)
current = current + 50
i = input()
for x in range(10):
t.forward(100)
t.left(90)
"""
#
|
import numpy as np
from sklearn import linear_model
# Minimal SGDClassifier demo: two linearly separable classes.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
Y = np.array([1, 1, 0, 0])
clf = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)
clf.fit(X, Y)
# partial_fit performs an additional incremental pass over the same batch.
clf.partial_fit(X, Y)
print(clf.predict(np.array([[-1,-2],[2,2]])))
# Select the rows of `a` at positions listed in `b`.
a = [[1, 1], [12, 2], [3, 4], [5, 6]]
b = [1, 3]
# BUG FIX: plain Python lists do not support indexing with a list
# (`a[b]` raises TypeError); index element-wise instead.
# (np.array(a)[b] would be the NumPy equivalent.)
print([a[i] for i in b])
|
# I use this for simulating log, currently I only use the file, not the database
import pickle
import os
import os.path
WRITE_LOG_FILE = 'WRITE_LOG' # the write transaction id of WRITE_LOG
# when the write is involked
## Write Log ###################################################
def log_write(server_id, file_name, chunk_id, trans_id):
    """Record *trans_id* as the latest write transaction for
    (file_name, chunk_id) on *server_id* in the pickled write log.

    Creates an empty log file on first use. The log maps
    file_name -> chunk_id -> server_id -> transaction id.
    """
    # Create an empty log on first use.
    if not os.path.exists(WRITE_LOG_FILE):
        # FIX: pickle files must be opened in binary mode; also use `with`
        # so the handles are closed even on error. (The original also had a
        # no-op `if trans_id == -1: trans_id = -1` guard, removed.)
        with open(WRITE_LOG_FILE, 'wb') as f:
            pickle.dump({}, f)
    with open(WRITE_LOG_FILE, 'rb') as f:
        log = pickle.load(f)
    # Insert/overwrite the transaction id, creating nested dicts as needed.
    log.setdefault(file_name, {}).setdefault(chunk_id, {})[server_id] = trans_id
    with open(WRITE_LOG_FILE, 'wb') as f:
        pickle.dump(log, f)
def get_write_version(server_id, file_name, chunk_id):
    """Return the transaction id recorded for (file_name, chunk_id) on
    *server_id*, or -1 when no entry exists.
    """
    # FIX: open the pickle file in binary mode and close it deterministically.
    with open(WRITE_LOG_FILE, 'rb') as f:
        log = pickle.load(f)
    try:
        return log[file_name][chunk_id][server_id]
    except KeyError:
        # Any missing level of the nested mapping means "never written".
        return -1
# get the last server ids, for a file and its chunk_id, return those chunks having the same last transaction id
def get_last_update_id(file_name, chunk_id):
    """Return the server ids holding the most recent write of
    (file_name, chunk_id) — i.e. all servers whose recorded transaction id
    equals the maximum. Returns [] when the chunk is unknown.
    """
    # FIX: binary-mode pickle read with `with`; replaced the Python-2-only
    # dict.has_key() with `in`-style access (backward compatible) and
    # removed the no-op `if max_trans == -1: max_trans = -1` guard.
    with open(WRITE_LOG_FILE, 'rb') as f:
        log = pickle.load(f)
    chunk_map = log.get(file_name, {}).get(chunk_id, {})
    if not chunk_map:
        return []
    max_trans = max(chunk_map.values())
    return [sid for sid, tid in chunk_map.items() if tid == max_trans]
|
# Generated by Django 2.0.9 on 2018-12-18 20:27
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header); edit with care.
    # NOTE(review): the datetime defaults below are literal values captured
    # when the migration was generated (2018-12-18 20:27) — new rows will
    # default to that fixed timestamp, not to "now".
    dependencies = [
        ('empresas', '0004_auto_20181218_2002'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cotacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', models.DateField(default=datetime.datetime(2018, 12, 18, 20, 27, 40, 116465))),
                ('valor', models.FloatField()),
            ],
        ),
        migrations.AlterField(
            model_name='acao',
            name='data',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 18, 20, 27, 40, 115944)),
        ),
        migrations.AddField(
            model_name='cotacao',
            name='acao',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='acao', to='empresas.Acao'),
        ),
    ]
|
"""Base runtime interface.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import logging
import os
import shutil
import six
from treadmill import appcfg
from treadmill import exc
from treadmill import supervisor
from treadmill.appcfg import abort as app_abort
from treadmill.appcfg import manifest as app_manifest
_LOGGER = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class RuntimeBase(object):
    """Base class for a Treadmill runtime.
    :param tm_env:
        The Treadmill application environment
    :type tm_env:
        `appenv.AppEnvironment`
    :param container_dir:
        Full path to the application container directory
    :type container_dir:
        ``str``
    :param param:
        Optional dict of extra runtime-specific parameters.
    """
    __slots__ = (
        '_tm_env',
        '_service',
        '_param',
    )
    def __init__(self, tm_env, container_dir, param=None):
        self._tm_env = tm_env
        # Avoid a mutable default: fall back to a fresh empty dict.
        self._param = {} if param is None else param
        self._service = supervisor.open_service(container_dir)
    @abc.abstractmethod
    def _can_run(self, manifest):
        """Determines if the manifest can run with the runtime.
        :returns:
            ``True`` if can run
        :rtype:
            ``Boolean``
        """
        pass
    @abc.abstractmethod
    def _run(self, manifest):
        """Prepares container environment and exec's container."""
        pass
    def run(self):
        """Prepares container environment and exec's container
        The function is intended to be invoked from 'run' script and never
        returns.
        :returns:
            This function never returns
        """
        manifest_file = os.path.join(self._service.data_dir, appcfg.APP_JSON)
        manifest = app_manifest.read(manifest_file)
        # Abort container setup when the manifest is not runnable by this
        # runtime implementation.
        if not self._can_run(manifest):
            raise exc.ContainerSetupError('invalid_type',
                                          app_abort.AbortedReason.INVALID_TYPE)
        self._run(manifest)
    @abc.abstractmethod
    def _finish(self):
        """Frees allocated resources and mark then as available."""
        pass
    def finish(self):
        """Frees allocated resources and mark then as available."""
        # Required because on windows log files are archived and deleted
        # which cannot happen when the supervisor/log is still running.
        supervisor.ensure_not_supervised(self._service.directory)
        self._finish()
        # Remove the container directory once cleanup has completed.
        shutil.rmtree(self._service.directory)
        _LOGGER.info('Finished cleanup: %s', self._service.directory)
    @abc.abstractmethod
    def kill(self):
        """Kills a container."""
        pass
    # pylint: disable=W0613
    # tm_env for method in child class to use
    @classmethod
    def manifest(cls, tm_env, manifest):
        """Add runtime modification to manifest, default does nothing
        """
        # NOTE(review): relies on a ``name`` attribute that concrete
        # subclasses must define; RuntimeBase itself does not declare one.
        app_manifest.add_manifest_features(manifest, cls.name)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The above encoding declaration is required and the file must be saved as UTF-8
################################################################################
### LISTS
# List indices begin with 0, not 1!
zoo_animals = ["pangolin", "cassowary", "sloth", "dog"];
if len(zoo_animals) > 3:
    print "The first animal at the zoo is the " + zoo_animals[0]
    print "The second animal at the zoo is the " + zoo_animals[1]
    print "The third animal at the zoo is the " + zoo_animals[2]
    print "The fourth animal at the zoo is the " + zoo_animals[3]
print zoo_animals[2] # Access by Index
################################################################################
# List Slicing
# "Slicing" did not modify the original list!
# example_list[i:j] - selects all elements from the i-th (inclusive) to the j-th (exclusive),
# example_list[i:] - selects all elements from the i-th (inclusive) to the end,
# example_list[:j] - selects all elements from the start to the j-th (exclusive).
# The slice operation can also take a third argument:
# example_list[i:j:k] -- selects every k-th element from the i-th (inclusive) to the j-th (exclusive),
# example_list[i::k] -- selects every k-th element from the i-th (inclusive) to the end,
# example_list[:j:k] -- selects every k-th element from the start to the j-th (exclusive),
# example_list[::k] -- selects every k-th element of the list.
letters = ['a', 'b', 'c', 'd', 'e']
# NOTE(review): 'slice' shadows the builtin slice() — rename if this grows.
slice = letters[1:3]
print slice # ['b', 'c']
print letters[::-1] # ['e', d', 'c', 'b', 'a'] -> revert a list!
print letters # ['a', 'b', 'c', 'd', 'e']
''' You can slice a string exactly like a list!
In fact, you can think of strings as lists of characters:
each character is a sequential item in the list, starting from index 0.
'''
animals = "catdogfrog"
cat = animals[:3] # The first three characters of animals
dog = animals[3:6] # The fourth through sixth characters
frog = animals[6:] # From the seventh character to the end
################################################################################
# Append: x.append(y) adds the value y to the end of list x.
""" Append
x.append(y) - додає значення y в кінець списку x
append() takes exactly one argument!
"""
letters = ['a', 'b', 'c']
letters.append('d')
print len(letters) # 4
print letters # ['a', 'b', 'c', 'd']
################################################################################
# Reverse: x.reverse() reverses list x in place.
""" Reverse
x.reverse() - змінює порядок елементів списку x на зворотний
"""
x = [1, 2, 3]
x.reverse()
print x # [3, 2, 1]
################################################################################
# We can also insert items into a list.
animals = ["ant", "bat", "cat"]
animals.insert(1, "dog") # We insert "dog" at index 1, which moves everything down by 1.
print animals # ["ant", "dog", "bat", "cat"]
################################################################################
# Remove item from a List
print "\nRemove item from list:"
beatles = ["john","paul","george","ringo","stuart"]
beatles.remove("stuart")
print beatles # ["john","paul","george","ringo"]
################################################################################
# sort()
print "\nSorting animals:"
animals = ["cat", "ant", "bat"]
animals.sort() # Note that .sort() modifies the list rather than returning a new list.
print animals # ['ant', 'bat', 'cat']
################################################################################
# search for an item in a list
animals = ["ant", "bat", "cat"]
print animals.index("bat") # 1
################################################################################
### Iterating over a list
# Method 1
# Useful to loop through the list, but it's not possible to modify the list this way.
for item in list:
print item
# Method 2 - iterate through indexes:
# Uses indexes to loop through the list, making it possible to also modify the list if needed.
for i in range(len(list)):
print list[i]
################################################################################
# FOR .. IN .. with List.
# If you want to do something with every item in the list, you can use a for loop.
# Example: for item in list_name:
print "\nFor .. in loop for List:"
my_list = [1,9,3,8,5,7]
for number in my_list:
    # Each element is printed doubled, one per line.
    print number * 2
# 2
# 18
# 6
# 16
# 10
# 14
################################################################################
# a for-loop that iterates over start_list and .append()s each number
# squared (x ** 2) to square_list. Then sort square_list
print "\nSorted square of each item original list: "
start_list = [5, 3, 1, 2, 4]
square_list = []
for item in start_list:
    square_list.append(item ** 2)
# sort() orders the squares in place, ascending.
square_list.sort()
print square_list
|
from typing import Any
from typing import Callable
from typing import cast
from typing import List
from typing import Optional
from typing import Type
from typing import Union
from fastapi_crudrouter.core import CRUDGenerator
from fastapi_crudrouter.core import NOT_FOUND
from fastapi_crudrouter.core._types import DEPENDENCIES
from fastapi_crudrouter.core._types import PAGINATION
from fastapi_crudrouter.core._types import PYDANTIC_SCHEMA as SCHEMA
from pydantic_aioredis.store import Store
CALLABLE = Callable[..., SCHEMA]
CALLABLE_LIST = Callable[..., List[SCHEMA]]
class PydanticAioredisCRUDRouter(CRUDGenerator[SCHEMA]):
    """CRUD router backed by a pydantic-aioredis ``Store``.

    Registers *schema* with *store* on construction and implements the
    ``CRUDGenerator`` route factories on top of the model's
    ``select``/``insert``/``update``/``delete`` class methods.
    """

    def __init__(
        self,
        schema: Type[SCHEMA],
        store: Store,
        create_schema: Optional[Type[SCHEMA]] = None,
        update_schema: Optional[Type[SCHEMA]] = None,
        prefix: Optional[str] = None,
        tags: Optional[List[str]] = None,
        paginate: Optional[int] = None,
        get_all_route: Union[bool, DEPENDENCIES] = True,
        get_one_route: Union[bool, DEPENDENCIES] = True,
        create_route: Union[bool, DEPENDENCIES] = True,
        update_route: Union[bool, DEPENDENCIES] = True,
        delete_one_route: Union[bool, DEPENDENCIES] = True,
        delete_all_route: Union[bool, DEPENDENCIES] = True,
        **kwargs: Any
    ) -> None:
        super().__init__(
            schema=schema,
            create_schema=create_schema,
            update_schema=update_schema,
            prefix=prefix,
            tags=tags,
            paginate=paginate,
            get_all_route=get_all_route,
            get_one_route=get_one_route,
            create_route=create_route,
            update_route=update_route,
            delete_one_route=delete_one_route,
            delete_all_route=delete_all_route,
            **kwargs
        )
        self.store = store
        # The model must be registered with the store before any route runs.
        self.store.register_model(self.schema)

    def _get_all(self, *args: Any, **kwargs: Any) -> CALLABLE_LIST:
        async def route(
            pagination: PAGINATION = self.pagination,
        ) -> List[SCHEMA]:
            skip, limit = pagination.get("skip"), pagination.get("limit")
            skip = cast(int, skip)
            models = await self.schema.select(skip=skip, limit=limit)
            # select() returns None when nothing matches; normalise to [].
            return models if models is not None else []
        return route

    def _get_one(self, *args: Any, **kwargs: Any) -> CALLABLE:
        async def route(item_id: str) -> SCHEMA:
            model = await self.schema.select(ids=[item_id])
            if model is None:
                raise NOT_FOUND
            return model[0]
        return route

    def _create(self, *args: Any, **kwargs: Any) -> CALLABLE:
        async def route(model: self.create_schema) -> SCHEMA:  # type: ignore
            # Promote the create payload to the full schema before insert.
            model = self.schema(**model.dict())
            await self.schema.insert(model)
            return model
        return route

    def _update(self, *args: Any, **kwargs: Any) -> CALLABLE:
        async def route(item_id: str, model: self.update_schema) -> SCHEMA:  # type: ignore
            if await self.schema.select(ids=[item_id]) is None:
                raise NOT_FOUND
            await self.schema.update(item_id, data=model.dict())
            # BUG FIX: select() takes a *list* of ids (as used everywhere
            # else in this class); previously a bare string was passed here.
            result = await self.schema.select(ids=[item_id])
            return result[0]
        return route

    def _delete_all(self, *args: Any, **kwargs: Any) -> CALLABLE_LIST:
        async def route() -> List[SCHEMA]:
            await self.schema.delete()
            return []
        return route

    def _delete_one(self, *args: Any, **kwargs: Any) -> CALLABLE:
        async def route(item_id: str) -> SCHEMA:
            model = await self.schema.select(ids=[item_id])
            if model is None:
                raise NOT_FOUND
            await self.schema.delete(ids=[item_id])
            # Return the deleted representation fetched before deletion.
            return model[0]
        return route
|
class SLL:
    """Singly linked list with head insertion."""

    class node:
        """A single list node holding an element and the next-node link."""

        def __init__(self, element, nextlink=None):
            self.element = element
            self.nextnode = nextlink

    def __init__(self):
        self.head = None  # first node, or None when the list is empty
        self.size = 0     # number of stored elements

    def __str__(self):
        """Render as ' e1 e2 ... ' (leading space, space after each element).

        Uses join instead of repeated string concatenation (was quadratic).
        """
        parts = [" "]
        pointer = self.head
        # Idiom fix: compare against None with `is not`, not `!=`.
        while pointer is not None:
            parts.append(str(pointer.element) + " ")
            pointer = pointer.nextnode
        return "".join(parts)

    def tambah(self, element):
        """Prepend *element* to the list ('tambah' = 'add')."""
        self.head = self.node(element, self.head)
        self.size += 1

    def __len__(self):
        return self.size
def main():
    """Demo: build a four-element linked list and print it with its length."""
    Listku = SLL()
    # tambah() prepends, so the printed order is the reverse of insertion.
    Listku.tambah("Sangat")
    Listku.tambah("Kamu")
    Listku.tambah("Cinta")
    Listku.tambah("Aku")
    print("\n")
    print("Single Linked List : ", str(Listku))
    print("Jumlah : ", len(Listku))
    print("\n")
main()
|
import traceback
import tornado
from tornado.gen import Return
from handlers.api.base import BaseApiHandler
from handlers.provider_wrapper import BaseProviderWrapper
class ViewApiHandler(BaseApiHandler):
    """API handler returning account/source views for a gid.

    NOTE: Python 2 code (itervalues/iteritems).
    """
    def __init__(self, application, request, **kwargs):
        super(ViewApiHandler, self).__init__(application, request, **kwargs)
    @tornado.gen.coroutine
    def handle_get(self, gid, gl_user, args, callback=None):
        """Dispatch on the requested view ('sources', 'accounts' or
        'selector') and return the formatted result via Return()."""
        if 'sources' in args:
            # build sources data structure
            sources = {sid: self.data.get_gid_info(sid) for sid in self.data.get_gid_sources(gid)}
            result = [self.format_google_source(src) for src in sources.itervalues()]
        elif 'accounts' in args:
            # get accounts
            accounts = self.get_accounts(gid, BaseProviderWrapper(), linked=self.data.get_linked_accounts(gid) or dict())
            # build sources data structure
            sources = {sid: self.data.get_gid_info(sid) for sid in self.data.get_gid_sources(gid)}
            # format result
            result = self.format_result(accounts, sources)
        elif 'selector' in args:
            # prepare accounts: _c = current/linked, _t = temporary set
            accounts_c = self.get_accounts(gid, BaseProviderWrapper(), linked=self.data.get_linked_accounts(gid) or dict())
            accounts_t = self.get_accounts(gid, BaseProviderWrapper(), linked=self.data.get_linked_accounts(gid, True) or dict())
            # Key accounts as 'provider:id' for set arithmetic below.
            account_c_set = set(['{0}:{1}'.format(a['provider'], a['id']) for a in accounts_c])
            account_t_set = set(['{0}:{1}'.format(a['provider'], a['id']) for a in accounts_t])
            # filter temp accounts not in the main list:
            # ?full -> all current; ?refresh -> temp ∩ current;
            # default -> temp accounts not yet in the current list.
            if not self.get_argument('full', default=None) is None:
                accounts = accounts_c
            elif not self.get_argument('refresh', default=None) is None:
                account_set = account_t_set.intersection(account_c_set)
                accounts = [a for a in accounts_t if '{0}:{1}'.format(a['provider'], a['id']) in account_set]
            else:
                account_set = account_t_set.difference(account_c_set)
                accounts = [a for a in accounts_t if '{0}:{1}'.format(a['provider'], a['id']) in account_set]
            # build sources data structure
            sources = {sid: self.data.get_gid_info(sid) for sid in self.data.get_gid_sources(gid)}
            # format result
            result = {'sel': self.format_result(accounts, {}), 'src': sources}
        else:
            result = None
        # sync
        raise Return(result)
    @staticmethod
    def get_accounts(gid, wrapper, linked):
        """Turn the raw linked-account mapping into a list of
        {id, provider, account, link} dicts; links rejected by the
        wrapper are silently skipped."""
        result = list()
        # populate provider wrapper with account links
        for link, raw in linked.iteritems():
            a = wrapper.add_link(link, raw)
            if not a:
                continue
            p = wrapper.get_provider_from_link(link)
            result.append(
                {
                    'id': a['id'],
                    'provider': p,
                    'account': a,
                    'link': link
                })
        return result
    def format_result(self, accounts, sources):
        """Attach per-provider options and matching sources to each account.

        Accounts whose provider bag cannot be populated are dropped (the
        exception is logged)."""
        # fill accounts record with sources and options
        result = list()
        for account in accounts:
            try:
                opt = dict()
                self.data.populate_provider_bag(account['provider'], opt, account['id'])
                result.append(
                    {
                        'a': account['account'],
                        'p': account['provider'],
                        'l': account['link'],
                        'op': opt['op'],
                        'src': [
                            {
                                'a': self.format_google_source(sources[gid]),
                                'filter': opt['filter'][gid] if gid in opt['filter'] else None,
                                'sch': self.data.buffer.get_schedule(gid, '{provider}:{id}'.format(**account))
                            }
                            for gid in opt['sources'] if gid in sources.keys()
                        ]
                    })
            except Exception as ex:
                self.logger.error('Exception: format_result(): {0}, {1}'.format(ex, traceback.format_exc()))
        return result
|
import torch
from torchvision import transforms
import argparse
from PIL import Image
import pickle
from model import EncoderCNN, DecoderRNN
import numpy as np
import matplotlib.pyplot as plt
from build_vocab import Vocabulary
import io
def load_image(bytes_stream, transform=None):
    """Decode *bytes_stream* into a 224x224 PIL image.

    When *transform* is given, apply it and add a leading batch dimension
    (unsqueeze(0)); otherwise return the resized PIL image.
    """
    img = Image.open(io.BytesIO(bytes_stream))
    img = img.resize((224, 224), Image.LANCZOS)
    if transform is None:
        return img
    return transform(img).unsqueeze(0)
def main(bytestream):
    """Generate a caption for the image encoded in *bytestream*.

    Loads the CNN encoder / RNN decoder checkpoints and the pickled
    vocabulary from fixed local paths, runs the decoder's sample() on CPU
    and returns the caption as a space-joined string (decoding stops at
    the first '<end>' token, which is included).
    """
    # Checkpoint/vocab paths are hard-coded relative to the working dir.
    encoder_path = 'models/encoder-5-3000.ckpt'
    decoder_path = 'models/decoder-5-3000.ckpt'
    vocab_path = './data/vocab.pkl'
    # These sizes must match the ones the checkpoints were trained with.
    embed_size = 256
    hidden_size = 512
    num_layers = 1
    device = torch.device('cpu')
    # Image preprocessing (ImageNet normalisation stats)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])
    # Load vocab wrapper
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)
    # eval() disables dropout/batch-norm updates for inference.
    encoder = EncoderCNN(embed_size).eval().to(device)
    decoder = DecoderRNN(embed_size, hidden_size, len(vocab), num_layers).to(device)
    # Load trained model params
    encoder.load_state_dict(torch.load(encoder_path, map_location='cpu'))
    decoder.load_state_dict(torch.load(decoder_path, map_location='cpu'))
    # Prepare an image
    image = load_image(bytestream, transform)
    image_tensor = image.to(device)
    # Generate an caption from the image
    feature = encoder(image_tensor)
    # NOTE(review): sample() is assumed to return an iterable of word ids —
    # confirm against DecoderRNN in model.py.
    sampled_ids = decoder.sample(feature)
    # sampled_ids = sampled_ids[0] # (max_seq_length)
    # Convert word_ids to words
    sampled_caption = []
    for word_id in sampled_ids:
        word = vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word == '<end>':
            break
    sentence = ' '.join(sampled_caption)
    return sentence
|
import json

# Greet a returning user whose name was previously stored as JSON.
filename = "username.txt"
with open(filename) as file:
    username = json.load(file)
# Idiom fix: f-string instead of '+' concatenation (which also raised
# TypeError whenever the stored JSON value was not a string).
print(f"Welcome back, {username}!")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import logging
from neutronclient._i18n import _
from neutronclient.common import utils as n_utils
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutron_client
from oslo_serialization import jsonutils
from gbpclient.common import utils
# Widen the neutron client's resource-id matcher so that ids of the form
# 'auto' followed by 32 hex digits are also accepted as UUIDs.
neutron_client.UUID_PATTERN = neutron_client.UUID_PATTERN + (
    '|auto' + neutron_client.HEX_ELEM + '{32}')
def _format_fixed_ips(pt):
    """Render a policy target's fixed_ips as one JSON document per line.

    Returns '' when the record has no (usable) 'fixed_ips' entry.
    """
    try:
        rows = [jsonutils.dumps(ip) for ip in pt['fixed_ips']]
    except (TypeError, KeyError):
        return ''
    return '\n'.join(rows)
def _format_network_service_params(net_svc_policy):
    """Render a policy's network_service_params as one JSON doc per line.

    Returns '' when the record has no (usable) entry.
    """
    try:
        rows = [jsonutils.dumps(param)
                for param in net_svc_policy['network_service_params']]
    except (TypeError, KeyError):
        return ''
    return '\n'.join(rows)
def _format_host_routes(subnet):
    """Render a subnet's host_routes as one JSON document per line.

    Returns '' when the record has no (usable) 'host_routes' entry.
    """
    try:
        rows = [jsonutils.dumps(route) for route in subnet['host_routes']]
    except (TypeError, KeyError):
        return ''
    return '\n'.join(rows)
class ListPolicyTarget(neutronV20.ListCommand):
    """List policy_targets that belong to a given tenant."""
    resource = 'policy_target'
    log = logging.getLogger(__name__ + '.ListPolicyTarget')
    # Render fixed_ips as one JSON document per line in the table output.
    _formatters = {'fixed_ips': _format_fixed_ips, }
    list_columns = ['id', 'name', 'description', 'policy_target_group_id',
                    'port_id', 'fixed_ips']
    pagination_support = True
    sorting_support = True
class ShowPolicyTarget(neutronV20.ShowCommand):
    """Show information of a given policy_target."""
    # Behaviour comes entirely from ShowCommand; only the resource name
    # and logger are specialised.
    resource = 'policy_target'
    log = logging.getLogger(__name__ + '.ShowPolicyTarget')
class CreatePolicyTarget(neutronV20.CreateCommand):
    """Create a policy_target for a given tenant."""
    resource = 'policy_target'
    log = logging.getLogger(__name__ + '.CreatePolicyTarget')
    def add_known_arguments(self, parser):
        """Register the policy-target specific CLI arguments."""
        parser.add_argument(
            '--description',
            help=_('Description of the Policy Target'))
        parser.add_argument(
            '--policy-target-group',
            help=_('Policy Target Group (required argument)'))
        parser.add_argument(
            '--fixed-ip', metavar='subnet_id=SUBNET,ip_address=IP_ADDR',
            action='append',
            type=n_utils.str2dict_type(optional_keys=['subnet_id',
                                                      'ip_address']),
            help=_('Desired IP and/or subnet for this Policy Target: '
                   'subnet_id=<nid>,ip_address=<ip>. '
                   'You can repeat this option.'))
        # Hidden underscore spelling kept for backward compatibility.
        parser.add_argument(
            '--fixed_ip', action='append', help=argparse.SUPPRESS)
        parser.add_argument(
            '--port-id', default='',
            help=_('Neutron Port UUID'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of Policy Target to create (required argument)'))
        parser.add_argument(
            '--segmentation-labels', type=utils.str2list,
            help=_('Comma separated list of segmentation labels, each label '
                   'can be upto 255 characters. This option is currently '
                   'only available with the APIC backend.'))
    def args2body(self, parsed_args):
        """Translate parsed CLI arguments into the API request body."""
        body = {self.resource: {}, }
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'segmentation_labels'])
        # Accept either a group name or UUID; names are resolved to ids.
        if parsed_args.policy_target_group:
            body[self.resource]['policy_target_group_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_target_group',
                    parsed_args.policy_target_group))
        if parsed_args.port_id:
            body[self.resource]['port_id'] = (
                parsed_args.port_id)
        if parsed_args.fixed_ip:
            body[self.resource]['fixed_ips'] = parsed_args.fixed_ip
        return body
class DeletePolicyTarget(neutronV20.DeleteCommand):
    """Delete a given Policy Target."""
    # Behaviour comes entirely from DeleteCommand.
    resource = 'policy_target'
    log = logging.getLogger(__name__ + '.DeletePolicyTarget')
class UpdatePolicyTarget(neutronV20.UpdateCommand):
    """Update Policy Target's information."""
    resource = 'policy_target'
    log = logging.getLogger(__name__ + '.UpdatePolicyTarget')
    def add_known_arguments(self, parser):
        """Register the updatable policy-target CLI arguments."""
        parser.add_argument(
            '--description',
            help=_('New description of the Policy Target'))
        parser.add_argument(
            '--name',
            help=_('New name of the Policy Target'))
        parser.add_argument(
            '--fixed-ip', metavar='subnet_id=SUBNET,ip_address=IP_ADDR',
            action='append',
            type=n_utils.str2dict_type(optional_keys=['subnet_id',
                                                      'ip_address']),
            help=_('Desired IP and/or subnet for this Policy Target: '
                   'subnet_id=<nid>,ip_address=<ip>. '
                   'You can repeat this option.'))
        # Hidden underscore spelling kept for backward compatibility.
        parser.add_argument(
            '--fixed_ip', action='append', help=argparse.SUPPRESS)
        parser.add_argument(
            '--segmentation-labels', type=utils.str2list,
            help=_('Comma separated list of segmentation labels, each label '
                   'can be upto 255 characters. This option is currently '
                   'only available with the APIC backend.'))
    def args2body(self, parsed_args):
        """Translate parsed CLI arguments into the API request body."""
        body = {self.resource: {}, }
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'segmentation_labels'])
        if parsed_args.fixed_ip:
            body[self.resource]['fixed_ips'] = parsed_args.fixed_ip
        return body
class ListPolicyTargetGroup(neutronV20.ListCommand):
    """List Policy Target Groups that belong to a given tenant."""
    resource = 'policy_target_group'
    log = logging.getLogger(__name__ + '.ListPolicyTargetGroup')
    list_columns = ['id', 'name', 'description', 'application_policy_group_id',
                    'l2_policy_id', 'subnets']
    pagination_support = True
    sorting_support = True
class ShowPolicyTargetGroup(neutronV20.ShowCommand):
    """Show information of a given Policy Target Group."""
    # Behaviour comes entirely from ShowCommand.
    resource = 'policy_target_group'
    log = logging.getLogger(__name__ + '.ShowPolicyTargetGroup')
class CreatePolicyTargetGroup(neutronV20.CreateCommand):
    """Create a Policy Target Group for a given tenant."""
    resource = 'policy_target_group'
    log = logging.getLogger(__name__ + '.CreatePolicyTargetGroup')

    def add_known_arguments(self, parser):
        """Register the policy-target-group specific CLI arguments."""
        parser.add_argument(
            '--description',
            help=_('Description of the Policy Target Group'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of Policy Target Group to create '
                   '(required argument)'))
        parser.add_argument(
            '--application-policy-group', metavar='APPLICATION_POLICY_GROUP',
            default='',
            help=_('Application Policy Group UUID (optional, default '
                   'is None)'))
        parser.add_argument(
            '--l2-policy', metavar='L2_POLICY',
            default='',
            help=_('L2 Policy UUID (if not specified, default is used)'))
        parser.add_argument(
            '--provided-policy-rule-sets', type=utils.str2dict,
            help=_('Comma separated list of Policy Rule Sets'))
        parser.add_argument(
            '--consumed-policy-rule-sets', type=utils.str2dict,
            help=_('Comma separated list of Policy Rule Sets'))
        parser.add_argument(
            '--network-service-policy', metavar='NETWORK_SERVICE_POLICY',
            default='',
            help=_('Network Service Policy'))
        parser.add_argument(
            '--subnets', type=utils.str2list,
            help=_('Comma separated list of Neutron Subnet UUIDs'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))
        n_utils.add_boolean_argument(
            parser, '--intra-ptg-allow', dest='intra_ptg_allow',
            help=_("Allow or disallow communication between Policy Targets of "
                   "this Policy Target Group. Default is True, i.e., all "
                   "Policy Targets can communicate with each other. This "
                   "option is only available with the APIC backend."))

    def _resolve_policy_rule_sets(self, rule_sets):
        """Replace rule-set name keys in *rule_sets* with resource UUIDs,
        in place (values are preserved)."""
        for key in list(rule_sets.keys()):
            id_key = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'policy_rule_set', key)
            rule_sets[id_key] = rule_sets.pop(key)

    def args2body(self, parsed_args):
        """Translate parsed CLI arguments into the API request body,
        resolving referenced resource names to ids."""
        body = {self.resource: {}, }
        if parsed_args.l2_policy:
            body[self.resource]['l2_policy_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'l2_policy',
                    parsed_args.l2_policy))
        if parsed_args.application_policy_group:
            body[self.resource]['application_policy_group_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'application_policy_group',
                    parsed_args.application_policy_group))
        if parsed_args.network_service_policy:
            body[self.resource]['network_service_policy_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'network_service_policy',
                    parsed_args.network_service_policy))
        # Deduplicated: the provided/consumed resolution loops were
        # identical copies; both now share one helper.
        if parsed_args.provided_policy_rule_sets:
            self._resolve_policy_rule_sets(
                parsed_args.provided_policy_rule_sets)
        if parsed_args.consumed_policy_rule_sets:
            self._resolve_policy_rule_sets(
                parsed_args.consumed_policy_rule_sets)
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'provided_policy_rule_sets', 'subnets',
                                'consumed_policy_rule_sets', 'shared',
                                'intra_ptg_allow'])
        return body
class DeletePolicyTargetGroup(neutronV20.DeleteCommand):
    """Remove the specified Policy Target Group."""

    log = logging.getLogger(__name__ + '.DeletePolicyTargetGroup')
    resource = 'policy_target_group'
class UpdatePolicyTargetGroup(neutronV20.UpdateCommand):
    """Update Policy Target Group's information."""

    resource = 'policy_target_group'
    log = logging.getLogger(__name__ + '.UpdatePolicyTargetGroup')

    def add_known_arguments(self, parser):
        """Register the updatable PTG options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the Policy Target Group'))
        parser.add_argument(
            '--l2-policy', metavar='L2_POLICY',
            help=_('New L2 policy'))
        parser.add_argument(
            '--application-policy-group', metavar='APPLICATION_POLICY_GROUP',
            help=_('New Application Policy Group'))
        parser.add_argument(
            '--network-service-policy', metavar='NETWORK_SERVICE_POLICY',
            help=_('New Network Service Policy'))
        parser.add_argument(
            '--provided-policy-rule-sets', type=utils.str2dict,
            help=_('New comma separated list of Policy Rule Sets '
                   '(to unset use "")'))
        parser.add_argument(
            '--consumed-policy-rule-sets', type=utils.str2dict,
            help=_('New comma separated list of Policy Rule Sets '
                   '(to unset use "")'))
        parser.add_argument(
            '--subnets', type=utils.str2list,
            help=_('New comma separated list of Neutron Subnet UUIDs '
                   '(to unset use "")'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))
        n_utils.add_boolean_argument(
            parser, '--intra-ptg-allow', dest='intra_ptg_allow',
            help=_("Allow or disallow communication between Policy Targets of "
                   "this Policy Target Group. Default is True, i.e., all "
                   "Policy Targets can communicate with each other. This "
                   "option is only available with the APIC backend."))

    def _resolve_policy_rule_sets(self, rule_sets):
        """Rewrite the keys of *rule_sets* from names/IDs to resolved UUIDs.

        Mutates the dict in place so the later update_dict() call copies
        the resolved mapping into the request body.
        """
        for key in list(rule_sets.keys()):
            id_key = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'policy_rule_set', key)
            rule_sets[id_key] = rule_sets.pop(key)

    def args2body(self, parsed_args):
        """Build the PUT body from the parsed CLI arguments.

        An explicit empty string for --application-policy-group or
        --network-service-policy clears the association (sends None).
        """
        body = {self.resource: {}, }
        if parsed_args.l2_policy:
            body[self.resource]['l2_policy_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'l2_policy',
                    parsed_args.l2_policy))
        if parsed_args.application_policy_group == '':
            body[self.resource]['application_policy_group_id'] = None
        elif parsed_args.application_policy_group:
            body[self.resource]['application_policy_group_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'application_policy_group',
                    parsed_args.application_policy_group))
        if parsed_args.network_service_policy == '':
            body[self.resource]['network_service_policy_id'] = None
        elif parsed_args.network_service_policy:
            body[self.resource]['network_service_policy_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'network_service_policy',
                    parsed_args.network_service_policy))
        # Both rule-set dicts use the same key-resolution logic; the
        # duplicated loops were factored into _resolve_policy_rule_sets.
        if parsed_args.provided_policy_rule_sets:
            self._resolve_policy_rule_sets(
                parsed_args.provided_policy_rule_sets)
        if parsed_args.consumed_policy_rule_sets:
            self._resolve_policy_rule_sets(
                parsed_args.consumed_policy_rule_sets)
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'provided_policy_rule_sets', 'subnets',
                                'consumed_policy_rule_sets', 'shared',
                                'intra_ptg_allow'])
        return body
class ListL2Policy(neutronV20.ListCommand):
    """List the L2 Policies owned by a given tenant."""

    log = logging.getLogger(__name__ + '.ListL2Policy')
    resource = 'l2_policy'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'description', 'l3_policy_id', 'network_id',
                    'inject_default_route']
    pagination_support = True
    sorting_support = True
class ShowL2Policy(neutronV20.ShowCommand):
    """Display the details of a specified L2 Policy."""

    log = logging.getLogger(__name__ + '.ShowL2Policy')
    resource = 'l2_policy'
class CreateL2Policy(neutronV20.CreateCommand):
    """Create a L2 Policy for a given tenant."""

    log = logging.getLogger(__name__ + '.CreateL2Policy')
    resource = 'l2_policy'

    def add_known_arguments(self, parser):
        """Register the L2-Policy-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the L2 Policy'))
        parser.add_argument(
            '--network',
            help=_('Neutron Network UUID to map the L2 Policy to '
                   '(if not specified, new Neutron Network is created '
                   'implicitly)'))
        parser.add_argument(
            '--l3-policy',
            default='',
            help=_('L3 Policy UUID (if not specified default is used)'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of L2 Policy to create (required argument)'))
        n_utils.add_boolean_argument(
            parser, '--inject-default-route', dest='inject_default_route',
            help=_('Enable or disable injecting default route, '
                   'default is True'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))
        parser.add_argument(
            '--reuse-bd',
            default=None,
            help=_('Name or UUID of L2 Policy whose BridgeDomain should '
                   'be reused (APIC only)'))

    def args2body(self, parsed_args):
        """Assemble the POST body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description', 'shared',
                                'inject_default_route'])
        # Resolve the optional L3 Policy name/ID to its UUID.
        if parsed_args.l3_policy:
            fields['l3_policy_id'] = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'l3_policy', parsed_args.l3_policy)
        if parsed_args.network:
            fields['network_id'] = parsed_args.network
        # APIC-only: reuse the BridgeDomain of another L2 Policy.
        if parsed_args.reuse_bd:
            fields['reuse_bd'] = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'l2_policy', parsed_args.reuse_bd)
        return body
class DeleteL2Policy(neutronV20.DeleteCommand):
    """Remove the specified L2 Policy."""

    log = logging.getLogger(__name__ + '.DeleteL2Policy')
    resource = 'l2_policy'
class UpdateL2Policy(neutronV20.UpdateCommand):
    """Update L2 Policy's information."""

    log = logging.getLogger(__name__ + '.UpdateL2Policy')
    resource = 'l2_policy'

    def add_known_arguments(self, parser):
        """Register the updatable L2 Policy options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the L2 Policy'))
        parser.add_argument(
            '--l3-policy',
            default='',
            help=_('New L3 Policy'))
        parser.add_argument(
            '--name',
            help=_('New name of the L2 Policy'))
        n_utils.add_boolean_argument(
            parser, '--inject-default-route', dest='inject_default_route',
            help=_('Enable or disable injecting of default route'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description', 'shared',
                                'inject_default_route'])
        # Resolve the new L3 Policy name/ID to its UUID when provided.
        if parsed_args.l3_policy:
            fields['l3_policy_id'] = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'l3_policy', parsed_args.l3_policy)
        return body
class ListL3Policy(neutronV20.ListCommand):
    """List l3_policies that belong to a given tenant."""

    log = logging.getLogger(__name__ + '.ListL3Policy')
    resource = 'l3_policy'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'description', 'ip_pool',
                    'subnet_prefix_length']
    pagination_support = True
    sorting_support = True
class ShowL3Policy(neutronV20.ShowCommand):
    """Display the details of a specified L3 Policy."""

    log = logging.getLogger(__name__ + '.ShowL3Policy')
    resource = 'l3_policy'
class CreateL3Policy(neutronV20.CreateCommand):
    """Create a L3 Policy for a given tenant."""

    log = logging.getLogger(__name__ + '.CreateL3Policy')
    resource = 'l3_policy'

    def add_known_arguments(self, parser):
        """Register the L3-Policy-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the L3 Policy'))
        parser.add_argument(
            '--ip-version',
            type=int,
            # default=4, choices=[4, 6],
            help=_('IP version, default is 4'))
        parser.add_argument(
            '--ip-pool',
            help=_('CIDR of IP pool to create, default is 10.0.0.0/8'))
        parser.add_argument(
            '--subnet-prefix-length',
            type=int,
            help=_('Subnet prefix length, default is 24'))
        parser.add_argument(
            '--address-scope-v4-id',
            help=_('Neutron Address-scope v4 UUID '
                   '(if not specified, new Neutron Address-scope is '
                   'created implicitly based on ip_version)'))
        parser.add_argument(
            '--address-scope-v6-id',
            help=_('Neutron Address-scope v6 UUID '
                   '(if not specified, new Neutron Address-scope is '
                   'created implicitly based on ip_version)'))
        parser.add_argument(
            '--subnetpools-v4', type=utils.str2list,
            help=_('Comma separated list of Neutron Subnetpool v4 UUIDs '
                   'if ip_version and address scope is v4'))
        parser.add_argument(
            '--subnetpools-v6', type=utils.str2list,
            help=_('Comma separated list of Neutron Subnetpool v6 UUIDs '
                   'if ip_version and address scope is v6'))
        parser.add_argument(
            '--external-segment',
            action='append', dest='external_segments', type=utils.str2dict,
            # Note: The following format is also supported but we do not
            # show it to avoid confusion
            # help=_('Use format <ext-segment-id-1>=<ip-addr1:ipaddr2:...>'
            #       '(this option can be repeated)'))
            help=_('Comma separated list of External Segments'
                   '(this option can be repeated)'))
        parser.add_argument(
            '--routers', type=utils.str2list,
            help=_('Comma separated list of Neutron Router UUIDs'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of L3 policy to create (required argument)'))
        parser.add_argument(
            '--allowed-vm-names', type=utils.str2list,
            help=_('Comma separated list of allowed VM name regexes, each '
                   'regex can be up to 255 characters.'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body from the parsed CLI arguments.

        Each --external-segment is a single-entry dict mapping a segment
        name/ID to a ':'-separated list of IP addresses; an empty value
        means "no addresses".
        """
        fields = {}
        body = {self.resource: fields}
        if parsed_args.external_segments:
            resolved = {}
            for segment in parsed_args.external_segments:
                first_key = list(segment.keys())[0]
                segment_id = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'external_segment', first_key)
                raw_addrs = next(iter(list(segment.values())))
                resolved[segment_id] = (
                    [] if raw_addrs == "" else raw_addrs.split(':'))
            fields['external_segments'] = resolved
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'ip_pool',
                                'address_scope_v4_id', 'address_scope_v6_id',
                                'subnetpools_v4', 'subnetpools_v6',
                                'routers', 'subnet_prefix_length', 'shared',
                                'allowed_vm_names'])
        return body
class DeleteL3Policy(neutronV20.DeleteCommand):
    """Remove the specified L3 Policy."""

    log = logging.getLogger(__name__ + '.DeleteL3Policy')
    resource = 'l3_policy'
class UpdateL3Policy(neutronV20.UpdateCommand):
    """Update L3 Policy's information."""

    log = logging.getLogger(__name__ + '.UpdateL3Policy')
    resource = 'l3_policy'

    def add_known_arguments(self, parser):
        """Register the updatable L3 Policy options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the L3 Policy'))
        parser.add_argument(
            '--subnet-prefix-length',
            type=int,
            help=_('New subnet prefix length'))
        parser.add_argument(
            '--external-segment',
            action='append', dest='external_segments', type=utils.str2dict,
            # Note: The following format is also supported but we do not
            # show it to avoid confusion
            # help=_('Use format <ext-segment-id-1>=<ip-addr1:ipaddr2:...>'
            #       '(this option can be repeated)'))
            help=_('New comma separated list of External Segments'
                   '(this option can be repeated)'))
        parser.add_argument(
            '--subnetpools-v4', type=utils.str2list,
            help=_('New comma separated list of Neutron Subnetpool v4 UUIDs '
                   'if ip_version and address scope is v4'))
        parser.add_argument(
            '--subnetpools-v6', type=utils.str2list,
            help=_('New comma separated list of Neutron Subnetpool v6 UUIDs '
                   'if ip_version and address scope is v6'))
        parser.add_argument(
            '--routers', type=utils.str2list,
            help=_('New comma separated list of Neutron Router UUIDs'))
        parser.add_argument(
            '--name',
            help=_('New name of the L3 Policy'))
        parser.add_argument(
            '--allowed-vm-names', type=utils.str2list,
            help=_('Comma separated list of allowed VM name regexes, each '
                   'regex can be up to 255 characters.'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body from the parsed CLI arguments.

        An empty --external-segment entry stops segment processing and
        results in an empty mapping, which unsets all segments.
        """
        fields = {}
        body = {self.resource: fields}
        if parsed_args.external_segments:
            resolved = {}
            for segment in parsed_args.external_segments:
                # An empty dict (from "") means unset: stop collecting.
                if not segment:
                    break
                first_key = list(segment.keys())[0]
                segment_id = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'external_segment', first_key)
                raw_addrs = next(iter(list(segment.values())))
                resolved[segment_id] = (
                    [] if raw_addrs == "" else raw_addrs.split(':'))
            fields['external_segments'] = resolved
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'ip_pool', 'routers',
                                'subnetpools_v4', 'subnetpools_v6',
                                'subnet_prefix_length', 'shared',
                                'allowed_vm_names'])
        return body
class ListApplicationPolicyGroup(neutronV20.ListCommand):
    """List Application Policy Groups that belong to a given tenant."""

    # Fix: the docstring previously said "List l3_policies" (copy-paste
    # error); it is user-visible as the CLI command description.
    resource = 'application_policy_group'
    log = logging.getLogger(__name__ + '.ListApplicationPolicyGroup')
    _formatters = {}
    list_columns = ['id', 'name', 'description', 'shared']
    pagination_support = True
    sorting_support = True
class ShowApplicationPolicyGroup(neutronV20.ShowCommand):
    """Display the details of a specified Application Policy Group."""

    log = logging.getLogger(__name__ + '.ShowApplicationPolicyGroup')
    resource = 'application_policy_group'
class CreateApplicationPolicyGroup(neutronV20.CreateCommand):
    """Create an Application Policy Group for a given tenant."""

    log = logging.getLogger(__name__ + '.CreateApplicationPolicyGroup')
    resource = 'application_policy_group'

    def add_known_arguments(self, parser):
        """Register the APG-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the Application Policy Group'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of the Application Policy Group to create '
                   '(required argument)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'shared'])
        return body
class DeleteApplicationPolicyGroup(neutronV20.DeleteCommand):
    """Remove the specified Application Policy Group."""

    log = logging.getLogger(__name__ + '.DeleteApplicationPolicyGroup')
    resource = 'application_policy_group'
class UpdateApplicationPolicyGroup(neutronV20.UpdateCommand):
    """Update Application Policy Group's information."""

    log = logging.getLogger(__name__ + '.UpdateApplicationPolicyGroup')
    resource = 'application_policy_group'

    def add_known_arguments(self, parser):
        """Register the updatable APG options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the Application Policy Group'))
        parser.add_argument(
            '--name',
            help=_('New name of the Application Policy Group'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'shared'])
        return body
class ListNetworkServicePolicy(neutronV20.ListCommand):
    """List Network Service Policies that belong to a given tenant."""

    resource = 'network_service_policy'
    log = logging.getLogger(__name__ + '.ListNetworkServicePolicy')
    # Fix: the formatter key was misspelled 'network_servie_params', so
    # _format_network_service_params was never applied to the column.
    _formatters = {'network_service_params': _format_network_service_params}
    list_columns = ['id', 'name', 'description', 'network_service_params',
                    'policy_target_groups']
    pagination_support = True
    sorting_support = True
class ShowNetworkServicePolicy(neutronV20.ShowCommand):
    """Display the details of a specified network_service_policy."""

    log = logging.getLogger(__name__ + '.ShowNetworkServicePolicy')
    resource = 'network_service_policy'
class CreateNetworkServicePolicy(neutronV20.CreateCommand):
    """Create a Network Service Policy for a given tenant."""

    log = logging.getLogger(__name__ + '.CreateNetworkServicePolicy')
    resource = 'network_service_policy'

    def add_known_arguments(self, parser):
        """Register the NSP-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the network_service_policy'))
        parser.add_argument(
            'name',
            help=_('Name of network_service_policy to create (required '
                   'argument)'))
        parser.add_argument(
            '--network-service-params',
            metavar='type=PARAM_TYPE,name=PARAM_NAME,value=PARAM_VALUE',
            action='append', dest='network_service_params',
            type=utils.str2dict,
            help=_('Params for this Network Service '
                   'Policy (this option can be repeated)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'network_service_params', 'shared'])
        return body
class DeleteNetworkServicePolicy(neutronV20.DeleteCommand):
    """Remove the specified network_service_policy."""

    log = logging.getLogger(__name__ + '.DeleteNetworkServicePolicy')
    resource = 'network_service_policy'
class UpdateNetworkServicePolicy(neutronV20.UpdateCommand):
    """Update network_service_policy's information."""

    log = logging.getLogger(__name__ + '.UpdateNetworkServicePolicy')
    resource = 'network_service_policy'

    def add_known_arguments(self, parser):
        """Register the updatable NSP options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the network_service_policy'))
        parser.add_argument(
            '--name',
            help=_('New name of the network_service_policy'))
        parser.add_argument(
            '--network-service-params',
            metavar='type=PARAM_TYPE,name=PARAM_NAME,value=PARAM_VALUE',
            action='append', dest='network_service_params',
            type=utils.str2dict,
            help=_('New params for this Network Service '
                   'Policy (this option can be repeated)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'network_service_params', 'shared'])
        return body
class ListPolicyClassifier(neutronV20.ListCommand):
    """List classifiers that belong to a given tenant."""

    log = logging.getLogger(__name__ + '.ListPolicyClassifier')
    resource = 'policy_classifier'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'protocol', 'port_range', 'direction']
    pagination_support = True
    sorting_support = True
class ShowPolicyClassifier(neutronV20.ShowCommand):
    """Display the details of a specified classifier."""

    log = logging.getLogger(__name__ + '.ShowPolicyClassifier')
    resource = 'policy_classifier'
class CreatePolicyClassifier(neutronV20.CreateCommand):
    """Create a classifier for a given tenant."""

    log = logging.getLogger(__name__ + '.CreatePolicyClassifier')
    resource = 'policy_classifier'

    def add_known_arguments(self, parser):
        """Register the classifier-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('A description for the policy classifier'))
        parser.add_argument(
            '--protocol',
            help=_('A protocol value: tcp, udp, icmp or a protocol number '
                   'between 0 and 255. When not set, all protocols are '
                   'matched'))
        parser.add_argument(
            '--port-range',
            help=_('A port range value in the format p1[:p2]. '
                   'When not set all ports are matched'))
        parser.add_argument(
            '--direction',
            choices=['in', 'out', 'bi', ''],
            help=_('A direction value. When not set defaults to \'bi\''))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('A name for classifier to create (required argument)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body; direction falls back to 'bi'."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'protocol', 'port_range',
                                'direction', 'shared'])
        # Server-side default is not guaranteed, so supply 'bi' explicitly
        # whenever the user did not pass a direction.
        fields.setdefault('direction', 'bi')
        return body
class DeletePolicyClassifier(neutronV20.DeleteCommand):
    """Remove the specified classifier."""

    log = logging.getLogger(__name__ + '.DeletePolicyClassifier')
    resource = 'policy_classifier'
class UpdatePolicyClassifier(neutronV20.UpdateCommand):
    """Update classifier's information."""

    log = logging.getLogger(__name__ + '.UpdatePolicyClassifier')
    resource = 'policy_classifier'

    def add_known_arguments(self, parser):
        """Register the updatable classifier options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description for the policy classifier'))
        parser.add_argument(
            '--protocol',
            help=_('New protocol value: tcp, udp, icmp or a protocol number '
                   'between 0 and 255 '
                   '(use empty string \'\' to unset and match all protocols)'))
        parser.add_argument(
            '--port-range',
            help=_('New port range value in the format p1[:p2] '
                   '(use empty string \'\' to unset)'))
        parser.add_argument(
            '--direction',
            choices=['in', 'out', 'bi', ''],
            help=_('New direction value'))
        parser.add_argument(
            '--name',
            help=_('New name of the classifier'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body; '' unsets protocol/port_range."""
        fields = {}
        body = {self.resource: fields}
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'protocol', 'port_range',
                                'direction', 'shared'])
        # An explicit empty string means "clear this attribute".
        for attr in ('protocol', 'port_range'):
            if getattr(parsed_args, attr) == '':
                fields[attr] = None
        return body
class ListPolicyAction(neutronV20.ListCommand):
    """List actions that belong to a given tenant."""

    log = logging.getLogger(__name__ + '.ListPolicyAction')
    resource = 'policy_action'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'action_type', 'action_value']
    pagination_support = True
    sorting_support = True
class ShowPolicyAction(neutronV20.ShowCommand):
    """Display the details of a specified action."""

    log = logging.getLogger(__name__ + '.ShowPolicyAction')
    resource = 'policy_action'
class CreatePolicyAction(neutronV20.CreateCommand):
    """Create an action for a given tenant."""

    log = logging.getLogger(__name__ + '.CreatePolicyAction')
    resource = 'policy_action'

    def add_known_arguments(self, parser):
        """Register the action-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the policy action'))
        parser.add_argument(
            '--action-type',
            help=_('Type of action'))
        parser.add_argument(
            '--action-value',
            help=_('Name/UUID of servicechain spec for redirect action'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of action to create (required argument)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        # A redirect action references a servicechain spec by name or ID.
        if parsed_args.action_value:
            fields['action_value'] = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'servicechain_spec',
                parsed_args.action_value)
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'action_type', 'shared'])
        return body
class DeletePolicyAction(neutronV20.DeleteCommand):
    """Remove the specified action."""

    log = logging.getLogger(__name__ + '.DeletePolicyAction')
    resource = 'policy_action'
class UpdatePolicyAction(neutronV20.UpdateCommand):
    """Update action's information."""

    log = logging.getLogger(__name__ + '.UpdatePolicyAction')
    resource = 'policy_action'

    def add_known_arguments(self, parser):
        """Register the updatable action options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('New description of the policy action'))
        parser.add_argument(
            '--action-value',
            help=_('New name/UUID of servicechain spec for redirect action'))
        parser.add_argument(
            '--name',
            help=_('New name of the action'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body from the parsed CLI arguments."""
        fields = {}
        body = {self.resource: fields}
        # A redirect action references a servicechain spec by name or ID.
        if parsed_args.action_value:
            fields['action_value'] = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'servicechain_spec',
                parsed_args.action_value)
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'shared'])
        return body
class ListPolicyRule(neutronV20.ListCommand):
    """List policy_rules that belong to a given tenant."""

    log = logging.getLogger(__name__ + '.ListPolicyRule')
    resource = 'policy_rule'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'enabled', 'classifier_id',
                    'actions']
    pagination_support = True
    sorting_support = True
class ShowPolicyRule(neutronV20.ShowCommand):
    """Display the details of a specified policy_rule."""

    log = logging.getLogger(__name__ + '.ShowPolicyRule')
    resource = 'policy_rule'
class CreatePolicyRule(neutronV20.CreateCommand):
    """Create a policy_rule for a given tenant."""

    log = logging.getLogger(__name__ + '.CreatePolicyRule')
    resource = 'policy_rule'

    def add_known_arguments(self, parser):
        """Register the policy-rule-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the policy_rule'))
        n_utils.add_boolean_argument(
            parser, '--enabled', dest='enabled',
            help=_('Enable flag, default is True '
                   '(if False, this Policy Rule is ignored)'))
        parser.add_argument(
            '--classifier',
            help=_('Policy Classifier (required argument)'))
        parser.add_argument(
            '--actions', type=utils.str2list,
            help=_('Comma separated list of Policy Action(s)'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of Policy Rule to create (required argument)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body, resolving action/classifier references."""
        fields = {}
        body = {self.resource: fields}
        if parsed_args.actions:
            fields['policy_actions'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_action', action)
                for action in parsed_args.actions]
        if parsed_args.classifier:
            fields['policy_classifier_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_classifier',
                    parsed_args.classifier))
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description',
                                'enabled', 'shared'])
        return body
class DeletePolicyRule(neutronV20.DeleteCommand):
    """Remove the specified policy_rule."""

    log = logging.getLogger(__name__ + '.DeletePolicyRule')
    resource = 'policy_rule'
class UpdatePolicyRule(neutronV20.UpdateCommand):
    """Update policy_rule's information."""

    log = logging.getLogger(__name__ + '.UpdatePolicyRule')
    resource = 'policy_rule'

    def add_known_arguments(self, parser):
        """Register the updatable policy-rule options on *parser*."""
        parser.add_argument(
            '--name',
            help=_('New name of the Policy Rule'))
        parser.add_argument(
            '--description',
            help=_('New description of the Policy Rule'))
        n_utils.add_boolean_argument(
            parser, '--enabled', dest='enabled',
            help=_('Enable flag (if False, this Policy Rule is ignored)'))
        parser.add_argument(
            '--classifier',
            help=_('New Policy Classifier'))
        parser.add_argument(
            '--actions', type=utils.str2list,
            help=_('New comma separated list of Policy Actions '
                   '(to unset use "")'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body; an explicit empty list unsets actions."""
        fields = {}
        body = {self.resource: fields}
        # "" parses to [] and means "remove all actions"; None means
        # "leave actions untouched".
        if parsed_args.actions == []:
            fields['policy_actions'] = []
        elif parsed_args.actions:
            fields['policy_actions'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_action', action)
                for action in parsed_args.actions]
        if parsed_args.classifier:
            fields['policy_classifier_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_classifier',
                    parsed_args.classifier))
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'description',
                                'enabled', 'shared'])
        return body
class ListPolicyRuleSet(neutronV20.ListCommand):
    """List policy_rule_sets that belong to a given tenant."""

    log = logging.getLogger(__name__ + '.ListPolicyRuleSet')
    resource = 'policy_rule_set'
    _formatters = {}
    # Columns shown in the tabular list output, in display order.
    list_columns = ['id', 'name', 'policy_rules']
    pagination_support = True
    sorting_support = True
class ShowPolicyRuleSet(neutronV20.ShowCommand):
    """Display the details of a specified policy_rule_set."""

    log = logging.getLogger(__name__ + '.ShowPolicyRuleSet')
    resource = 'policy_rule_set'
class CreatePolicyRuleSet(neutronV20.CreateCommand):
    """Create a policy rule set for a given tenant."""

    log = logging.getLogger(__name__ + '.CreatePolicyRuleSet')
    resource = 'policy_rule_set'

    def add_known_arguments(self, parser):
        """Register the rule-set-specific CLI options on *parser*."""
        parser.add_argument(
            '--description',
            help=_('Description of the policy rule set'))
        parser.add_argument(
            '--policy-rules', type=utils.str2list,
            help=_('Comma separated list of Policy Rules'))
        parser.add_argument(
            '--child-policy-rule-sets', type=utils.str2list,
            help=_('Comma separated list of child Policy Rule Sets'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of Policy Rule Set to create (required argument)'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Assemble the POST body, resolving rule and child-set names."""
        fields = {}
        body = {self.resource: fields}
        if parsed_args.policy_rules:
            fields['policy_rules'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule', rule)
                for rule in parsed_args.policy_rules]
        if parsed_args.child_policy_rule_sets:
            fields['child_policy_rule_sets'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set', child)
                for child in parsed_args.child_policy_rule_sets]
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'tenant_id', 'description', 'shared'])
        return body
class DeletePolicyRuleSet(neutronV20.DeleteCommand):
    """Remove the specified policy rule set."""

    log = logging.getLogger(__name__ + '.DeletePolicyRuleSet')
    resource = 'policy_rule_set'
class UpdatePolicyRuleSet(neutronV20.UpdateCommand):
    """Update policy rule set's information."""

    log = logging.getLogger(__name__ + '.UpdatePolicyRuleSet')
    resource = 'policy_rule_set'

    def add_known_arguments(self, parser):
        """Register the updatable rule-set options on *parser*."""
        parser.add_argument(
            '--name',
            help=_('New name of the Policy Rule Set'))
        parser.add_argument(
            '--description',
            help=_('New description of the Policy Rule Set'))
        parser.add_argument(
            '--policy-rules', type=utils.str2list,
            help=_('New comma separated list of Policy Rules '
                   '(to unset use "")'))
        parser.add_argument(
            '--child-policy-rule-sets', type=utils.str2list,
            help=_('New comma separated list of child Policy Rule Sets '
                   '(to unset use "")'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Assemble the PUT body; explicit empty lists unset the fields."""
        fields = {}
        body = {self.resource: fields}
        # "" parses to [] and clears the list; None leaves it untouched.
        if parsed_args.policy_rules == []:
            fields['policy_rules'] = []
        elif parsed_args.policy_rules:
            fields['policy_rules'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule', rule)
                for rule in parsed_args.policy_rules]
        if parsed_args.child_policy_rule_sets == []:
            fields['child_policy_rule_sets'] = []
        elif parsed_args.child_policy_rule_sets:
            fields['child_policy_rule_sets'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set', child)
                for child in parsed_args.child_policy_rule_sets]
        neutronV20.update_dict(parsed_args, fields,
                               ['name', 'description', 'shared'])
        return body
class ListExternalPolicy(neutronV20.ListCommand):
    """List External Policies that belong to a given tenant."""

    resource = 'external_policy'
    log = logging.getLogger(__name__ + '.ListExternalPolicy')
    # Columns displayed in the tabular CLI output.
    list_columns = ['id', 'name', 'description', 'shared', 'external_segments']
    pagination_support = True
    sorting_support = True
class ShowExternalPolicy(neutronV20.ShowCommand):
    """Show information of a given External Policy."""

    # All behavior is inherited from ShowCommand.
    resource = 'external_policy'
    log = logging.getLogger(__name__ + '.ShowExternalPolicy')
class CreateExternalPolicy(neutronV20.CreateCommand):
    """Create a External Policy for a given tenant."""

    resource = 'external_policy'
    log = logging.getLogger(__name__ + '.CreateExternalPolicy')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('Description of the External Policy'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of External Policy to create (required argument)'))
        parser.add_argument(
            '--external-segments', type=utils.str2list,
            help=_('Comma separated list of External Segments'))
        # The rule-set options parse to dicts (mapping rule-set name/ID to
        # a value) rather than plain lists.
        parser.add_argument(
            '--provided-policy-rule-sets', type=utils.str2dict,
            help=_('Comma separated list of Policy Rule Sets'))
        parser.add_argument(
            '--consumed-policy-rule-sets', type=utils.str2dict,
            help=_('Comma separated list of Policy Rule Sets'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Build the create request body from the parsed CLI arguments.

        The provided/consumed rule-set dicts are rewritten in place so
        their keys are resource IDs instead of user-supplied names; the
        rewritten dicts are then copied into the body by update_dict.
        """
        body = {self.resource: {}, }
        if parsed_args.provided_policy_rule_sets:
            # Iterate over a snapshot of the keys: the dict is mutated
            # (pop + reinsert under the resolved ID) inside the loop.
            for key in list(
                    parsed_args.provided_policy_rule_sets.keys()):
                id_key = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set',
                    key)
                parsed_args.provided_policy_rule_sets[id_key] = (
                    parsed_args.provided_policy_rule_sets.pop(key))
        if parsed_args.consumed_policy_rule_sets:
            for key in list(
                    parsed_args.consumed_policy_rule_sets.keys()):
                id_key = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set',
                    key)
                parsed_args.consumed_policy_rule_sets[id_key] = (
                    parsed_args.consumed_policy_rule_sets.pop(key))
        if parsed_args.external_segments:
            # External segment names/IDs are normalised to IDs.
            body[self.resource]['external_segments'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(),
                    'external_segment',
                    elem) for elem in parsed_args.external_segments]
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'provided_policy_rule_sets',
                                'consumed_policy_rule_sets', 'shared'])
        return body
class DeleteExternalPolicy(neutronV20.DeleteCommand):
    """Delete a given External Policy."""

    # All behavior is inherited from DeleteCommand.
    resource = 'external_policy'
    log = logging.getLogger(__name__ + '.DeleteExternalPolicy')
class UpdateExternalPolicy(neutronV20.UpdateCommand):
    """Update External Policy's information."""

    resource = 'external_policy'
    log = logging.getLogger(__name__ + '.UpdateExternalPolicy')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('New description of the External Policy'))
        parser.add_argument(
            '--name',
            help=_('New name of the External Policy'))
        parser.add_argument(
            '--external-segments', type=utils.str2list,
            help=_('New comma separated list of External Segments '
                   '(to unset use "")'))
        parser.add_argument(
            '--provided-policy-rule-sets', type=utils.str2dict,
            help=_('New comma separated list of Policy Rule Sets '
                   '(to unset use "")'))
        parser.add_argument(
            '--consumed-policy-rule-sets', type=utils.str2dict,
            help=_('New comma separated list of Policy Rule Sets '
                   '(to unset use "")'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Build the update request body from the parsed CLI arguments.

        Rule-set dict keys are rewritten in place from names to IDs
        before update_dict copies the dicts into the body.
        """
        body = {self.resource: {}, }
        # NOTE(review): the help text says "" unsets the rule sets, but an
        # empty dict is falsy so both branches below are skipped for "" —
        # unlike the explicit `== []` handling for --external-segments.
        # Confirm whether clearing these associations is supported.
        if parsed_args.provided_policy_rule_sets:
            # Iterate a snapshot of the keys; the dict is mutated in the loop.
            for key in list(
                    parsed_args.provided_policy_rule_sets.keys()):
                id_key = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set',
                    key)
                parsed_args.provided_policy_rule_sets[id_key] = (
                    parsed_args.provided_policy_rule_sets.pop(key))
        if parsed_args.consumed_policy_rule_sets:
            for key in list(
                    parsed_args.consumed_policy_rule_sets.keys()):
                id_key = neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'policy_rule_set',
                    key)
                parsed_args.consumed_policy_rule_sets[id_key] = (
                    parsed_args.consumed_policy_rule_sets.pop(key))
        # Explicit empty list clears the segments; None leaves them alone.
        if parsed_args.external_segments == []:
            body[self.resource]['external_segments'] = []
        elif parsed_args.external_segments:
            body[self.resource]['external_segments'] = [
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(),
                    'external_segment',
                    elem) for elem in parsed_args.external_segments]
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'provided_policy_rule_sets',
                                'consumed_policy_rule_sets', 'shared'])
        return body
class ListExternalSegment(neutronV20.ListCommand):
    """List External Segments that belong to a given tenant."""

    resource = 'external_segment'
    log = logging.getLogger(__name__ + '.ListExternalSegment')
    # Pretty-print the route list with the module's host-route formatter.
    _formatters = {'external_routes': _format_host_routes, }
    list_columns = ['id', 'name', 'description', 'cidr',
                    'external_routes', 'port_address_translation', 'shared']
    pagination_support = True
    sorting_support = True
class ShowExternalSegment(neutronV20.ShowCommand):
    """Show information of a given External Segment."""

    # All behavior is inherited from ShowCommand.
    resource = 'external_segment'
    log = logging.getLogger(__name__ + '.ShowExternalSegment')
class CreateExternalSegment(neutronV20.CreateCommand):
    """Create a External Segment for a given tenant."""

    resource = 'external_segment'
    log = logging.getLogger(__name__ + '.CreateExternalSegment')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('Description of the External Segment'))
        parser.add_argument(
            'name',
            help=_('Name of External Segment to create (required argument)'))
        parser.add_argument(
            '--ip-version',
            type=int, choices=[4, 6],
            help=_('IP version, default is 4'))
        parser.add_argument(
            '--cidr',
            help=_('CIDR of External Segment, default is 172.16.0.0/12'))
        # Repeatable option; each use appends one route dict.
        parser.add_argument(
            '--external-route', metavar='destination=CIDR,nexthop=IP_ADDR',
            action='append', dest='external_routes', type=utils.str2dict,
            help=_('If no nexthop, use format: destination=CIDR,nexthop '
                   '(this option can be repeated)'))
        n_utils.add_boolean_argument(
            parser, '--port-address-translation',
            dest='port_address_translation',
            help=_('Enable port-based address translation, default is False'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Build the create request body, normalising external routes.

        A route given as "destination=CIDR,nexthop" (empty nexthop) is
        normalised to nexthop=None; empty route dicts are dropped.
        """
        body = {self.resource: {}, }
        if parsed_args.external_routes:
            eroutes = []
            for er in parsed_args.external_routes:
                if 'nexthop' in er and er['nexthop'] == '':
                    er['nexthop'] = None
                if er:
                    eroutes.append(er)
            # Fix: use self.resource instead of the hard-coded
            # 'external_segment' key, for consistency with every other
            # command class in this module (same value, one source of truth).
            body[self.resource]['external_routes'] = eroutes
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'cidr',
                                'port_address_translation', 'shared'])
        return body
class DeleteExternalSegment(neutronV20.DeleteCommand):
    """Delete a given External Segment."""

    # All behavior is inherited from DeleteCommand.
    resource = 'external_segment'
    log = logging.getLogger(__name__ + '.DeleteExternalSegment')
class UpdateExternalSegment(neutronV20.UpdateCommand):
    """Update External Segment's information."""

    resource = 'external_segment'
    log = logging.getLogger(__name__ + '.UpdateExternalSegment')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('New description of the External Segment'))
        parser.add_argument(
            '--name',
            help=_('New name of External Segment'))
        # Repeatable option; each use appends one route dict.
        parser.add_argument(
            '--external-route', metavar='destination=CIDR,nexthop=IP_ADDR',
            action='append', dest='external_routes', type=utils.str2dict,
            help=_('If no nexthop, use format: destination=CIDR,nexthop '
                   '(this option can be repeated)'))
        n_utils.add_boolean_argument(
            parser, '--port-address-translation',
            dest='port_address_translation',
            help=_('Enable or disable port-based address translation'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Build the update request body, normalising external routes."""
        body = {self.resource: {}, }
        # A single empty --external-route ("" parses to {}) clears all
        # routes; otherwise routes are normalised like in the create path.
        if parsed_args.external_routes == [{}]:
            body[self.resource]['external_routes'] = []
        elif parsed_args.external_routes:
            eroutes = []
            for er in parsed_args.external_routes:
                # Empty nexthop string means "no nexthop".
                if 'nexthop' in er and er['nexthop'] == '':
                    er['nexthop'] = None
                if er:
                    eroutes.append(er)
            body[self.resource]['external_routes'] = eroutes
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'cidr',
                                'port_address_translation', 'shared'])
        return body
class ListNatPool(neutronV20.ListCommand):
    """List NAT Pools that belong to a given tenant."""

    resource = 'nat_pool'
    log = logging.getLogger(__name__ + '.ListNatPool')
    # Columns displayed in the tabular CLI output.
    list_columns = ['id', 'name', 'description', 'ip_pool',
                    'external_segment_id', 'shared']
    pagination_support = True
    sorting_support = True
class ShowNatPool(neutronV20.ShowCommand):
    """Show information of a given NAT Pool."""

    # All behavior is inherited from ShowCommand.
    resource = 'nat_pool'
    log = logging.getLogger(__name__ + '.ShowNatPool')
class CreateNatPool(neutronV20.CreateCommand):
    """Create a NAT Pool for a given tenant."""

    resource = 'nat_pool'
    log = logging.getLogger(__name__ + '.CreateNatPool')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('Description of the NAT Pool'))
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of NAT Pool to create (required argument)'))
        parser.add_argument(
            '--ip-version',
            type=int, choices=[4, 6],
            help=_('IP version, default is 4'))
        parser.add_argument(
            '--ip-pool',
            help=_('CIDR for NAT Pool'))
        parser.add_argument(
            '--external-segment',
            help=_('External Segment'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing, default is False'))

    def args2body(self, parsed_args):
        """Build the create request body from the parsed CLI arguments."""
        body = {self.resource: {}, }
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'ip_pool', 'shared'])
        # The external segment may be given by name or ID; resolve to ID.
        if parsed_args.external_segment:
            body[self.resource]['external_segment_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'external_segment',
                    parsed_args.external_segment))
        return body
class DeleteNatPool(neutronV20.DeleteCommand):
    """Delete a given NAT Pool."""

    # All behavior is inherited from DeleteCommand.
    resource = 'nat_pool'
    log = logging.getLogger(__name__ + '.DeleteNatPool')
class UpdateNatPool(neutronV20.UpdateCommand):
    """Update NAT Pool's information."""

    resource = 'nat_pool'
    log = logging.getLogger(__name__ + '.UpdateNatPool')

    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            help=_('New description of the NAT Pool'))
        parser.add_argument(
            '--name',
            help=_('New name of NAT Pool'))
        parser.add_argument(
            '--external-segment',
            help=_('New External Segment'))
        n_utils.add_boolean_argument(
            parser, '--shared', dest='shared',
            help=_('Enable or disable resource sharing'))

    def args2body(self, parsed_args):
        """Build the update request body from the parsed CLI arguments."""
        body = {self.resource: {}, }
        # NOTE(review): 'ip_version' and 'ip_pool' are listed here but this
        # parser defines no such options — presumably update_dict skips
        # attributes missing from parsed_args; confirm against the base class.
        neutronV20.update_dict(parsed_args, body[self.resource],
                               ['name', 'tenant_id', 'description',
                                'ip_version', 'ip_pool', 'shared'])
        # The external segment may be given by name or ID; resolve to ID.
        if parsed_args.external_segment:
            body[self.resource]['external_segment_id'] = (
                neutronV20.find_resourceid_by_name_or_id(
                    self.get_client(), 'external_segment',
                    parsed_args.external_segment))
        return body
|
def _show(values):
    """Print the list followed by a blank line (same output as the original
    repeated print(values, '\n') calls)."""
    print(values, '\n')


# Demonstration of the basic list operations.
num = [2, 5, 9, 2, 1]
num[2] = 3                  # replace the element at index 2
_show(num)
num.append(7)               # append a new element at the end
_show(num)
num.sort()                  # sort ascending
_show(num)
num.sort(reverse=True)      # sort descending
_show(num)
print(f'Essa lista tem {len(num)} elementos.')
num.insert(2, 0)            # insert 0 at index 2, shifting the rest right
_show(num)
num.pop()                   # remove the last element
_show(num)
num.pop(2)                  # remove the element at index 2
_show(num)
if 2 in num:
    num.remove(2)           # remove the first occurrence of 2
_show(num)
|
import numpy as np
from utils import resize, opt
from skimage import morphology
def find_focal_points(image, scope='local', maxima_areas='large', local_maxima_threshold=None, num_points=None):
    """
    Finds the 'focal_points' of a model, given a low resolution CAM. Has two modes: a 'local' scope and a 'global' one.
    If a 'local' scope is selected, the function looks for local maxima in the CAM. Due to the high sensitivity of the
    algorithm finding the local maxima, usually a large number of maxima is identified (which is, in most cases,
    undesirable. An interest_threshold can be selected that filters out possibly unwanted maxima (i.e. maxima whose
    intensity is lower than the threshold). Due to the resizing of the CAM, these local maxima produce large areas in
    the new image. If this is not desired, the option maxima_areas='small' should be selected, which "skeletonizes" the
    large areas to shrink them.
    The 'global' scope looks for global maxima in the CAM. This is accompanied by the parameter num_points, which
    designates the number of points returned by the function.
    :param image: An input image. Ideally this should be a low resolution CAM.
    :param scope: Can either be 'local' or 'global'. A 'local' scope looks for local maxima in the image, while a
                  'global' scope looks for global ones.
    :param maxima_areas: Can either be 'large' or 'small', depending on whether or not we want larger or smaller areas.
                         Only relevant for 'local' scopes.
    :param local_maxima_threshold: A float that filters out any local maxima that are below the threshold. Its default
                                   value is the average of the lowest-intensity local maxima with the highest-intensity
                                   one. Only relevant for 'local' scopes.
    :param num_points: An integer that specifies the number of points with the maximum intensity.
                       Only relevant for 'global' scopes.
    :return: A list of tuples, each containing the x and y coordinates of the 'focal points' in the input CAM.
    """
    # NOTE(review): a scope other than 'local'/'global' silently returns
    # None — confirm whether that should raise a ValueError instead.
    # Global scope: looks for 'num_points' global maxima in the input image.
    if scope == 'global':
        # If 'num_points' is not defined, picks the square root of one of its dimensions:
        # e.g. for a 224x224 image: num_points = sqrt(224) = 15
        if num_points:
            if not isinstance(num_points, int):
                raise TypeError('num_points can only take integer values')
        else:
            num_points = int(round(np.sqrt(opt.im_size)))
        # Resizes the image to the desired size and returns the coordinates of the top 'num_points' pixels that have
        # the largest values. They are cast as python's default 32-bit integers to be compatible with SimpleITK's
        # ConnectedThreshold function. The two axes are also reversed
        # (x = column = index % width, y = row = index // width).
        top_points = np.argpartition(resize(image).ravel(), -num_points)[-num_points:]
        return [(int(x % opt.im_size), int(x // opt.im_size)) for x in top_points]
    # Local scope: looks for local maxima in the input image.
    elif scope == 'local':
        # Identifies the image's local maxima (boolean mask, True at maxima).
        candidate_points = morphology.local_maxima(image).astype(bool)
        # Because of the high sensitivity of scikit-image's morphology.local_maxima function, it is often desired to
        # filter some of the local maxima out via a threshold. If this is not passed explicitly the average of the
        # local maxima with the minimum and maximum intensities is used.
        if not isinstance(local_maxima_threshold, float):
            local_maxima_threshold = (image[candidate_points].max() + image[candidate_points].min()) / 2
        # Any local maxima whose intensity fails to exceed the threshold is ignored.
        # Precedence: (candidate_points * image) > threshold — the mask first
        # zeroes out non-maxima pixels, then the comparison thresholds them.
        focal_points = candidate_points * image > local_maxima_threshold
        # Resizes the map of the local maxima to the desired dimensions. This results in the enlargement of the areas
        # of the each maxima. If this is undesired, as indicated by the option maxima_areas='small', scikit-image's
        # morphology.skeletonize is applied, which shrinks the maxima areas.
        focal_points = resize(focal_points.astype(float), resample_method='nearest')
        if maxima_areas not in ('small', 'large'):
            raise ValueError("maxima_areas can either be 'small' or 'large'")
        elif maxima_areas == 'small':
            focal_points = morphology.skeletonize(focal_points)
        # Finally, the coordinates of the maxima are returned. They are cast as python's default 32-bit integers to be
        # compatible with SimpleITK's ConnectedThreshold function. The two axes are also reversed.
        focal_point_coods = np.where(focal_points)
        return [(int(focal_point_coods[1][i]), int(focal_point_coods[0][i])) for i in range(len(focal_point_coods[0]))]
def remove_small_holes(image, max_hole_size=256):
    """
    Wrapper around scikit-image's ``morphology.remove_small_holes`` that
    returns a numeric (float) mask instead of a boolean array.

    :param image: A segmentation mask (numpy.ndarray).
    :param max_hole_size: The maximum size (in pixels) of a hole to fill (int).
    :return: The filled segmentation mask (numpy.ndarray).
    """
    binary_mask = image > 0
    filled = morphology.remove_small_holes(binary_mask, area_threshold=max_hole_size)
    return filled.astype(float)
|
# -*- coding: utf-8; -*-
import logging
from pubsub import pub
import pygame.display
import pygame.font
import pygame.draw
logger = logging.getLogger("platakart.title")
from platakart.ui import Scene
from platakart.ui import Menu
from platakart.ui import labeled_button
from platakart.ui import WHITE
from platakart.ui import BLACK
PERCENT_COLOR = WHITE
FADE_COLOR = BLACK
class TitleScene(Scene):
    """Title screen scene.

    Streams the game resources in over successive update() calls while
    drawing a loading bar, then shows a PLAY button; clicking it fades the
    screen out and switches to the kart-select scene.
    """

    def __init__(self, resources):
        super(TitleScene, self).__init__()
        self.background = None
        # < 0 means "not fading"; 0..100 is fade-out progress in percent.
        self.fading_out = -0.1
        self.group = pygame.sprite.RenderUpdates()
        self.loaded = False
        self.loaded_percent = 0
        self.loader_gen = None          # generator driving incremental loading
        self.menu = None
        self.percent_surf = None        # rendered "Loading NN%" text surface
        self.rect = None                # last draw rect, to detect resizes
        self.reloadable = False
        self.render_button = False
        self.render_percent = False
        self.render_title = False
        self.resources = resources
        self.start_button_id = None
        self.started_loading_resources = False

    def get_name(self):
        """Return the scene's registry name."""
        return "title"

    def setup(self, options):
        """Subscribe to pubsub events and prepare fonts/menu."""
        self.options = options
        logger.debug("Setting up title scene")
        self.font = pygame.font.SysFont("Verdana", 32)
        pub.subscribe(self.on_resource_loaded, "resources.loading")
        pub.subscribe(self.on_resources_loaded, "resources.loaded")
        pub.subscribe(self.on_button_clicked, "button.clicked")
        self.menu = Menu(None)

    def teardown(self):
        """Unsubscribe from pubsub events and release render state."""
        logger.debug("Tearing down title scene")
        self.percent_surf = None
        self.render_title = False
        self.render_percent = False
        pub.unsubscribe(self.on_resource_loaded, "resources.loading")
        pub.unsubscribe(self.on_resources_loaded, "resources.loaded")
        pub.unsubscribe(self.on_button_clicked, "button.clicked")
        self.menu.teardown()

    def on_resources_loaded(self):
        """Handle end of loading: start music and build the PLAY button."""
        pub.sendMessage("game.play-sound", name="menu-theme", loops=-1)
        self.render_button = True
        # make the start button
        # work around this event being caught twice
        logger.debug("making start button")
        screen = pygame.display.get_surface()
        screen_rect = screen.get_rect()
        self.menu = Menu(screen_rect)
        self.menu.setup()
        button = labeled_button("start", "PLAY", self.font, (0, 0),
                                self.resources.images["red_button_up"],
                                self.resources.images["red_button_down"],
                                self.resources.images["red_button_focus"])
        button.rect.centerx = screen_rect.centerx
        button.rect.top = screen_rect.height * .75
        self.menu.add(button)
        self.start_button_id = button.id
        # force the screen to redraw so the loading bar is covered up.
        self.render_title = True

    def on_resource_loaded(self, percent, category, key):
        """Track loading progress; re-render the title once it arrives."""
        if category == "image" and key == "title":
            self.render_title = True
        else:
            self.loaded_percent = percent
            self.percent_surf = self.font.render(
                "Loading %d%%" % int(percent * 100), True, PERCENT_COLOR)
            self.render_percent = True

    def on_button_clicked(self, id):
        """Start the fade-out when the PLAY button is clicked."""
        logger.debug("Mouse clicked button")
        if id == self.start_button_id:
            pub.sendMessage("game.stop-sound", name="menu-theme", fade_ms=100)
            self.fading_out = 0.0

    def update(self, delta):
        """Advance loading one step per frame; switch scene when faded out."""
        self.menu.update(delta)
        if not self.loaded:
            if self.loader_gen is None:
                self.loader_gen = self.resources.load()
            else:
                try:
                    # Bug fix: was `self.loader_gen.next()`, which is the
                    # Python 2-only generator API. The next() builtin works
                    # on Python 2.6+ and Python 3.
                    next(self.loader_gen)
                except StopIteration:
                    self.loaded = True
                    self.render_percent = False
        if self.fading_out >= 100:
            pub.sendMessage("game.switch-scene",
                            name="kart-select",
                            options={})

    def draw(self, surface, rect):
        """Draw the title, loading bar and fade overlay; return dirty rects."""
        dirty = list()
        # detect a change in the screen size
        if self.rect is not rect:
            self.rect = rect
            if self.background:
                self.group.clear(surface, self.background)
                self.menu.clear(surface, self.background)
        if self.render_title:
            self.background = pygame.Surface(rect.size)
            image = self.resources.images["title"]
            pygame.transform.scale(image, rect.size, self.background)
            surface.blit(self.background, rect)
            dirty.append(rect)
            self.render_title = False
        if self.render_percent:
            # Loading bar: black backdrop, red fill proportional to progress.
            w = rect.width / 2
            h = self.percent_surf.get_height()
            x = rect.centerx - (w // 2)
            y = rect.height * .75
            new_rect = surface.fill((0, 0, 0), (x, y, w, h))
            surface.fill((255, 24, 24), (x, y, w * self.loaded_percent, h))
            surface.blit(self.percent_surf, (x + 4, y))
            dirty.append(new_rect)
            self.render_percent = False
        if 0 <= self.fading_out < 100:
            # Fade-out: grow a black rect from the top, 10% per frame.
            # NOTE(review): progress is per-frame, not scaled by delta.
            self.fading_out += 10
            fade_rect = surface.get_rect()
            amt = fade_rect.height * float(self.fading_out) / 100.0
            fade_rect.height = int(amt)
            pygame.draw.rect(surface, FADE_COLOR, fade_rect)
            dirty.append(fade_rect)
        dirty.extend(self.menu.draw(surface))
        return dirty
|
import os
async def main(args):
    """Import test.<args[0]> and run its main() with the remaining args.

    Bug fix: the empty-args case previously only printed a message and then
    fell through to `args[0]`, raising IndexError; it now returns early.
    """
    if len(args) == 0:
        print("What you want to test?")
        return
    module = __import__("test.{}".format(args[0]))
    try:
        command = args[0]
        # __import__("test.x") returns the top-level `test` package, so the
        # submodule is looked up as an attribute.
        print(await getattr(module, command).main(args[1:]))
    except Exception as e:
        print(e)
        print("Failed to run test file main...")
|
# Read two integers from the user and print their product.
num1 = int(input('Type your first number: '))
num2 = int(input('Type your second number: '))
product = num1 * num2
print(product)
|
def boo():
    """Return the constant greeting string."""
    greeting = "Boo"
    return greeting
|
def smallestDifference(arrayOne, arrayTwo):
    """Return the pair [a, b] (a from arrayOne, b from arrayTwo) whose
    absolute difference is smallest.

    Both arrays are sorted in place, then swept with two pointers, always
    advancing the pointer at the smaller value. An exact match short-circuits
    since a difference of zero cannot be beaten. Runs in
    O(n log n + m log m) time, O(1) extra space.
    """
    arrayOne.sort()
    arrayTwo.sort()
    best_pair = []
    best_diff = float("inf")
    i = j = 0
    while i < len(arrayOne) and j < len(arrayTwo):
        left, right = arrayOne[i], arrayTwo[j]
        if left == right:
            # Zero difference is optimal; stop immediately.
            return [left, right]
        if abs(left - right) < best_diff:
            best_diff = abs(left - right)
            best_pair = [left, right]
        # Advance the pointer behind the smaller value to shrink the gap.
        if left < right:
            i += 1
        else:
            j += 1
    return best_pair
|
import random
# Build a list of 10 random ints in [1, 10], zero out the odd ones,
# and report how many zeros the list now contains.
some_list = [random.randint(1, 10) for i in range(10)]
print(some_list)
# Idiom fix: enumerate replaces the `for k in range(len(...))` index loop,
# and the `float()` cast was unnecessary — the elements are already ints.
for k, value in enumerate(some_list):
    if value % 2 != 0:
        some_list[k] = 0
print(some_list.count(0))
|
from chrombpnet.evaluation.variant_effect_prediction.snp_generator import SNPGenerator
from scipy.spatial.distance import jensenshannon
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.models import load_model
import chrombpnet.training.utils.losses as losses
import pandas as pd
import os
import argparse
import numpy as np
import pickle as pkl
import tensorflow as tf
SNP_SCHEMA = ["CHR", "POS0", "REF", "ALT", "META_DATA"]
def fetch_variant_args():
    """Parse and return the command-line arguments for SNP variant scoring."""
    arg_parser = argparse.ArgumentParser(description="variant effect scoring scripts on SNPS")
    arg_parser.add_argument("-snps", "--snp_data", type=str, required=True, help="Path to a tsv output with the following information in columns - chr, position to insert allele (0-based), ref allele, alt allele")
    arg_parser.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
    arg_parser.add_argument("-m","--model_h5", type=str, required=True, help="Path to model hdf5")
    arg_parser.add_argument("-op","--output-prefix", type=str, required=True, help="Path to storing snp effect score predictions from the script, directory should already exist")
    arg_parser.add_argument("-bs","--batch_size", type=int, default=64, help="Batch size to use for model")
    arg_parser.add_argument("-dm","--debug_mode_on", type=int, default=0, help="Use this mode to print the flanks of first five SNP insert locations")
    return arg_parser.parse_args()
def softmax(x, temp=1):
    """Row-wise softmax with optional temperature.

    Numerical-stability fix: the per-row *max* is subtracted before
    exponentiation (previously the mean was used). Subtracting any per-row
    constant leaves the result mathematically unchanged — the factor cancels
    between numerator and denominator — but subtracting the max guarantees
    all exponents are <= 0, so np.exp can never overflow for large logits.

    :param x: 2D array of logits, one row per sample.
    :param temp: temperature multiplier applied to the shifted logits.
    :return: array of the same shape; each row sums to 1.
    """
    norm_x = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(temp * norm_x)
    return exps / np.sum(exps, axis=1, keepdims=True)
def load_model_wrapper(args):
    """Load the chrombpnet .h5 model with its custom objects registered."""
    # Register the custom objects the saved model references before loading it.
    get_custom_objects().update({"tf": tf, "multinomial_nll": losses.multinomial_nll})
    loaded = load_model(args.model_h5, compile=False)
    print("model loaded succesfully")
    return loaded
def fetch_snp_predictions(model, snp_regions, inputlen, genome_fasta, batch_size, debug_mode_on=False):
    '''
    Returns model predictions (counts and profile probability predictions) at the given reference and alternate snp alleles.
    Please note that if the SNP location is at the edge - i.e we are unable to form a given inputlen of sequence - we skip predictions at this SNP
    Arguments::
        model: chrombpnet model .h5 file to use for snp scoring.
        snp_regions: pandas dataframe with the following columns "CHR", "POS0", "REF", "ALT"
        inputlen: integer representing the input length to use, snp is inserted in the middle
        genome_fasta: path to reference genome
        batch_size: integer value with batch size to use for the model
        debug_mode_on: Takes 0/1 value. Set this to 1 to print the flanks of first five SNP insert locations. Predictions will be provided only on the these 5 locations.
    Returns:
        rsids: Numpy array with (N,) SNP ids. SNP id is a string with the following values "CHR", "POS0", "REF", "ALT" concatenated with delimiter "_".
               For each of these ids we return the predictions in the lists below.
        ref_logcount_preds: log count predictions at the reference allele with size (N,)
        alt_logcount_preds: log count predictions at the alternate alele with size (N,)
        ref_prob_preds: profile probability predictions at the reference allele with size (N,outputlen). outputlen depends on the model.
        alt_prob_preds: profile probability predictions at the alternate allele with size (N,outputlen). outputlen depends on the model.
    '''
    rsids = []
    ref_logcount_preds=[]
    alt_logcount_preds=[]
    ref_prob_preds=[]
    alt_prob_preds=[]
    # snp sequence generator: yields (rsids, ref one-hot seqs, alt one-hot seqs)
    # per batch; SNPs too close to a chromosome edge are skipped by it.
    snp_gen=SNPGenerator(snp_regions=snp_regions,
                         inputlen=inputlen,
                         genome_fasta=genome_fasta,
                         batch_size=batch_size,
                         debug_mode_on=debug_mode_on)
    for i in range(len(snp_gen)):
        batch_rsids, ref_seqs, alt_seqs = snp_gen[i]
        # Model output indexing: [0] = profile logits, [1] = log counts —
        # assumed from the usage below; TODO confirm against the model def.
        ref_batch_preds=model.predict(ref_seqs)
        alt_batch_preds=model.predict(alt_seqs)
        ref_logcount_preds.extend(np.squeeze(ref_batch_preds[1]))
        alt_logcount_preds.extend(np.squeeze(alt_batch_preds[1]))
        # Profile logits are converted to per-base probabilities via softmax.
        ref_prob_preds.extend(np.squeeze(softmax(ref_batch_preds[0])))
        alt_prob_preds.extend(np.squeeze(softmax(alt_batch_preds[0])))
        rsids.extend(batch_rsids)
    return np.array(rsids), np.array(ref_logcount_preds), np.array(alt_logcount_preds), np.array(ref_prob_preds), np.array(alt_prob_preds)
def predict_snp_effect_scores(rsids, ref_count_preds, alt_count_preds, ref_prob_preds, alt_prob_preds):
    '''
    Predicts variant effect scores based on model predictions.
    Arguments::
        rsids: SNP identifiers (unused here; kept for interface compatibility)
        ref_count_preds: log count predictions at the reference allele with size (N,)
        alt_count_preds: log count predictions at the alternate allele with size (N,)
        ref_prob_preds: profile probability predictions at the reference allele with size (N,outputlen)
        alt_prob_preds: profile probability predictions at the alternate allele with size (N,outputlen)
    Returns:
        log_counts_diff: difference in log count predictions of alternate and reference allele (N,)
        log_probs_diff_abs_sum: sum of absolute per-base differences in log probability predictions, signed by the count difference (N,)
        probs_jsd_diff: Jensen-Shannon distance between alternate and reference probability predictions, signed by the count difference (N,)
    '''
    log_counts_diff = alt_count_preds - ref_count_preds
    # Direction of the effect: sign of the count change is applied to both
    # profile-shape scores so they carry increase/decrease information.
    direction = np.sign(log_counts_diff)
    log_ratio = np.log(alt_prob_preds) - np.log(ref_prob_preds)
    log_probs_diff_abs_sum = np.abs(log_ratio).sum(axis=1) * direction
    jsd = np.array([jensenshannon(alt_row, ref_row)
                    for alt_row, ref_row in zip(alt_prob_preds, ref_prob_preds)])
    probs_jsd_diff = jsd * direction
    return log_counts_diff, log_probs_diff_abs_sum, probs_jsd_diff
def main(args):
    """Score variant effects for each SNP and write scores + raw predictions.

    Writes two files under args.output_prefix: a TSV of per-SNP effect
    scores and a pickle of the raw model predictions.
    """
    debug_mode_on = args.debug_mode_on
    # load the model
    model = load_model_wrapper(args)
    # load the snp data
    snp_regions=pd.read_csv(args.snp_data,header=None,sep='\t', names=SNP_SCHEMA)
    snp_regions["META_DATA"].fillna('', inplace=True)
    # RSID encodes all columns joined with '_' so they can be recovered later.
    snp_regions['RSID']=snp_regions['CHR'].astype(str)+'_'+snp_regions['POS0'].astype(str)+'_'+snp_regions['REF'].astype(str)+'_'+snp_regions['ALT'].astype('str')+"_"+snp_regions['META_DATA'].astype('str')
    print("printing first 5 rows of the input SNP data provided..")
    print(snp_regions.head(5))
    if debug_mode_on:
        # Debug mode restricts scoring to the first five SNPs.
        snp_regions = snp_regions.head(5)
    # infer input length
    inputlen=model.input_shape[1]
    print("input length inferred from the model: ", inputlen)
    # fetch model prediction on snps
    rsids, ref_logcount_preds, alt_logcount_preds, ref_prob_preds, alt_prob_preds = fetch_snp_predictions(model, snp_regions, inputlen, args.genome, args.batch_size, debug_mode_on)
    # find varaint effect scores at snps
    log_counts_diff, log_probs_diff_abs_sum, probs_jsd_diff = predict_snp_effect_scores(rsids, ref_logcount_preds, alt_logcount_preds, ref_prob_preds, alt_prob_preds)
    # unpack rsids to write outputs and write score to output
    # NOTE(review): splitting on '_' assumes none of CHR/REF/ALT/META_DATA
    # themselves contain underscores — confirm for the expected inputs.
    snp_effect_scores_pd=pd.DataFrame()
    snp_effect_scores_pd[["CHR", "POS0", "REF", "ALT", "META_DATA"]] = pd.Series(rsids).str.split('_', expand=True)
    snp_effect_scores_pd["log_counts_diff"] = log_counts_diff
    snp_effect_scores_pd["log_probs_diff_abs_sum"] = log_probs_diff_abs_sum
    snp_effect_scores_pd["probs_jsd_diff"] = probs_jsd_diff
    snp_effect_scores_pd.to_csv(args.output_prefix+"_snp_scores.tsv", sep="\t", index=False)
    # store predictions at snps too - can compute variant effect metrics of your interest - let me know if you find something interesting :)
    data={}
    data["rsids"] = rsids
    data["ref_logcount_preds"] = ref_logcount_preds
    data["alt_logcount_preds"] = alt_logcount_preds
    data["ref_prob_preds"] = ref_prob_preds
    data["alt_prob_preds"] = alt_prob_preds
    pkl.dump(data, open(args.output_prefix+"_predictions_at_snp.pkl",'wb'))
# Script entry point: parse CLI arguments and run the variant scoring.
if __name__=="__main__":
    args = fetch_variant_args()
    main(args)
|
# Generated by Django 2.1.5 on 2019-01-23 23:27
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
    """Auto-generated migration: point CustomUser.avatar at a per-user
    upload path with a placeholder default image. Do not edit by hand."""

    dependencies = [
        ('users', '0004_auto_20190122_2039'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='avatar',
            field=models.ImageField(default='pic_folder/None/no-img.jpg', upload_to=users.models.user_directory_path, verbose_name='profile_pic'),
        ),
    ]
|
# Prompt the user for their name, class, student number and QQ id
# (prompts are in Chinese), then echo all four values on one line.
name=input("请输入你的姓名")
c=input("请输入你的班级")
number=input("请输入你的学号")
QQ=input("请输入你的QQ")
print(name,c,number,QQ)
|
from mmpy_bot.bot import Bot
import re
from mmpy_bot.bot import listen_to
from mmpy_bot.bot import respond_to
import wisdom
import email_sender
# NOTE(review): the pattern '$' matches the empty position at end of input,
# so this handler effectively responds to any direct message — confirm
# that is intended.
@respond_to('$', re.IGNORECASE)
def hi(message):
    """Handle a leave-request message: analyse the text, email the leave
    request, and reply to the sender."""
    leave_type = "Casual"  # default; overwritten by the analyser below
    name = message.get_username()
    date, leave_type, response = wisdom.text_analyser(message.get_message())
    email_sender.send_the_mail(name,date,leave_type)
    message.reply(response)
if __name__ == "__main__":
Bot().run()
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reorder sys.path to put $VTROOT/dist/* paths before others.
This ensures libraries installed there will be preferred over other versions
that may be present at the system level. We do this at runtime because
regardless of what we set in the PYTHONPATH environment variable, the system
dist-packages folder gets prepended sometimes.
To use this, just import it before importing packages that you want to make
sure are overridden from $VTROOT/dist.
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
"""
import os
import sys
def _prefer_vtroot_imports():
"""Reorder sys.path to put $VTROOT/dist before others."""
vtroot = os.environ.get('VTROOT')
if not vtroot:
# VTROOT is not set. Don't try anything.
return
dist = os.path.join(vtroot, 'dist')
dist_paths = []
other_paths = []
for path in sys.path:
if path:
if path.startswith(dist):
dist_paths.append(path)
else:
other_paths.append(path)
sys.path = [''] + dist_paths + other_paths
_prefer_vtroot_imports()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.