text stringlengths 8 6.05M |
|---|
from flask_restful import Resource
from ..eve_static_data import eve_static_data_service, consts
from ..utils.decorators import cache_control
class MarketGroup(Resource):
    """REST resource listing the item types that belong to one market group."""

    @staticmethod
    def convert_effect_to_slot(effect_id):
        """Translate a fitting-effect id into a slot name.

        Returns 'high', 'mid', 'low', 'rig' or 'sub_system', or None when the
        effect id is not a fitting slot.

        BUG FIX: the original compared with `is`, which tests object identity;
        for integers outside CPython's small-int cache two equal values need
        not be the same object, so the match could silently fail.  Use value
        equality via a dict lookup instead (assumes the `consts.*_slot` values
        are hashable, e.g. ints -- TODO confirm).
        """
        slot_names = {
            consts.high_slot: 'high',
            consts.mid_slot: 'mid',
            consts.low_slot: 'low',
            consts.rig_slot: 'rig',
            consts.sub_system_slot: 'sub_system',
        }
        return slot_names.get(effect_id)

    def convert_type_to_response(self, item):
        """Serialize one static-data item type into a response dict."""
        slot = eve_static_data_service.get_type_fitting_slot(item.typeID)
        return {
            'id': item.typeID,
            'name': item.typeName,
            'slot': self.convert_effect_to_slot(slot),
        }

    @cache_control()
    def get(self, market_group_id):
        """Return the group's types as a JSON list (empty for unknown groups)."""
        market_group = eve_static_data_service.get_market_group(market_group_id)
        if market_group is not None:
            response = [self.convert_type_to_response(x) for x in market_group.types]
        else:
            response = []
        return response, 200
|
from django.shortcuts import render
from django.views.generic import ListView
from rest_framework.views import APIView
from rest_framework.response import Response
from source.models import Category, Vacancy
from source.serializers import CategoriesListSerializer, VacancyListSerializer
# Create your views here.
class CategoriesListView(ListView, APIView):
    """Expose all active categories as serialized JSON."""

    def get(self, request, format=None):
        """Return the active Category rows, serialized."""
        active_categories = Category.objects.filter(active=True)
        payload = CategoriesListSerializer(active_categories, many=True).data
        return Response(payload)
class VacanciesListView(ListView, APIView):
    """Expose all active vacancies as serialized JSON."""

    def get(self, request, format=None):
        """Return the active Vacancy rows, serialized."""
        active_vacancies = Vacancy.objects.filter(active=True)
        payload = VacancyListSerializer(active_vacancies, many=True).data
        return Response(payload)
# -*- coding: utf-8 -*-
# Numeric task-state codes.
SUCCESS = 0
PENDING = 1
RUNNING = 2
CANCEL = 3
FAIL = 4

# Human-readable label for each state code.
d = {
    SUCCESS: "Success",
    PENDING: "Pending",
    RUNNING: "Running",
    CANCEL: "Cancelled",
    FAIL: "Failed",
}
|
import json
import os
from flask import Flask
from flask import request
from flask import make_response
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Service-account credentials; the JSON key file must sit next to this script.
cred = credentials.Certificate("mr-spock-25e2d-firebase-adminsdk-npk8u-34c04c79cf.json")
firebase_admin.initialize_app(cred)
# Firestore client shared by all request handlers.
db = firestore.client()
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Dialogflow webhook endpoint: parse the POST body and return the reply."""
    payload = request.get_json(silent=True, force=True)
    reply = processRequest(payload)
    response = make_response(json.dumps(reply, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
def processRequest(req):
    """Build the fulfillment reply for a parsed Dialogflow request.

    req: the already-parsed request body (dict) passed in by webhook().
    Returns the webhook result dict from makeWebhookResult().

    BUG FIX: the original ignored its `req` parameter and re-parsed the global
    flask `request.data` with json.loads(), doing the work twice and coupling
    this function to the request context.  Use the parameter instead.
    """
    # Intent display name as configured in the Dialogflow console.
    intent = req["queryResult"]["intent"]["displayName"]
    if intent == 'ถามหนังน่าดู':
        # Look up the (single, hard-coded) movie document in Firestore.
        doc_ref = db.collection(u'movies').document(u'zuzqMHTmffFOLl0vUhZc')
        doc = doc_ref.get().to_dict()
        print(doc)
        movie_name = doc['movie_name']
        rel_date = doc['release_date']
        speech = f'ตอนนี้มีเรื่อง {movie_name} เข้าโรงวันที่ {rel_date}'
    else:
        speech = "ผมไม่เข้าใจ คุณต้องการอะไร"
    return makeWebhookResult(speech)
def makeWebhookResult(speech):
    """Wrap a speech string in the Dialogflow fulfillment payload shape."""
    return dict(fulfillmentText=speech)
if __name__ == '__main__':
    # Port comes from the environment (e.g. Heroku's PORT); defaults to 5000.
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    # Bind on all interfaces so the container/host can route traffic in.
    app.run(debug=False, port=port, host='0.0.0.0', threaded=True)
|
# Generated by Django 2.0.6 on 2018-12-13 10:48
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `gender` field from the student model."""

    dependencies = [
        ('classes', '0002_auto_20181211_1745'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='student',
            name='gender',
        ),
    ]
|
# Print the weekday name for a number 0-6 (0 = Sunday ... 6 = Saturday).
DAY_NAMES = ("Sunday", "Monday", "Tuesday", "Wednesday",
             "Thursday", "Friday", "Saturday")

userInput = int(input("Please enter 0-6: "))
# BUG FIX: the original if/elif chain printed nothing at all for out-of-range
# input; report the problem instead of failing silently.
if 0 <= userInput <= 6:
    print(DAY_NAMES[userInput])
else:
    print("Please enter a number between 0 and 6")
|
# coding=utf-8
# author=veficos
from .db import MySQLdb
import random
from pacmangame import constants
from pacmangame import commands
from pacmangame.action import Action
import arcade
from pacmangame.draw_actors_action import DrawActorsAction
from pacmangame.commands import Commands
class HandleCollisionsAction(Action):
    """Updates the game state when actors collide.

    Stereotype:
        Controller
    """

    def __init__(self, score):
        """Set up collision bookkeeping.

        Args:
            score: the shared score object updated when food is collected.
        """
        # Counts down each frame; the food-pickup sound may only play when it
        # reaches 0, then it is reset to 33 -- throttles the sound effect.
        self.soundCount = 0
        self.score = score
        self.Command_Holder = Commands()
        # Points awarded per piece of food; halved after every death.
        self.score_step = 403
        # NOTE(review): `count` is written on death but never read -- it fed a
        # commented-out ghost-speed tweak that has been removed as dead code.
        self.count = 0

    def execute(self, cast):
        """Executes the action using the given actors.

        Args:
            cast (dict): The game actors {key: tag, value: list}.
        """
        self.soundCount -= 1
        pacman = cast['pacman'][0]
        ghosts = cast['ghosts']
        food = cast['food']
        icons = cast['icon']
        # Keep pacman on screen: clamp at top/bottom, wrap around left/right.
        # (The unused `walls` lookup from the original was removed.)
        if pacman.top > constants.MAX_Y:
            pacman.top = constants.MAX_Y
        elif pacman.right > constants.MAX_X:
            pacman.left = 0
        elif pacman.left < 0:
            pacman.right = constants.MAX_X
        elif pacman.bottom < 0:
            pacman.bottom = 0
        # Handle collisions with food: remove the pellet, add points, and play
        # the pickup sound at most once every 33 frames.
        food_hit_list = arcade.check_for_collision_with_list(pacman, food)
        for f in food_hit_list:
            f.remove_from_sprite_lists()
            self.score.change_Score(self.score_step)
            if self.soundCount <= 0:
                arcade.play_sound(constants.MOVE_SOUND)
                self.soundCount = 33
        # Handle collisions with ghosts: lose a life icon and reset the board,
        # halving the per-food score; close the window when no lives remain.
        if len(pacman.collides_with_list(ghosts)) > 0:
            arcade.play_sound(constants.DEATH_SOUND)
            self.count = 100
            if len(icons) <= 0:
                arcade.close_window()
            else:
                icons.pop()
                self.Command_Holder.reset_game(cast)
                self.score_step = int(round(self.score_step / 2, 0))
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'lish'
import subprocess
import sys

# Free-proxy spiders to run, in order.
SPIDERS = [
    'ipyqie_free',
    'kuaidaili_free',
    'qydaili_free',
    'y89ip_free',
    'ip3366_free',
    'iphai_free',
]

# BUG FIX: scrapy.cmdline.execute() calls sys.exit() when the crawl finishes,
# so in the original only the FIRST spider ever ran.  Launch each crawl in its
# own subprocess instead so all six run sequentially.
for spider in SPIDERS:
    subprocess.run([sys.executable, '-m', 'scrapy', 'crawl', spider], check=False)
|
from django.shortcuts import render,redirect
from django.contrib.auth import logout,login,authenticate
# Create your views here.
from django.contrib import messages
from .models import CustomUser
def loginProcess(request):
    """Authenticate a user from the login form and log them in.

    On success redirects to "Home"; otherwise re-renders the login page,
    flashing an error for a failed POST.
    """
    if request.method == "POST":
        user = authenticate(
            username=request.POST['email'],
            password=request.POST['password'],
        )
        if user is not None:
            login(request, user)
            messages.success(request, 'Successfully Logged in')
            return redirect("Home")
        messages.error(request, 'You are not Registered')
    return render(request, "login.html")
def registerProcess(request):
    """Register a new CustomUser from the signup form.

    Validates password confirmation and username/email uniqueness; on success
    creates the user, logs them in and redirects to "Home".  GET requests just
    render the registration page.

    Changes vs. original: the nested if/else pyramid is flattened into guard
    clauses, and the redundant user.save() was removed (create_user() already
    persists the user).
    """
    if request.method == "POST":
        firstname = request.POST['First_Name']
        lastname = request.POST['Last_Name']
        username = request.POST['email']
        email = request.POST['email']
        password = request.POST['password']
        confirm_password = request.POST['cpassword']
        if password != confirm_password:
            messages.error(request, 'Please Enter same input in Password and Confirm Password')
            return redirect('register')
        if CustomUser.objects.filter(username=username).exists():
            messages.error(request, 'Username already exists')
            return redirect('register')
        if CustomUser.objects.filter(email=email).exists():
            messages.error(request, 'Email already exists')
            return redirect('register')
        user = CustomUser.objects.create_user(username=username, email=email, password=password,
                                              first_name=firstname, last_name=lastname)
        login(request, user)
        messages.success(request, 'Successfully Registered')
        return redirect('Home')
    return render(request, "register.html")
def logoutProcess(request):
    """Log the current user out, flash a confirmation, and go home."""
    logout(request)
    messages.success(request,"You are Successfully Logged Out")
    return redirect("Home")
|
#!/usr/bin/python
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv,mchirpfun,fmaxlso
from scipy import integrate
import pyPdf,os
#Input parameters:
#NOTE(review): this script is Python 2 only (xrange, file() and pyPdf are used below).
outputdir='../plots/PPTA_horizon/' #Output directory for plots.
maxreds=100 #Maximum redshift considered for the plots.
minreds=1e-2 #Minimum redshift.
zbin=1000 #Number of z-bins to construct k(z).
mchvec=np.array([9.,9.5,10.,10.25,10.5,10.75,11.]) #Array of values of log10(chirp mass/msun). There will be a plot for each value of mch.
lsocondi=True #True to plot the area prohibited by the frequency of the last stable orbit. False to omit this.
candidate=False #True to plot the recent binary candidate.
tobs=1.*yr #Observation time (needed only to see when the binaries are monochromatic).
#-----------------------------------------------------------------
def Hfun(f,mch,amp):
    '''H (in Mpc) function as defined in the paper, for binaries.

    Gives the horizon (luminosity) distance to a chirp mass disregarding its
    redshift (i.e. assuming that mass is redshifted).
    f: observed GW frequency; mch: log10(chirp mass/msun); amp: strain limit.
    '''
    chirp_term = (10**(mch)*msun*grav)**(5./3.)
    freq_term = (np.pi*f)**(2./3.)
    return 2.*chirp_term/(amp*light**4.)*freq_term*1./mpc
def zmonfun(mch,f,tobs):
    '''Maximum redshift at which the binaries can be considered monochromatic
    (i.e. they take more than the observation time to evolve to the next
    frequency bin).
    mch: chirp mass in msun; f: observed frequency; tobs: observation time.
    '''
    freq_factor = f**(-8./3.)-(f+1./tobs)**(-8./3.)
    mass_factor = 5.*light**5./(tobs*256.*np.pi**(8./3.)*(grav*msun*mch)**(5./3.))
    return (freq_factor*mass_factor)**(3./5.)-1.
#Load PPTA upper limits data.
ifile1='../data/PPTA/LimSen4f.dat' #ZhuEtAl2014 limit.
#Columns 2-4 are three alternative strain-limit curves against the frequency in column 1.
ul1=np.array(np.loadtxt(ifile1,usecols=(1,2)))
ul2=np.array(np.loadtxt(ifile1,usecols=(1,3)))
ul3=np.array(np.loadtxt(ifile1,usecols=(1,4)))
fvec,hvec=ul1[:,0],ul1[:,1] #I should check which one is the most appropriate curve to use! +++
#fvec,hvec=ul2[:,0],ul2[:,1]
#fvec,hvec=ul3[:,0],ul3[:,1]
#Properties of the recent binary candidate (I could put it in a different file, if considering more candidates):
#Presumably fgw = 2/P_orb with P_orb = 542 +/- 15 days -- TODO confirm.
#NOTE(review): fgw_max uses the LONGER period (542+15 d), which gives the LOWER frequency; confirm the naming.
fgw_max=2./((542.+15.)*24.*3600.)
fgw_mean=2./((542.+0.)*24.*3600.)
fgw_min=2./((542.-15.)*24.*3600.)
q,m=0.25,10**(9.97+0.5)#Best case.
mch_max=np.log10(m*(q/(1.+q)**2.)**(3./5.))
q,m=0.05,10**(9.97-0.5)#Worst case.
mch_min=np.log10(m*(q/(1.+q)**2.)**(3./5.))
dl_mean=16109.1 #Candidate luminosity distance (presumably Mpc -- TODO confirm).
zmean=2.06 #Candidate redshift.
#Calculate K(z) (as defined in the paper).
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbin) #Vector of redshifts logarithmically spaced.
kdistvec=np.zeros(len(reds)) #This will be K(z) in Mpc.
lumdistvec=np.zeros(len(reds)) #This will be D_L(z), the luminosity distance, in Mpc.
dist_const=light/(hub0*h0)/mpc #A constant that multiplies distances.
#Both quantities share the same comoving-distance integral; only the (1+z) prefactor differs.
for zi in xrange(len(reds)):
    kdistvec[zi]=(1.+reds[zi])**(-2./3.)*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*dist_const
    lumdistvec[zi]=(1.+reds[zi])*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*dist_const
zpeak=reds[kdistvec.argmax()] #Redshift of the peak of K(z).
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039 #Single-column figure width in inches.
goldenmean=(np.sqrt(5.)-1.0)/2.0 #Golden-ratio aspect for the figure.
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints, #NOTE(review): 'text.fontsize' exists only in old matplotlib; newer releases use 'font.size'.
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
left, right, top, bottom, cb_fraction=0.15, 0.94, 0.96, 0.16, 0.145 #Borders of the plot.
xmin,xmax=min(fvec),max(fvec) #Edges of the x-axis.
ymin,ymax=minreds,maxreds #Edges of the y-axis.
#Plot horizons for each value of chirp mass: one figure per entry of mchvec.
for mi in xrange(len(mchvec)):
    mch=mchvec[mi] #Chirp mass of the BH binary.
    m=np.log10(10**(mch)*2**(1./5.)) #Mass of each individual BH, assuming equal masses.
    #Create a plot.
    fig=py.figure()
    fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
    ax=fig.gca()
    zapp=np.zeros(len(fvec)) #Apparent redshift horizon.
    zabove=np.zeros(len(fvec)) #Upper absolute redshift horizon.
    zbelow=np.zeros(len(fvec)) #Lower absolute redshift horizon.
    zlso=np.zeros(len(fvec)) #Maximum redshift allowed at a particular obs freq, given the LSO limit.
    zmon=np.zeros(len(fvec)) #Maximum redshift at which the binaries are monochromatic.
    for fi in xrange(len(fvec)):
        if lsocondi==True:
            #Zlso must lie between zmin and zmax, and fulfill fmax_e=fmax_o[1+zlso]:
            zlso[fi]=np.minimum(np.maximum((fmaxlso(10**m,10**m)*1./fvec[fi]-1.),minreds),maxreds)
            zmon[fi]=np.maximum(zmonfun(10**(mch),fvec[fi],tobs),minreds) #I impose that zmon cannot be smaller than the minimum redshift considered.
        else:
            zlso[fi]=maxreds #If I disregard the LSO, then zlso will be the maximum redshift.
        Hvalue=Hfun(fvec[fi],mch,hvec[fi]) #Value of H (as defined in the paper).
        allowed=(Hvalue>kdistvec) #Condition for detectable redshifts.
        zapp[fi]=reds[abs(lumdistvec-Hvalue).argmin()] #Definition of the apparent redshift.
        #Look for the lower absolute horizon.
        sel= allowed & (reds<zpeak)
        if len(sel[sel==True])==0: #This can happen if Hvalue is smaller than K(zmin) (not relevant).
            zbelow[fi]=minreds
        else:
            zbelow[fi]=max(reds[sel])
        #Look for the upper absolute horizon.
        sel= allowed & (reds>zpeak)
        if len(sel[sel==True])==0: #This can happen if Hvalue is smaller than K(zmax) (not relevant).
            zabove[fi]=maxreds
        else:
            zabove[fi]=min(reds[sel])
    #Plot areas of detectable redshifts.
    colorall='#6495ED'
    ax.fill_between(fvec,zbelow,minreds,color=colorall,alpha=1.,edgecolor=colorall)
    ax.fill_between(fvec,maxreds,zabove,color=colorall,alpha=1.,edgecolor=colorall)
    #Plot area prohibited by the LSO.
    ax.fill_between(fvec,np.ones(len(fvec))*maxreds,zlso,color='black',alpha=0.5)
    #Plot apparent horizon.
    ax.plot(fvec,zapp,color='black')
    #Plot the redshift below which signals are monochromatic.
    #ax.plot(fvec,zmon,color='red')
    ax.fill_between(fvec,np.ones(len(fvec))*maxreds,zmon,color='red',alpha=0.5)
    #Write chirp mass on the plot.
    ax.text(3e-9,1.,'$10^{%.1f}M_{\\odot}$'%mchvec[mi],fontsize=9)
    #ax.legend(loc='lower right',handlelength=3.5)
    ax.grid()
    ax.set_xlabel('$\\mathrm{GW\ observed\ frequency\ /\ Hz}$')
    ax.set_ylabel('$\\mathrm{Redshift}$')
    ax.set_xlim(xmin,xmax)
    #ax.set_xticks([-8.5,-8.,-7.5,-7.,-6.5])
    #ax.set_xticklabels(["$-8.5$","$-8$","$-7.5$","$-7$","$-6.5$"])
    #ax.set_yticks([-14.5,-14.,-13.5,-13.])
    #ax.set_yticklabels(["$-14.5$","$-14$","$-13.5$","$-13$"])
    ax.set_ylim(ymin,ymax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if candidate==True: #Plot recent binary candidate.
        if mchvec[mi]==10.:
            #ax.plot(fgw_max,zmean,'*',color='black')
            ax.plot(fgw_mean,zmean,'*',color='black')
            #ax.plot(fgw_min,zmean,'*',color='black')
    #Save each individual plot.
    oplot='hori_%i.pdf' %int(mi)
    fig.savefig(outputdir+oplot, transparent=True)
#Combine the individual plots in one PDF file.
#NOTE(review): Python 2 only -- uses the built-in file() and the legacy pyPdf API.
output=pyPdf.PdfFileWriter()
ofile=file(outputdir+'horizon.pdf',"wb")
listfiles=os.listdir(outputdir)
for page in listfiles:
    #Pick up the per-mass plots saved above (named 'hori_<i>.pdf').
    if page[0:5]=='hori_':
        input=pyPdf.PdfFileReader(file("%s" %(outputdir+page),"rb"))
        output.addPage(input.getPage(0))
output.write(ofile)
ofile.close()
|
import config as app_config
from pgadmin.utils.route import BaseTestGenerator
from regression.feature_utils.pgadmin_page import PgadminPage
class BaseFeatureTest(BaseTestGenerator):
    """Shared setup for pgAdmin browser feature tests (desktop mode only)."""

    def setUp(self):
        # Feature tests drive the UI anonymously, which only works when
        # pgAdmin runs in desktop (non-server) mode.
        if app_config.SERVER_MODE:
            self.skipTest("Currently, config is set to start pgadmin in server mode. "
                          "This test doesn't know username and password so doesn't work in server mode")
        self.page = PgadminPage(self.driver, app_config)
        self.page.wait_for_app()
        self.page.wait_for_spinner_to_disappear()
        self.page.reset_layout()
        self.page.wait_for_spinner_to_disappear()

    def failureException(self, *args, **kwargs):
        # Shadows unittest's failureException attribute with a callable so a
        # screenshot is captured at the moment an assertion fails.
        self.page.driver.save_screenshot('/tmp/feature_test_failure.png')
        return AssertionError(*args, **kwargs)

    def runTest(self):
        # Concrete scenarios are generated elsewhere; nothing to run here.
        pass
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# extract_bad_pix.py: find ACIS bad pixels and bad columns and records daily variations #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update Oct 27, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import numpy
import time
import astropy.io.fits as pyfits
import Chandra.Time
import unittest
#
#--- from ska
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release', shell='tcsh')
#
#--- reading directory list
#
path = '/data/mta/Script/ACIS/Bad_pixels/house_keeping/dir_list_py'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec() trusts the house_keeping file completely; each line
    # is expected to be "<value> : <variable name>" (e.g. paths like bin_dir).
    exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
import bad_pix_common_function as bcf
#
#--- temp writing file name (randomized so concurrent runs do not collide)
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- setting limits: factors for how much std out from the mean
#
factor = 5.0 #--- warm pixel
col_factor = 3.0 #--- warm column
hot_factor = 1000.0 #--- hot pixel
#
#-- day limits
#
day30 = 2592000.0 #---- (in sec)
day14 = 1209600.0
day7 = 604800.0
#
#--- arc5gl user name
#
arc_user = 'swolk'
#------------------------------------------------------------------------------------------
#--- find_bad_pix_main: contorl function to extract bad pixels and bad columns ---
#------------------------------------------------------------------------------------------
def find_bad_pix_main(tstart, tstop):
    """
    control function to extract bad pixels and bad columns
    input:  tstart  --- interval starting time in seconds from 1998.1.1
            tstop   --- interval stopping time in seconds from 1998.1.1
    output: updated bad pixel and bad column list files
    """
    if tstart == '':
        [tstart, tstop] = find_data_collection_interval()
    # Process the interval one 24-hour slice at a time (at least one slice).
    kcnt = int((tstop - tstart) / 86400.0)
    if kcnt < 1:
        kcnt = 1
    for k in range(0, kcnt):
        ctime = tstart + 86400 * k
        get_bad_pix_data(ctime)
        #
        #--- move old data to archive
        #
        # BUG FIX: the original called the undefined mv_old_file(ctime).  The
        # archiving helper defined in this module is mv_old_data(ccd) and takes
        # a CCD number, so archive every CCD -- TODO confirm intended cadence.
        for ccd in range(0, 10):
            mv_old_data(ccd)
#------------------------------------------------------------------------------------------
#-- get_bad_pix_data: extract bad pixel data of 24 hr period --
#------------------------------------------------------------------------------------------
def get_bad_pix_data(ctime):
    """
    extract bad pixel data of 24 hr period
    input:  ctime   --- starting time in seconds from 1998.1.1
    output: updated bad pixel and bad column list files
    """
#
#---check whether "Working_dir" exists; start each run from an empty scratch area
#
    if os.path.isdir('./Working_dir'):
        mcf.rm_files('./Working_dir/*')
    else:
        cmd = 'mkdir ./Working_dir'
        os.system(cmd)
#
#--- date collection period is 24 hrs from ctime
#
    stop = ctime + 86400
    data_list = get_data_out(ctime, stop)
#
#--- create data lists in ./Working_dir/new_data_ccd<ccd>
#
    nccd_list = int_file_for_day(data_list)
#
#--- check today's bad cols and pixs
#
    stime = setup_to_extract(nccd_list)
#
#--- if there is no data, stime <= 0
#
    if stime <= 0:
        print("No data in the period")
        return

    for ccd in range(0, 10):
        warm_data_list = []
        hot_data_list = []
#
#--- bad pix selected at each quad; so go though all of them and combine them
#--- (quad 0 seeds the lists; later quads are merged via combine_ccd)
#
        for quad in range(0, 4):
            (warm_data, hot_data) = select_bad_pix(ccd, quad)
            if quad == 0:
                warm_data_list = warm_data
                hot_data_list = hot_data
            else:
                warm_data_list = combine_ccd(warm_data_list, warm_data, quad)
                hot_data_list = combine_ccd(hot_data_list, hot_data, quad)

        if len(warm_data_list) > 1:
            warm_data_list = mcf.remove_duplicated_lines(warm_data_list, chk = 0)
        if len(hot_data_list) > 1:
            hot_data_list = mcf.remove_duplicated_lines(hot_data_list, chk = 0)
#
#---- print out newly found warm and hot pixels
#
        print_bad_pix_data(ccd, warm_data_list, 'warm', today_time = stime)
        print_bad_pix_data(ccd, hot_data_list, 'hot', today_time = stime)

        bad_col_list = chk_bad_col(ccd)
        print_bad_col(ccd, bad_col_list, stime)
#
#--- clean up the Exc area
#
    mcf.rm_files('./Working_dir')
#------------------------------------------------------------------------------------------
#--- combine_ccd: combine bad pixel positions from a different quad to one CCD coordinate system
#------------------------------------------------------------------------------------------
def combine_ccd(base, new, quad):
    """
    combine bad pixel positions from a different quad to one CCD coordinate system
    input:  base    --- bad pixel positions already recorded in CCD coordinates
                        data format: <ccd>:<quad>:<year>:<ydate>:<x>:<y>
            new     --- new bad pixel positions listed in quad coordinates
            quad    --- quad # 0 - 3 (adds an x offset of 256 per quad)
    output: base    --- the same list, extended in place and returned
    """
    for entry in new:
        parts = entry.split(':')
        ccd_x = int(parts[4]) + 256 * int(quad)
        merged = ':'.join([parts[0], str(quad), parts[2], parts[3], str(ccd_x), parts[5]])
        base.append(merged)
    return base
#-------------------------------------------------------------------------------------------
#--- int_file_for_day: separate each data into appropriate ccd data list ---
#-------------------------------------------------------------------------------------------
def int_file_for_day(data_list):
    """
    separate each data into appropriate ccd data list
    input:  data_list   --- a list of bias.fits files
    output: <data_dir>/data_used.<ccd>
                --- a record of which data used for the analysis
            ./Working_dir/new_data_ccd.<ccd>
                --- a list of the data which will be used for today's analysis
            a_list      --- per-CCD lists (index = ccd #) of the selected files
    """
#
#--- check each data and select out appropriate data
#
    a_list = []
    for k in range(0, 10):
        a_list.append([])

    for ent in data_list:
        stime = bcf.extractTimePart(ent)
        if stime > 0:
            head = 'acis' + str(int(stime))
#
#--- extract information of the ccd
#--- NOTE(review): extractCCDInfo is resolved at runtime (imported via bin_dir path) -- confirm availability
#
            [ccd, readmode, date_obs, overclock_a, overclock_b, overclock_c, overclock_d] = extractCCDInfo(ent)
#
#--- only TIMED data will be used
#
            m = re.search('TIMED', readmode)
            if m is not None:
#
#--- keep the record of which data we used
#
                ccd = int(float(ccd))
                ntemp = re.split('acisf', ent)
                out = mcf.convert_date_format(date_obs, ifmt="%Y-%m-%dT%H:%M:%S", ofmt='%Y:%j')
                line = out + ':acisf' + ntemp[1] + '\n'

                out1 = data_dir + '/data_used.' + str(ccd)
                out2 = './Working_dir/new_data_ccd.' + str(ccd)
                with open(out1, 'a') as f:
                    f.write(line)
#
#--- a list of data to be analyzed kept in ./Working_dir
#
                a_list[ccd].append(ent)
                with open(out2, 'a') as f:
                    f.write(ent + '\n')

    return a_list
#-------------------------------------------------------------------------------------------
#--- setup_to_extract: prepare to extract data --
#-------------------------------------------------------------------------------------------
def setup_to_extract(ccd_list):
    """
    prepare to extract data
    input:  ccd_list    --- a list of lists of ccd data
    output: stime       --- time in Chandra Time of the observation
                            (today, unless the original data are given)
            output from a function "extract" written in ./Working_dir
    """
    # -999 signals "no data found" to the caller (tested as stime <= 0).
    stime = -999
    for ccd in range(0, 10):
#
#--- only when data exists, procced
#
        if len(ccd_list[ccd]) == 0:
            continue

        ifile = ccd_list[ccd][0]
        stime = bcf.extractTimePart(ifile)
        if stime > 0:
            out = mcf.convert_date_format(stime, ofmt='%Y:%j')
            atemp = re.split(':', out)
            date_obs = str(atemp[0]) + ':' + str(int(float(atemp[1])))
            head = 'acis' + str(int(stime))
#
#--- comb.fits is an img fits file combined all image fits files extracted
#
            wfile = './Working_dir/comb.fits'
            mcf.rm_files(wfile)
            cmd = 'cp ' + ifile + ' ' + wfile
            os.system(cmd)

            f = pyfits.open(wfile)
            sdata = f[0].data
            hdr = f[0].header
            # Clip unphysical values: negatives and anything above 4000 are zeroed.
            sdata[sdata < 0] = 0
            sdata[sdata > 4000] = 0
            pyfits.update(wfile, sdata, hdr)
            f.close()
#
#--- if there are more than one file, merge all fits into one
#
            if len(ccd_list[ccd]) > 1:
                for j in range(1, len(ccd_list[ccd])):
                    f = pyfits.open(ccd_list[ccd][j])
                    tdata = f[0].data
                    tdata[tdata < 0] = 0
                    tdata[tdata > 4000] = 0
                    f.close()
                    sdata = sdata + tdata
                pyfits.update(wfile, sdata, hdr)
#
#--- get time stamp of the last file
#
                ifile = ccd_list[ccd][len(ccd_list[ccd]) -1]
                stime = bcf.extractTimePart(ifile)
#
#--- extract(date_obs, ccd_dir, <fits header>, <input file>, <which quad>,
#--- <column position>, <x start>, <x end>) -- one call per 256-column quad
#
            ccd_dir = house_keeping + '/Defect/CCD' + str(ccd)
            extract(ccd, date_obs, ccd_dir, head, wfile, 0, 0, 0, 255)
            extract(ccd, date_obs, ccd_dir, head, wfile, 1, 256, 256, 511)
            extract(ccd, date_obs, ccd_dir, head, wfile, 2, 512, 512, 767)
            extract(ccd, date_obs, ccd_dir, head, wfile, 3, 768, 768, 1023)

    return (stime)
#-------------------------------------------------------------------------------------------
#-- extract: find bad pix and bad column for the data given ---
#-------------------------------------------------------------------------------------------
def extract(ccd, date_obs, ccd_dir, head, infile, quad, cstart, rstart, rend):
    """
    find bad pix and bad column for the data given
    input:  ccd         --- ccd #
            date_obs    --- observation date
            ccd_dir     --- the location of ccd<ccd #> data kept
            head        --- header for the file
            infile      --- the data fits file location
            quad        --- quad # (0 - 3)
            cstart      --- column position
            rstart      --- column starting position
            rend        --- column ending position
    output: output from find_bad_col (warm/hot column locations)
            output from find_bad_pix_candidate (warm/hot pixel positions)
    """
#
#--- create data files; it could be empty at the end, but it will be used for bookkeeping
#
    max_file = head + '_q' + str(quad) + '_max'
    hot_max_file = head + '_q' + str(quad) + '_hot'
#
#--- extract the region we need (the quad's column slice of the image)
#--- NOTE(review): the slices stop at 1023/rend, excluding the last row/column -- confirm off-by-one intent
#
    f = pyfits.open(infile)
    sdata = f[0].data
    varray = sdata[0:1023,int(rstart):int(rend)]
    f.close()
#
#---- find bad columns
#
    wout_dir = './Working_dir/today_bad_col_' + str(ccd)
    mcf.rm_files(wout_dir)
    find_bad_col(varray, ccd, cstart, ccd_dir, head)
#
#--- find today's warm and hot pixel candidates
#
    find_bad_pix_candidate(varray, ccd, quad, date_obs, ccd_dir, max_file, hot_max_file)
#---------------------------------------------------------------------------------------------------
#--- mv_old_data: move the older data to the achive directory ---
#---------------------------------------------------------------------------------------------------
def mv_old_data(ccd):
    """
    move Defect/CCD<ccd> files older than 30 days to the archive directory
    input:  ccd --- ccd #
    output: old files gzipped and moved to <data_dir>/Old_data/CCD<ccd>/
    """
#
#--- find when is the 30 days ago in second from 1998.1.1
#
    out = time.strftime("%Y:%j:%H:%M:%S", time.gmtime())
    today = Chandra.Time.DateTime(out).secs
    cut_date = today - day30
#
#--- get the list in the directory
#
    dfile = house_keeping + 'Defect/CCD' + str(ccd) + '/'
#
#--- BUG FIX: the original ran the listing only when the directory was EMPTY
#--- ('if not os.listdir(...)'), which also left `data` undefined on the other
#--- path.  Bail out when there is nothing to archive instead.
#
    if not os.listdir(dfile):
        return

    cmd = 'ls ' + house_keeping + 'Defect/CCD' + str(ccd) + '/* > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace)
    mcf.rm_file(zspace)
#
#--- compare the time stamp to the cut off time and if the file is older
#--- than that date, gzip it and move it to a save directory
#--- (file names look like .../acis<secs>_...)
#
    for ent in data:
        try:
            atemp = re.split('acis', ent)
            btemp = re.split('_', atemp[1])
            bdate = float(btemp[0])
            if bdate < cut_date:
                cmd = 'gzip ' + ent
                os.system(cmd)
                cmd = 'mv ' + ent + '.gz ' + data_dir + 'Old_data/CCD' + str(ccd) + '/.'
                os.system(cmd)
        except (IndexError, ValueError):
            # Skip entries without a parsable acis<time> stamp (best effort).
            pass
#-------------------------------------------------------------------------------------------
#--- find_bad_col: find warm columns ---
#-------------------------------------------------------------------------------------------
def find_bad_col(varray, ccd, cstart, ccd_dir, head ):
    """
    find warm columns
    input:  varray  --- data in 2 dim form [y, x] (numpy array)
            ccd     --- ccd #
            cstart  --- starting column #. the data are binned between 0 and 255.
            ccd_dir --- location of data saved
            head    --- header of the file
    output: <ccd_dir>/<head>_col --- a file which keeps a list of warm columns
    """
#
#--- read known bad col list
#
    bad_col_list = read_bad_col_list()
#
#--- set a data file name to record actual today's bad column positions
#
    bad_col_name = head + '_col'
    outdir_name = ccd_dir + '/' + bad_col_name
#
#--- today's bad column list name at a working directory
#
    wout_dir = './Working_dir/today_bad_col_' + str(ccd)
#
#--- bad column list for ccd
#
    bad_cols = bad_col_list[ccd]
    bcnt = len(bad_cols)
    bList = []
#
#---- make a list of just column # (no starting /ending row #)
#
    if bcnt > 0:
        for ent in bad_cols:
            ent = ent.replace('"', '')
            atemp = re.split(':', ent)
            bList.append(int(atemp[0]))
#
#--- create a list of averaged column values
#
    avg_cols = create_col_avg(varray)
#
#--- set a global limit to find outlyers (mean + col_factor * std over the whole quad)
#
    cfactor = col_factor
    climit = find_local_col_limit(avg_cols, 0, 255, cfactor)

    bcnum = 0
    for i in range(0, 255):
#
#--- check whether the row is a known bad column; skip those
#
        cloc = cstart + i #---- modify to the actual column position on the CCD
        chk = 0
        if bcnt > 0:
            for comp in bList:
                if cloc == comp:
                    chk = 1
                    break
        if chk == 1:
            continue
#
#--- find the average of the column and if the column is warmer than the limit, check farther
#
        if avg_cols[i] > climit:
#
#--- local limit (2 sigma over a 10-column window, excluding column i itself)
#
            (llow, ltop) = find_local_range(i)
            cfactor = 2.0
            l_lim = find_local_col_limit(avg_cols, llow, ltop, cfactor, ex = i)
#
#--- if the column is warmer than the local limit, record it
#
            if avg_cols[i] > l_lim:
                if cloc != 0:
                    print_result(outdir_name, cloc)
                    bcnum += 1
#
#---- clean up the file (removing duplicated lines)
#
    if bcnum > 0:
        mcf.remove_duplicated_lines(outdir_name)
#
#--- record today's bad column list name at a working directory
#
    print_result(wout_dir, bad_col_name)
#-------------------------------------------------------------------------------------------
#-- find_local_range: set a local range --
#-------------------------------------------------------------------------------------------
def find_local_range(i):
    """
    set a local range around a column position
    input:  i       --- pixel position
    output: (llow, ltop) --- window bounds; always spans 10 columns, sliding
            inward at the edges so the full window stays within [0, 255]
    """
    # Center a 10-wide window on i, then clamp the lower edge so the window
    # never starts below 0 nor ends above 255 (equivalent to shifting it in).
    llow = min(max(i - 5, 0), 245)
    ltop = llow + 10
    return (llow, ltop)
#-------------------------------------------------------------------------------------------
#-- create_col_avg: compute the average of column value --
#-------------------------------------------------------------------------------------------
def create_col_avg(varray):
    """
    compute the average of column value
    input:  varray   --- a two dim array of the data [row, col]
    output: avg_cols --- a list of average values of columns 0-254
    NOTE(review): only 255 columns are averaged although a quad is sliced with
    a 256-wide offset elsewhere -- confirm the intended quad width.
    """
    return [numpy.mean(varray[:, col]) for col in range(0, 255)]
#-------------------------------------------------------------------------------------------
#-- find_local_col_limit: find local colun limit value --
#-------------------------------------------------------------------------------------------
def find_local_col_limit(avg_cols, llow, ltop, cfactor, ex=-999):
    """
    find local column limit value: mean + cfactor * std over a range
    input:  avg_cols --- a list of average values of columns
            llow     --- lower index of the local range (inclusive)
            ltop     --- upper index of the local range (exclusive)
            cfactor  --- how many standard deviations above the mean
            ex       --- an index to exclude (e.g. the column under test)
    output: l_lim    --- local column limit
    """
    vals = [avg_cols[j] for j in range(llow, ltop) if j != ex]
    lavg = sum(vals) / len(vals)
    # Population standard deviation of the selected columns.
    lstd = math.sqrt(sum((v - lavg) * (v - lavg) for v in vals) / len(vals))
    return lavg + cfactor * lstd
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def print_result(outdir_name, line):
    """
    Append one entry to a result file.

    input:  outdir_name --- path of the file to append to
            line        --- value to write; converted with str() first
    output: outdir_name --- appended with str(line) plus a newline
    """
    text = str(line) + '\n'
    with open(outdir_name, 'a') as out:
        out.write(text)
#-------------------------------------------------------------------------------------------
#--- chk_bad_col: find bad columns for a given ccd ---
#-------------------------------------------------------------------------------------------
def chk_bad_col(ccd):
    """
    Find bad columns for a given ccd.

    A column is declared bad only when it appears in each of the three most
    recent column files for the ccd.

    input:  ccd      --- ccd #
    output: bad_cols --- a sorted, de-duplicated list of bad column numbers
                         (as strings)
    """
    bad_cols = []
    dfile = house_keeping + 'Defect/CCD' + str(ccd) + '/'
    #
    #--- check whether any column files exist for this ccd
    #
    if os.listdir(dfile):
        cmd = 'ls -rt ' + dfile + '/* > ' + zspace
        os.system(cmd)
        test = open(zspace).read()
        mcf.rm_file(zspace)
        m1 = re.search('col', test)
        if m1 is not None:
            chk = 1
        else:
            chk = 0
    else:
        chk = 0
    if chk == 1:
        cmd = 'ls -rt ' + dfile + '/*col > ' + zspace
        os.system(cmd)
        collist = mcf.read_data_file(zspace, remove=1)
        #
        #--- if there are more than two col files, compare the three most
        #--- recent ones (list is sorted oldest first by 'ls -rt')
        #
        dlen = len(collist)
        if dlen > 2:
            ifile = collist[dlen-1]
            file1 = mcf.read_data_file(ifile)
            ifile = collist[dlen-2]
            file2 = mcf.read_data_file(ifile)
            colcand = []
            for ent in file1:
                try:
                    val = float(ent)        #--- weed out non-digit entries
                    for comp in file2:
                        if ent == comp:
                            colcand.append(ent)
                            break
                except:
                    pass
            #
            #--- if columns appear in both files, try the third most recent
            #--- file (BUG FIX: this previously re-read collist[dlen-2], i.e.
            #--- the second file again, so the third-file check was a no-op)
            #
            if len(colcand) > 0:
                ifile = collist[dlen-3]
                file3 = mcf.read_data_file(ifile)
                for ent in colcand:
                    for comp in file3:
                        if ent == comp:
                            bad_cols.append(ent)
                            break
    if len(bad_cols) > 0:
        bad_cols = list(set(bad_cols))
        bad_cols = [int(x) for x in bad_cols]
        bad_cols = sorted(bad_cols)
        bad_cols = [str(x) for x in bad_cols]
    return bad_cols
#-------------------------------------------------------------------------------------------
#--- print_bad_col: update bad column output files ---
#-------------------------------------------------------------------------------------------
def print_bad_col(ccd, bad_col_list, stime):
    """
    Update the bad column output files.

    input:  ccd          --- ccd #
            bad_col_list --- a list of bad columns on the ccd
            stime        --- today's (or a given day's) seconds from 1998.1.1
    output: <data_dir>/col<ccd#>      --- today's bad columns (written only
                                          when the list is non-empty)
            <data_dir>/hist_col<ccd#> --- appended history of bad columns
    """
    stime = int(stime)
    date = mcf.convert_date_format(stime, ifmt='chandra', ofmt='%Y:%j')
    parts = re.split(':', date)
    date = parts[0] + ':' + parts[1].lstrip('0')
    #
    #--- the history file receives a line even when there is no bad column
    #
    if len(bad_col_list) == 0:
        line1 = ''
        line2 = str(stime) + '<>' + date + '<>:\n'
    else:
        line1 = ''.join(col + '\n' for col in bad_col_list)
        line2 = str(stime) + '<>' + date + '<>' \
                + ''.join(':' + col for col in bad_col_list) + '\n'
    out1 = data_dir + 'col' + str(ccd)
    out2 = data_dir + 'hist_col' + str(ccd)
    if line1 != '':
        with open(out1, 'w') as fo:
            fo.write(line1)
    with open(out2, 'a') as fo:
        fo.write(line2)
#mcf.remove_duplicated_lines(out2, chk =1)
#-------------------------------------------------------------------------------------------
#--- find_bad_pix_candidate: find bad pixel candidates for the next step ----
#-------------------------------------------------------------------------------------------
def find_bad_pix_candidate(varray, ccd, quad, date_obs, ccd_dir, max_file, hot_max_file):
    """
    Find bad pixel candidates for the next step (bad pixels of today's data only).

    Input:  varray       --- 2D data surface [y, x] (numpy array)
            ccd          --- ccd #
            quad         --- quad 0 - 3
            date_obs     --- obs date
            ccd_dir      --- data location
            max_file     --- <head>_<quad>_max: file which will contain warm pixel
                             data (e.g. acis485603580_q1_max)
            hot_max_file --- <head>_<quad>_hot: file which will contain hot pixel
                             data (e.g. acis485603580_q1_hot)
    Output: <ccd_dir>/max_file     --- file containing the warm pixel list
            <ccd_dir>/hot_max_file --- file containing the hot pixel list
            ./Working_dir/today_bad_pix_<ccd>_q<quad> / today_hot_pix_<ccd>_q<quad>
                                   --- records of today's output file names
    """
    #
    #--- set a couple of arrays
    #
    warm_list = []
    hot_list = []
    wsave = ''      # NOTE(review): wsave/hsave are never used below
    hsave = ''
    #
    #--- divide the quad into 8x32 areas so that each pixel can be compared
    #--- to a local average; the quad appears to be 1024 rows x 256 columns
    #--- (y indices run 0-1023, x indices 0-255)
    #
    for ry in range(0, 32):
        ybot = 32 * ry
        ytop = ybot + 31
        ybot2 = ybot
        ytop2 = ytop
        #
        #--- even if the pixel is at the edge, keep a full-size area by
        #--- shifting the box inward
        #
        if ytop > 1023:
            diff = ytop - 1023
            ytop2 = 1023
            ybot2 -= diff
        for rx in range(0, 8):
            xbot = 32 * rx
            xtop = xbot + 31
            xbot2 = xbot
            xtop2 = xtop
            if xtop > 255:
                diff = xtop - 255
                xtop2 = 255
                xbot2 -= diff
            lsum = 0.0
            lsum2 = 0.0
            lcnt = 0.0
            #
            #--- mean of the (shifted) area
            #--- NOTE(review): range(xbot2, xtop2) / range(ybot2, ytop2)
            #--- exclude the last column/row of each area --- confirm whether
            #--- that off-by-one is intended
            #
            for ix in range(xbot2, xtop2):
                for iy in range(ybot2, ytop2):
                    lsum += varray[iy, ix]
                    lcnt += 1
            if lcnt < 1:
                continue
            lmean = float(lsum) / float(lcnt)
            #
            #--- standard deviation of the area around that mean
            #
            for ix in range(xbot2, xtop2):
                for iy in range(ybot2, ytop2):
                    lsum2 += (varray[iy, ix] - lmean) * (varray[iy, ix] - lmean)
            lstd = math.sqrt(lsum2 / float(lcnt))
            warm = lmean + factor * lstd    # warm limit: mean + factor sigma
            hot = lmean + hot_factor        # hot limit: mean + fixed offset
            #
            #--- NOTE(review): the scan below mixes the unshifted lower bounds
            #--- (xbot/ybot) with the shifted upper bounds (xtop2/ytop2) ---
            #--- confirm this is intentional
            #
            for ix in range(xbot, xtop2):
                for iy in range(ybot, ytop2):
                    if varray[iy, ix] >= warm:
                        #
                        #--- recompute limits on a 16x16 box centered on the pixel
                        #
                        (cwarm, chot, cmean, cstd) = local_chk(varray, ix, iy, lmean, lstd, warm , hot)
                        #
                        #--- hot pix check
                        #
                        if varray[iy, ix] > chot:
                            line = ccd_dir + '/' + hot_max_file
                            hot_list.append(line)
                            #
                            #--- adjusting to correct (1-based) position
                            #
                            mix = ix + 1
                            miy = iy + 1
                            aline = str(mix) + '\t' + str(miy) + '\t' + str(varray[iy, ix]) + '\t'
                            aline = aline + date_obs + '\t' + str(cmean) + '\t' + str(cstd) + '\n'
                            with open(line, 'a') as fo:
                                fo.write(aline)
                        #
                        #--- warm pix check
                        #
                        elif varray[iy, ix] > cwarm:
                            line = ccd_dir + '/' + max_file
                            warm_list.append(line)
                            mix = ix + 1
                            miy = iy + 1
                            aline = str(mix) + '\t' + str(miy) + '\t' + str(varray[iy, ix]) + '\t'
                            aline = aline + date_obs + '\t' + str(cmean) + '\t' + str(cstd) + '\n'
                            with open(line, 'a') as fo:
                                fo.write(aline)
    #
    #--- remove duplicated lines
    #
    if len(warm_list) > 0:
        today_warm_list = mcf.remove_duplicated_lines(warm_list, chk = 0)
    else:
        today_warm_list = []
    if len(hot_list) > 0:
        today_hot_list = mcf.remove_duplicated_lines(hot_list, chk = 0)
    else:
        today_hot_list = []
    #
    #--- print out the data; even if it is empty, we still create a file
    #
    line = ccd_dir + '/' + max_file
    try:
        if len(today_warm_list) > 0:
            #
            #--- keep the record of today's data
            #
            aline = './Working_dir/today_bad_pix_' + str(ccd) + '_q' + str(quad)
            with open(aline, 'a') as fo:
                fo.write(line + '\n')
    except:
        pass
    line = ccd_dir + '/' + hot_max_file
    try:
        if len(today_hot_list) > 0:
            aline = './Working_dir/today_hot_pix_' + str(ccd) + '_q' + str(quad)
            with open(aline, 'a') as fo:
                #
                #--- NOTE(review): the warm branch records the full path but
                #--- here only the bare file name is written --- confirm
                #
                fo.write(hot_max_file + '\n')
    except:
        pass
#-------------------------------------------------------------------------------------------
#--- select_bad_pix: find bad pixels ---
#-------------------------------------------------------------------------------------------
def select_bad_pix(ccd, quad):
    """
    Find warm and hot pixels for a given ccd/quad.

    input:  ccd  --- ccd #
            quad --- quad #
    output: (warm_data_list, hot_data_list) --- outputs of identifyBadEntry
            for the warm ('_max') and the hot ('_hot') candidate files
    """
    warm = identifyBadEntry(ccd, quad, 'today_bad_pix', '_max')
    hot = identifyBadEntry(ccd, quad, 'today_hot_pix', '_hot')
    return (warm, hot)
#-------------------------------------------------------------------------------------------
#--- identifyBadEntry: find which pixels are warm/hot the last three observations --
#-------------------------------------------------------------------------------------------
def identifyBadEntry(ccd, quad, today_list, ftail):
    """
    Find which pixels were warm/hot in the last three observations.

    input:  ccd        --- ccd #
            quad       --- quad #
            today_list --- name head of today's record file in ./Working_dir
            ftail      --- '_max' (warm case) / '_hot' (hot case)
    output: bad_list   --- warm/hot pixel list (strings produced by find_bad_pix)
    """
    bad_list = []
    #
    #--- check whether we have any bad pixels/columns in today's data
    #
    ifile = './Working_dir/' + today_list + '_' + str(ccd) + '_q' + str(quad)
    bad = mcf.read_data_file(ifile)
    if len(bad) == 0:
        return bad_list
    #
    #--- the list is not empty: each entry is the path of a candidate file
    #
    for i in range(0, len(bad)):
        #
        #--- if there is bad data, check the past data: find two previous records
        #
        if not os.path.isfile(bad[i]):
            continue
        if os.stat(bad[i]).st_size > 0:
            #
            #--- list the archived candidate files for this ccd/quad,
            #--- oldest first ('ls' default lexical order)
            #
            cmd = 'ls ' + house_keeping + '/Defect/CCD' + str(ccd)
            cmd = cmd + '/acis*_q' + str(quad) + ftail + '>' + zspace
            os.system(cmd)
            data = mcf.read_data_file(zspace, remove=1)
            lcnt = len(data)
            if lcnt == 0:
                continue
            lcnt1 = lcnt -1
            #
            #--- walk the archive from the newest entry backwards
            #
            for k in range(0, lcnt):
                j = lcnt1 - k
                #
                #--- we located today's data in the data directory
                #
                if data[j] == bad[i]:
                    #
                    #--- need two older files; j > 1 guarantees data[j-2] exists
                    #
                    if j > 1:
                        #
                        #--- check whether the previous file is empty or not
                        #
                        if os.stat(data[j-1]).st_size > 0:
                            #
                            #--- if not empty, check the one before that as well
                            #
                            if os.stat(data[j-2]).st_size > 0:
                                #
                                #--- three consecutive data sets are not empty;
                                #--- check whether any pixels were warm three
                                #--- consecutive times
                                #
                                file1 = bad[i]
                                file2 = data[j-1]
                                file3 = data[j-2]
                                #--- I AM NOT QUITE SURE THE FOLLOWING FIX IS CORRECT!!!! (03/21/19)
                                #bad_list = find_bad_pix(ccd, quad, file1, file2, file3)
                                bad_list = bad_list + find_bad_pix(ccd, quad, file1, file2, file3)
    return bad_list
#-------------------------------------------------------------------------------------------
#-- print_bad_pix_data: update bad pixel data files ---
#-------------------------------------------------------------------------------------------
def print_bad_pix_data(ccd, data_list, bind, today_time = 'NA'):
    """
    Update bad pixel history data files.

    input:  ccd        --- ccd #
            data_list  --- bad pixel list; each entry is a ':'-separated string
                           whose 5th/6th fields are the x/y pixel positions
            bind       --- 'warm' (history goes to hist_ccd<ccd>) or anything
                           else (history goes to hist_hccd<ccd>)
            today_time --- seconds from 1998.1.1 of the data; 'NA' means "now"
    output: <data_dir>/hist_ccd<ccd#> or <data_dir>/hist_hccd<ccd#>
                       --- rewritten with entries older than stime plus the
                           new entry
    """
    if today_time != 'NA':
        stime = today_time
        out = Chandra.Time.DateTime(stime).date
        atemp = re.split(":", out)
    else:
        out = time.strftime("%Y:%j:%H:%M:%S", time.gmtime())
        stime = Chandra.Time.DateTime(out).secs
        atemp = re.split(':', out)
    date = str(atemp[0]) + ':' + str(int(atemp[1]))
    #
    #--- pick the history file for warm or hot pixels
    #
    if bind == 'warm':
        file5 = data_dir + 'hist_ccd' + str(ccd)
    else:
        file5 = data_dir + 'hist_hccd' + str(ccd)
    #
    #--- build the new history line: <stime><><date><>:(x1,y1):(x2,y2)...
    #
    pline = str(stime) + '<>' + date + '<>'
    if len(data_list) > 0:
        for ent in data_list:
            atemp = re.split(':', ent)
            pline = pline + ':(' + atemp[4] + ',' + atemp[5] + ')'
        pline = pline + '\n'
    else:
        pline = pline + ':' + '\n'
    #
    #--- add to the history data, keeping only lines older than stime so a
    #--- duplicated or late run does not create duplicated/out-of-order entries
    #--- (BUG FIX: an unused "re.split('<>', data[-1])" was executed before the
    #--- length check and crashed whenever the history file was empty; removed)
    #
    data = mcf.read_data_file(file5)
    if len(data) > 0:
        dline = ''
        for ent in data:
            atemp = re.split('<>', ent)
            try:
                ltime = int(float(atemp[0]))
            except:
                continue
            if ltime < stime:
                dline = dline + ent + '\n'
            else:
                break
        with open(file5, 'w') as fo:
            fo.write(dline)
            fo.write(pline)
    else:
        with open(file5, 'w') as fo:
            fo.write(pline)
#-------------------------------------------------------------------------------------------
#--- find_bad_pix: find bad pixel by comparing three consecutive data ---
#-------------------------------------------------------------------------------------------
def find_bad_pix(ccd, quad, file1, file2, file3):
    """
    Find bad pixels by comparing three consecutive data sets.

    A pixel is reported only when it appears in all three files.

    input:  ccd     --- ccd #
            quad    --- quad #
            file1   --- first (newest) data file
            file2   --- second data file
            file3   --- third data file
    output: cleaned --- de-duplicated list of '<ccd>:<quad>:<date>:<x>:<y>'
    """
    out_file = []
    [x1, y1, line1] = readBFile(ccd, file1)
    [x2, y2, line2] = readBFile(ccd, file2)
    #
    #--- compare the first two files for pixels listed in both
    #
    if len(x1) > 0 and len(x2) > 0:
        [xs, ys, ls ] = pickSamePix(x1, y1, line1, x2, y2, line2)
        #
        #--- if any survived, intersect with the third file as well
        #
        if len(xs) > 0:
            [x3, y3, line3] = readBFile(ccd, file3)
            [xs2, ys2, ls2] = pickSamePix(xs, ys, ls, x3, y3, line3)
            if len(xs2) > 0:
                for i in range(0, len(xs2)):
                    try:
                        val = float(xs2[i])     #--- skip non-numeric entries
                        atemp = re.split('\s+|\t+', ls2[i])
                        line = str(ccd) + ':' + str(quad) + ':' + atemp[3]
                        #
                        #--- BUG FIX: coordinates must come from the final
                        #--- three-file intersection (xs2/ys2); indexing xs/ys
                        #--- with i reported wrong pixels whenever the third
                        #--- file dropped an entry
                        #
                        line = line + ':' + xs2[i] + ':' + ys2[i]
                        out_file.append(line)
                    except:
                        pass
    if len(out_file) > 0:
        cleaned = mcf.remove_duplicated_lines(out_file, chk = 0)
    else:
        cleaned = []
    return cleaned
#-------------------------------------------------------------------------------------------
#--- readBFile: read out ccd data file ----
#-------------------------------------------------------------------------------------------
def readBFile(ccd, ifile):
    """
    Read a ccd data file.

    input:  ccd   --- ccd # (unused; kept for interface compatibility)
            ifile --- file name
    output: (xa, ya, la) --- x position list, y position list and the raw
            data lines (positions are the first two whitespace-separated
            fields of each line, kept as strings)
    """
    xa = []
    ya = []
    la = []
    for row in mcf.read_data_file(ifile):
        cols = re.split(r'\s+|\t+', row)
        xa.append(cols[0])
        ya.append(cols[1])
        la.append(row)
    return (xa, ya, la)
#-------------------------------------------------------------------------------------------
#--- pickSamePix: find pixels appear in two files given ---
#-------------------------------------------------------------------------------------------
def pickSamePix(x1, y1, line1, x2, y2, line2):
    """
    Find pixels which appear in both of the given files.

    input:  x1    --- x coordinates of the first file
            y1    --- y coordinates of the first file
            line1 --- full data line associated with each (x1, y1) pixel
            x2    --- x coordinates of the second file
            y2    --- y coordinates of the second file
            line2 --- full data lines of the second file (unused; kept for
                      interface compatibility)
    output: (x_save, y_save, l_save) --- matching coordinates and the
            corresponding first-file lines, in first-file order
    """
    #
    #--- build a set of (x, y) pairs of the second file so that each
    #--- membership test is O(1) instead of the previous O(n*m) nested scan
    #
    second = set(zip(x2, y2))
    x_save = []
    y_save = []
    l_save = []
    for i in range(0, len(x1)):
        #--- empty x entries are skipped, as before
        if x1[i] != '' and (x1[i], y1[i]) in second:
            x_save.append(x1[i])
            y_save.append(y1[i])
            l_save.append(line1[i])
    return (x_save, y_save, l_save)
#-------------------------------------------------------------------------------------------
#--- local_chk: compute local mean, std, warm limit and hot limit ---
#-------------------------------------------------------------------------------------------
def local_chk(varray, ix, iy, lmean, lstd, warm, hot):
    """
    Compute the local (16x16) mean, std, warm limit and hot limit around a pixel.

    input:  varray --- data array (2D) [y, x] (numpy array)
            ix     --- x coordinate of the pixel of interest
            iy     --- y coordinate of the pixel of interest
            lmean  --- mean value of the larger area (fallback)
            lstd   --- standard deviation of the larger area (fallback)
            warm   --- warm limit of the larger area (fallback)
            hot    --- hot limit of the larger area (fallback)
    output: (cwarm, chot, cmean, cstd) --- warm limit, hot limit, mean and std
            of the local area; the larger-area values are used when the local
            statistics cannot be computed
    """
    #
    #--- if the pixel sits near a corner/edge so a full 16x16 box does not
    #--- fit, shift the box so that it stays inside the quad
    #
    x1 = ix - 8
    x2 = ix + 8
    if(x1 < 0):
        x2 += abs(x1)
        x1 = 0
    elif x2 > 255:
        x1 -= (x2 - 255)
        x2 = 255
    y1 = iy - 8
    y2 = iy + 8
    if(y1 < 0):
        y2 += abs(y1)
        y1 = 0
    elif y2 > 1023:
        y1 -= (y2 - 1023)
        y2 = 1023
    csum = 0.0
    csum2 = 0.0
    ccnt = 0.0
    for xx in range(x1, x2+1):
        for yy in range(y1, y2+1):
            try:
                cval = float(varray[yy, xx])
                cval = int(cval)
            except:
                cval = 0        #--- unreadable pixels are counted as zero
            csum += cval
            csum2 += cval * cval
            ccnt += 1
    try:
        cmean = float(csum) /float(ccnt)
        #--- sqrt can fail on tiny negative values from float round-off
        cstd = math.sqrt(float(csum2) / float(ccnt) - cmean * cmean)
        cwarm = cmean + factor * cstd
        chot = cmean + hot_factor
        return (cwarm, chot, cmean, cstd)
    except:
        #
        #--- BUG FIX: fall back to the larger-area values in the same order as
        #--- the success path; the original returned (lmean, lstd, warm, hot),
        #--- which the caller unpacked as (cwarm, chot, cmean, cstd)
        #
        return (warm, hot, lmean, lstd)
#-------------------------------------------------------------------------------------------
#--- extractCCDInfo: extract CCD information from a fits file ---
#-------------------------------------------------------------------------------------------
def extractCCDInfo(ifile):
    """
    Extract CCD information from a fits file header.

    input:  ifile --- fits file name
    output: [ccd_id, readmode, date_obs, overclock_a, overclock_b,
             overclock_c, overclock_d]; every entry is 'NA' when the file
             cannot be read or a keyword is missing
    """
    #
    #--- read the fits file header
    #
    try:
        f = pyfits.open(ifile)
        try:
            hdr = f[0].header
            ccd_id = hdr['CCD_ID']
            readmode = hdr['READMODE']
            date_obs = hdr['DATE-OBS']
            overclock_a = hdr['INITOCLA']
            overclock_b = hdr['INITOCLB']
            overclock_c = hdr['INITOCLC']
            overclock_d = hdr['INITOCLD']
        finally:
            #--- BUG FIX: close the file even when a keyword lookup fails;
            #--- previously the handle leaked on any header error
            f.close()
        return [ccd_id, readmode, date_obs, overclock_a, overclock_b, overclock_c, overclock_d]
    except:
        return ['NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA']
#-------------------------------------------------------------------------------------------
#--- read_bad_pix_list: read knwon bad pixel list ---
#-------------------------------------------------------------------------------------------
def read_bad_pix_list():
    """
    Read the known bad pixel list.

    input:  none, but read from <house_keeping>/Defect/bad_pix_list
    output: bad_pix_list --- 10 lists (one per CCD) of quoted '"x:y"' entries
    """
    bad_pix_list = [[] for _ in range(10)]
    ifile = house_keeping + '/Defect/bad_pix_list'
    for ent in mcf.read_data_file(ifile):
        #
        #--- skip comment lines
        #
        if re.search('#', ent) is not None:
            continue
        cols = re.split(':', ent)
        ccd = int(float(cols[0]))
        entry = '"' + str(cols[2]) + ':' + str(cols[3]) + '"'
        bad_pix_list[ccd].append(entry)
    return bad_pix_list
#-------------------------------------------------------------------------------------------
#--- read_bad_col_list: read in known bad column lists ---
#-------------------------------------------------------------------------------------------
def read_bad_col_list():
    """
    Read the known bad column list.

    input:  none, but read from <house_keeping>/Defect/bad_col_list
    output: bad_col_list --- 10 lists (one per CCD) of quoted column entries
    """
    bad_col_list = [[] for _ in range(10)]
    ifile = house_keeping + '/Defect/bad_col_list'
    for ent in mcf.read_data_file(ifile):
        #
        #--- skip comment lines
        #
        if re.search('#', ent) is not None:
            continue
        cols = re.split(':', ent)
        ccd = int(float(cols[0]))
        #
        #--- NOTE(review): field 3 is repeated on both ends of the entry;
        #--- this reproduces the long-standing format --- confirm before changing
        #
        entry = '"' + str(cols[3]) + ':' + str(cols[2]) + ':' + str(cols[3]) + '"'
        bad_col_list[ccd].append(entry)
    return bad_col_list
#-------------------------------------------------------------------------------------------
#--- removeIncompteData: remove files marked "incomplete" ----
#-------------------------------------------------------------------------------------------
def removeIncompteData(cut_time):
    """
    Remove data entries marked "incomplete", i.e. created at/after cut_time.

    input:  cut_time --- cut-off time in seconds from 1998.1.1
    output: none, but the data files are trimmed in place
    """
    #
    #--- 'data_used' files carry <year>:<ydate> stamps (dtype 0)
    #
    for ccd in range(10):
        trimFile(data_dir + 'data_used' + str(ccd), cut_time, 0)
    #
    #--- the remaining files carry stime stamps (dtype 1)
    #
    heads = ('change_ccd', 'change_col', 'imp_ccd', 'new_ccd',
             'imp_col', 'new_col', 'hist_ccd')
    for head in heads:
        for ccd in range(10):
            trimFile(data_dir + head + str(ccd), cut_time, 1)
#-------------------------------------------------------------------------------------------
#--- trimFile: drop the part of the data from the file if the data is created after cut_time
#-------------------------------------------------------------------------------------------
def trimFile(ifile, cut_time, dtype):
    """
    Drop from the file any line whose time stamp is at/after cut_time.

    input:  ifile    --- file name
            cut_time --- the cut time in seconds from 1998.1.1
            dtype    --- time stamp format of each line:
                         0         --- '<year>:<ydate>...' (e.g. 2013:135)
                         otherwise --- '<stime><>...' (seconds from 1998.1.1)
    output: ifile    --- updated in place (via a temporary file and 'mv')
    """
    try:
        data = mcf.read_data_file(ifile)
        if len(data) > 0:
            sline = ''
            for ent in data:
                try:
                    if dtype == 0:
                        atemp = re.split(':', ent)
                        year = int(float(atemp[0]))     # NOTE(review): unused; validates the field is numeric
                        ydate = int(float(atemp[1]))
                        ydate = mcf.add_leading_zero(ydate, 3)
                        ltime = atemp[0] + ':' + ydate + ':00:00:00'
                        dtime = int(Chandra.Time.DateTime(ltime).secs)
                    else:
                        atemp = re.split('<>', ent)
                        dtime = int(atemp[0])
                    #
                    #--- lines are assumed to be in time order: stop at the
                    #--- first entry at/after the cut time and keep the rest
                    #
                    if dtime >= cut_time:
                        break
                    else:
                        sline = sline + ent + '\n'
                except:
                    pass        #--- unparsable lines are silently dropped
            with open(zspace, 'w') as fo:
                fo.write(sline)
            cmd = 'mv ' + zspace + ' ' + ifile
            os.system(cmd)
    except:
        pass        #--- missing/unreadable file: nothing to trim
#-------------------------------------------------------------------------------------------
#-- get_data_out: extract acis bias data file --
#-------------------------------------------------------------------------------------------
def get_data_out(start, stop):
    """
    Extract acis bias data files with arc5gl.

    input:  start --- start time in seconds from 1998.1.1
            stop  --- stop time in seconds from 1998.1.1
    output: acis bias data fits files under <exc_dir>/Temp_data/
            save  --- a list of the extracted fits file paths
    """
    #
    #--- convert date format to the form arc5gl expects
    #
    tstart = mcf.convert_date_format(start, ofmt="%Y-%m-%dT%H:%M:%S")
    tstop = mcf.convert_date_format(stop, ofmt="%Y-%m-%dT%H:%M:%S")
    #
    #--- clean up the temporary output directory
    #
    fdir = exc_dir + 'Temp_data/'
    if os.path.isdir(fdir):
        cmd = 'rm -rf ' + fdir + '/*'
        os.system(cmd)
    else:
        cmd = 'mkdir ' + fdir
        os.system(cmd)
    #
    #--- write the required arc5gl command script
    #
    line = 'operation=retrieve\n'
    line = line + 'dataset=flight\n'
    line = line + 'detector=acis\n'
    line = line + 'level=0\n'
    line = line + 'filetype=bias0\n'
    line = line + 'tstart=' + str(tstart) + '\n'
    line = line + 'tstop=' + str(tstop) + '\n'
    line = line + 'go\n'
    with open(zspace, 'w') as f:
        f.write(line)
    #
    #--- run arc5gl; fall back to the simul copy (with PERL5LIB cleared) on failure
    #
    outf = exc_dir + 'Temp_data/zout'
    try:
        cmd = 'cd ' + exc_dir + 'Temp_data; /proj/sot/ska/bin/arc5gl -user ' + arc_user + ' -script '
        cmd = cmd + zspace + ' > ' + outf
        os.system(cmd)
    except:
        cmd1 = "/usr/bin/env PERL5LIB= "
        cmd2 = ' cd ' + exc_dir + 'Temp_data; /proj/axaf/simul/bin/arc5gl -user ' + arc_user + ' -script '
        cmd2 = cmd2 + zspace + '> ' + outf
        try:
            os.system(cmd2)
        except:
            cmd = cmd1 + cmd2
            bash(cmd, env=ascdsenv)
    mcf.rm_files(zspace)
    #
    #--- get a list of the retrieved fits files from the arc5gl output
    #
    data = mcf.read_data_file(outf)
    mcf.rm_files(outf)
    save = []
    for ent in data:
        mc = re.search('fits', ent)
        if mc is not None:
            fname = fdir + ent
            save.append(fname)
    return save
#---------------------------------------------------------------------------------------
#-- find_data_collection_interval: find data collection period in dom---
#---------------------------------------------------------------------------------------
def find_data_collection_interval():
    """
    Find the data collection period.

    input:  none, but read from <data_dir>/hist_ccd3
    output: ldate --- starting time in seconds from 1998.1.1
            tdate --- stopping time in seconds from 1998.1.1
    """
    #
    #--- find today's date at 0 hr, in seconds from 1998.1.1
    #
    tout = time.strftime('%Y:%j:00:00:00', time.gmtime())
    tdate = int(mcf.convert_date_format(tout, ofmt='chandra'))
    #
    #--- find the date of the last entry, scanning from the newest line
    #--- NOTE(review): if no line yields a numeric time stamp, ldate is never
    #--- bound and the code below raises NameError --- confirm hist_ccd3
    #--- always contains at least one valid entry
    #
    ifile = data_dir + 'hist_ccd3'
    data = mcf.read_data_file(ifile)
    data.reverse()
    for ent in data:
        atemp = re.split('<>', ent)
        try:
            ldate = int(float(atemp[0]))
            break
        except:
            continue
    #
    #--- the data collection starts from the next day of the last entry date;
    #--- 90000 sec (25 hr) lands safely inside the following day, then the
    #--- time is truncated back to 0 hr
    #
    ldate += 90000.0
    ltime = Chandra.Time.DateTime(ldate).date
    atemp = re.split(':', ltime)
    ltime = atemp[0] + ':' + atemp[1] + ':00:00:00'
    ldate = int(Chandra.Time.DateTime(ltime).secs)
    return [ldate, tdate]
#---------------------------------------------------------------------------------------
#-- mv_old_file: move supplemental data file older than 30 day to a reserve --
#---------------------------------------------------------------------------------------
def mv_old_file(tdate):
    """
    Move supplemental data files older than 30 days into the Save reserve area.

    input:  tdate --- the current time in seconds from 1998.1.1
    output: none, but old files are moved under Defect/Save
    """
    cutoff = tdate - 30 * 86400
    cmd = 'ls ' + house_keeping + '/Defect/CCD*/* > ' + zspace
    os.system(cmd)
    for entry in mcf.read_data_file(zspace, remove=1):
        #
        #--- file names embed the observation time: .../acis<stime>_...
        #
        stamp = re.split('_', re.split('\/acis', entry)[1])[0]
        if int(stamp) < cutoff:
            dest = entry.replace('Defect', 'Defect/Save')
            os.system('mv ' + entry + ' ' + dest)
#-----------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#-----------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
    """
    testing functions
    """
    #------------------------------------------------------------
    def test_bad_pix(self):
        """Regression test of select_bad_pix on ccd 3 / quad 0 archived data."""
        #
        #--- stash the live data_used* files, then restore copies so the run
        #--- does not alter the archive
        #
        mcf.mk_empty_dir('/data/mta/Script/ACIS/Bad_pixels/Data/Ztemp_dir')
        cmd = 'mv /data/mta/Script/ACIS/Bad_pixels/Data/data_used* /data/mta/Script/ACIS/Bad_pixels/Data/Ztemp_dir'
        os.system(cmd)
        cmd = 'cp -r /data/mta/Script/ACIS/Bad_pixels/Data/Ztemp_dir/* /data/mta/Script/ACIS/Bad_pixels/Data/.'
        os.system(cmd)
        mcf.mk_empty_dir('./Working_dir')
        mcf.rm_file('./Working_dir/*_list')
        a_list = int_file_for_day(main_list[0])
        dom = setup_to_extract()
        ccd = 3
        quad = 0
        (warm_data, hot_data) = select_bad_pix(ccd, quad)
        test_data = ['3:0:2014:255:21:95']
        #
        #--- FIX: assertEquals is a long-deprecated alias removed in Python 3.12
        #
        self.assertEqual(warm_data, test_data)
        cmd = 'mv /data/mta/Script/ACIS/Bad_pixels/Data/Ztemp_dir/* /data/mta/Script/ACIS/Bad_pixels/Data/.'
        os.system(cmd)
#--------------------------------------------------------------------
if __name__ == '__main__':
    #unittest.main()
    #
    #--- optional command line arguments: <tstart> <tstop> in seconds from 1998.1.1
    #--- NOTE(review): passing exactly one argument raises IndexError on
    #--- sys.argv[2] --- confirm callers always give both or none
    #
    if len(sys.argv) > 1:
        tstart = float(sys.argv[1])
        tstop = float(sys.argv[2])
    else:
        tstart = ''
        tstop = ''
    find_bad_pix_main(tstart, tstop)
|
import logging
from config import API_DIR
async def save_log(log: dict) -> None:
    """
    Append one request record to the application log file.

    The fields are written in a fixed order; matched entries are popped from
    *log* (the caller's dict is mutated, preserving the original contract).
    """
    # fixed field order of the log line
    fields_order = ['ip', 'uri', 'user_id', 'email', 'user-agent']
    log_line = '; '.join('%s: %s' % (f, log.pop(f)) for f in fields_order if f in log)
    logger = logging.getLogger('log')
    #
    # BUG FIX: a new FileHandler was added on every call, so each request
    # leaked a file descriptor and every line was duplicated once per
    # previous call; attach the handler only once
    #
    if not logger.handlers:
        set_formatter = logging.Formatter('date: %(asctime)s.%(msecs)d; %(message)s', '%Y-%m-%dT%H:%M:%S')
        file_handler = logging.FileHandler(f'{API_DIR}/docs/logs.log')
        file_handler.setFormatter(set_formatter)
        logger.addHandler(file_handler)
    logger.info(log_line)
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def test(selenium, base_url, variables):
    """
    End-to-end login/logout flow: open the shop page, log in with the
    credentials from *variables*, log out, and verify the logout.
    """
    step_01_open_tested_page(selenium, base_url)
    step_02_click_on_login(selenium)
    step_03_fill_username(selenium, variables['username'])
    step_04_fill_password(selenium, variables['password'])
    step_05_click_submit(selenium)
    step_06_click_logout(selenium)
    verify_user_logged_out(selenium)
def step_01_open_tested_page(selenium, base_url):
    """Open the home page and check the <h1>All products</h1> heading is shown."""
    selenium.get(base_url)
    # FIX: find_element_by_* helpers were removed in Selenium 4.3;
    # use the By-locator API (By is already imported at the top of the file)
    el = selenium.find_element(By.TAG_NAME, 'h1')
    assert 'All products' in el.text
    # # ExplicitWait - use when content loads via JavaScript without a full page reload
    # WebDriverWait(selenium, 2).until(
    #     EC.presence_of_element_located(
    #         (By.TAG_NAME, 'h1')
    #     )
    # )
def step_02_click_on_login(selenium):
    """Click the login link and check the <h2>Log In</h2> heading appears."""
    # FIX: find_element_by_* helpers were removed in Selenium 4.3
    el = selenium.find_element(By.LINK_TEXT, 'Login or register')
    el.click()
    el = selenium.find_element(By.TAG_NAME, 'h2')
    assert 'Log In' in el.text
    # # ExplicitWait - use when content loads via JavaScript without a full page reload
    # WebDriverWait(selenium, 2).until(
    #     EC.text_to_be_present_in_element(
    #         (By.TAG_NAME, 'h2'),
    #         'Log In'
    #     )
    # )
def step_03_fill_username(selenium, username):
    """Type the user name into the login form."""
    # FIX: find_element_by_id was removed in Selenium 4.3
    el = selenium.find_element(By.ID, 'id_login-username')
    el.send_keys(username)
def step_04_fill_password(selenium, password):
    """Type the password into the login form."""
    # FIX: find_element_by_id was removed in Selenium 4.3
    el = selenium.find_element(By.ID, 'id_login-password')
    el.send_keys(password)
def step_05_click_submit(selenium):
    """Submit the login form."""
    # FIX: find_element_by_name was removed in Selenium 4.3
    el = selenium.find_element(By.NAME, 'login_submit')
    el.click()
def step_06_click_logout(selenium):
    """Click the logout link."""
    # FIX: find_element_by_link_text was removed in Selenium 4.3
    el = selenium.find_element(By.LINK_TEXT, 'Logout')
    el.click()
def verify_user_logged_out(selenium):
    """After logout the 'Login or register' link must be visible again."""
    # FIX: find_element_by_link_text was removed in Selenium 4.3
    el = selenium.find_element(By.LINK_TEXT, 'Login or register')
    assert "Login" in el.text
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Gabriele Zambelli (Twitter: @gazambelli)
# Blog : http://forensenellanebbia.blogspot.it
#
# WARNING: This program is provided "as-is"
# See http://forensenellanebbia.blogspot.it/2015/12/wechat-script-to-convert-and-play-aud.html for further details.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You can view the GNU General Public License at <http://www.gnu.org/licenses/>
#
#
# Change history
# 2016-04-03 Added Silk v3 decoder
# 2015-12-01 First public release
#
# Prerequisites:
# - Python v2.7
# - libav for Microsoft Windows (Open source audio and video processing tools - https://www.libav.org/)
# Tested version: http://builds.libav.org/windows/nightly-gpl/libav-x86_64-w64-mingw32-20151130.7z
# - Silk v3 decoder (decoder.exe from https://github.com/netcharm/wechatvoice)
# - FFMpeg (https://ffmpeg.org/download.html)
#
# What you have to do first:
# - export WeChat chats in HTML format using the UFED Physical Analyzer software ("Export to HTML" option)
# - put libav, decoder.exe and ffmpeg in c:\tools
#
# What the script does:
# - this script converts WeChat audio messages to WAV files
# - it then modifies each HTML report by replacing the strings ".aud" and ".amr" with ".wav"
# Script based on these blog posts/scripts:
# http://ppwwyyxx.com/2014/Classify-WeChat-Audio-Messages/
# http://www.giacomovacca.com/2013/06/voip-calls-encoded-with-silk-from-rtp.html
# https://github.com/netcharm/wechatvoice (https://github.com/netcharm/wechatvoice/blob/master/amr2ogg.py)
from datetime import datetime
import os
import subprocess
import sys
path_to_tools = "c:\\tools"
def check_prequesites():
    """
    Verify that the external tools (Silk v3 decoder, ffmpeg, avconv) can be
    launched from path_to_tools; abort the script otherwise.
    """
    try:
        # launching each tool (avconv with -version) is enough to prove it exists
        subprocess.Popen([path_to_tools + '\\decoder.exe'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
        subprocess.Popen([path_to_tools + '\\ffmpeg.exe'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
        subprocess.Popen([path_to_tools + '\\avconv.exe', '-version'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
    except:
        print "\n!! Error: SILKv3 decoder or ffmpeg or avconv missing !!\n"
        sys.exit()
def check():
os.system('cls')
if len(sys.argv) == 1:
print "\n**** WeChat audio file converter + UFED Physical Analyzer HTML Report fix ****"
print "\n(The script will search recursively)\n\n"
print "How to use:\n==> %s absolute_path_to_your_folder\n" % os.path.basename(sys.argv[0])
check_prequesites()
elif len(sys.argv) == 2:
if os.path.exists(sys.argv[1]) == True:
check_prequesites()
pass
else:
print "\n!! Error: %s not found !!\n\n" % sys.argv[1]
check_prequesites()
sys.exit()
# function to convert aud files into wav files by using avconv
# avconv options:
# -y overwrites existing wav files
# -i input file
def amr2wav(filename, path):
    """
    Convert an .amr audio file into a .wav file using avconv.

    avconv options: -y overwrite an existing wav file, -i input file
    input:  filename --- name of the .amr file
            path     --- directory containing the file
    output: a .wav file written next to the source file
    """
    src = '"' + path + '\\' + filename + '"'
    dst = '"' + path + '\\' + filename[:filename.find('.amr')] + '.wav' + '"'
    cmd = path_to_tools + '\\avconv.exe -y -i ' + src + ' ' + dst
    subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
def silk2pcm(filename, path):
    """
    Decode a Silk v3 .amr file into a raw .pcm file with decoder.exe.

    input:  filename --- name of the .amr file
            path     --- directory containing the file
    output: a .pcm file written next to the source file
    """
    src = '"' + path + '\\' + filename + '"'
    dst = '"' + path + '\\' + filename[:filename.find('.amr')] + '.pcm' + '"'
    cmd = path_to_tools + '\\decoder.exe ' + src + ' ' + dst
    subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
def pcm2wav(filename, path):
    """Wrap raw 16-bit/24kHz PCM (path\\filename) into a .wav via ffmpeg."""
    src = '"%s\\%s"' % (path, filename)
    dst = '"%s\\%s.wav"' % (path, filename[:filename.find('.pcm')])
    cmd = path_to_tools + '\\ffmpeg.exe -y -f s16le -ar 24000 -i ' + src + ' ' + dst
    subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
check()
path = sys.argv[1]
os.chdir(path)
f_aud_counter = 0 #number of audio files converted
report_counter = 0 #number of reports checked
start_time = datetime.now()
print "\nPlease wait...\n"
for root, dirs, files in os.walk(path):
for f_audio in files:
if f_audio.endswith(".aud") or f_audio.endswith(".amr"):
f_audio_pfn = os.path.join(root, f_audio) #pfn = path + filename
with open(f_audio_pfn, 'rb') as fb_audio: #fb = file binary
data = fb_audio.read()
base = os.path.basename(f_audio_pfn)
f_amr = os.path.splitext(base)[0] + ".amr"
fb_audio.seek(0)
magic_silk = fb_audio.read(10)
magic_amr = fb_audio.read(6)
if magic_silk == "\x02\x23\x21\x53\x49\x4C\x4B\x5F\x56\x33": # #!SILK_V3
with open(os.path.join(root, f_amr), 'wb') as fb_amr:
fb_amr.write(data[1:]) #prepend amr header to amr files
fb_amr.close()
silk2pcm(f_amr,root)
f_aud_counter += 1
elif magic_silk == "\x23\x21\x53\x49\x4C\x4B\x5F\x56\x33\x0C": # #!SILK_V3
silk2pcm(f_amr,root)
f_aud_counter += 1
elif magic_amr == "\x23\x21\x41\x4D\x52": # #!AMR
amr2wav(f_amr,root)
f_aud_counter += 1
else:
with open(os.path.join(root, f_amr), 'wb') as fb_amr:
fb_amr.write('#!AMR\n'+data) #prepend amr header to amr files
fb_amr.close()
amr2wav(f_amr,root) #from amr to wav
f_aud_counter += 1
for root, dirs, files in os.walk(path):
for f_audio in files:
if f_audio.endswith(".pcm"):
pcm2wav(f_audio,root)
os.remove(os.path.join(root, f_audio))
elif f_audio.endswith(".html"):
report_counter += 1
path_to_report = os.path.join(root,f_audio)
report_source = open(path_to_report, 'r')
report_tmp = open(path_to_report+".tmp", 'w') #temporary report file
for line in report_source:
line = line.replace('.aud','.wav').replace('.amr','.wav')
report_tmp.write(line)
report_source.close()
report_tmp.close()
os.remove(root + "\\" + f_audio)
os.rename(root + "\\" + f_audio + ".tmp", root + "\\" + f_audio)
elif f_audio.endswith(".amr"):
os.remove(os.path.join(root, f_audio))
elif f_audio.endswith(".aud"):
os.remove(os.path.join(root, f_audio))
os.system('cls')
print "\n**** WeChat audio file converter + UFED Physical Analyzer HTML Report fix ****"
print "\nHTML report(s) checked ................ %d" % report_counter
print "Audio files converted to WAV files .... %d\n" % f_aud_counter
print "\nDone !!!\n"
end_time = datetime.now()
print "\n\nScript started : " + str(start_time)
print "Script finished: " + str(end_time)
print('Duration : {}'.format(end_time - start_time))
subprocess.Popen('explorer %s' % path)
|
import down_util
from os import path,mkdir
import pandas as pd
if __name__=='__main__':
    # Build one per-year subset of a Landsat scene index: read the index CSV,
    # filter to the wanted path/rows and months via down_util.Get_zone, and
    # write the subset into a jiang_<year> folder.
    filename = r"D:\download_work\index.csv"
    ldst_df = pd.read_csv(filename,
        usecols=['SCENE_ID', 'SENSOR_ID', 'SPACECRAFT_ID', 'DATE_ACQUIRED', 'COLLECTION_NUMBER',
                 'COLLECTION_CATEGORY', 'WRS_PATH', 'WRS_ROW', 'CLOUD_COVER', 'BASE_URL',
                 'NORTH_LAT'],
        dtype={'COLLECTION_NUMBER': str}, parse_dates=['DATE_ACQUIRED'])
    years=[2017]
    base_dir=r'D:\download_work'
    # Presumably one WRS path/row pair per line; consumed by Get_zone — TODO confirm.
    prlist=pd.read_csv(r'D:\download_work\path-row.txt')
    for year in years:
        print(year)
        year_dir=path.join(base_dir,'jiang_'+str(year))
        if not path.exists(year_dir):
            mkdir(year_dir)
        # June-October only for this year.
        df = down_util.Get_zone(ldst_df, year,months=[6,7,8,9,10],inPathRows=prlist)
        down_util.write_subs(df, year_dir, str(year)+'all.csv')
import abc
import typing
from typing import TYPE_CHECKING
import requests
from kerasltisubmission.exceptions import (
KerasLTISubmissionBadResponseException,
KerasLTISubmissionConnectionFailedException,
)
if TYPE_CHECKING: # pragma: no cover
from kerasltisubmission.provider import SingleInputType # noqa: F401
from kerasltisubmission.provider import (
AnyIDType, # noqa: F401
InputsType, # noqa: F401
) # noqa: F401
class InputLoader(abc.ABC):
    """Base interface for fetching assignment prediction inputs from the API.

    NOTE(review): load_next/is_empty are plain stubs, not @abc.abstractmethod,
    so subclasses are not actually forced to override them.
    """
    def __init__(self, assignment_id: "AnyIDType", input_api_endpoint: str) -> None:
        self.assignment_id = assignment_id
        self.input_api_endpoint = input_api_endpoint

    def load_next(self) -> typing.Optional["SingleInputType"]:
        # Return the next input, or None once exhausted.
        pass

    def is_empty(self) -> bool:
        # True when the assignment has no inputs at all.
        pass
class PartialLoader(InputLoader):
    """Streams assignment inputs from the API batch by batch instead of
    loading the whole set at once (contrast with TotalLoader)."""

    def __init__(self, assignment_id: "AnyIDType", input_api_endpoint: str) -> None:
        super().__init__(assignment_id, input_api_endpoint)
        self.input_api_endpoint = input_api_endpoint
        # currentIndex: items handed out overall; batchIndex: position inside
        # the currently cached batch.
        self.currentIndex = 0
        self.batchIndex = 0
        self.batched: "InputsType" = list()

    def load_batch(self, input_id: int) -> "InputsType":
        """Fetch one page of prediction inputs; raises on connection or
        non-success API responses."""
        try:
            r = requests.get(
                f"{self.input_api_endpoint}/assignment/{self.assignment_id}/inputs/{input_id}"
            )
            rr = r.json()
        except Exception as e:
            raise KerasLTISubmissionConnectionFailedException(
                self.input_api_endpoint, e
            ) from None
        if r.status_code == 200 and rr.get("success", True) is True:
            prediction_inputs: "InputsType" = rr.get("predict")
            return prediction_inputs
        else:
            raise KerasLTISubmissionBadResponseException(
                api_endpoint=self.input_api_endpoint,
                return_code=r.status_code,
                assignment_id=self.assignment_id,
                message=rr.get("error"),
            )

    def load_next(self) -> typing.Optional["SingleInputType"]:
        """Return the next input, refetching a batch when the cache is spent."""
        if self.batchIndex >= len(self.batched):
            if len(self.batched) == 0:
                self.batched = self.load_batch(0)
            else:
                # NOTE(review): the page number is derived from the previous
                # batch's size; assumes all batches share one size — confirm.
                self.batched = self.load_batch(self.currentIndex // len(self.batched))
            self.batchIndex = 0
        n = (
            None
            if self.batchIndex >= len(self.batched)
            else self.batched[self.batchIndex]
        )
        self.currentIndex += 1
        self.batchIndex += 1
        return n

    def is_empty(self) -> bool:
        # Best effort: on API failure report "not empty" and let load_next
        # surface the error later.
        try:
            return len(self.load_batch(0)) < 1
        except (
            KerasLTISubmissionConnectionFailedException,
            KerasLTISubmissionBadResponseException,
        ):
            pass
        return False
class TotalLoader(InputLoader):
    """ Loads the entire validation set.
    This is marked deprecated due to performance implications

    NOTE(review): the unconditional `raise DeprecationWarning` at the end of
    __init__ means a TotalLoader can never be constructed successfully.
    """
    def __init__(self, assignment_id: "AnyIDType", input_api_endpoint: str) -> None:
        try:
            r = requests.get(f"{input_api_endpoint}/assignment/{assignment_id}/inputs")
            rr = r.json()
        except Exception as e:
            raise KerasLTISubmissionConnectionFailedException(
                input_api_endpoint, e
            ) from None
        if r.status_code == 200 and rr.get("success", True) is True:
            # Entire prediction input set cached up front.
            self.inputs = rr.get("predict")
            self.currentIndex = 0
        else:
            raise KerasLTISubmissionBadResponseException(
                api_endpoint=input_api_endpoint,
                return_code=r.status_code,
                assignment_id=assignment_id,
                message=rr.get("error"),
            )
        raise DeprecationWarning

    def load_next(self) -> typing.Optional["SingleInputType"]:
        # Sequential cursor over the cached inputs; None when exhausted.
        n = (
            None
            if self.currentIndex >= len(self.inputs)
            else self.inputs[self.currentIndex]
        )
        self.currentIndex += 1
        return n

    def is_empty(self) -> bool:
        return len(self.inputs) < 1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-08 12:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops Membre.user, makes
    # Logement.complement nullable, repoints Logement.proprietaire at
    # account.Membre, then deletes the local Membre model.

    dependencies = [
        ('annonces', '0007_auto_20160308_1252'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='membre',
            name='user',
        ),
        migrations.AlterField(
            model_name='logement',
            name='complement',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='logement',
            name='proprietaire',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logements', to='account.Membre'),
        ),
        migrations.DeleteModel(
            name='Membre',
        ),
    ]
|
import misc, twistExtractor |
import pytz
from django.core.mail import send_mail
from datetime import datetime,time,date,timedelta
from user_input.models import UserDailyInput
from quicklook.calculations.calculation_driver import which_device
from garmin.models import UserLastSynced
from fitbit.models import UserFitbitLastSynced
def send_userinput_update_email(admin_users_email, instance_meta):
    '''
    Send email to all the admin users with the admin link to
    newly created instance
    '''
    action = "submitted" if instance_meta['created'] else "updated"
    body = """
    Hi there,
    User {} ({}) has {} user inputs for {}. Please click following link to access it -
    {}
    If clicking the link above doesn't work, please copy and paste the URL in a new browser
    window instead.
    Sincerely,
    JVB Health & Wellness
    """.format(
        instance_meta['first_name'] + " " + instance_meta['last_name'],
        instance_meta['username'],
        action,
        instance_meta['created_at'],
        instance_meta['instance_url'],
    )
    if admin_users_email:
        send_mail(
            subject="New User Input" if instance_meta['created'] else "User Input Updated",
            message=body,
            from_email="info@jvbwellness.com",
            recipient_list=admin_users_email,
            fail_silently=True,
        )
# returns the users,offsets which relates to user local_time
def get_users_having_local_time(email_timing,filter_username=None):
    '''
    Return last-synced records (Garmin or Fitbit) for users whose local
    wall-clock hour currently matches one of *email_timing*.

    email_timing: list of datetime.time objects (only the hour matters).
    filter_username: optional list of usernames to restrict the lookup.
    When a user has both device types, the more recently synced record wins.
    '''
    garmin_offsets = set(q['offset'] for q in UserLastSynced.objects.values('offset'))
    fitbit_offsets = set(q['offset'] for q in UserFitbitLastSynced.objects.values('offset'))
    garmin_offsets_in_local_time = []
    fitbit_offsets_in_local_time = []
    # Loop variable renamed from "time": it used to shadow the datetime.time
    # class imported at module level.
    for offset in garmin_offsets:
        offset_localtime = (datetime.utcnow()+timedelta(seconds=offset))
        for notify_time in email_timing:
            if offset_localtime.hour == notify_time.hour:
                garmin_offsets_in_local_time.append(offset)
    for offset in fitbit_offsets:
        offset_localtime = (datetime.utcnow()+timedelta(seconds=offset))
        for notify_time in email_timing:
            if offset_localtime.hour == notify_time.hour:
                fitbit_offsets_in_local_time.append(offset)
    if filter_username:
        garmin_users = UserLastSynced.objects.filter(
            offset__in = garmin_offsets_in_local_time,
            user__username__in = filter_username).select_related('user')
        fitbit_users = UserFitbitLastSynced.objects.filter(
            offset__in = fitbit_offsets_in_local_time,
            user__username__in = filter_username).select_related('user')
    else:
        garmin_users = UserLastSynced.objects.filter(
            offset__in = garmin_offsets_in_local_time).select_related('user')
        fitbit_users = UserFitbitLastSynced.objects.filter(
            offset__in = fitbit_offsets_in_local_time).select_related('user')
    g_usernames = [g.user.username for g in garmin_users]
    f_usernames = [f.user.username for f in fitbit_users]
    users_with_offset_in_local_time = []
    if g_usernames:
        for g_user in garmin_users:
            if f_usernames and g_user.user.username in f_usernames:
                for f_user in fitbit_users:
                    if f_user.user.username in g_usernames:
                        # Same user on both devices: keep the later sync.
                        if (g_user.user.username == f_user.user.username and
                            g_user.last_synced < f_user.last_synced_fitbit):
                            users_with_offset_in_local_time.append(f_user)
                        elif (g_user.user.username == f_user.user.username and
                            g_user.last_synced > f_user.last_synced_fitbit):
                            users_with_offset_in_local_time.append(g_user)
                    else:
                        users_with_offset_in_local_time.append(f_user)
            else:
                users_with_offset_in_local_time.append(g_user)
    elif f_usernames:
        # BUGFIX: the original appended the whole QuerySet object here,
        # leaking it into the result instead of adding the individual rows.
        users_with_offset_in_local_time.extend(fitbit_users)
    users_with_offset_in_local_time = list(set(users_with_offset_in_local_time))
    return users_with_offset_in_local_time
# remind selected users to submit UserDailyInput
def notify_user_to_submit_userinputs():
    """Email selected users who have not submitted a UserDailyInput today.

    Targets users whose local time is currently ~10 PM; the reminder covers
    the span since their last submission. Users with no submission at all
    are currently skipped (last_ui is None).
    """
    # RECEPIENTS_USERNAME = ["johnb",'pw',"Michelle","Brenda","BrookPorter",
    # "cherylcasone","knitter61","lafmaf123","davelee","Justin","lalancaster",
    # "MikeC","missbgymnast","squishyturtle24","yossi.leon@gmail.com",
    # "atul","jvbhealth","Jvbtest","Vickykolovou","samle"]
    RECEPIENTS_USERNAME = ["missbgymnast","squishyturtle24","jvbhealth",
        "Dileep",]
    # RECEPIENTS_USERNAME = ["dileep",'narendra','venky']
    # Local time at which email notification should be sent to the user
    # 10 PM local time
    EMAIL_TIMING = [time(22)]
    RECEPIENTS_WITH_OFFSET = get_users_having_local_time(
        EMAIL_TIMING,RECEPIENTS_USERNAME)
    FEEDBACK_EMAIL = "info@jvbwellness.com"
    ROOT_URL = "https://app.jvbwellness.com/"
    USER_INPUT_URL = ROOT_URL+"userinputs"
    # username -> most recent input (if any) plus contact details and offset.
    last_userinput_of_users = {}
    for user_lsync in RECEPIENTS_WITH_OFFSET:
        user = user_lsync.user
        last_userinput_of_users[user.username] = {
            "last_ui":None,
            "user_email":user.email,
            "user_first_name":user.first_name,
            "user_offset":user_lsync.offset
        }
        try:
            last_userinput_of_users[user.username]["last_ui"] = (
                UserDailyInput.objects.filter(
                    user = user).order_by('-created_at')[0])
        except (IndexError,UserDailyInput.DoesNotExist) as e:
            last_userinput_of_users[user.username]["last_ui"] = None
    for username,user_meta in last_userinput_of_users.items():
        if user_meta['last_ui']:
            # NOTE(review): naive datetime.now() plus a UTC offset —
            # presumably the server clock is UTC; confirm.
            today_utc = datetime.now()
            today_local_time = today_utc + timedelta(seconds = user_meta['user_offset'])
            last_ui = user_meta['last_ui']
            user_email = user_meta['user_email']
            user_first_name = user_meta['user_first_name']
            last_ui_date = datetime.combine(last_ui.created_at,time(0))
            message = """
            Hi {},
            We noticed that you have not submitted your user inputs {}. Click on the link below to submit them.
            {}
            If clicking the link above doesn't work, please copy and paste the URL into a new browser window instead.
            Thanks and let us know if you have any questions by emailing {}
            Sincerely,
            JVB Health & Wellness"""
            submission_from_text = ""
            subject = "Submit User Inputs | {}"
            # Submitted yesterday -> remind for today only; otherwise remind
            # for everything since the day after the last submission.
            if last_ui_date.date() == (today_local_time-timedelta(days = 1)).date():
                submission_from_text = "today ({})".format(
                    today_local_time.strftime("%b %d, %Y")
                )
                subject = subject.format(today_local_time.strftime("%b %d, %Y"))
            else:
                submission_from_text = "from {}".format(
                    (last_ui_date+timedelta(days=1)).strftime("%b %d, %Y")
                )
                subject = subject.format(
                    (last_ui_date+timedelta(days=1)).strftime("%b %d, %Y"))
            message = message.format(
                user_first_name.capitalize(),submission_from_text,
                USER_INPUT_URL,FEEDBACK_EMAIL
            )
            # Skip anyone who already submitted today (local time).
            if last_ui_date.date() != today_local_time.date():
                send_mail(
                    subject = subject,
                    message = message,
                    from_email = FEEDBACK_EMAIL,
                    recipient_list = [user_email],
                    fail_silently = True
                )
# Reminding Users to Synchronize watch
def notify_users_to_sync_watch():
    """Email selected users whose wearable has not synced recently.

    Runs at ~9 AM and ~9 PM local time; a user is skipped when their last
    sync happened today inside the current shift's 4-hour window.
    """
    # RECEPIENTS_USERNAME = ["johnb",'pw',"BrookPorter",
    # "Justin","lalancaster","MikeC","jvbhealth","Jvbtest",
    # "missbgymnast","squishyturtle24","Vickykolovou","samle"]
    RECEPIENTS_USERNAME = ["missbgymnast","squishyturtle24","jvbhealth",
        "Dileep",]
    # RECEPIENTS_USERNAME = ['venky','pavan','norm','mani','narendra']
    EMAIL_TIMING = [time(9),time(21)]
    RECEPIENTS_WITH_OFFSET = get_users_having_local_time(
        EMAIL_TIMING,RECEPIENTS_USERNAME)
    FEEDBACK_EMAIL = "info@jvbwellness.com"
    # username -> last sync local datetime (or None) + contact details.
    last_synced_of_users = {}
    for user_lsync in RECEPIENTS_WITH_OFFSET:
        user = user_lsync.user
        last_synced_of_users[user.username] = {
            "last_sync":None,
            "user_email":user.email,
            "user_first_name":user.first_name,
            "user_offset":user_lsync.offset
        }
        try:
            garmin_last_sync = UserLastSynced.objects.filter(user = user)
            fitbit_last_sync = UserFitbitLastSynced.objects.filter(user = user)
            # Pick whichever device synced most recently.
            if garmin_last_sync and fitbit_last_sync and garmin_last_sync[0].last_synced > fitbit_last_sync[0].last_synced_fitbit:
                last_synced_obj = [garmin_last_sync[0],"garmin"]
            elif garmin_last_sync and fitbit_last_sync and garmin_last_sync[0].last_synced < fitbit_last_sync[0].last_synced_fitbit:
                last_synced_obj = [fitbit_last_sync[0],"fitbit"]
            elif garmin_last_sync:
                last_synced_obj = [garmin_last_sync[0],"garmin"]
            elif fitbit_last_sync:
                last_synced_obj = [fitbit_last_sync[0],"fitbit"]
            # NOTE(review): with neither record, last_synced_obj is unbound
            # here and the resulting NameError is NOT caught below — confirm
            # recipients always have at least one synced device.
            if last_synced_obj[1] == "garmin":
                last_sync_local_dtime = last_synced_obj[0].last_synced + timedelta(
                    seconds=last_synced_obj[0].offset)
            else:
                last_sync_local_dtime = last_synced_obj[0].last_synced_fitbit + timedelta(
                    seconds=last_synced_obj[0].offset)
            last_synced_of_users[user.username]["last_sync"] = last_sync_local_dtime
        except (IndexError,UserLastSynced.DoesNotExist,UserFitbitLastSynced.DoesNotExist) as e:
            last_synced_of_users[user.username]["last_sync"] = None
    for username,user_meta in last_synced_of_users.items():
        if user_meta['last_sync']:
            # NOTE(review): naive datetime.now() plus a UTC offset —
            # presumably the server clock is UTC; confirm.
            today_utc = datetime.now()
            today_local_time = today_utc + timedelta(seconds = user_meta['user_offset'])
            last_sync = user_meta['last_sync']
            user_email = user_meta['user_email']
            user_first_name = user_meta['user_first_name']
            synchronize_from_text = "(since {})".format(
                last_sync.strftime("%b %d, %Y @ %I:%M %p"))
            subject = "Synchronize Watch | {}".format(
                last_sync.strftime("%b %d, %Y @ %I:%M %p"))
            message="""
            Hi {},
            We just noticed that you have not sync’d your wearable device in a while {}. If you want to see all your cool health and activity stats and rankings, sync your watch now.
            Thanks and let us know if you have any questions by emailing {}
            Sincerely,
            JVB Health and Wellness
            """
            message = message.format(
                user_first_name.capitalize(),synchronize_from_text,
                FEEDBACK_EMAIL
            )
            # "Recently enough": synced within 5-9 AM for the morning run or
            # 5-9 PM for the evening run.
            synced_in_past_4_hours = False
            notification_shift = 'evening' if today_local_time.hour > 11 else 'morning'
            if (notification_shift == 'evening'
                and last_sync.hour >= 17 and last_sync.hour <= 21):
                synced_in_past_4_hours = True
            elif (notification_shift == 'morning'
                and (last_sync.hour >= 5 and last_sync.hour <= 9)):
                synced_in_past_4_hours = True
            if not(today_local_time.date() == last_sync.date() and
                synced_in_past_4_hours):
                send_mail(
                    subject = subject,
                    message = message,
                    from_email = FEEDBACK_EMAIL,
                    recipient_list = [user_email],
                    fail_silently = True
                )
# Keep asking until the user enters a positive integer, then print every
# divisor of that number and how many there are.
while True:
    n = input('正の整数を入力してください:')
    x = int(n)
    if x > 0:
        break
divisor_values = [i for i in range(1, x + 1) if x % i == 0]
divisor = ''.join(' ' + str(i) for i in divisor_values)
count = len(divisor_values)
print(x, 'の約数は', divisor, 'です。')
print('全部で', count, '個あります')
#Claire Yegian
#1/18/18
#loopDemo.py - how to use a loop
"""for i in range(1,101):
if '7' in str(i) or i%7==0:
print('Buzz')
else:
print(i)"""
# Print 1..100, replacing any number that contains the digit 7 or is
# divisible by 7 with "Buzz".
for n in range(1, 101):
    print('Buzz' if '7' in str(n) or n % 7 == 0 else n)
# -*- coding: utf-8 -*-
# from odoo import http
# class BaramegBirthdayReminder(http.Controller):
# @http.route('/barameg_birthday_reminder/barameg_birthday_reminder/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/barameg_birthday_reminder/barameg_birthday_reminder/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('barameg_birthday_reminder.listing', {
# 'root': '/barameg_birthday_reminder/barameg_birthday_reminder',
# 'objects': http.request.env['barameg_birthday_reminder.barameg_birthday_reminder'].search([]),
# })
# @http.route('/barameg_birthday_reminder/barameg_birthday_reminder/objects/<model("barameg_birthday_reminder.barameg_birthday_reminder"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('barameg_birthday_reminder.object', {
# 'object': obj
# })
|
import newt
def report(m):
    """Forward message *m* to newt.report with console echo enabled."""
    newt.report(m,True)
api=newt.getTwitterAPI()
#----------------------------------------------------------------
#user settings
#----------------------------------------------------------------
twsn={}
tw=[]
tw2={}
tw2['newt::headerPlus']='party VARCHAR'
'''
twsn=newt.listDetailsByScreenName({},api.list_members,'tweetminster','libdems')
for i in twsn:
tw.append(i)
tw2[i]={}
tw2[i]['party']='libdems'
twsn=newt.listDetailsByScreenName({},api.list_members,'tweetminster','others')
for i in twsn:
tw.append(i)
tw2[i]={}
tw2[i]['party']='others'
'''
def doGroup(group, tw, tw2):
    """Append the members of the tweetminster list *group* to tw and tag
    each of them with their group name under the 'party' key in tw2."""
    members = newt.listDetailsByScreenName({}, api.list_members, 'tweetminster', group)
    for screen_name in members:
        tw.append(screen_name)
        tw2[screen_name] = {'party': group}
    return tw, tw2
affiliations=["parliament","financialtimes","otherukmedia","skynews","telegraph","theindependent","channel4news","guardian","bbc","ukgovernmentdepartments","others","conservatives","labour","libdems"]
label='govtweeps'
#affiliations=["others","conservatives","labour","libdems"]
#label='ukmps'
# Collect members of every affiliation list, then fetch full user details
# and emit three Gephi edge files (inner/outer/extra friends).
for aff in affiliations:
    tw,tw2=doGroup(aff,tw,tw2)
tw=newt.getTwitterUsersDetailsByScreenNames(api,tw)
newt.gephiOutputFilePlus(api,label, tw,tw2,'innerfriends')
newt.gephiOutputFilePlus(api,label, tw,tw2,'outerfriends')
newt.gephiOutputFilePlus(api,label, tw,tw2,'extrafriends')
#!/usr/bin/env python
import os
import re
import sys
import json
from roshan import Roshan
from conf import *
import traceback
__usage__ = """Add node or update acl in zk.
Example
%(argv0)s add_node site_list
The site_list looks like
#domain_name full_path
topcar09.baidu.com /sandbox/ns/cms/other/topcar09
Or %(argv0)s update_acl conffile
The conffile looks like
[server]
xxx.xxx.xxx.xxx
xxx.xxx.xxx.xxx
[path]
/baidu/ns/cms/path/to/node
"""
_version_ = "0.0.0.3"
roshan=None
default_acls=['ip:127.0.0.1:7','ip:10.65.33.135:7','ip:10.65.33.235:7','ip:10.81.33.37:7']
def login():
    """Create the global Roshan client and log in with the configured account.

    Returns -1 on a wrong username/password combination, None otherwise.
    """
    global roshan
    roshan = Roshan(*config['addr'])
    if 'incorrect' in roshan.login(*config['account']):
        print 'Wrong Username/Email and password combination.'
        return -1
def add_abslute_path(path):
    """
    actually add a node

    Creates the zk node at *path* via the global roshan client and prints
    the outcome.
    """
    ret = roshan.add_node(path)
    if ret.get('status','fail').lower() == 'ok':
        print 'Create node %s success' % path
    elif 'error' in ret:
        print 'Error: %s' % ret['error']
    else:
        print 'Unknown error'
def add_one_site(domain, rootpath="/sandbox/cms-op"):
    """
    add a single site node and update the data

    Creates every intermediate node of *rootpath*, then writes the service
    payload for *domain* (see make_node_data) plus the default ACLs to the
    leaf node.
    """
    head=''
    nodes = rootpath.split('/')
    for n in nodes:
        if n:
            head = head + '/' + n
            add_abslute_path(head)
    path=head
    # NOTE(review): the leaf was already created by the loop above; this
    # second call is redundant but harmless — confirm.
    add_abslute_path(path)
    data=make_node_data(domain)
    ret = roshan.update_node(path,generate_acl(),json.dumps(data),isappend=False)
    if 'error' in ret:
        print 'Error: %s'%(ret['error'])
    elif 'status' in ret:
        print 'create %s: %s' %( path, ret['status'])
def make_node_data(domain, port=1110, cmspm_num=1):
    """Build the JSON-able payload describing one cms service node.

    Returns {'name': 'cms', 'services': {domain: <service settings>}}.
    """
    service = {
        'check_cmspm_num': 1,
        'cmspm_num': cmspm_num,
        'is_work': 1,
        'service_conn_type': 0,
        'service_name': domain,
        'service_port': port,
        'service_type': 0,
    }
    return {'name': "cms", 'services': {domain: service}}
def read_conf(file):
    """
    load domains and path from a file

    Each significant line is "<domain> <full_path>"; '#' comments and blank
    lines are ignored. Returns a {domain: path} dict.
    """
    domain_path = {}
    # with-block fixes the leaked handle; the original also stopped at the
    # first blank line because readline()'s '' EOF test matched stripped
    # blank lines — now blank lines are simply skipped.
    with open(file) as f:
        for raw in f:
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            domain, path = line.split()
            domain_path[domain] = path
    return domain_path
def check_user():
    """Return 1 when the configured account user is 'cms', else 0."""
    return 1 if config['account'][0] == 'cms' else 0
#add_node node_conf
#the format of node_conf
#xxx.baidu.com /baidu/ns/cms/xxxx/xxxxxx
def add_node(node_conf):
    """Create every node listed in *node_conf* ("domain path" per line).

    Only the 'cms' account may add nodes; logs in first, then creates each
    site via add_one_site. Prints usage when no conf file is given.
    """
    ##main program
    if not check_user():
        print "only user cms can add a new node."
        return 0
    login()
    #add_one_site('test_domain','/sandbox/a/b/c/d')
    datafile=node_conf
    if datafile :
        domain_path=read_conf(datafile)
        for key in domain_path.keys():
            add_one_site(key,domain_path[key])
    else:
        print """
add_node node_conf
the format of node_conf_file should be
xxx.baidu.com /baidu/ns/cms/xxxx/xxxx
"""
def get_node_data(path):
node = roshan.get_node(path)
if not node:
print "error in get node %s, error: %s" %(path, node)
return None,None,None
node_acls = node.get('acl')
node_data = node.get('data')
stat_arr = node.get('stat')
if node_data is not None and len(node_data) != 0:
if node_data[-1] == '\0' :
node_data = node_data.rstrip('\0')
node_dict = json.loads(node_data)
else:
node_dict = None
return node_dict
def update_node_dict(node_dict, cmspm_num=1):
    """Rebuild a node payload keeping its domain and port but a new cmspm_num.

    NOTE(review): serv.keys()[0] is Python-2 only (dict views are not
    indexable on Python 3) — consistent with the rest of this script.
    """
    serv=node_dict['services']
    name = node_dict['name']
    domain = str(serv.keys()[0])
    domain_data = serv[domain]
    port = domain_data['service_port']
    return make_node_data(domain, port=port, cmspm_num=cmspm_num)
import re
def parse_acl_conf(acl_conf):
    """Parse an acl conf file containing [server] and [path] sections.

    Returns (path, server_list): path is the first line of the [path]
    section, server_list the list of lines under [server].
    """
    # Compiled once, outside the loop; raw string fixes the escaped-escape.
    section_re = re.compile(r'\[.*]', re.IGNORECASE)
    sections = {}
    key = None
    # with open(...) replaces the Python-2-only file() builtin and closes
    # the handle deterministically; the unused last_key variable is gone.
    with open(acl_conf) as fh:
        for raw in fh:
            line = raw.rstrip()
            if len(line) == 0:
                continue
            m = section_re.match(line)
            if m is None:
                # Plain line: belongs to the most recent [section].
                if key:
                    sections[key].append(line)
            else:
                key = str(m.group())[1:-1]
                sections[key] = []
    path = sections['path'][0]
    acl_list = sections['server']
    return path, acl_list
def generate_acl(acl_list=()):
    """Render zk ACL text: one "ip:<addr>:7" line per entry in *acl_list*
    followed by the module-wide default_acls lines.

    The default is now an empty tuple; the old default of "" only worked
    because iterating an empty string happens to yield nothing.
    """
    lines = ['ip:%s:7' % addr for addr in acl_list]
    lines.extend('%s' % entry for entry in default_acls)
    return ''.join(l + '\n' for l in lines)
#update_acl acl_conf
#should replace the acls of the node and set "cmspm_num"
#
def update_acl(acl_file):
    """Replace a node's ACL list (and matching cmspm_num) from *acl_file*.

    Reads the [server]/[path] sections, rebuilds the node payload with
    cmspm_num = number of servers, and pushes both data and ACLs.
    """
    login()
    #first get data and parse it
    path,acl_list = parse_acl_conf(acl_file)
    acl_count = len(acl_list)
    node_dict = get_node_data(path)
    #second, replace the acl and the data
    node_dict = update_node_dict(node_dict,cmspm_num=acl_count)
    print 'cmspm_num: '+ str(acl_count)+ ' newdata: '+str(node_dict)
    acl = generate_acl(acl_list)
    print 'acl: '+ str(acl)
    roshan.update_node(path,data=json.dumps(node_dict),acl=acl,isappend=False)
def main():
    """Dispatch sys.argv[1] to add_node/update_acl; print usage otherwise."""
    func_map = {
        'add_node': add_node,
        'update_acl': update_acl,
    }
    try:
        func = func_map.get(sys.argv[1], None)
        if func:
            try:
                # Remaining CLI args become the handler's positional args; a
                # wrong arity surfaces as TypeError and is reported below.
                return func(*sys.argv[2:])
            except TypeError,errmsg:
                print 'CallFunctionError %s: %s'%(sys.argv[1], errmsg)
    except IndexError:
        pass
    print __usage__ % (dict(argv0=sys.argv[0]))
if __name__ == '__main__':
exit(main())
|
#!/usr/bin/env python
import json
import csv
# RUN THIS SCRIPT AFTER YOU HAVE RUN THE MAIN PROGRAM
# ANOTHER SCRIPT TO EXTRACT JSON DATA AND EXPORT TO CSV FILE
# THIS EXAMPLE IS PRETTY MUCH STANDARD FOR THE MARVEL API
# Flatten the character entries of every result in the Marvel API dump and
# write them to marvelJSON.csv, one row per character item.
outputFile = open('marvelJSON.csv', 'w')
outputWriter = csv.writer(outputFile)
filea = 'last_api.json'
fileb = 'url_last_api.json'  # NOTE(review): declared but never read here
sourceFile = open(filea, 'rU')
json_data = json.load(sourceFile)
for item in json_data['data']['results']: #[key][list]
    for new in item['characters']['items']: #[key][list]
        dicto1 = new.values()
        # Drop empty-list values so only populated fields are written.
        list2 = [x for x in dicto1 if x != []]
        outputWriter.writerow(list2)
sourceFile.close()
outputFile.close()
|
# Generated by Django 2.0.2 on 2019-03-10 09:01
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the interview field from the
    # delivery model.

    dependencies = [
        ('delivery', '0005_delivery_enterprice'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='delivery',
            name='interview',
        ),
    ]
|
#!/usr/bin/python
import sys
# Represents how we define a question.
# Represents how we define a question.
class Question:
    """Reducer-side aggregate for one question: its body length plus a
    running count and length sum of its answers."""
    # Question identifier
    id = None
    # Question body length
    questionLength = None
    # Total of answer
    answers = 0
    # Sum of all answer's body
    sumAnswersLength = 0

    def __init__(self, id, questionLength):
        self.id = id
        self.questionLength = questionLength

    # Add an answer. It increments our answers count and sum its length to our accumulator.
    def addAnswer(self, answerLength):
        self.answers += 1
        self.sumAnswersLength += float(answerLength)

    # Print the result
    def printResult(self):
        # Emits: id <tab> question length <tab> average answer length
        # (or 0 when the question received no answers).
        if self.answers > 0:
            print self.id, '\t', self.questionLength, '\t', self.sumAnswersLength / self.answers
        else:
            print self.id, '\t', self.questionLength, '\t', self.answers
# Reducer main loop: stdin rows are tab-separated "id <type> <bodyLength>",
# sorted so each question row ('A') precedes its answer rows ('B').
question = None
for line in sys.stdin:
    data_mapped = line.strip().split("\t")
    if len(data_mapped) != 3:
        # Something has gone wrong. Skip this line.
        continue
    # Load line to variables
    identifier, type, bodyLength = data_mapped
    # If it is a question
    if type == 'A':
        if question != None:
            question.printResult()
        question = Question(identifier, bodyLength)
    # If it is an answer
    elif type == 'B':
        # NOTE(review): assumes no 'B' row ever precedes the first 'A' row;
        # otherwise question is still None here — confirm mapper output.
        question.addAnswer(bodyLength)
# Print last question
if question != None:
    question.printResult()
|
import unittest
from katas.kyu_7.filter_list import filter_list
class FilterListTestCase(unittest.TestCase):
    """Checks that filter_list keeps the integers and drops the strings."""

    def test_equals(self):
        self.assertEqual(filter_list([1, 2, 'a', 'b']), [1, 2])

    def test_equals_2(self):
        self.assertEqual(filter_list([1, 'a', 'b', 0, 15]), [1, 0, 15])

    def test_equals_3(self):
        # Numeric-looking strings such as '1' and '123' must also be dropped.
        self.assertEqual(filter_list([1, 2, 'aasf', '1', '123', 123]),
                         [1, 2, 123])
|
import re
class SpotifyTrack:
    """Thin wrapper exposing selected fields of a Spotify track API payload."""

    def __init__(self, body_dict):
        # General track metadata pulled straight from the payload.
        self.id = body_dict["id"]
        self.name = body_dict["name"]
        self.duration_ms = body_dict["duration_ms"]
        self.explicit = body_dict["explicit"]
        self.album_name = body_dict["album"]["name"]
        self.track_numer = body_dict["track_number"]  # (sic) name kept for compatibility
        self.disc_number = body_dict["disc_number"]
        self.artist = body_dict["artists"][0]["name"]
        # Sanitized titles: first strip parenthesised suffixes, then drop
        # anything after the first "-".
        self.sanitized_name1 = re.sub(r'\([^)]*\)', '', self.name)
        self.sanitized_name2 = self.sanitized_name1.split('-')[0]
if __name__ == "__main__":
    # BUGFIX: this used to call an undefined main(), so running the module
    # as a script crashed with NameError. There is no CLI behaviour here;
    # the SpotifyTrack class above is meant to be imported.
    pass
def is_check_none_space_length(pwd):
    """True when pwd is a non-None, space-free string of 8..16 characters."""
    if pwd is None:
        return False
    return ' ' not in pwd and 8 <= len(pwd) <= 16
def is_check_char(pwd):
    """True when pwd has at least one alphabetic, one lowercase and one
    uppercase character (empty strings fail all three checks)."""
    checks = (str.isalpha, str.islower, str.isupper)
    return all(any(check(ch) for ch in pwd) for check in checks)
def is_check_special_char(pwd):
    """True when pwd contains at least one of the characters * . @ !"""
    allowed = {'*', '.', '@', '!'}
    return any(ch in allowed for ch in pwd)
|
# WAP to Create a File and Write data to it
#===========================================
file_name = input("Enter File Name : ")
try:
    # "w" creates/truncates; the with-block guarantees the handle is closed
    # even when the write fails (the original leaked it on failure).
    with open(file_name,"w") as f:
        data = input("Enter some data to File ")
        f.write(data)
except OSError:
    # Narrowed from a bare except, which also swallowed unrelated bugs
    # (e.g. KeyboardInterrupt/NameError).
    print("Invalid")
|
from django.urls import path
from . import compatviews, views
app_name = "osmcal.api"

# Versioned API routes: v1 endpoints are served by the backwards-compatible
# views, v2 by the current ones; plus an internal timezone helper.
urlpatterns = [
    path("v1/events/", compatviews.EventListV1.as_view(), name="api-event-list"),
    path("v1/events/past/", compatviews.PastEventListV1.as_view(), name="api-past-event-list"),
    path("v2/events/", views.EventList.as_view(), name="api-event-list-v2"),
    path("v2/events/past/", views.PastEventList.as_view(), name="api-past-event-list-v2"),
    path("internal/timezone", views.Timezone.as_view(), name="api-internal-timezone"),
]
|
import numpy as np
from sklearn.decomposition import PCA
class ComponentAnalysis:
    """Thin wrapper around scikit-learn PCA with a fixed component count."""

    def __init__(self, n_components=30):
        # Underlying estimator; fitted lazily in dim_reduction.
        self.pca = PCA(n_components=n_components)

    def dim_reduction(self, X):
        """Fit PCA on X and return the dimensionality-reduced matrix."""
        return self.pca.fit_transform(X)

# Module-level ready-to-use instance, as in the original file.
pca_model = ComponentAnalysis()
|
# imports
import sched, time, datetime, os
from datetime import datetime, timedelta
import os
import json
import requests
import configparser
config=configparser.ConfigParser()
config.read('config.ini')
header = {
"content-type": "application/json",
"authorization": "token " + config.get('myvars','GITHUBTOKEN')
}
def get_api_url(url):
    """Map a github.com repo URL (optionally ending in .git) to its API URL.

    e.g. https://github.com/u/r -> https://api.github.com/repos/u/r
    """
    trimmed = url[:-4] if url[-4:] == '.git' else url
    # Same three-step replace dance as before: swap in the api host, splice
    # "/repos/" into the first three slashes, then collapse the two stray
    # occurrences that creates.
    api = trimmed.replace("github", "api.github")
    api = api.replace("/", "/repos/", 3)
    return api.replace("/repos/", "/", 2)
def fetch_last_commit(repo_url):
    """Return the master-branch head commit time of *repo_url*, shifted by
    -18000 seconds (UTC-5), or None when the repo/response is unusable."""
    url = get_api_url(repo_url) + "/commits/master"
    try:
        response = requests.get(url, headers=header).json()
        date = response["commit"]["committer"]["date"]
    except Exception:
        # Narrowed from a bare except; still deliberately best-effort: a bad
        # URL, network failure or schema change all just mean "no commit".
        print('Invalid repo Url ', url)
        return None
    return datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ") - timedelta(seconds=18000)
def fetch_num_of_tasks(repo_url, deadline):
    """Count the issues of *repo_url* created on or before *deadline*."""
    issues = requests.get(get_api_url(repo_url) + "/issues", headers=header).json()
    return sum(
        1
        for issue in issues
        if datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ") <= deadline
    )
|
__author__ = 'Bill'
import sys, time, math
def parse_case(file):
    """Read one test case: a header line "e r n" followed by a line of
    n integer values. Returns (e, r, values) with values as a list."""
    e, r, n = [int(tok) for tok in file.readline().split()]
    values = [int(tok) for tok in file.readline().split()]
    return e, r, values
def process_case(case):
    """Return the maximum total gain for one (e, r, v) case."""
    e, r, v = case
    # Enough recharge steps to refill from empty, plus one for safety.
    steps = int(math.ceil(e / r)) + 1
    # Initially every position may spend the full energy budget e.
    return calc_gain(e, r, v, steps, [e] * len(v))
def calc_gain(e, r, v, steps, max_spend):
    """Recursively compute the best total gain for values v.

    Spends max_spend[index] energy on the largest value, then solves the
    left and right sub-ranges with their spending caps tightened: n steps
    away from the split, at most n * r energy can have recharged.
    e = max energy, r = recharge per step, steps = cap on that tightening.
    """
    gain = 0
    largest = max(v)
    index = v.index(largest)
    gain += largest * max_spend[index]
    v_left = v[:index]
    if len(v_left) > 0:
        max_spend_left = max_spend[:index]
        # Walk right-to-left from the split, capping each position by the
        # recharge possible in that many steps (stop once caps are smaller).
        for i in range(steps):
            n = i+1
            if n > len(max_spend_left):
                break
            if n * r >= max_spend_left[-n]:
                break
            max_spend_left[-n] = n * r
        gain += calc_gain(e, r, v_left, steps, max_spend_left)
    v_right = v[index+1:]
    if len(v_right) > 0:
        max_spend_right = max_spend[index+1:]
        # Mirror image: walk left-to-right away from the split.
        for i in range(steps):
            n = i+1
            if n > len(max_spend_right):
                break
            if n * r >= max_spend_right[i]:
                break
            max_spend_right[i] = n * r
        gain += calc_gain(e, r, v_right, steps, max_spend_right)
    return gain
if __name__ == '__main__':
    # BUGFIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    t0 = time.perf_counter()
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = "sample.in"
    # with-blocks close both files even when a case raises.
    with open(filename, "r") as input_file, \
         open(filename.replace('in', 'out'), "w") as output_file:
        case_count = int(input_file.readline())
        for i in range(case_count):
            result = process_case(parse_case(input_file))
            output_line = 'Case #%d: %s\n' % (i + 1, result)
            print(output_line)
            output_file.writelines(output_line)
    print('Total Time: %s' % str(time.perf_counter() - t0))
# Generated by Django 2.2.4 on 2019-11-15 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames order.item_json to items_json
    # and gives order.address2 an empty-string default.

    dependencies = [
        ('users', '0007_auto_20191115_1219'),
    ]

    operations = [
        migrations.RenameField(
            model_name='order',
            old_name='item_json',
            new_name='items_json',
        ),
        migrations.AlterField(
            model_name='order',
            name='address2',
            field=models.CharField(default='', max_length=100),
        ),
    ]
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from douban.items import DoubanItem
class MydoubanSpider(scrapy.Spider):
    """Scrapy spider for the Douban movie Top 250 list.

    Yields one DoubanItem per movie (name, info line, star rating,
    vote count) and follows the "next page" link until the list ends.
    """
    name = 'mydouban'
    url = ['https://movie.douban.com/top250']
    start_urls = {'https://movie.douban.com/top250'} # option 1: let scrapy issue the first request
    '''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
    }
    def start_requests(self):
        url = 'https://movie.douban.com/top250'
        yield Request(url, headers=self.headers)
    '''
    def parse(self, response):
        """Extract every movie on the page, then follow pagination."""
        item = DoubanItem()
        # One <li> per movie inside the ranked list.
        movies = response.xpath('//ol[@class="grid_view"]/li')
        for movie in movies:
            item['name'] = movie.xpath(".//div[@class='pic']/a/img/@alt").extract()[0]
            item['movieInfo'] = movie.xpath(".//div[@class='info']/div[@class='bd']/p/text()").extract()[0].strip()
            item['star'] = movie.xpath(".//div[@class='info']/div[@class='bd']/div[@class='star']/span[2]/text()").extract()[0]
            # NOTE(review): despite the key name, this captures the vote
            # count ("...人评价"), not a quote string - confirm intent.
            item['quote'] = movie.xpath('.//div[@class="star"]/span/text()').re(r'(\d+)人评价')[0]
            yield item
        # Follow the "next" link until the last page (no link present).
        next_url = response.xpath('//span[@class="next"]/a/@href').extract()
        if next_url:
            next_url = 'https://movie.douban.com/top250' + next_url[0]
            yield Request(next_url,callback=self.parse)
|
#Names,Variables,Codes,Functions
#this one is like your scripts with argv
def print_two(*args):
    """Unpack exactly two positional arguments and echo them."""
    first, second = args
    print("arg1: %r, arg2: %r" % (first, second))
def print_two_again(arg1, arg2):
    """Echo two explicitly named arguments."""
    print("arg1: %r, arg2: %r" % (arg1, arg2))
def print_one(arg1):
    """Echo a single argument."""
    print("arg1: %r" % arg1)
def print_none():
    """Print a fixed message; takes no arguments."""
    print("I got nothing!!")
# Demonstration calls, one per arity of the print helpers above.
print_two("Kalyan","Ghosh")
print_two_again("Kalyan","Ghosh")
print_one("First!")
# BUG FIX: a bare "print_none" only referenced the function object;
# it must be called to produce its output.
print_none()
# Methods for calculating dielectric function and absorption
# based on RPA
# based on Eq. 5-13 in Electronic States and Optical Transitions by
# Bassan and Parravicini
import smear as sm
# dirac-delta & FD factor vectorized...
def absvec(pmn, delta_vec, fd_vec, kptwt, nkpt, nbnd, nfilled):
    """Absorption coefficient at one frequency (vectorized smearing).

    Computed in atomic units.
    Inputs:
        pmn: momentum matrix elements, [ikpt][empty_band][filled_band]
        delta_vec: pre-smeared dirac deltas, [nkpt][nempty][nfilled]
        fd_vec: Fermi-Dirac occupations f_ik, [nkpt][nbnd]
        kptwt: k-point weights
        nkpt: number of k-points
        nbnd: number of electronic bands
        nfilled: number of filled bands (index offset for empty bands)
    Output:
        alpha: absorption coefficient at the embedded frequency
    """
    alpha = 0.0
    for ik in range(nkpt):
        weight = kptwt[ik]
        for ie in range(nbnd - nfilled):            # conduction (empty) bands
            occ_empty = fd_vec[ik][ie + nfilled]
            p_row = pmn[ik][ie]
            d_row = delta_vec[ik][ie]
            for jf in range(nfilled):               # valence (filled) bands
                occ_diff = fd_vec[ik][jf] - occ_empty
                alpha += weight * occ_diff * (p_row[jf] ** 2) * d_row[jf]
    return alpha
# non-vectorized version
def absorb(pmn,eigs,efermi,T,freq,kptwt,nkpt,nbnd,aa,smear,nfilled):
    # need choose which smearing method....
    """ Procedure to calculate the absorption coefficient
    at particular frequency
    Computed in atomic units
    Inputs:
        pmn: complex momentum matrix elements;
            [ikpt][ibnd][jbnd][coord]
        eigs: eigenenergies [ikpt, bnd]
        efermi: Fermi energy used in the occupation factors
        T: temperature
        freq: frequency to calculate over
        kptwt: k-point weights
        nkpt: number of kpts
        nbnd: number electronic bands
        aa: adaptive smearing parameter (currently unused here)
        smear: regular constant smearing
        nfilled: num filled bands; for index tracking
    Outputs:
        alpha: absorption coefficient at freq
    """
    alpha = float(0)
    for ikpt in range(nkpt):
        for ibnd in range(nbnd-nfilled): # empty bands
            eig_ik = eigs[ikpt,ibnd+nfilled]
            fd_ik = sm.fermidirac(eig_ik,efermi,T)
            for jbnd in range(nfilled): #filled bands
                eig_jk = eigs[ikpt,jbnd]
                # Fermi-dirac factor
                fd_jk = sm.fermidirac(eig_jk,efermi,T)
                fd_fac = fd_jk-fd_ik
                # smearing: energy-conservation argument for the delta
                arg = eig_ik - eig_jk - freq
                # Gaussian smearing
                # NOTE(review): arg is scaled by smear**2 here, while the
                # MP variant below divides by smear - confirm which
                # normalisation is intended.
                argsm = arg/(smear**2)
                dirac = sm.w0gauss(argsm)
                # MP smearing
                #argsm = arg/smear
                #dirac = sm.mpdelta(argsm,2)
                #momentum matrix element
                ##pmn_tmp = pmn[ikpt][ibnd][jbnd]
                ##pmn_k = float(0)
                ##for coord in range(3):
                ##    pmn_k += pmn_tmp[coord]**2
                alpha += kptwt[ikpt] * fd_fac * (pmn[ikpt][ibnd][jbnd]**2) * dirac
    # # Debugging
    # print "ik", eig_ik
    # print "jk", eig_jk
    # print "arg", arg
    # print "pmn", pmn_tmp
    # print pmn_k
    # print "dirac", dirac
    # print "fd", fd_fac
    # print alpha
    # fermidirac(ebnd,efermi,T)
    # w0gauss(x)
    # sig_nk(nk1,nk2,nk3,vk,aa)
    return alpha
|
from handlers.BaseHandler import BaseHandler
from models.TaskModel import TaskMethods
from models.TaskboardMemberModel import TaskboardMemberMethods
from models.TaskboardModel import *
import json
"""
Taskboard member association relation request's handlers
extends base handler
"""
class TaskboardMemberHandler(BaseHandler):
    """Handles taskboard <-> member association requests.

    Endpoints: index (list members), post (add members),
    delete (remove a member and unassign their tasks).
    """

    def __init__(self, request, response):
        # Delegate request/response wiring to the shared base handler.
        super(TaskboardMemberHandler, self).__init__(request=request, response=response)

    def index(self, taskboard_id):
        """Send all members of taskboard *taskboard_id* as JSON.

        Unauthorised users receive {'errors': {'unauthorised': True}}.
        :param taskboard_id: id of the taskboard whose members are listed
        """
        # Only boards the current user created or is a member of are readable.
        if self.is_get_authorised(taskboard_id):
            members = TaskboardMemberMethods.get_all_taskboard_members_by_taskboard(taskboard_id)
            response = {'success': True, 'data': []}
            # Serialise each association into the shape the view expects.
            for member in members:
                response['data'].append(TaskboardMemberMethods.taskboard_member_to_dictionary(member))
        else:
            response = {'success': False, 'data': [], 'errors': {'unauthorised': True}}
        self.send_json_object(response)

    def is_get_authorised(self, id):
        """Return True when the current user may view taskboard *id*.

        A board is viewable only when it is in the user's authorised
        set, i.e. the user created it or is a member of it.
        :param id: taskboard id
        :return: boolean
        """
        taskboard_object = TaskboardMethods.get_by_id(int(id))
        authorised_taskboard_objects = TaskboardMethods.get_all_authorised_taskboards()
        return taskboard_object in authorised_taskboard_objects

    def post(self):
        """Add the selected users as members of a taskboard.

        Expects a JSON body with 'taskboard' (id) and 'app_user'
        (list of user ids); responds with a success/error JSON object.
        """
        params = json.loads(self.request.body)
        # Validation and authorisation both happen inside validate().
        params, validation_errors = self.validate(params)
        if validation_errors:
            response = {'success': False, 'validate': False, 'errors': validation_errors}
        else:
            # Persist one association per selected user.
            for app_user_id in params['app_user']:
                TaskboardMemberMethods.insert_taskboard_member(params['taskboard'], int(app_user_id))
            response = {
                'success': True,
                'message': 'Successfully added selected users to board',
                'validate': True,
                'errors': False
            }
        self.send_json_object(response)

    def delete(self):
        """Remove a member from a taskboard.

        The member's tasks on that board are unassigned first so no
        task keeps pointing at a non-member.
        """
        params = json.loads(self.request.body)
        # Only the board creator may remove members.
        params, validation_errors = self.is_authorised(params)
        # Read both ids up front (also surfaces missing keys early).
        remove_user = params['app_user']
        taskboard = params['taskboard']
        # NOTE(review): the creator can currently remove himself; a guard
        # for that case was sketched but never enabled - confirm intent.
        if validation_errors:
            response = {'success': False, 'validate': False, 'errors': validation_errors}
        else:
            associated_tasks_in_board = TaskMethods.get_all_tasks_by_taskboard_and_member(params['taskboard'],
                                                                                          params['app_user'])
            # Unassign first, then delete the membership itself.
            TaskMethods.unassign_tasks(associated_tasks_in_board)
            TaskboardMemberMethods.delete_taskboard_member(params['taskboard'], params['app_user'])
            response = {'success': True, 'validate': True, 'errors': validation_errors}
        self.send_json_object(response)

    def validate(self, params):
        """Authorise and validate an add-member request.

        Returns (params, errors); an empty errors dict means the request
        may proceed. Note: later checks overwrite earlier errors, so at
        most one error is reported per request.
        """
        validation_error = {}
        params, validation_error = self.is_authorised(params, validation_error)
        if 'taskboard' not in params or len(str(params['taskboard']).strip()) == 0:
            validation_error = {'user': 'Taskboard not selected.'}
        if 'app_user' not in params or len(params['app_user']) == 0:
            validation_error = {'user': 'Select users to invite.'}
        if 'app_user' in params and len(params['app_user']) and 'taskboard' in params and len(
                str(params['taskboard']).strip()):
            # Reject users that are already members of this board.
            for app_user_id in params['app_user']:
                if TaskboardMemberMethods.exists_relation(params['taskboard'], app_user_id):
                    validation_error = {'user': AppUserMethods.get_user(app_user_id).email + " is already a member"}
        return params, validation_error

    def is_authorised(self, params, validation_error=None):
        """Flag the request as unauthorised unless the current user
        created the taskboard.

        :param params: decoded request body
        :param validation_error: existing error dict to pass through
        :return: (params, validation_error) tuple
        """
        # BUG FIX: the default used to be a mutable dict ({}) shared
        # across calls; default to None and build a fresh one per call.
        if validation_error is None:
            validation_error = {}
        if 'taskboard' in params and len(str(params['taskboard']).strip()) >= 0:
            taskboard = TaskboardMethods.get_by_id(int(params['taskboard']))
            # Unauthorised when the current user is not the creator.
            if taskboard.created_by != AppUserMethods.get_current_user().key:
                validation_error = {'unauthorised': True}
        return params, validation_error
|
import unittest
import json
from flask import current_app
from app import app
from tests.test_basics import BasicsTestCase
from tests.test_service import ServiceTestCase
# NOTE(review): instantiating a TestCase at import time does not run it;
# unittest.main() below discovers test classes on its own - confirm this
# call is intentional.
BasicsTestCase()
#ServiceTestCase()
if __name__ == "__main__":
    unittest.main()
|
from os import write
import requests
import urllib
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
from time import strptime
import re
import comicutil
import comicdb
# When False, "coming soon" listings are excluded from search_comics().
showFuture = False
# comiXology endpoints; search URLs take one {} slot for the quoted query.
urls = {'base': 'https://www.comixology.com',
        'search':
            {'searchBasic': 'https://www.comixology.com/search?search={}',
             'searcCollectionsUrl': 'https://www.comixology.com/search/items?search={}&subType=COLLECTIONS',
             'searchSeriesUrl': 'https://www.comixology.com/search/series?search={}',
             'searchIssuesUrl': 'https://www.comixology.com/search/items?search={}&subType=SINGLE_ISSUES',
             'SearchcomingSoonUrl': 'https://www.comixology.com/search/items?search={}&subType=comingsoon'}
        }
# Desktop browser UA; the site rejects the default python-requests agent.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def getIssueNumberFromName(name):
    """Return the digits of the '#'-token in *name*, or "UNKNOWN"."""
    if "#" not in name:
        return "UNKNOWN"
    for token in name.split():
        if "#" in token:
            # Keep only the digits of the issue token (e.g. "#12" -> "12").
            return re.sub("[^0-9]", "", token)
    return "UNKNOWN"
def get_name(name):
    """Strip every parenthesised group (and its leading space) from *name*."""
    cleaned = re.sub(r" ?\([^)]+\)", "", name)
    return cleaned
def fix_name(name):
    """Join the series part (before " (") with whatever follows the
    first '#'+digit marker; without a marker the whole name repeats."""
    series = name.partition(" (")[0]
    # NOTE(review): [0-999] is the character class 0-9 (the 9 repeats),
    # not the range 0..999 - preserved as-is, confirm intent.
    remainder = re.split("#[0-999]", name, 1)[-1]
    return f"{series}{remainder}"
def getYearFromName(name):
    """Return the last 4-digit run in *name*, or "UNKNOWN" when absent."""
    try:
        # The last match wins: series names may embed other numbers.
        return re.findall(r'\d{4}', name)[-1]
    except IndexError:
        # Narrowed from a bare except: only "no match" is expected here.
        return "UNKNOWN"
def get_year(string):
    """Return the parenthesised year range of *string* (e.g. "2016-2019"),
    or "UNKNOWN-UNKNOWN" when no usable year is present."""
    try:
        year = string.split("(")[1].replace(")", "")
    except IndexError:
        # No opening parenthesis at all (narrowed from a bare except).
        return "UNKNOWN-UNKNOWN"
    # A "year" without any digit (e.g. "(TPB)") is not usable.
    if not re.search(r'\d', year):
        return "UNKNOWN-UNKNOWN"
    return year
    # (removed an unreachable bare `return` that followed the old else)
def get_issue_by_id(id):
    """Placeholder: issue lookup by id is not implemented yet."""
    return dict()
def get_series_by_id(id):
    """Scrape every issue of a comiXology series page.

    *id* is the full series URL. Walks all pager pages, fetches each
    non-bundle issue's detail page, and returns a list of issue dicts
    (cover art URLs, names, credits, genres, dates, ids).
    """
    searchResults = []
    response = requests.get(id, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    page_count = 1
    try:
        # The pager widget holds the total page count; default is 1 page.
        page_count = re.sub("[^0-9]", "",soup.find("div", {"class": "pager-jump-container"}).contents[4])
    except:
        pass
    for n in range(int(page_count)):
        url = f"{id}?Issues_pg={n+1}"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        results = soup.find("div", {"class": "list Issues"}).find_all("li", {"class": "content-item"})
        for result in results:
            link = result.find("a", {"class": "content-img-link"})['href']
            name = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
            volume = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
            # Derive the three cover sizes from the listing thumbnail URL.
            coverImageBase = result.find("img", {"class": "content-img"})['src'].split('https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Item/')
            coverImageMedium = 'https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Item/{}'.format(urllib.parse.quote(coverImageBase[1]))
            coverImageLarge = coverImageMedium.replace('SX312','SX360')
            coverImageSmall = coverImageMedium.replace('SX312','SX170')
            credits = []
            try:
                # Append the issue subtitle (e.g. "#12") when present.
                name = '{} {}'.format(name,result.find("h6", {"class": "content-subtitle"}).contents[0].strip('Issue ').partition('(')[0].strip())
            except Exception as e:
                pass
            if 'Bundle' not in name:
                response = requests.get(link, headers=headers)
                soup = BeautifulSoup(response.text, 'html.parser')
                description = ""
                try:
                    description = soup.find("section", {"class": "item-description"}).text.replace("\n","").replace("\r","").replace("\t","")
                except:
                    pass
                publisher = soup.find("h3", {"title": "Publisher"}).contents[0].strip()
                credits_section = soup.find("div", {"class": "credits"})
                for credit in credits_section:
                    try:
                        c = {}
                        c["person"] = credit.find("h2").text.strip()
                        role = credit.find("h2")['title']
                        # Normalise comiXology role labels to our vocabulary.
                        if "art" in role.lower():
                            role = "Artists"
                        if "written" in role.lower():
                            role = "Writer"
                        if "cover" in role.lower():
                            role = "Cover Artist"
                        if "pencil" in role.lower():
                            role = "Penciler"
                        if "colored" in role.lower():
                            role = "Colorist"
                        c["role"] = role
                        if c not in credits:
                            credits.append(c)
                    except:
                        pass
                genre_list = []
                genres = ""
                asgroup = soup.find_all("a")
                for a in asgroup:
                    try:
                        # Genre pages are the only links containing this slug.
                        if "comics-genre" in a["href"]:
                            genre_list.append(a.text.strip())
                    except:
                        pass
                if len(genre_list) > 0:
                    genres = ", ".join(genre_list)
                aboutTitle = soup.find_all("h4", {"class": "subtitle"})
                aboutText = soup.find_all("div", {"class": "aboutText"})
                breadcrumb = soup.find("div", {"class": "breadcrumb"})
                publisherLInk = crubms = breadcrumb.find_all('a')[1]
                volumeLink = breadcrumb.find_all('a')[2]['href'].partition('?')[0]
                # aboutText page order: pages, print date, digital date, age rating.
                pageCount = aboutText[0].contents[0].strip().replace(" Pages","")
                printDate = aboutText[1].contents[0].strip()
                digitalDate = aboutText[2].contents[0].strip()
                ageRating = aboutText[3].contents[0].strip()
                issueObject = {}
                issueObject['coverImageMedium'] = coverImageMedium
                issueObject['coverImageSmall'] = coverImageSmall
                issueObject['coverImage'] = coverImageLarge
                issueObject['volumeName'] = volume
                issueObject['issueName'] = get_name(soup.find("h1", {"itemprop": "name"}).contents[0])
                issueObject['issueNumber'] = getIssueNumberFromName(soup.find("h1", {"itemprop": "name"}).contents[0])
                issueObject['issueLink'] = link.partition('?')[0]
                issueObject['volumeLink'] = volumeLink
                issueObject['volumeYear'] = getYearFromName(breadcrumb.find_all('a')[2].find('h3').contents[0].strip())
                issueObject['publisher'] = comicdb.map_publisher(publisher)
                issueObject['credits'] = credits
                issueObject['description'] = description
                issueObject['genres'] = genres
                issueObject['page_count'] = pageCount
                dateArray = printDate.split()
                issueObject['storeDate'] = '{}-{}-{}'.format(dateArray[2],dateArray[1],strptime(dateArray[0],'%B').tm_mon)
                # NOTE(review): str.strip(urls["base"]) strips *characters*,
                # not a prefix - ids may be mangled; confirm intent.
                issueObject['volumeId'] = volumeLink.strip(urls["base"])
                issueObject['issueId'] = link.partition('?')[0].strip(urls["base"])
                searchResults.append(issueObject)
    return searchResults
def search_series(query,volumeConfidence=0,issueConfidence=0):
    """Search comiXology series pages for *query*.

    Returns a list of series dicts (cover art, name, year range,
    publisher, ids) for every non-bundle result whose fuzzy-match
    score exceeds issueConfidence. volumeConfidence is currently unused.
    """
    comic_details = comicutil.get_comic_details(query)
    searchResults = []
    url = urls["search"]["searchSeriesUrl"]
    response = requests.get(url.format(urllib.parse.quote(query)), headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    results = soup.find_all("li", {"class": "content-item"})
    for result in results:
        link = result.find("a", {"class": "content-img-link"})['href']
        name = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
        volume = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
        # Derive the three cover sizes from the listing thumbnail URL.
        coverImageBase = result.find("img", {"class": "content-img"})['src'].split('https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Series/')
        coverImageMedium = 'https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Series/{}'.format(urllib.parse.quote(coverImageBase[1]))
        coverImageLarge = coverImageMedium.replace('SX312','SX360')
        coverImageSmall = coverImageMedium.replace('SX312','SX170')
        if 'Bundle' not in name:
            # Keep only results whose fuzzy score beats the threshold.
            ratio = fuzz.ratio(comicutil.stripBadChars(name),f"{comic_details.series} #{comic_details.issue}")
            if ratio > issueConfidence:
                response = requests.get(link, headers=headers)
                soup = BeautifulSoup(response.text, 'html.parser')
                #description = soup.find("section", {"class": "item-description"}).contents[2]
                year = get_year(soup.find("div", {"class": "title"}).find("h1", {"itemprop": "name"}).contents[0])
                # An open-ended range like "2016-" means "to present".
                try:
                    if year.split("-")[1] == "":
                        year = year + "PRESENT"
                except:
                    pass
                end_year = "PRESENT"
                try:
                    if year.split("-")[1] != "":
                        end_year = year.split("-")[1]
                except:
                    pass
                start_year = "UNKNOWN"
                try:
                    start_year = year.split("-")[0]
                except:
                    pass
                publisher = soup.find("h3", {"title": "Publisher"}).contents[0].strip()
                aboutTitle = soup.find_all("h4", {"class": "subtitle"})
                aboutText = soup.find_all("div", {"class": "aboutText"})
                breadcrumb = soup.find("div", {"class": "breadcrumb"})
                publisherLInk = crubms = breadcrumb.find_all('a')[1]
                volumeLink = link.partition('?')[0]
                issueObject = {}
                issueObject['coverImageMedium'] = coverImageMedium
                issueObject['coverImageSmall'] = coverImageSmall
                issueObject['coverImage'] = coverImageLarge
                issueObject['volumeName'] = volume
                issueObject['issueName'] = name
                issueObject['name'] = name
                issueObject["issue_count"] = "UNKNOWN"
                issueObject['link'] = link.partition('?')[0]
                issueObject['year'] = year
                issueObject['start_year'] = year.split("-")[0]
                issueObject['end_year'] = end_year
                issueObject['issueNumber'] = getIssueNumberFromName(name)
                issueObject['issueLink'] = link.partition('?')[0]
                issueObject['volumeLink'] = volumeLink
                issueObject['volumeYear'] = start_year
                issueObject['publisher'] = comicdb.map_publisher(publisher)
                # NOTE(review): str.strip(urls["base"]) strips characters,
                # not a prefix - confirm ids are correct.
                issueObject['volumeId'] = volumeLink.strip(urls["base"])
                issueObject['issueId'] = link.partition('?')[0].strip(urls["base"])
                issueObject['confidence'] = str(ratio)
                issueObject['id'] = link.partition('?')[0]
                searchResults.append(issueObject)
    #searchResults.sort(key=lambda x: int(x['confidence']),reverse = True)
    return searchResults
def search_comics(query,volumeConfidence=0,issueConfidence=0):
    """Search comiXology single issues and collections for *query*.

    Fetches each sufficiently-matching result's detail page and
    returns a list of issue dicts with credits split into writers,
    artists and cover artists. issueConfidence is the minimum fuzzy
    score kept; volumeConfidence is currently unused.
    """
    comic_details = comicutil.get_comic_details(query)
    urlList = [urls['search']['searchIssuesUrl'],urls['search']['searcCollectionsUrl']]
    if showFuture:
        urlList.append(urls['search']['SearchcomingSoonUrl'])
    searchResults = []
    for url in urlList:
        response = requests.get(url.format(urllib.parse.quote(query)), headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        results = soup.find_all("li", {"class": "content-item"})
        for result in results:
            link = result.find("a", {"class": "content-img-link"})['href']
            name = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
            volume = result.find("h5", {"class": "content-title"}).contents[0].partition('(')[0].strip()
            # Derive the three cover sizes from the listing thumbnail URL.
            coverImageBase = result.find("img", {"class": "content-img"})['src'].split('https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Item/')
            coverImageMedium = 'https://images-na.ssl-images-amazon.com/images/S/cmx-images-prod/Item/{}'.format(urllib.parse.quote(coverImageBase[1]))
            coverImageLarge = coverImageMedium.replace('SX312','SX360')
            coverImageSmall = coverImageMedium.replace('SX312','SX170')
            try:
                # Append the issue subtitle (e.g. "#12") when present.
                name = '{} {}'.format(name,result.find("h6", {"class": "content-subtitle"}).contents[0].strip('Issue ').partition('(')[0].strip())
            except Exception as e:
                pass
            if 'Bundle' not in name:
                # Keep only results whose fuzzy score beats the threshold.
                ratio = fuzz.ratio(comicutil.stripBadChars(name),f"{comic_details.series} #{comic_details.issue}")
                if ratio > issueConfidence:
                    response = requests.get(link, headers=headers)
                    soup = BeautifulSoup(response.text, 'html.parser')
                    writers = []
                    try:
                        ws = soup.find_all("h2", {"title": "Written by"})
                        for writer in ws:
                            writers.append(writer.find("a").contents[0].strip())
                    except:
                        pass
                    #description = soup.find("section", {"class": "item-description"}).contents[2]
                    publisher = soup.find("h3", {"title": "Publisher"}).contents[0].strip()
                    artists = []
                    try:
                        ars = soup.find_all("h2", {"title": "Art by"})
                        for artist in ars:
                            artists.append(artist.find('a').contents[0].strip())
                    except:
                        pass
                    coverartists = []
                    try:
                        cas = soup.find_all("h2", {"title": "Cover by"})
                        for coverartist in cas:
                            coverartists.append(coverartist.find('a').contents[0].strip())
                    except:
                        pass
                    year = get_year(soup.find("div", {"id": "column2"}).find("h1", {"class": "title"}).contents[0])
                    # An open-ended range like "2016-" means "to present".
                    try:
                        if year.split("-")[1] == "":
                            year = year + "PRESENT"
                    except:
                        pass
                    end_year = "PRESENT"
                    try:
                        if year.split("-")[1] != "":
                            end_year = year.split("-")[1]
                    except:
                        pass
                    start_year = "UNKNOWN"
                    try:
                        start_year = year.split("-")[0]
                    except:
                        pass
                    aboutTitle = soup.find_all("h4", {"class": "subtitle"})
                    aboutText = soup.find_all("div", {"class": "aboutText"})
                    breadcrumb = soup.find("div", {"class": "breadcrumb"})
                    publisherLInk = crubms = breadcrumb.find_all('a')[1]
                    volumeLink = breadcrumb.find_all('a')[2]['href'].partition('?')[0]
                    # aboutText page order: pages, print date, digital date, age rating.
                    pageCount = aboutText[0].contents[0].strip().replace(" Pages","")
                    printDate = aboutText[1].contents[0].strip()
                    digitalDate = aboutText[2].contents[0].strip()
                    ageRating = aboutText[3].contents[0].strip()
                    issueObject = {}
                    issueObject['coverImageMedium'] = coverImageMedium
                    issueObject['coverImageSmall'] = coverImageSmall
                    issueObject['coverImage'] = coverImageLarge
                    issueObject['volumeName'] = volume
                    issueObject['issueName'] = name
                    issueObject['issueNumber'] = getIssueNumberFromName(name)
                    issueObject['issueLink'] = link.partition('?')[0]
                    issueObject['volumeLink'] = volumeLink
                    issueObject['volumeYear'] = start_year
                    issueObject['publisher'] = comicdb.map_publisher(publisher)
                    issueObject['writers'] = writers
                    issueObject['artists'] = artists
                    issueObject['covertartists'] = coverartists
                    dateArray = printDate.split()
                    issueObject['storeDate'] = '{}-{}-{}'.format(dateArray[2],dateArray[1],strptime(dateArray[0],'%B').tm_mon)
                    # NOTE(review): str.strip(urls["base"]) strips characters,
                    # not a prefix - confirm ids are correct.
                    issueObject['volumeId'] = volumeLink.strip(urls["base"])
                    issueObject['issueId'] = link.partition('?')[0].strip(urls["base"])
                    issueObject['confidence'] = str(ratio)
                    searchResults.append(issueObject)
    return searchResults
if __name__ == "__main__":
    # Manual smoke test: scrape one known series and dump the result.
    results = get_series_by_id("https://www.comixology.com/The-Walking-Dead-Deluxe/comics-series/148151?ref=Y29taWMvdmlldy9kZXNrdG9wL2JyZWFkY3J1bWJz")
    print(results)
|
'''
Created on Mar 13, 2017
@author: vandia
'''
import numpy as np
import pandas as pd
import io_utilities as ut
import analysis_utilities as ad
import os.path as osp
def main():
    """Load the pumps training and test sets (building the joined
    training CSV on first run) and write per-column analysis reports."""
    train_path = "../../data_in/pumps_training.csv"
    if osp.isfile(train_path):
        dataset = ut.load(train_path, "id", date_cols=["date_recorded"])
    else:
        # First run: join the raw data with its labels, then cache it.
        dataset = ut.load_and_join("../../data_in/training_data.csv", "../../data_in/training_labels.csv", "id",
                                   date_cols1=["date_recorded"])
        ut.save_csv(dataset, train_path)
    ad.colunmn_analysis(dataset, "../../data_out/", "_training")

    # -----------------------------------------------------------------
    # Test data analysis
    # -----------------------------------------------------------------
    test_path = "../../data_in/pumps_test.csv"
    dataset_t = ut.load(test_path, "id", date_cols=["date_recorded"])
    ad.colunmn_analysis(dataset_t, "../../data_out/", "_test")


if __name__ == '__main__':
    main()
import eveapi
import os
import redis
import cPickle
import datetime
import time
import json
from .app import app, db
from .models import Corporation
# Shared Redis connection for module-level lookups.
r = redis.StrictRedis(host=app.config['REDIS'])
# Station id -> name map, stored in Redis under 'eve.stations'.
# NOTE(review): assumes the key is already populated by an external
# job; json.loads(None) fails otherwise - confirm.
OUTPOSTS = json.loads(r.get('eve.stations'))
class RedisEveAPICacheHandler(object):
    """eveapi cache handler backed by Redis.

    Documents are pickled together with their expiry timestamp and
    honoured until the EVE API's cachedUntil time passes.
    """
    def __init__(self):
        self.debug = app.config.get('DEBUG', False)
        self.r = redis.StrictRedis(
            host=app.config['REDIS'], port=int(os.getenv('REDIS_PORT', 6379)))
    def log(self, what):
        # Timestamped stdout logging, only when DEBUG is enabled.
        if self.debug:
            print "[%s] %s" % (datetime.datetime.now().isoformat(), what)
    def retrieve(self, host, path, params):
        """Return a cached API document, or None to force a fetch."""
        # The cache key hashes the full request identity.
        key = hash((host, path, frozenset(params.items())))
        cached = self.r.get(key)
        if cached is None:
            self.log("%s: not cached, fetching from server..." % path)
            return None
        else:
            # Stored as (expiry_timestamp, document).
            cached = cPickle.loads(cached)
            if time.time() < cached[0]:
                self.log("%s: returning cached document" % path)
                return cached[1]
            # Entry outlived its cachedUntil time; drop it.
            self.log("%s: cache expired, purging !" % path)
            self.r.delete(key)
    def store(self, host, path, params, doc, obj):
        """Persist an API document until the server's cachedUntil time."""
        key = hash((host, path, frozenset(params.items())))
        cachedFor = obj.cachedUntil - obj.currentTime
        if cachedFor:
            self.log("%s: cached (%d seconds)" % (path, cachedFor))
            cachedUntil = time.time() + cachedFor
            self.r.set(key, cPickle.dumps((cachedUntil, doc), -1))
class EveBadMaskException(Exception):
    """Raised when an API key's access mask is not the expected one."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class EveKeyExpirationException(Exception):
    """Raised when an API key has an expiration date set."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class EveTools(object):
    """Thin wrapper around eveapi with key validation helpers."""
    # Shared, Redis-cached connection; auth() derives a keyed context.
    client = eveapi.EVEAPIConnection(cacheHandler=RedisEveAPICacheHandler())
    def __init__(self, key_id=None, vcode=None):
        # Auth immediately when credentials are supplied.
        if key_id and vcode:
            self.auth(key_id, vcode)
    def auth(self, key_id, vcode):
        """Bind the shared connection to an API key pair."""
        self.key_id = key_id
        self.vcode = vcode
        self.client = self.client.auth(keyID=key_id, vCode=vcode)
        self.authed = True
    def assert_mask(self):
        """Validate the key's access mask and expiry; return (status, mask).

        Raises EveBadMaskException / EveKeyExpirationException on
        ineligible keys.
        """
        # NOTE(review): self.authed only exists after auth(); an unauthed
        # instance raises AttributeError here, not the Exception below.
        if not self.authed:
            raise Exception('No auth info')
        try:
            api_key_info = self.client.account.APIKeyInfo()
        except eveapi.Error, e:
            raise e
        except Exception, e:
            raise e
        else:
            mask = api_key_info.key.accessMask
            expiration = api_key_info.key.expires
            status = 'Ineligible'
            # Only the exact alliance mask is accepted.
            if mask == app.config['EVE']['alliance_mask']:
                status = 'Alliance'
            else:
                raise EveBadMaskException('Bad mask')
            # Keys must be non-expiring to be accepted.
            if len(str(expiration)) > 0:
                raise EveKeyExpirationException(
                    'This key has an expiration date')
            return status, mask
    def get_characters(self, full=False):
        """List the key's characters; full=True fetches each character sheet."""
        if not self.authed:
            raise Exception('No auth info')
        try:
            api_characters = self.client.account.Characters().characters
        except eveapi.Error, e:
            raise e
        except Exception, e:
            raise e
        if not full:
            return api_characters
        else:
            characters = []
            for character in api_characters:
                characters.append(
                    self.client.char.CharacterSheet(characterID=character.characterID))
            return characters
    def get_character(self, character_id):
        """Fetch a single character sheet by character id."""
        if not self.authed:
            raise Exception('No auth info')
        try:
            api_character = self.client.char.CharacterSheet(
                characterID=character_id)
        except eveapi.Error, e:
            raise e
        except Exception, e:
            raise e
        return api_character
    def check_eligibility(self, characters):
        """Mark characters already in a member corporation as disabled."""
        # Building corporations list
        corporations = [
            corporation.id for corporation in Corporation.query.all()]
        for character in characters:
            if character.corporationID in corporations:
                character.disabled = True
            else:
                character.disabled = False
        return characters
def get_type_name(type_id):
    """Look up an EVE item type's name, base price and group from the
    static-data database.

    Returns a dict; for unknown ids the values stay None and the
    'base_price' key is absent entirely - callers must handle that.
    """
    eve_db = db.get_engine(app, bind='eve')
    query = 'SELECT invTypes.typeID, invTypes.typeName, invTypes.basePrice, invGroups.groupName FROM invTypes JOIN invGroups ON invTypes.groupID=invGroups.groupID WHERE invTypes.typeID = :type_id'
    result = eve_db.engine.execute(query, type_id=type_id)
    item = {'type_id': None, 'name': None, 'group_name': None}
    # typeID is unique, so only the first row matters.
    for row in result:
        item['type_id'] = row[0]
        item['name'] = row[1]
        item['base_price'] = row[2]
        item['group_name'] = row[3]
        break
    return item
def get_location_name(location_id):
    """Resolve an EVE location id to a human-readable name.

    Id ranges distinguish conquerable stations, outposts, NPC stations
    and generic map items; unmatched ids fall through to the map table.
    Returns None when nothing matches.
    """
    eve_db = db.get_engine(app, bind='eve')
    location = None
    if 66000000 < location_id < 66014933:
        # NOTE(review): the offset is 6000001 while the range starts at
        # 66000000 - this looks like a missing digit (60000001?); confirm
        # against the EVE location-id conventions.
        query = 'SELECT stationName FROM staStations WHERE stationID=:location_id;'
        result = eve_db.engine.execute(
            query, location_id=location_id - 6000001)
        for row in result:
            location = row[0]
            break
    if 66014934 < location_id < 67999999:
        # NOTE(review): same offset concern as above (6000000 vs 60000000).
        location = OUTPOSTS[str(location_id - 6000000)]
    if 60014861 < location_id < 60014928:
        location = OUTPOSTS[str(location_id)]
    if 60000000 < location_id < 61000000:
        query = 'SELECT stationName FROM staStations WHERE stationID=:location_id;'
        result = eve_db.engine.execute(query, location_id=location_id)
        for row in result:
            location = row[0]
            break
    if location_id >= 61000000:
        location = OUTPOSTS[str(location_id)]
    else:
        # Fallback: any other id is looked up in the denormalised map table.
        query = 'SELECT itemName FROM mapDenormalize WHERE itemID=:location_id;'
        result = eve_db.engine.execute(query, location_id=location_id)
        for row in result:
            location = row[0]
            break
    return location
def parse_assets(assets_list):
    """Flatten an eveapi asset rowset into a list of plain dicts.

    Containers are recursed into; their contents are appended to the
    same flat result list.
    """
    assets = []
    for api_asset in assets_list:
        asset = {}
        # Copy every column of the row except the nested 'contents' rowset.
        for index, key in enumerate(api_asset.__dict__['_cols']):
            if key == 'contents':
                continue
            try:
                asset[key] = api_asset.__dict__['_row'][index]
            except IndexError:
                # Rows may carry fewer values than declared columns.
                asset[key] = None
        item_type = get_type_name(asset['typeID'])
        if 'locationID' in api_asset:
            asset['location_name'] = get_location_name(api_asset.locationID)
        asset['item_name'] = item_type['name']
        try:
            asset['base_price'] = item_type['base_price']
        except KeyError:
            # Unknown types have no 'base_price' key at all.
            asset['base_price'] = 0
        asset['group_name'] = item_type['group_name']
        assets.append(asset)
        if 'contents' in api_asset:
            # BUG FIX: the recursive result used to be discarded, so items
            # inside containers never reached the output list.
            assets.extend(parse_assets(api_asset.contents))
    return assets
|
# Experiment constants for a colors/shapes "theory" toy domain.
n_colors=3
n_shapes=3
# Tolerance for probability/score comparisons.
epsilon=1e-3
n_theories=12
#machines=[(2,0), (1,1), (0,2)]
# NOTE(review): blue/green/red and rectangle/circle/triangle are not
# defined in this file - importing it raises NameError unless they are
# injected elsewhere; confirm the intended definitions (the commented
# numeric encoding above may have been the original form).
machines=[(blue, rectangle), (green, circle), (red, triangle)]
available_toys=[(blue, circle), (red, rectangle), (green, triangle)]
|
# -*- coding: utf-8 -*-
'''
Created on 2016年3月24日
@author: huke
'''
import math
def quadratic(a, b, c):
    """Return both real roots (x1, x2) of a*x**2 + b*x + c = 0.

    Prints the error and returns None when the equation has no real
    roots (negative discriminant -> ValueError) or is not quadratic
    (a == 0 -> ZeroDivisionError).
    """
    try:
        # Compute the discriminant's square root once instead of twice.
        root = math.sqrt(b * b - 4 * a * c)
        x1 = (-b + root) / (2 * a)
        x2 = (-b - root) / (2 * a)
        return x1, x2
    except (ValueError, ZeroDivisionError) as e:
        # Narrowed from a blanket Exception catch; behaviour for the two
        # expected failure modes (print + implicit None) is unchanged.
        print(e)
if __name__ == '__main__':
    # Exercise the solver with two known-solvable equations.
    try:
        for coeffs in ((2, -3, 1), (1, 3, -4)):
            print(quadratic(*coeffs))
    except Exception as e:
        print(e)
|
import plotly.graph_objects as go
import plotly.io as pio
import numpy as np
# Build a 10x10 grid over [-1, 1]^2 for x and the multiplier lambda.
x, l = np.linspace(-1, 1, 10), np.linspace(-1, 1, 10)
x, l = np.meshgrid(x, l)
# Lagrangian L(x, lambda) = x^2 + lambda * x evaluated on the grid.
z = x * x + x * l
fig = go.Figure(data=[go.Surface(z=z, x=x, y=l)])
# Project z-contours onto the surface for readability.
fig.update_traces(contours_z=dict(show=True, usecolormap=True,
                                  highlightcolor="limegreen", project_z=True))
fig.update_layout(
    title='L(x, lambda)= x^2 + lambda * x',
    scene = dict(
        xaxis_title='x',
        yaxis_title='lambda',
        zaxis_title='L'),
    width=500, height=500,
    margin=dict(l=65, r=50, b=65, t=90))
fig.show()
# Persist the interactive chart for embedding in the site.
pio.write_html(fig, file='_includes/charts/lagrange-multiplier.html', auto_open=True)
|
from django.apps import AppConfig
class ApicontentConfig(AppConfig):
    """Django app configuration for the apicontent app."""
    name = 'apicontent'
|
'''Artisinal chat bot'''
# pylint: disable=I0011,C0103
import json
import urllib
import random
# response = urllib2.urlopen('http://localhost:5001/api/users')
# html = response.read()
# print html
# server_path = 'http://localhost:5001/api'
# Base URL of the chat API the bot talks to.
server_path = 'http://localhost:5014/api'
bot_name = 'Pyro'
# Resolved at startup by get_user_info(); 0 means "not resolved yet".
bot_user_id = 0
bot_room_ids = [1]
bot_responses = ["PYROOOOOOOO!"]
# class CLI_Chat_Bot(object):
# '''Chatbot to interact with a chat via an API'''
def get_user_info():
    """Look up the bot's user id from the /users endpoint.

    Side effect: sets the module-level bot_user_id when a user named
    bot_name exists on the server; each user is echoed to stdout.
    """
    # BUG FIX: without this declaration the assignment below created a
    # local variable and the module-level bot_user_id never changed.
    global bot_user_id
    response = urllib.request.urlopen('{}/users'.format(server_path))
    jason = json.loads(response.read())
    j_list = jason["users"]
    for index, person in enumerate(j_list):
        if person["name"] == bot_name:
            bot_user_id = person["user_id"]
        # py3 fix: the file uses urllib.request (Python 3 only), so the
        # old Python 2 print statement was a syntax error.
        print(index, ": ", person)
# If the user doesn't exist, create it
# curl --data "name=Pyro" http://localhost:5001/api/users
# if there is a new message, parse it
# response = urllib2.urlopen(server_path,
# '/api/rooms/1/messages')
def get_chat_history_all():
    '''Gets all messages'''
    # Room 1 is the bot's default room; "messages" selects the message list.
    room_id = 1
    room_attr = "messages"
    endpoint = '{}/rooms/{}/{}'.format(server_path, room_id, room_attr)
    payload = json.loads(urllib.request.urlopen(endpoint).read())
    return payload[room_attr]
def get_chat_history_by_time():
    '''Checks for a new chat message'''
    # TODO: unimplemented stub - currently does nothing and returns None.
    # if there is a new message, parse it
    # print self.get('/api/rooms/1/messages')
    # test_app = TestApp(app)
    # resp = test_app.get('/admin')
    # self.assertEqual(resp.status_code, 200)
def parse_chat_message(json_message):
    """Inspect one decoded chat message and react if it addresses the bot.

    json_message: dict with at least "data" (message text) and "user_name"
    keys.  When the bot's name appears in a message sent by someone else,
    it reacts (currently only by printing; posting is still stubbed out).

    Fixed: the Python 2 ``print`` statements were syntax errors under
    Python 3, which this file otherwise targets (``urllib.request``).
    """
    print(type(json_message), json_message)
    msg_data = json_message["data"]
    print(msg_data)
    # Ignore the bot's own messages so it never answers itself.
    if bot_name in msg_data and bot_name != json_message["user_name"]:
        if "weather" in msg_data:
            print("{} - Getting weather".format(bot_name))
            # post_chat_message("{} - Getting weather".format(bot_name))
        else:
            print("Hello {}. I am {}".format(json_message["user_name"], bot_name))
            # post_chat_message("Hello {}. I am {}".format(json_message["user_name"], bot_name))
# values = {'name' : 'Michael Foord',
# 'location' : 'Northampton',
# 'language' : 'Python' }
#
# data = urllib.urlencode(values)
# req = urllib2.Request(url, data)
# response = urllib2.urlopen(req)
# the_page = response.read()
def post_chat_message(chat_message):
    """URL-encode *chat_message* as form data for the chat API.

    The actual POST is still commented out, matching the original; the
    encoded payload is printed for debugging.

    Fixed: the original referenced an undefined name ``user_id`` (the
    module global is ``bot_user_id``), and ``urllib.urlencode`` does not
    exist on Python 3 (it moved to ``urllib.parse.urlencode``).
    """
    import urllib.parse  # Python 3 home of urlencode
    values = {'user_id': bot_user_id, 'data': chat_message}
    data = urllib.parse.urlencode(values)
    print(data)
    # post_response = urllib2.urlopen('{}/rooms/{}/{}'.format(
    #                                 server_path, room_id, room_attr),
    #                                 data)
# get_chat_history_all()
# print type(jason), len(jason), jason
# print type(j_list), len(j_list), j_list
# Script entry point: fetch the user roster once at startup.
get_user_info()
# ja_list = get_chat_history_all()
# parse_chat_message(ja_list[-1])
|
# Read two cyclic name lists of lengths n and m, then answer queries:
# for each year y, print the y-th name of each cycle (1-indexed; a year
# that is a multiple of the cycle length wraps to the last entry via
# Python's negative indexing).
n, m = map(int, input().split())
first_names = input().split()
second_names = input().split()
for _ in range(int(input())):
    year = int(input())
    print(first_names[year % n - 1] + second_names[year % m - 1])
|
# encrypt function
def encrypt(plain, lookup2):
    """Encrypt *plain* by substituting each character through *lookup2*.

    lookup2 is indexed by the character's ASCII code point (via ord), so
    the result is the concatenation of the table entries for each char.
    """
    return "".join(lookup2[ord(ch)] for ch in plain)
# decrypt function
def decrypt(cypher, lookup2):
    """Decrypt *cypher* by inverting the substitution table *lookup2*.

    Each cypher character is mapped back to chr(position) where position
    is its index in the table.  The original used a hand-rolled while-loop
    linear search; list.index does the same scan in C and raises a clear
    ValueError (instead of an IndexError after running off the table) if a
    character is not present.
    """
    return "".join(chr(lookup2.index(ch)) for ch in cypher)
# Populating lookup
# Substitution table: index i (a plaintext code point, 0-126) maps to
# chr((i - 15) % 127), i.e. a Caesar shift of 15 positions over the
# 0-126 ASCII range with wrap-around.  The original built this with an
# append loop, a useless temporary variable ``a`` and an explicit
# negative-wrap branch; modular arithmetic expresses both cases at once.
lookup = [chr((i - 15) % 127) for i in range(127)]
# The main program
# Interactive menu loop: repeat until the user chooses option 3 (exit).
# NOTE(review): int(input(...)) raises ValueError on non-numeric input;
# left as-is to preserve behaviour.
choice = 0
while choice != 3:
    choice = int(input(
        """#############################
To Encrypt text press 1
To Decrypt text press 2
To exit press 3
############################
Enter Num:"""))
    if choice == 1:
        plain = input("\nEnter the text you want to Encrypt: ")
        cypher = encrypt(plain,lookup)
        print("Plain Text: {} Encrypted Text: {}".format(plain, cypher))
    elif choice == 2:
        cypher = input("\nEnter the text you want to Decrypt: ")
        decypher = decrypt(cypher, lookup)
        print("Cypher Text: {} Decrypted Text: {}".format(cypher, decypher))
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for encoder module."""
import encoder
import optimizer
import unittest
import test_tools
import vp9
class TestVp9(test_tools.FileUsingCodecTest):
    """Tests for the vp9.Vp9Codec wrapper."""

    def test_Init(self):
        # The codec must report its canonical name.
        codec = vp9.Vp9Codec()
        self.assertEqual(codec.name, 'vp9')

    def test_OneBlackFrame(self):
        """Encode one blank frame and sanity-check the reported metrics."""
        codec = vp9.Vp9Codec()
        my_optimizer = optimizer.Optimizer(codec)
        videofile = test_tools.MakeYuvFileWithOneBlankFrame(
            'one_black_frame_1024_768_30.yuv')
        encoding = my_optimizer.BestEncoding(1000, videofile)
        encoding.Execute()
        # Most codecs should be good at this.
        self.assertLess(50.0, my_optimizer.Score(encoding))
        self.assertEqual(1, len(encoding.result['frame']))
        # Check that expected results are present and "reasonable".
        # Fixed: the bare Python 2 `print encoding.result` statement is a
        # syntax error under Python 3; the parenthesized form behaves the
        # same on both interpreters for a single argument.
        print(encoding.result)
        self.assertTrue(0.02 < encoding.result['encode_cputime'] < 15.0)
        self.assertTrue(100 < encoding.result['bitrate'] < 500)
        self.assertTrue(500 < encoding.result['frame'][0]['size'] < 12000)

    def test_SpeedGroup(self):
        # A 5000 kbps target falls into the speed group named '5000'.
        codec = vp9.Vp9Codec()
        self.assertEqual('5000', codec.SpeedGroup(5000))

    def test_Passes(self):
        """This test checks that both 1-pass and 2-pass encoding works."""
        codec = vp9.Vp9Codec()
        my_optimizer = optimizer.Optimizer(codec)
        videofile = test_tools.MakeYuvFileWithOneBlankFrame(
            'one_black_frame_1024_768_30.yuv')
        start_encoder = codec.StartEncoder(my_optimizer.context)
        encoder1 = encoder.Encoder(my_optimizer.context,
            start_encoder.parameters.ChangeValue('passes', 1))
        encoding1 = encoder1.Encoding(1000, videofile)
        encoder2 = encoder.Encoder(my_optimizer.context,
            start_encoder.parameters.ChangeValue('passes', 2))
        encoding2 = encoder2.Encoding(1000, videofile)
        encoding1.Execute()
        encoding2.Execute()
        self.assertTrue(encoding1.result)
        self.assertTrue(encoding2.result)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
# created by Roman Plevka
# this script takes a path as an argument and outputs its structure as a valid JSON object
# currently used attributes: type (file|dir), name (String), size (Number) [, children (Array of Obj)]
# 2014-07-16 by Roman initial version
# 2014-09-03 by Roman algorithm (fixed for links - no more links following)
import os, sys
# Root directory to scan (first CLI argument) and a global running id
# assigned to every emitted file/dir node.
path = sys.argv[1]
fid = 0
def getFiles(path):
    """Print the JSON elements for every entry directly under *path*.

    Directories recurse (their size is the recursive totalSize); links
    and plain files are emitted via getInfo().  Output relies on the
    Python 2 print-with-trailing-comma idiom to stay on one line --
    NOTE(review): this script is Python 2 only; under Python 3 the
    trailing commas change the output format.
    """
    global fid
    first = True;
    for item in os.listdir(path):
        # Emit a comma separator before every element except the first.
        if first:
            first = False
        else:
            print(", "),
        if os.path.islink(os.path.join(path, item)):
            # Symlinks are emitted like files and never followed.
            getInfo(path, item)
        elif os.path.isdir(os.path.join(path, item)):
            #size = str(os.path.getsize(path+"/"+item))
            size = str(totalSize(path+"/"+item))
            print("{ \"id\": "+str(fid)+", \"type\": \"dir\", \"name\": \""+item+"\", \"size\": "+size+", \"children\": ["),
            getFiles(path+"/"+item)
            print("]}"),
            fid += 1
        else:
            getInfo(path, item)
def getInfo(path, f):
    """Print the JSON object for a single file entry *f* under *path*.

    Symlinks get size 0 (they are not followed); ordinary files report
    os.path.getsize.  Increments the global node id counter.
    """
    global fid
    if os.path.islink(os.path.join(path, f)):
        size = "0"
    else:
        size = str(os.path.getsize(path+"/"+f))
    print("{ \"id\": "+str(fid)+", \"type\": \"file\", \"name\": \""+f+"\", \"size\": "+size+" }"),
    fid += 1
def totalSize(source):
    """Return the recursive on-disk size (bytes) of directory *source*.

    Counts the directory entry itself, every regular file beneath it, and
    a flat 17-byte estimate per symlink (links are never followed);
    subdirectories contribute their own recursive total.
    """
    size = os.path.getsize(source)
    for name in os.listdir(source):
        child = os.path.join(source, name)
        if os.path.islink(child):
            size += 17  # flat per-link estimate; link targets are not followed
        elif os.path.isfile(child):
            size += os.path.getsize(child)
        elif os.path.isdir(child):
            size += totalSize(child)
    return size
#print(date: \"date\",),
#print("data : ["),
print("["),
getFiles(path)
print("]"),
|
# Collaborators:
import random # imports the random number package
x = random.randint(1, 10)  # a random integer in [1, 10], inclusive on both ends
|
from django.conf.urls import patterns, url
# NOTE(review): django.conf.urls.patterns() with string view names was
# deprecated in Django 1.8 and removed in 1.10 -- this module targets a
# legacy Django release and will not import on modern versions.
urlpatterns = patterns('alipay.create_partner_trade_by_buyer.ptn.views',
    url(r'^$', 'ptn', {'item_check_callable':None}, name='alipay-ptn'),
)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 29 16:31:27 2017
@author: ADITYA
"""
from PIL import Image
import base64
#Converting the image into black and white...
# Convert the image to 8-bit grayscale ('L' mode) and save it.
# NOTE(review): paths are hard-coded to one machine; parameterize if reused.
img = Image.open(r'C:\Users\ADITYA\Pictures\Untitled.png').convert('L')
img.save(r'C:\Users\ADITYA\Pictures\output_file.png')
# Read the grayscale image back and base64-encode it.
# Fixed: the original bound the encoded bytes to the name ``str`` (shadowing
# the builtin) and wrote bytes to a text-mode file, which is a TypeError on
# Python 3; it also left the output file handle unclosed.  b64encode returns
# bytes, so decode to ASCII before writing/printing text.
with open(r'C:\Users\ADITYA\Pictures\output_file.png', "rb") as imageFile:
    encoded = base64.b64encode(imageFile.read()).decode('ascii')
print(encoded)
with open(r'F:\Files\Projects\SignaturerRecognition\alpha.txt', 'a') as out_file:
    out_file.write(encoded)
|
import copy
import cfg_common
from cfg import Cfg, RawBasicBlock, BlockList
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Page(models.Model):
    """A simple flat CMS page: a slug, a title and free-form content."""
    slug = models.SlugField(_('slug'), db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)

    class Meta:
        verbose_name = _('page')
        verbose_name_plural = _('pages')
        # Default queryset ordering: alphabetical by slug.
        ordering = ('slug',)

    def __str__(self):
        # e.g. "about -- About Us"
        return "%s -- %s" % (self.slug, self.title)
|
import random, os
from codecs import open
from DPjudge import Game, host, Map
from PayolaPower import PayolaPower
class PayolaGame(Game):
# ----------------------------------------------------------------------
    class Key:
        """One candidate order for a unit, with bribe-total bookkeeping.

        total   -- sum of all bribe amounts backing this order
        seqs    -- per-acceptance-list-slot earliest offer number (None if
                   no offer from that slot); used to break total ties
        cost    -- AgP contributed per offering power
        plateau -- plateau (protected) amount per offering power
        NOTE(review): this is Python 2 code (backquote repr, __cmp__/cmp).
        """
        # ------------------------------------------------------------------
        def __init__(self, power, unit, order):
            total, seqs = 0, [None] * (len(power.game.map.powers) + 1)
            cost, plateau = {}, {}
            # Bulk-assign every local (including the ctor args) as attributes.
            vars(self).update(locals())
        # ------------------------------------------------------------------
        def __repr__(self):
            # Python 2 backquotes == repr(...)
            return ' '.join((self.power.name, self.unit, self.order,
                `self.total`, `self.seqs`))
        # ------------------------------------------------------------------
        def __cmp__(self, key):
            # --------------------------------------------------------------
            # Decide all bribe winners.  High bribe wins, and if two or more
            # bribes are tied, the winner is that with the more acceptable
            # bribes (from bribe positions, put in acceptance list order).
            # None seqs sort last by substituting sys.maxint.
            # --------------------------------------------------------------
            return (cmp(key.total, self.total)
                or cmp([(x, os.sys.maxint)[x is None] for x in self.seqs],
                    [(x, os.sys.maxint)[x is None] for x in key.seqs]))
        # ------------------------------------------------------------------
        def add(self, offer):
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # Fold one offer into this key's accounting.  Determine the spot
            # in the acceptance list for the "position in list" data (seqs).
            # The find() call can return -1, which is intentional (all
            # non-map powers -- investors etc. -- land in the rearmost extra
            # slot).  See NOTE in PayolaGame.bestOffer() for a caution.
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            if offer.power.type or not self.power.fullAccept: which = -1
            else: which = self.power.fullAccept.find(offer.power.abbrev)
            if self.seqs[which] is None: self.seqs[which] = offer.num
            else: self.seqs[which] = min(self.seqs[which], offer.num)
            self.total += offer.amt
            self.cost.setdefault(offer.power, 0)
            self.cost[offer.power] += offer.amt
            self.plateau.setdefault(offer.power, 0)
            if 'PUBLIC_TOTALS' not in self.power.game.rules:
                self.plateau[offer.power] += offer.plateau
        # ------------------------------------------------------------------
        def format(self, power = 0, blind = 0, hide = 0):
            # Render one report line.  With a specific power, show only that
            # power's contribution; otherwise show the total, flagging ('*')
            # orders this power's own acceptance slot did not support.
            if power: flag, self.amt = ' ', self.cost.get(power, 0)
            else:
                pay = self.power.fullAccept.index(self.power.abbrev)
                flag = ' *'[self.seqs[pay] is None and not hide]
                self.amt = self.total
            return ' %s %3d : %s %s\n' % (flag, self.amt, self.unit,
                blind and ('%s(%s)' % ((' ' * 9)[len(self.unit):],
                self.power.game.map.ownWord[self.power.name])) or self.order)
# ----------------------------------------------------------------------
    def __init__(self, gameName, fileName = 'status'):
        """Create a Payola game: fixed 'payola' variant, PayolaPower powers."""
        self.variant, self.powerType = 'payola', PayolaPower
        Game.__init__(self, gameName, fileName)
# ----------------------------------------------------------------------
    def __repr__(self):
        """Serialize the game status, appending Payola TAX/CAP lines and
        stripping any directive lines (Python 2 str decode/encode round-trip
        through latin-1)."""
        text = Game.__repr__(self).decode('latin-1')
        if self.taxes:
            for center, value in self.taxes.items():
                text += '\nTAX %s %d' % (center, value)
        if self.tax: text += '\nTAX %d' % self.tax
        if self.cap: text += '\nCAP %d' % self.cap
        return '\n'.join([x for x in text.split('\n')
            if x not in self.directives]).encode('latin-1')
# ----------------------------------------------------------------------
    def reinit(self, includeFlags = 6):
        """Reset game state; bit 2 = persistent params, bit 4 = transient."""
        # ------------------------------------
        # Initialize the persistent parameters
        # ------------------------------------
        if includeFlags & 2:
            self.rules = ['ORDER_ANY']
            self.taxes, self.tax, self.cap = {}, 0, 0
        # -----------------------------------
        # Initialize the transient parameters
        # -----------------------------------
        if includeFlags & 4:
            self.offers, self.orders = {}, {}
        Game.reinit(self, includeFlags)
# ----------------------------------------------------------------------
    def parseGameData(self, word, includeFlags):
        """Parse one Payola-specific status-file line (TAX / CAP).

        Returns 1 when the line was consumed, 0 to let other parsers try.
        """
        parsed = Game.parseGameData(self, word, includeFlags)
        if parsed: return parsed
        word = [x.upper() for x in word]
        upline = ' '.join(word)
        # -----
        # Modes
        # -----
        if self.mode:
            return 0
        # --------------------------------------
        # Game-specific information (persistent)
        # --------------------------------------
        if not includeFlags & 2:
            return 0
        # ----------------------------------------------
        # Center tax income values (completely optional)
        # ----------------------------------------------
        elif word[0] == 'TAX':
            self.tax = -1
            if len(word) == 3 and self.map.areatype(word[1]) or len(word) < 3:
                try: self.tax = int(word[-1])
                except: pass
            if self.tax < 0: self.error += ['BAD TAX: ' + upline]
            elif len(word) == 3: self.taxes[word[1]] = self.tax
        elif word[0] == 'CAP':
            # Joining with '$' makes any multi-word value fail int() and be
            # reported as a BAD CAP.
            try:
                self.cap = int('$'.join(word[1:]))
                if self.cap < 1: raise
            except: self.error += ['BAD CAP: ' + upline]
        else: return 0
        return 1
# ----------------------------------------------------------------------
def parsePowerData(self, power, word, includeFlags):
parsed = Game.parsePowerData(self, power, word, includeFlags)
if parsed: return parsed
word = [x.upper() for x in word]
upline = ' '.join(word)
# -----
# Modes
# -----
if self.mode:
return 0
# -------------------
# Offers and comments
# -------------------
elif word[0][0] in '0123456789%' or word[0] in ('A', 'F'):
if not includeFlags & 1: return -1
power.sheet += [upline]
if word[0][0] != '%': power.held = 1
return 1
# -------------------------------
# Power-specific data (transient)
# -------------------------------
found = 0
if includeFlags & 4:
found = 1
if word[0] == 'SENT': # (one_transfer)
try: power.sent += [word[1]]
except: self.error += ['BAD SENT FOR ' + power.name]
elif word[0] == 'ELECT': # (exchange)
for item in word[1:]:
company, candidate = item.split(':')
power.elect[company] = candidate
elif word[0] == 'STATE': # (exchange, undocumented?)
if len(word) == 2: power.state = word[1]
else: error += ['BAD STATE FOR ' + power.name]
elif word[0] == 'ACCEPT':
if power.accept: self.error += ['TWO ACCEPTS FOR ' + power.name]
elif len(word) != 2:
self.error += ['BAD ACCEPT FOR ' + power.name]
else: power.accept = word[1]
else: found = 0
return found
# ----------------------------------------------------------------------
    def finishPowerData(self, power):
        """Post-parse hook: a power's liquid funds start equal to its balance."""
        Game.finishPowerData(self, power)
        power.liquid = power.balance
# ----------------------------------------------------------------------
    def rollback(self, phase = None, includeFlags = 0):
        """Roll the game back, then truncate the chart file to match.

        NOTE(review): Python 2 file -- codecs.open(...) takes the encoding
        as the third positional argument, and 0666 is a Py2 octal literal.
        """
        error = Game.rollback(self, phase, includeFlags)
        if error: return error
        # -----------------------
        # Truncate the chart file
        # -----------------------
        phase = self.phaseType == 'M' and self.phase or self.findNextPhase('M')
        if len(phase.split()) == 3 and os.path.isfile(self.file('chart')):
            file = open(self.file('chart'), 'r', 'latin-1')
            lines = file.readlines()
            file.close()
            # Find the chart line matching the rollback phase (else: ran off
            # the end -- nothing to truncate).
            for num, text in enumerate(lines):
                if phase == text.strip(): break
            else:
                num = len(lines)
            if num == len(lines): pass
            elif num < 3:
                # Too little would remain; drop the whole chart.
                try: os.unlink(self.file('chart'))
                except: pass
            else:
                file = open(self.file('chart'), 'w', 'latin-1')
                file.writelines(lines[:num-2])
                file.close()
            try: os.chmod(self.file('chart'), 0666)
            except: pass
        # -------------------------------------------------
        # Ledgers are a kind of press, so we leave it as is
        # -------------------------------------------------
# ----------------------------------------------------------------------
    def processExchangeReportsPhase(self):
        """Mail each corporation's annual report to its stockholders.

        Skips non-map powers and powers with no acceptance list; the
        stockholders are the powers holding any of that power's shares.
        """
        for power in self.powers:
            if power.type or not power.accept: continue
            shareholders = [x.name for x in self.powers
                if x.funds.get(power.abbrev)]
            self.mailPress(None, shareholders,
                'Status of %s for the beginning of fiscal year %s.\n\n'
                'Current treasury balance: %d AgP\n'
                'List of stockholders:%s\n' %
                (power.name.title(), self.phase.split()[1],
                power.balance, '\n '.join(shareholders)),
                subject = 'Annual stockholder report for ' + power.name.title())
        return ['Annual stockholder reports have been sent.\n']
# ----------------------------------------------------------------------
    def processExchangeElectionsPhase(self):
        """Tally board-election votes, weighted by shares held.

        Incomplete: results are tallied but (per the original note below)
        winners are never determined or acted on.
        """
        results = {}
        for power in self.powers:
            for corp in self.powers:
                vote = power.elect.get(corp)
                if not vote: continue
                results.setdefault(corp, {}).setdefault(vote, 0)
                results[corp][vote] += power.funds[corp[0]]
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Need to make a list of who won (what are the rules?)
        # and act on it, and return it instead of just doing this:
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        return []
# ----------------------------------------------------------------------
    def processExchangeDividendsPhase(self):
        """Pay (or announce withholding of) per-share dividends.

        A power with a '/share' entry in its funds pays that amount per
        share to every other shareholder, then a ledger press message is
        mailed; a zero dividend instead notifies shareholders that the
        dividend was withheld.
        """
        distributed = 0
        for power in self.powers:
            div, shareholders = power.funds.get('/share'), []
            if div is None: continue
            prevBal = power.balance
            for player in self.powers:
                shares = player.funds.get(power.abbrev)
                if player is power or not shares: continue
                if div:
                    self.transferCash(power, player, shares * div, receipt = 0)
                else: shareholders += [player.name]
            del power.funds['/share']
            if shareholders:
                # Zero dividend: announce the withholding and move on.
                self.mailPress(None, shareholders,
                    '%s has elected to withhold dividends for %d.' %
                    (power.name.title(), self.year),
                    subject = 'Dividends withheld by ' + power.name.title())
                continue
            self.openMail('Payola dividend distribution', 'ledgers')
            self.mail.write('OFFICIAL Payola dividend distribution\n', 0)
            template = ('PRESS TO %s' +
                ' QUIET' * ('EAVESDROP' not in self.rules))
            self.mail.write(template % power.name +
                '\nACCOUNT ACTIVITY FOR %s\n%s\n'
                'BALANCE FORWARD %4d AgP\n'
                'DIVIDEND DISTRIBUTION %4d AgP\n'
                'CURRENT BALANCE %4d AgP\n'
                'ENDPRESS\n' %
                (power.name, '=' * (21 + len(power.name)),
                prevBal, prevBal - power.balance, power.balance))
            self.mail.write('SIGNOFF\n', 0)
            self.mail.close()
            self.mail, distributed = None, 1
        return ['Corporate dividends have been disbursed.\n'] * distributed
# ----------------------------------------------------------------------
    def processIncomePhase(self):
        """Distribute income, returning the phase report text.

        NOTE(review): shadows the builtin ``list``; and the trailing
        ``[...][cond:]`` is a slice-by-boolean idiom -- the message list is
        emptied (sliced from index 1) when ledgers are off or the game is
        over, otherwise kept whole.
        """
        if self.findNextPhase().endswith('ADJUSTMENTS'):
            list = Game.captureCenters(self, self.parseSupplyCount)
        else:
            Game.powerSizes(self, None, self.parseSupplyCount)
            list = []
        return (list + [self.phase.title() + ' has been distributed.\n']
            ['NO_LEDGERS' in self.rules or self.phase in (None, 'COMPLETED'):])
# ----------------------------------------------------------------------
def resolvePhase(self):
if self.phaseType == 'I': return self.processIncomePhase()
if self.phaseType == 'D': return self.processExchangeDividendsPhase()
if self.phaseType == 'Y': return self.processExchangeReportsPhase()
return Game.resolvePhase(self)
# ----------------------------------------------------------------------
    def checkPhase(self, text):
        """Handle the Payola-only phases ('I', 'D', 'Y') by appending their
        report output to *text* and returning 1; defer anything else to the
        base Game.checkPhase."""
        if self.phaseType == 'I': text += self.processIncomePhase()
        elif self.phaseType == 'D':
            text += self.processExchangeDividendsPhase()
        elif self.phaseType == 'Y':
            text += self.processExchangeReportsPhase()
        else: return Game.checkPhase(self, text)
        return 1
# ----------------------------------------------------------------------
    def checkAccept(self, power, accept = 0):
        """Validate and store a power's acceptance list.

        The list is a string of power abbreviations (plus optional '?'
        wildcard); each letter must be a real power and appear at most
        once.  On failure the problem is appended to self.error and the
        stored list is left unchanged.
        """
        accept = accept and accept.upper() or power.accept
        if len(accept) > len(self.map.powers):
            return self.error.append('BAD ACCEPTANCE LIST FOR ' + power.name)
        # '?'[cond:] idiom: append '?' only when the list is short and has
        # no wildcard yet.
        accept += '?'[len(accept) == len(self.map.powers) or '?' in accept:]
        powers = [x.abbrev for x in self.powers if x.abbrev] + ['?']
        for letter in accept:
            if letter not in powers or accept.count(letter) > 1:
                return self.error.append('BAD ACCEPT LIST FOR ' + power.name)
        power.accept = accept
# ----------------------------------------------------------------------
    def parseOffer(self, power, offer):
        """Parse, validate, and register one offer line from *power*.

        An offer is "AMOUNT TYPE ORDER [| ALT-ORDER ...] [%comment]" where
        AMOUNT may carry repetition (rep*amt), plateau (amt#plat) and
        augmentation (+) markers, and TYPE is one of ':' (direct), '!',
        '@', '>', '&' (wildcards) or '$' (savings).  Returns the
        normalized line text on success; on any validation failure,
        appends a message to self.error and returns None.
        """
        # -----------------------------------------------
        # Comments are signified by leading percent-marks
        # -----------------------------------------------
        both = offer.split('%')
        word, comment = both[0].upper().split(), '%'.join(both[1:])
        if not word: return offer
        # -----------------------------
        # Provide 0 : default if needed
        # -----------------------------
        if word[0][0] in '!:>@': word[0] = '0' + word[0]
        elif word[0][0].isalpha(): word = ['0', ':'] + word
        # -----------------------------------------------------------
        # Format bribe amount and plateau without embedded whitespace
        # -----------------------------------------------------------
        for ch in '*#+':
            if len(word) > 1 and word[1][0] == ch:
                if len(word[1]) > 1: word[:2] = [''.join(word[:2])]
                else: word[:3] = [''.join(word[:3])]
        # --------------------
        # Segregate bribe type
        # --------------------
        if not word[0][0].isdigit():
            return self.error.append('INVALID OFFER AMOUNT: ' + word[0])
        for ch, char in enumerate(word[0]):
            if not (char.isdigit() or char in '#*+'):
                word[0:1] = [word[0][:ch], word[0][ch:]]
                break
        if len(word[1]) > 1: word[1:2] = [word[1][0], word[1][1:]]
        # -----------------------------------------
        # Validate the bribe amount, which may have
        # the format rep*max#plateau+(another)+...
        # -----------------------------------------
        detail = []
        for each in word[0].split('+'):
            if not each: return self.error.append(
                'ADDITIONAL BRIBE NOT GIVEN: ' + word[0])
            try:
                if '*' in each:
                    rep, rest = each.split('*')
                    if rep == '1': each = rest
                else: rep, rest = 1, each
                if '#' not in rest: amt, plateau = int(rest), 0
                elif 'NO_PLATEAU' in self.rules or 'NO_SAVINGS' in self.rules:
                    return self.error.append('PLATEAUS NOT ALLOWED')
                elif rest[-1] == '#': amt = plateau = int(rest[:-1])
                else: amt, plateau = map(int, rest.split('#'))
                if amt == 0: rep, each = 1, '0'
            except: rep, amt, plateau = 0, 0, 1
            if amt < plateau or amt > 9999 or not (0 < int(rep) < 100):
                return self.error.append('INVALID OFFER AMOUNT: ' + each)
            detail += [(rep, amt, plateau)]
        # ------------------------------------
        # Figure out what to do with the offer
        # First see if it is a savings request
        # ------------------------------------
        if word[1] == '$':
            if 'NO_SAVINGS' in self.rules:
                return self.error.append('SAVINGS NOT ALLOWED IN THIS GAME')
            if '#' in word[0]:
                return self.error.append('PLATEAU AMOUNT ON SAVINGS REQUEST')
            for rep, amt, plateau in detail: power.reserve(rep * amt)
            newline = word
        # -----------------------------
        # Now see if it is a bribe line
        # -----------------------------
        elif word[1] in list(':!@>&'):
            if len(word) < 3:
                return self.error.append('INCOMPLETE OFFER: ' + ' '.join(word))
            parts = [x.strip().split() for x in ' '.join(word[2:]).split('|')]
            # --------------------------------------------------
            # Convert all words in the first order to recognized
            # tokens and add all missing unit types and any
            # missing coasts (RUM-BUL becomes RUM-BUL/EC)
            # --------------------------------------------------
            first = self.map.defaultCoast(self.addUnitTypes(
                self.expandOrder(parts[0])))
            # ---------------------------------------------------
            # Validate the unit and check for disallowed wildcard
            # orders and and for 0 AgP bribes to foreign units.
            # ---------------------------------------------------
            if len(first) < 2:
                return self.error.append('INCOMPLETE OFFER: ' + ' '.join(word))
            unit, orders, newline = ' '.join(first[:2]), [], first[:2]
            if word[:2] != ['0', ':']: newline = word[:2] + newline
            # ---------------------------------------
            # Check for 'Payola Classic' restrictions
            # ---------------------------------------
            if 'DIRECT_ONLY' in self.rules:
                if [x for x in power.offers if x.unit == unit]:
                    return self.error.append(
                        'MULTIPLE OFFERS TO A SINGLE UNIT FORBIDDEN: ' + unit)
                if word[1] != ':' or '|' in offer: return self.error.append(
                    'WILDCARD BRIBES NOT ALLOWED')
                if '*' in word[0] or '+' in word[0]: return self.error.append(
                    'REPETITION AND AUGMENTATION NOT ALLOWED')
                if ('LIMIT_OFFERS' in self.rules
                    and sum([x.amt for x in power.offers]) > power.liquid):
                    return self.error.append(
                        'TOTAL OFFERS EXCEED AMOUNT AVAILABLE')
            # ---------------------------
            # Check for dummy-only Payola
            # ---------------------------
            if 'PAY_DUMMIES' in self.rules:
                if unit in power.units:
                    for rep, amt, plateau in detail:
                        if ((amt, word[1]) != (0, ':') or '|' in offer
                            or [x for x in power.offers if x.unit == unit]):
                            return self.error.append(
                                'BRIBE TO DOMESTIC UNIT: ' + unit)
                else:
                    whose = self.unitOwner(unit)
                    if whose and whose.player and not whose.isDummy():
                        return self.error.append('BRIBE TO OWNED UNIT: ' + unit)
            # ---------------------------------------------------
            # Check for zero silver piece bribes to foreign units
            # ---------------------------------------------------
            for rep, amt, plateau in detail:
                if (amt == 0 and unit not in power.units
                    and ('BLIND' in self.rules or self.unitOwner(unit))
                    and 'ZERO_FOREIGN' not in self.rules): return self.error.append(
                    'ZERO AgP OFFER TO FOREIGN UNIT: ' + unit)
            # --------------------------------------------------
            # Go through all bribes (separated by vertical bars)
            # --------------------------------------------------
            for part in parts:
                # --------------------------------------------------
                # Convert all words in the next orders to recognized
                # tokens and add all missing unit types and any
                # missing coasts (RUM-BUL becomes RUM-BUL/EC)
                # --------------------------------------------------
                if not orders: part = first[2:]
                else: part = self.map.defaultCoast(self.addUnitTypes(
                    self.expandOrder(first[:2] + part)))[2:]
                if part and len(part[-1]) == 1 and not part[-1].isalpha():
                    part = part[:-1]
                if not part: return self.error.append('NO %sORDER GIVEN: ' %
                    ('|' in ' '.join(word) and 'ALTERNATIVE ' or '') + unit)
                # --------------------------------------------------
                # The Payola Mailing List voted to outlaw duplicate
                # orders in offers (i.e. "5 : F TYS - ION | - ION").
                # Note that this form DOES have meaning, though --
                # rather than a single bribe of 10 AgP, which would
                # be reduced to 9 on overspending, two bribes of 5
                # would be reduced each to four, for a total of 8.
                # However, to achieve quicker reduction like this,
                # players must use separate offers.  The best
                # reason to for this rule is to provide semantics
                # consistent with that required for negative offers.
                # --------------------------------------------------
                order = ' '.join(part)
                if order in orders or (order + ' ?') in orders:
                    return self.error.append(
                        'DUPLICATE ORDER IN OFFER: %s ' % unit + order)
                if orders: newline += ['|']
                newline += part
                # --------------------------------------------------
                # Validate and (if valid) add the order to the offer
                # --------------------------------------------------
                valid = self.validOrder(power, unit, order)
                if valid is None: return
                if not valid and 'FICTIONAL_OK' not in self.rules:
                    return self.error.append(
                        'NON-EXISTENT UNIT IN ORDER: %s ' % unit + order)
                whose = self.unitOwner(unit)
                # valid == -1 marks a SIGNAL_SUPPORT ('?') order.
                if valid == -1:
                    if 'RESTRICT_SIGNAL' in self.rules:
                        if word[:2] != ['0', ':']:
                            return self.error.append(
                                'SIGNAL_SUPPORT ORDER MUST BE 0 AgP DIRECT ' +
                                'BRIBE: ' + ' '.join(word))
                        elif not whose or power not in (whose, whose.controller()):
                            return self.error.append(
                                'SIGNAL_SUPPORT ORDER TO FOREIGN UNIT: %s ' %
                                unit + order)
                        elif len(parts) > 1:
                            return self.error.append(
                                'BRANCHING NOT ALLOWED IN SIGNAL_SUPPORT ORDER: ' +
                                ' '.join(word))
                        elif self.signalOrder(whose, unit):
                            return self.error.append(
                                'MORE THAN ONE SIGNAL_SUPPORT ORDER FOR UNIT: %s ' %
                                unit + order)
                    newline += ['?']
                if (('TOUCH_BRIBE' in self.rules
                    or 'REMOTE_BRIBE' in self.rules)
                    and not ('CD_DUMMIES' in self.rules
                    and whose and whose.player and whose.isDummy())):
                    owner = whose or power
                    if power is not owner:
                        bad = [x for x in power.units
                            if self.validOrder(power, x, 'S ' + unit, report=0) == 1
                            or self.validOrder(owner, unit, 'S ' + x, report=0) == 1]
                        if 'TOUCH_BRIBE' in self.rules: bad = not bad
                        if bad: return self.error.append(
                            'BRIBED UNIT M%sT BE ADJACENT: ' %
                            ('AY NO', 'US')['TOUCH_BRIBE' in self.rules] + unit)
                orders += [order + ' ?' * (valid == -1)]
            # -----------------------------------------------------
            # Add the offer repeatedly (according to the "*" count)
            # -----------------------------------------------------
            for rep, amt, plateau in detail:
                for repeat in range(int(rep)):
                    power.addOffer(word[1], unit, orders, amt, plateau)
        else: return self.error.append('BAD OFFER TYPE: ' + offer)
        return ' '.join(newline) + (comment and (' %' + comment) or '')
# ----------------------------------------------------------------------
    def bestOffer(self, power, unit):
        """Collect all offers for *unit*, pick the winning order, and store
        the sorted candidates in self.offers[unit] and the winner in
        self.orders[unit].  Handles BLIND default-hold, RESTRICT_SIGNAL
        substitution, EBAYOLA second-price rebates, and uncontrolled-dummy
        tie-breaking.
        """
        orders = {}
        # -----------------------------------------
        # Determine direct payment for each : order
        # -----------------------------------------
        while not orders:
            for offerer in self.powers:
                for offer in offerer.offers:
                    if (offer.unit == unit and offer.code == ':'
                        and self.validOrder(power, unit, offer.order)):
                        if offer.order not in orders: orders[offer.order] = (
                            self.Key(power, offer.unit, offer.order))
                        orders[offer.order].add(offer)
            # ---------------------------------------------------
            # In BLIND games, all the offers a unit gets MIGHT be
            # invalid.  If so, we'll have no order yet, and we'll
            # need to add another (default hold) bribe to get one
            # ---------------------------------------------------
            if not orders: power.addOffer(':', unit, 'H')
        # ----------------------
        # Now add wildcard money
        # ----------------------
        for offerer in self.powers:
            for offer in offerer.offers:
                if offer.unit != unit or offer.code == ':': continue
                for order, key in orders.items():
                    if 'RESTRICT_SIGNAL' in self.rules and order[-1:] == '?':
                        continue
                    if ((offer.code == '!' and order not in offer.order)
                        or '@>'.find(offer.code) == (order[0] == '-')
                        or offer.code == '&'): key.add(offer)
        # ---------------------------------------------
        # Sort the offers and pick the first (best) one
        # ---------------------------------------------
        self.offers[unit] = sorted(orders.values())
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # NOTE -- in games with investors, we still could be "randomly"
        # choosing one of the bids involved in an unresolvable tie.  This
        # could happen if two (or more) investors all offer the same
        # high-bid amount to a unit for separate orders, and no power's
        # money is involved.  All investors appear at the same location
        # in all acceptance lists, meaning the "num" attribute of the two
        # Keys could be identical if each power listed its contending
        # bid at the same position down their list.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        self.orders[unit] = best = self.offers[unit][0]
        # -----------------------------------------------------------
        # When the RESTRICT_SIGNAL rule is in play, substitute a HOLD
        # order with its SIGNAL_SUPPORT alternative, if present.
        # -----------------------------------------------------------
        if 'RESTRICT_SIGNAL' in self.rules and best.order == 'H':
            best.order = self.signalOrder(power, unit) or 'H'
        # --------------------------------------------------------
        # If the eBayola rule is being used, the winning briber(s)
        # will only be charged the sum of the highest NON-winning
        # bribe amount plus one.  Any resulting rebates will be
        # given to the most-preferred contributing powers first
        # (uncomment in the if clause to rebate the most-preferred
        # contributing power among those offering the most gold).
        # --------------------------------------------------------
        if 'EBAYOLA' in self.rules:
            for other in self.offers[unit][1:]:
                for who, amt in other.cost.items():
                    other.cost[who] -= min(amt, best.cost.get(who, 0))
            pref, obs, over = {}, [], best.total - 1
            try: over -= max(sorted(self.offers[unit][1:])[0].total, 0)
            except: pass
            for who, amt in best.cost.items():
                if amt: # == max(best.cost.values()):
                    try: pref[power.fullAccept.index(who.abbrev)] = who
                    except: obs += [who]
            # Python 2 cmp-style sort: observers ordered by descending cost.
            obs.sort(lambda x,y: cmp(best.cost[y], best.cost[x]))
            for saver in [pref[x] for x in sorted(pref)] + obs:
                rebate = min(best.cost[saver] - best.plateau[saver], over)
                best.cost[saver] -= rebate
                best.total -= rebate
                over -= rebate
        # --------------------------------------------------------------
        # When a DUMMY unit is bribed, all ties involving bribes offered
        # by a different power or set of powers are decided by having
        # the unit HOLD.  Find and accept the unit's HOLD offer, and set
        # its total (for appearances) to one AgP better than the best.
        # --------------------------------------------------------------
        if power.isDummy() and not power.controller() and power.accept == '?':
            # (shadows the builtin ``id``; kept as-is)
            id = [not x for x in self.orders[unit].seqs]
            for guy in self.offers[unit][1:]:
                if self.orders[unit].total != guy.total: return
                if [not x for x in guy.seqs] != id: break
            else: return
            tops = self.offers[unit][0].total + 1
            try:
                hold = [x for x in self.offers[unit] if x.order == 'H'][0]
                self.offers[unit].remove(hold)
            except: hold = self.Key(power, unit, 'H')
            hold.total, self.orders[unit] = tops, hold
            self.offers[unit].insert(0, hold)
# ----------------------------------------------------------------------
def signalOrder(self, power, unit):
signals = [x.order for x in power.offers if x.unit == unit and x.order[-2:] == ' ?']
if signals: return signals[0]
# ----------------------------------------------------------------------
    def determineOrders(self):
        """Run the bribe auction and fix the winning order for every unit.

        The whole auction is re-run from scratch whenever any power ends a
        pass having spent more liquid gold than it holds; reduce() trims the
        overspenders' offers between passes.
        """
        # ------------------------------------------------------------------
        # All vassal states use the same acceptance list as their controller
        # ------------------------------------------------------------------
        if 'VASSAL_DUMMIES' in self.rules:
            for power in self.powers:
                for vassal in power.vassals():
                    vassal.accept = power.accept
        # ---------------------------------
        # Provide default hold offers and
        # expand apathetic acceptance lists
        # ---------------------------------
        # Note -- initially, all apathetic lists were expanded every time
        # any unit decided on an order.  This caused problems, though, in
        # that a second or subsequent order in a group of alternatives
        # (separated by vertical bars) would often sort higher than the
        # first order in the list, in direct contradiction of Rule 3.3.
        # So then I made it so that the expansion occurred for all lists
        # only before each run through all of the bribes -- including
        # before any re-run-throughs caused by player overexpenditure.
        # This made good sense to me, but proved too difficult to explain
        # in the rules and could lead to players wondering why an
        # overexpenditure had even occurred, because with the acceptance
        # list expansion that was finally used, one would not have been
        # necessary.  So I decided it was cleaner all around to just do
        # it up front, and the same expansion will hold true for all
        # adjudication done in a particular movement phase.
        # ---------------------------------------------------------------
        for power in self.powers:
            power.addDefaults()
            if power.accept:
                # Replace the '?' wildcard with the remaining powers in a
                # random order to complete the acceptance list.
                others = [x.abbrev for x in self.powers
                          if x.abbrev and x.abbrev not in power.accept]
                random.shuffle(others)
                power.fullAccept = power.accept.replace('?', ''.join(others))
        # -------------------------------
        # Determine the orders to be sent
        # -------------------------------
        # NOTE(review): 'await' became a reserved word in Python 3.7; this
        # attribute name confines the module to Python 2.
        self.await = 1
        while 1:
            self.orders, self.offers, overspent = {}, {}, 0
            for power in self.powers:
                power.left = power.liquid
                if not power.accept: continue
                # Re-randomize the wildcard expansion for each auction pass.
                others = [x.abbrev for x in self.powers
                          if x.abbrev and x.abbrev not in power.accept]
                random.shuffle(others)
                power.fullAccept = power.accept.replace('?', ''.join(others))
            for power in self.powers:
                for unit in power.units:
                    self.bestOffer(power, unit)
                    # Charge every contributor its share of the winning
                    # offer's cost; remember if anyone went into the red.
                    for payer, amount in self.orders[unit].cost.items():
                        payer.spend(amount)
                        overspent |= payer.left < 0
            if not overspent: break
            # Trim the offers of every overdrawn power, then re-auction.
            [x.reduce() for x in self.powers if x.left < 0]
# ----------------------------------------------------------------------
    def writeChart(self):
        """Append this phase's public bribe chart to the game's chart file.

        The chart shows, per unit and per offer, what each bribing power
        paid and the offer's total, followed by treasury summary rows.
        Under PAY_DUMMIES only dummy powers' units are charted and only
        non-dummies appear as bribers.
        """
        header, num, dash, bribers = '%35s', '%4d ', ' - ', self.map.powers
        powers, balance, liquid, left, overpaid, total = [], {}, {}, {}, {}, 0
        # Snapshot each power's treasury figures, keyed by power name.
        for player in bribers:
            for power in self.powers:
                if power.name == player:
                    balance[player], left[player] = power.balance, power.left
                    liquid[player] = power.liquid
                    if power.overpaid: overpaid[player] = power.overpaid
                    if power.units: powers += [power]
                    break
        if 'PAY_DUMMIES' in self.rules:
            bribers = [x.name for x in self.powers
                if x.name in self.map.powers and not x.isDummy()]
            powers = [x for x in powers if x.isDummy()]
        line = '-' * (39 + 5 * len(bribers))
        # NOTE: 'file' shadows the Python 2 builtin of the same name.
        try: file = open(self.file('chart'), 'a')
        except: return self.error.append('CANNOT WRITE CHART: INFORM MASTER')
        temp = '\n%s\n%s\n%s\n%35s' % (line, self.phase, line, '')
        file.write(temp.encode('latin-1'))
        for player in bribers: file.write(' %.3s' % player)
        if 'PAY_DUMMIES' not in self.rules:
            # One row per power showing its (expanded) acceptance ranking;
            # wildcarded powers show as ' ? '.
            for power in powers:
                file.write('\nACCEPTANCE LIST FOR %-15s' % power.name)
                others = [x.abbrev for x in self.powers
                    if x.abbrev and x.abbrev not in power.accept]
                accept = power.accept.replace('?', '?' * len(others))
                for each in self.powers:
                    if each.abbrev:
                        if each.abbrev in others: file.write(' ? ')
                        else: file.write(num % (accept.index(each.abbrev) + 1))
        file.write('\n' + line.encode('latin-1'))
        for power in powers:
            file.write('\n')
            for unit in power.units:
                for offer in self.offers[unit]:
                    # RESTRICT_SIGNAL hides signal orders: display as 'H'.
                    order = ('RESTRICT_SIGNAL' in self.rules and offer.total and
                        offer.order[-1:] == '?') and 'H' or offer.order
                    temp = '%-35s' % (unit + ' ' + order)
                    file.write(temp.encode('latin-1'))
                    for payer in bribers:
                        player = [x for x in self.powers if x.name == payer]
                        if player:
                            temp = (player[0] in offer.cost
                                and num % offer.cost[player[0]] or dash)
                            file.write(temp.encode('latin-1'))
                    temp = (unit[0] == ' ' and dash or num % offer.total) + '\n'
                    file.write(temp.encode('latin-1'))
                    # Blank out the unit label for this unit's later rows.
                    unit = ' ' * len(unit)
        file.write(line.encode('latin-1'))
        if overpaid:
            file.write('\n%-35s' % 'OVERBIDDING REDUCTIONS')
            for power in bribers: file.write(
                overpaid.get(power) and num % overpaid[power] or dash)
        file.write('\n%-35s' % 'TREASURY BEFORE OFFERS')
        for power in bribers:
            balance[power] = balance.get(power, 0)
            liquid[power] = liquid.get(power, 0)
            left[power] = left.get(power, 0)
            file.write(num % balance[power])
            total += balance[power]
        temp = num % total + '\n%-35s' % 'TOTAL PRICES PAID'
        file.write(temp.encode('latin-1'))
        total = 0
        for power in bribers:
            # Amount actually paid this phase = liquid funds minus leftover.
            file.write(num % (liquid[power] - left[power]))
            total += liquid[power] - left[power]
        temp = num % total + '\n%-35s' % 'NEW TREASURY BALANCES'
        file.write(temp.encode('latin-1'))
        total = 0
        for power in bribers:
            temp = num % (balance[power] - liquid[power] + left[power])
            file.write(temp.encode('latin-1'))
            total += balance[power] - liquid[power] + left[power]
        temp = num % total + '\n%s\n' % line
        file.write(temp.encode('latin-1'))
        file.close()
        # Python 2 octal literal: make the chart world-readable/writable.
        try: os.chmod(file.name, 0666)
        except: pass
# ----------------------------------------------------------------------
    def sendLedgers(self):
        """Mail each power a private account statement for this phase.

        The statement lists how the power's own units were ordered, which
        of its bribes were accepted, its full offer sheet, and its bank
        balance before and after the bribes were charged.  As a side
        effect this DEDUCTS the phase's spending from power.balance.
        """
        self.openMail('Payola orders', 'ledgers')
        self.mail.write('OFFICIAL Payola bribe results %s %.1s%s%.1s\n' %
            tuple([self.name] + self.phase.split()), 0)
        blind = 'BLIND' in self.rules
        for power in self.powers:
            # Skip powers with nothing to report: no offers and either no
            # money or an unsupervised dummy.
            if not power.offers and (not power.balance
                or power.isDummy() and not power.ceo): continue
            self.mail.write('PRESS TO %s %s\n' %
                (power.name, 'EAVESDROP' not in self.rules and 'QUIET' or ''))
            self.mail.write(
                '%s ACCOUNT STATEMENT FOR %s\n%s\n' %
                (self.phase.upper(), power.name,
                '=' * (len(self.phase + power.name) + 23)))
            if power.units and 'PAY_DUMMIES' not in self.rules:
                gain = 0
                self.mail.write(
                    'YOUR UNITS WERE %sORDERED AS FOLLOWS:\n' %
                    ('PAID AND ' * ('HIDE_COST' not in self.rules)))
                for unit in power.units:
                    if 'HIDE_COST' not in self.rules:
                        self.mail.write(self.orders[unit].format(
                            hide = 'HIDE_OFFERS' in self.rules))
                    gain += self.orders[unit].total
                if 'ZEROSUM' in self.rules:
                    self.mail.write(
                        'TOTAL NET INCOME FOR YOUR UNITS WAS:%5d AgP\n' % gain)
                    power.funds['+'] = power.funds.get('+', 0) + gain
            if not power.offers: self.mail.write('YOU OFFERED NO BRIBES\n')
            elif 'HIDE_OFFERS' not in self.rules:
                # status == 1 until the section header has been written once.
                status = 1
                for key in [x for x in self.orders.values() if power in x.cost]:
                    off = key.format(power, blind)
                    if not key.amt:
                        if self.unitOwner(key.unit) == power:
                            if 'PAY_DUMMIES' in self.rules: continue
                        elif blind and 'ZERO_FOREIGN' not in self.rules:
                            continue
                    self.mail.write('YOUR ACCEPTED BRIBES WERE:\n' * status +
                        off)
                    status = 0
                if status:
                    self.mail.write('NONE OF YOUR BRIBES WERE ACCEPTED\n')
                self.mail.write('YOUR OFFER SHEET WAS:\n')
                for offer in power.sheet:
                    # Offers without an explicit amount default to 0 AgP.
                    if not offer[:1].isdigit(): offer = '0 : ' + offer
                    if 'PAY_DUMMIES' in self.rules and offer[:1] == '0':
                        continue
                    if ('RESTRICT_SIGNAL' in self.rules and offer[:1] != '0' and
                        offer[-1:] == '?'):
                        offer = ' '.join(offer.split()[:2]) + ' H'
                    self.mail.write(
                        '%*s%s\n' % (6 - offer.find(' '), '', offer))
                self.mail.write(
                    'THE PREVIOUS BALANCE OF YOUR BANK ACCOUNT WAS:%7d AgP\n' %
                    power.balance)
                # 'S'[n < 2:] yields '' for n == 1 and 'S' otherwise,
                # pluralizing TIME(S).
                if power.overpaid: self.mail.write(
                    'EACH OFFER WAS SUBJECT TO BRIBE REDUCTION RULES %5d TIME%s\n'
                    % (power.overpaid, 'S'[power.overpaid < 2:]))
                self.mail.write(
                    'TOTAL COST TO YOU OF THE BRIBES YOU OFFERED WAS:%5d AgP\n' %
                    (power.liquid - power.left))
                power.balance -= power.liquid - power.left
                self.mail.write(
                    'THE REMAINING BALANCE IN YOUR BANK ACCOUNT IS:%7d AgP\n'
                    'ENDPRESS\n' % power.balance)
        self.mail.write('SIGNOFF\n', 0)
        self.mail.close()
        self.mail = None
# ----------------------------------------------------------------------
def preMoveUpdate(self):
if 'BLIND' in self.rules or 'FICTIONAL_OK' in self.rules:
self.error = [x for x in self.error
if not x.startswith('IMPOSSIBLE ORDER')]
if self.error:
print 'ERRORS IMPEDING RESOLUTION:', self.error
return
self.writeChart()
if 'NO_LEDGERS' not in self.rules: self.sendLedgers()
# ----------------------
# Empty the offer sheets
# ----------------------
for power in self.powers:
if not power.offers and not power.isEliminated(False, True):
power.cd = 1
power.sheet = power.offers = []
return Game.preMoveUpdate(self)
# ----------------------------------------------------------------------
def postMoveUpdate(self):
if 'NO_DONATIONS' in self.rules:
self.findGoners(phase = 0)
for power in self.powers:
if power.goner: power.balance = 0
return Game.postMoveUpdate(self)
# ----------------------------------------------------------------------
    def validateStatus(self):
        """Validate the loaded game status for the Payola variant.

        Parses every power's offer sheet, inserts an INCOME phase into the
        map flow if one is missing, and rejects rules/settings that are
        incompatible with Payola.
        """
        # ------------------------------------------------------------
        # Parsing offers needs to be done after all powers are loaded,
        # thus not in finishPowerData(), because it checks whether an
        # ordered unit has an owner.
        # ------------------------------------------------------------
        for power in self.powers:
            for offer in power.sheet: self.parseOffer(power, offer)
            self.validateOffers(power)
        self.map = self.map or Map.Map()
        # -------------------------------------------------
        # If the map's flow already holds any INCOME phase,
        # leave it alone.  Otherwise, add a single INCOME
        # phase into the flow after the first ADJUSTMENTS.
        # -------------------------------------------------
        if self.map.flow:
            for item in [x.split(':')[1] for x in self.map.flow]:
                if 'INCOME' in item.split(','): break
            else:
                for flow, item in enumerate(self.map.flow):
                    if 'ADJUSTMENTS' in item.split(':')[1].split(','):
                        self.map.flow[flow] = item.replace(
                            'ADJUSTMENTS', 'ADJUSTMENTS,INCOME')
                        break
                # Insert INCOME right after the first ADJUSTMENTS entry in
                # the phase sequence as well.
                (where, what) = [(x+1,y) for (x,y) in enumerate(self.map.seq)
                    if y.endswith('ADJUSTMENTS')][0]
                self.map.seq.insert(where, what.replace('ADJUSTMENTS','INCOME'))
        if self.rotate: self.error += ['CONTROL ROTATION IS INVALID IN PAYOLA']
        self.error += [rule + ' RULE IS INVALID IN PAYOLA'
            for rule in ('PROXY_OK', 'NO_CHECK') if rule in self.rules]
        for power in self.powers:
            if power.centers:
                # Python 2: the acceptance list may be a str or a unicode
                # string; anything else means it was never initialized.
                if type(power.accept) not in (str, unicode): power.initAccept()
                else: self.checkAccept(power)
            if power.balance is None and not power.isEliminated(False, True):
                self.error += ['NO BALANCE FOR ' + power.name]
        # for subvar in ('ZEROSUM', 'EXCHANGE', 'FLAT_TAX'):
        # if subvar in self.rules:
        # self.variant = subvar.lower() + ' ' + self.variant
        Game.validateStatus(self)
# ----------------------------------------------------------------------
    def transferCenter(self, loser, gainer, sc):
        """Record a supply-center change of ownership; under ZEROSUM, also
        log the treasury amount that must move with the center.

        A center taken from a neutral (no loser) is worth a flat 10 AgP.
        """
        if 'ZEROSUM' in self.rules:
            # --------------------------------------
            # Add to the "gained" and "lost" lists.
            # Each entry is an SC and an amount that
            # is to be moved from one treasury to
            # another.  The amount is the current
            # (pre-income) balance of the losing
            # power divided by the number of SC's he
            # held AT THE BEGINNING OF THE YEAR.
            # --------------------------------------
            if loser:
                # Python 2 '/' : integer division when both operands are ints.
                amt = ((loser.balance + loser.funds.get('+', 0)) /
                    (len(loser.centers) - len(loser.gained) + len(loser.lost)))
                loser.lost += [(sc, amt)]
            else: amt = 10
            gainer.gained += [(sc, amt)]
        Game.transferCenter(self, loser, gainer, sc)
# ----------------------------------------------------------------------
    def captureCenters(self):
        """Resolve supply-center captures, delegating per-power income and
        ledger reporting to parseSupplyCount."""
        return Game.captureCenters(self, self.parseSupplyCount)
# ----------------------------------------------------------------------
    def parseSupplyCount(self, power, word):
        """Apply year-end income to one power and mail it an income report.

        word[1] is the power's supply-center count.  Balance carries over
        unless NO_HOARDING is in effect; ZEROSUM capture/loss adjustments
        recorded by transferCenter are applied here.
        """
        if not self.phase: return
        # Under PAY_DUMMIES the variant is moot once no dummy holds a
        # center; optionally wipe the balance and skip income entirely.
        if 'PAY_DUMMIES' in self.rules and not [x for x in self.powers
                if x.isDummy() and x.centers]:
            if 'NO_HOARDING' in self.rules: power.balance = 0
            return
        # prev is False (== 0) under NO_HOARDING, the old balance otherwise.
        prev = 'NO_HOARDING' not in self.rules and power.balance
        bal = prev + power.income(int(word[1]))
        power.balance = (bal + sum([x[1] for x in power.gained])
            - sum([x[1] for x in power.lost]))
        if 'NO_LEDGERS' in self.rules: return
        if not (bal or power.gained or power.lost): return
        subject = 'Payola income report ' + self.phase.split()[1]
        desc = ('TAX INCOME', 'BRIBE PROFITS')['ZEROSUM' in self.rules]
        self.openMail(subject, 'ledgers')
        self.mail.write('OFFICIAL %s\n' % subject, 0)
        self.mail.write('PRESS TO %s %s\n' %
            (power.name, 'EAVESDROP' not in self.rules and 'QUIET' or ''))
        self.mail.write(
            'INCOME FOR %s FOR YEAR END %s\n%s\n'
            'BALANCE FORWARD%9d AgP\n'
            '%-15s%9d AgP\n' %
            (power.name, self.phase.split()[1],
            '=' * (25 + len(power.name + self.phase.split()[1])),
            prev, desc, bal - prev))
        # -------
        # ZeroSum
        # -------
        for sc, amt in power.gained:
            self.mail.write('CAPTURE OF %s %9d AgP\n' % (sc, amt))
        for sc, amt in power.lost:
            self.mail.write('LOSS OF %s %9d AgP\n' % (sc, -amt))
        self.mail.write(
            'CURRENT BALANCE%9d AgP\n'
            'ENDPRESS\n' % power.balance)
        self.mail.write('SIGNOFF\n', 0)
        self.mail.close()
        self.mail = None
# ----------------------------------------------------------------------
def finishPhase(self):
if 'ONE_TRANSFER' in self.rules:
for power in self.powers: power.sent = []
# ----------------------------------------------------------------------
    def transferCash(self, giver, receiver, amount, sc = None, receipt = 1):
        """Move AgP between two treasuries and mail activity statements.

        giver or receiver may be None (e.g. a neutral center's flat value).
        sc, when given, labels the transfer as a capture/loss of that
        supply center; receipt=0 suppresses the giver's copy of the mail.
        """
        # Process the receiver first, then the giver; 'where' is always
        # the other party of the pair.
        for (who, where) in ((receiver, giver), (giver, receiver)):
            if not who: continue
            # The receiver always gets mail; the giver only if a receipt
            # was requested.
            send = (who is receiver) or receipt
            # ----------------------------------------------------------
            # Start the mail.  The master signon is put in automatically
            # ----------------------------------------------------------
            if send:
                self.openMail('Payola transfer', 'ledgers')
                self.mail.write('OFFICIAL Payola funds transfer\n', 0)
                template = ('PRESS TO %%s%s\n' %
                    ('EAVESDROP' not in self.rules and ' QUIET' or ''))
                self.mail.write(template % who.name)
                # ------------------------------
                # Format and compose the message
                # ------------------------------
                if receiver is who:
                    if sc:
                        what = 'CAPTURE OF ' + sc
                        if where: what += ' FROM ' + where.name
                    elif 'ANON_TRANSFER' in self.rules:
                        what = 'TRANSFER FROM SOMEONE'
                    else: what = 'TRANSFER FROM ' + where.name
                elif sc: what = 'LOSS OF %s TO ' % sc + receiver.name
                else: what = 'TRANSFER TO ' + receiver.name
                self.mail.write(
                    'ACCOUNT ACTIVITY FOR %s\n%s\n'
                    'BALANCE FORWARD %4d AgP\n'
                    '%-27s%4d AgP\n' % (who.name, '=' * (21 + len(who.name)),
                    who.balance, what, amount))
            # ---------------------
            # Update player balance
            # ---------------------
            if receiver is who: who.balance += amount
            else: who.balance -= amount
            if send:
                self.mail.write(
                    'CURRENT BALANCE %4d AgP\n'
                    'ENDPRESS\n' % who.balance)
                self.mail.write('SIGNOFF\n', 0)
                self.mail.close()
                self.mail = None
# ----------------------------------------------------------------------
    def updateOrders(self, power, offers):
        """Replace a power's offer sheet with newly submitted offer lines.

        A lone NMR or CLEAR token (optionally bracketed) wipes the sheet.
        Returns 1 when nothing at all was submitted, the error list when
        validation fails, and otherwise saves (and possibly processes)
        the game.
        """
        # ---------------------------------------------------------
        # Offers for controlled powers should not be included here,
        # as each power has its own purse, acceptance list, etc.
        # ---------------------------------------------------------
        hadOffers, hasOffers = power.offers, None
        for line in filter(None, offers):
            word = line.strip().split()
            if not word: continue
            # Strip one optional leading '(' / '[' and trailing ')' / ']'
            # from a single-word line before testing for NMR/CLEAR.
            if len(word) == 1 and word[0][word[0][:1] in '([:len(
                word[0]) - (word[0][-1:] in '])')].upper() in ('NMR', 'CLEAR'):
                power.offers, power.sheet, hasOffers = [], [], 0
            else:
                # First real offer line: start a fresh sheet.
                if hasOffers is None:
                    power.offers, power.sheet = [], []
                offer = self.parseOffer(power, line)
                if offer:
                    hasOffers = 1
                    power.sheet += [offer]
        # ------------------------------------------
        # Make sure the player can update his orders
        # ------------------------------------------
        if hasOffers is None: return 1
        self.validateOffers(power)
        self.canChangeOrders(hadOffers, power.offers)
        if self.error: return self.error
        # -------------------------------------------
        # Clear CD flag, even if orders were cleared.
        # -------------------------------------------
        power.cd = 0
        if hasOffers:
            self.logAccess(power, '', 'Offers updated')
            self.process()
        else:
            self.logAccess(power, '', 'Offers cleared')
            self.save()
# ----------------------------------------------------------------------
def getOrders(self, power):
if self.phaseType in 'RA': return '\n'.join(power.adjust)
return '\n'.join([x['PAY_DUMMIES' in self.rules and x.startswith('0 :')
<< 2:] for x in power.sheet])
# ----------------------------------------------------------------------
def validateOffers(self, power):
if 'PAY_DUMMIES' not in self.rules: return
if power.isDummy(): return
if (power.offers and [x for x in power.units
if x not in [y.unit for y in power.offers]]
and 'DEFAULT_UNORDERED' not in self.rules):
self.error += ['ALL OWNED UNITS NOT ORDERED FOR %s' % power.name]
# ----------------------------------------------------------------------
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
class IndexView(generic.TemplateView):
    """Static landing page rendered from the app/index.html template."""
    template_name = 'app/index.html'
# Create your views here.
|
class Solution(object):
    """Conway's Game of Life using an untouched snapshot of the board."""

    def gameOfLife(self, board):
        """Advance *board* one generation in place (O(rows*cols) extra space).

        All liveness tests read the frozen snapshot, never the board being
        mutated, so every cell is judged against the original generation.
        """
        height, width = len(board), len(board[0])
        snapshot = [row[:] for row in board]
        offsets = ((1, 0), (1, -1), (0, -1), (-1, -1),
                   (-1, 0), (-1, 1), (0, 1), (1, 1))
        for r in range(height):
            for c in range(width):
                # Count live neighbors in the ORIGINAL generation.
                alive = sum(1 for dr, dc in offsets
                            if 0 <= r + dr < height and 0 <= c + dc < width
                            and snapshot[r + dr][c + dc] == 1)
                if snapshot[r][c] == 1 and (alive < 2 or alive > 3):
                    board[r][c] = 0    # under- or over-population
                elif snapshot[r][c] == 0 and alive == 3:
                    board[r][c] = 1    # reproduction
class Solution(object):
    """Conway's Game of Life in O(1) extra space using in-place markers.

    First-pass encoding (abs(cell) == 1 still means "was alive"):
      1 -> -1 : was alive, dies
      0 ->  2 : was dead, becomes alive
    """

    def gameOfLife(self, board):
        """Advance *board* one generation in place."""
        rows, cols = len(board), len(board[0])
        # Hoisted out of the per-cell loops: the neighbor offsets never
        # change (the original rebuilt this list rows*cols times).
        direction = [(-1, -1), (1, 1), (-1, 0), (1, 0),
                     (0, -1), (0, 1), (-1, 1), (1, -1)]
        for i in range(rows):
            for j in range(cols):
                res = 0
                for a, b in direction:
                    if 0 <= i + a < rows and 0 <= j + b < cols and abs(board[i + a][j + b]) == 1:
                        res += 1
                if board[i][j] == 1 and (res < 2 or res > 3):
                    board[i][j] = -1   # live cell dies
                if board[i][j] == 0 and res == 3:
                    board[i][j] = 2    # dead cell comes alive
        # Second pass: collapse the transition markers to final 0/1 states.
        for i in range(rows):
            for j in range(cols):
                if board[i][j] == 2:
                    board[i][j] = 1
                if board[i][j] == -1:
                    board[i][j] = 0
"""
Lets you practice DEFINING and CALLING
FUNCTIONS
Authors: Many, many people over many, many years.
David Mutchler, Valerie Galluzzi, Mark Hays and Amanda Stouder
wrote this version. September 2015.
Muqing Zheng made modifications to this module.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# Also look for other TODO's LATER in this file!
import turtle
import random
def main():
    """
    Makes a window (turtle.Screen),
    calls the other functions in this module to demo them,
    and waits for the user to click anywhere in the window to close it.
    """
    window = turtle.Screen()
    turtle1()
    turtle4()   # student-added turtle runs right after turtle1 (TODO 2d)
    turtle3()
    # turtle2 is called twice on purpose: two triangle turtles appear,
    # each with its own random backward distance.
    turtle2()
    turtle2()
    window.exitonclick()
def turtle1():
    """ Constructs a square Turtle and asks it to do some things. """
    ada = turtle.Turtle('square')
    # NOTE(review): these assign the turtle's private attributes directly,
    # as the exercise text below instructs; the supported public API is
    # ada.pencolor(...), ada.fillcolor(...) and ada.pensize(...).
    ada._pencolor = 'aqua'
    ada._fillcolor = 'yellow'
    ada._pensize = 30
    ada.begin_fill()
    ada.circle(150)
    ada.end_fill()
def turtle2():
    """ Constructs a triangle Turtle and asks it to do some things. """
    grace = turtle.Turtle('triangle')
    # Private-attribute styling, per the exercise instructions.
    grace._pensize = 15
    grace._fillcolor = 'magenta'
    grace._pencolor = 'blue'
    distance = random.randrange(-150, 300)  # Choose a RANDOM distance
    grace.backward(distance)
    grace.left(90)
    grace.forward(200)
    grace.begin_fill()
    grace.circle(25)
    grace.end_fill()
def turtle3():
    """ Constructs a classic Turtle and asks it to do some things. """
    maja = turtle.Turtle()
    # Private-attribute styling, per the exercise instructions.
    maja._pencolor = 'black'
    maja._pensize = 10
    maja.forward(300)
    maja.begin_fill()
    maja.circle(50)
    maja.end_fill()
def turtle4():
    """ Student-added turtle (TODO 2c): brown pen, black fill, arc walk. """
    luke = turtle.Turtle()
    luke._pensize = 20
    luke._pencolor = 'brown3'
    luke.fillcolor('black')    # public API used here, unlike the others
    luke.forward(200)
    luke.left(30)
    luke.back(50)
    luke.begin_fill()
    luke.circle(200)
    luke.end_fill()
# ----------------------------------------------------------------------
# DONE: 2.
#
# a. READ the code above.
# Be sure you understand:
# -- How many functions are defined above? (Answer: 4)
# -- For each function definition, where does that function
# definition begin? Where does it end?
# -- How many times does main call the turtle1 function?
# (Answer: 1)
# -- How many times does main call the turtle2 function?
# (The answer is NOT 1.)
# -- What line of code calls the main function?
# (Answer: look at the LAST line of this module, far below.)
# ** ASK QUESTIONS if you are uncertain. **
#
# b. RUN this module.
# RELATE what is DRAWN to the CODE above. Be sure you understand:
# -- WHEN does the code in main run?
# -- WHEN does the code in turtle1 run? turtle2? turtle3?
# -- For each of the above, WHY does that code run when it does?
# ** ASK QUESTIONS if you are uncertain. **
#
# c. Define another function,
# immediately below the end of the definition of turtle3 above.
# Name your new function turtle4.
#
# The Python "pep8" coding standard says to leave exactly 2 blank
# lines between function definitions, so be sure to do so.
#
# Your new function should:
# -- Define a Turtle (as the turtle3 and other functions did).
# -- Set your Turtle's
# _pencolor
# (note the underscore) to some color.
# The COLORS.pdf file included lists all legal color-names.
# -- Make your Turtle move around a bit.
# ** Nothing fancy is required. **
#
# BTW, if you see a RED mark at one of your lines, that means that
# there is a SYNTAX (notation) error at that line or elsewhere.
# Get help as needed to fix any such errors.
#
# d. Add a line to main that CALLS your new function immediately
# AFTER main calls turtle1. So the Turtle from turtle1 should
# move, then your Turtle should move, then the other Turtles.
#
# Run this module. Check that there is another Turtle (yours)
# that uses the color you chose and moves around a bit.
# If your code has errors (shows RED in the Console window)
# or does not do what it should, get help as needed to fix it.
#
# As always COMMIT your work (which turns it in) as often as you want,
# but for sure after you have tested it and believe that it is correct,
# by selecting this module
# and doing SVN ~ Commit.
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()  # runs on import as well as direct execution (no __main__ guard, per course template)
|
#!/usr/bin/python
import fileinput
import re
from socket import inet_aton
import struct
import socket
from sys import stdin
# Module-level state shared by the q1..q4 checks below.
originalDomains = []
blockedDomains = []
blockedHosts = {}
blockedKeywords = []
headerString = ''
payloadString = ''
#fetch all blocked domains
# NOTE(review): [0:-2] strips TWO trailing characters per line (e.g.
# '\r\n'), while keywords.txt below strips only one -- confirm the two
# files really use different line endings.
for line in open('./domains.txt', 'r'):
    blockedDomains.append(line[0:-2].lower())
    originalDomains.append(line[0:-2])
for line in open('./keywords.txt', 'r'):
    blockedKeywords.append(line[0:-1])
#resolve all blocked domains to their hosts
for domain in blockedDomains:
    rawAdresses = socket.getaddrinfo(domain, 80)
    for entry in rawAdresses:
        ip = entry[4][0];
        # Keep only IPv4 dotted-quad literals from the resolver results.
        match = re.match('\d+\.\d+\.\d+\.\d+', ip)
        if match:
            if domain in blockedHosts:
                blockedHosts[domain].append(ip)
            else:
                blockedHosts[domain] = [ip]
# De-duplicate each domain's resolved address list.
for domain in blockedDomains:
    blockedHosts[domain] = list(set(blockedHosts[domain]))
#print blockedHosts
def fixCaps(lowerCase):
    """Map a lower-cased blocked domain back to its originally-cased
    spelling; returns None when the name is not in the blocklist."""
    matches = (name for name in originalDomains if name.lower() == lowerCase)
    return next(matches, None)
def q4(header, payload):
    """Report blocked keywords appearing anywhere in the packet payload.

    Prints a '[Censored Keyword - Payload]' line with the source ip:port
    and the sorted matching keywords; returns True iff any matched.
    """
    match = re.match('\s*(\d+\.\d+\.\d+\.\d+)\.(\d+) > \d+\.\d+\.\d+\.\d+\.\d+', header)
    # Tokenize the payload the same way keywords are written: runs of
    # letters, digits and hyphens.
    keywords = re.findall('[a-zA-Z0-9-]+', payload)
    result = []
    for item in keywords:
        for blocked in blockedKeywords:
            if blocked == item:
                result.append(blocked)
    # De-duplicate and sort for deterministic output.
    result = list(set(result))
    result.sort()
    if len(result) > 0:
        print '[Censored Keyword - Payload]: src:%s:%s, keyword(s):' \
            %(match.group(1),match.group(2)) + ', '.join(result)
        return True
    else: return False
def q3(payload):
    """Report blocked keywords in an HTTP request URL (Host + GET path).

    Returns True iff the payload holds both a Host header and a GET line
    and at least one blocked keyword appears in the payload.
    NOTE(review): the payload appears to be the ASCII pane of a tcpdump
    hex dump, where non-printables render as '.' -- hence the '\.'
    separators in these patterns; confirm against the capture format.
    """
    # lowercase[a-z], uppercase[A-Z], numeral[0-9] and hyphen[-]
    match = re.match('.*?[H,h]ost:\.(([^\.]+\.)+)',payload)
    get = re.match('.*?GET\.([^\.]+\.)',payload)
    if match and get:
        # Trim the trailing '.' separator from each captured fragment.
        host = match.group(1)[:-1]
        url = get.group(1)[:-1]
        keywords = re.findall('[a-zA-Z0-9-]+', payload)
        result = []
        for item in keywords:
            for blocked in blockedKeywords:
                if blocked == item:
                    result.append(blocked)
        result = list(set(result))
        result.sort()
        if len(result) > 0:
            print '[Censored Keyword - URL]: URL:%s, keyword(s):' \
                %(host+url) + ', '.join(result)
            return True
    else: return False
def q2(header,payload):
    """Report connection attempts to blocked domains.

    Matches either by Host header name (after stripping a 'www.' prefix)
    or by the destination appearing among the domain's resolved IPs.
    Returns True iff a censored-domain connection was reported.
    """
    # print payload
    flag = False
    match = re.match('.*?[H,h]ost:\.(([^\.]+\.)+)',payload)
    if match:
        address = match.group(1)[:-1]
        match = re.match('\s*(\d+\.\d+\.\d+\.\d+)\.(\d+) > (\d+\.\d+\.\d+\.\d+)\.\d+',header)
        source = match.group(1)
        port = match.group(2)
        #print address
        # Normalize: 'www.example.com' -> 'example.com'.
        if (address[0:4] == "www."):
            address = address[4:];
        if address in blockedHosts.keys():
            print '[Censored domain connection attempt]: src:%s:%s, host:%s, domain:%s' \
                %(source,port,match.group(3),fixCaps(address))
            flag = True
        # The Host value may itself be an IP of a blocked domain.
        for domain in blockedHosts:
            if address in blockedHosts[domain]:
                print '[Censored domain connection attempt]: src:%s:%s, host:%s, domain:%s' \
                    %(source,port,address,fixCaps(domain))
                flag = True
                break
    return flag
def q1(header):
    """Report DNS lookups of blocked domains.

    On a DNS reply for a blocked name, prints the querying source and the
    sorted A-record IPs.  Returns True for ANY DNS traffic (matched or
    not) so later checks skip DNS packets; False otherwise.
    """
    ###Known censored domains###
    #does the string match a DNS reply format?
    #xxx.xxx.xxx.xxx.xxxxxx > xxx.xxx.xxx.xxx.xxxxxx ... q: A? (xxx.xxx.xxx.......).
    match = re.match(\
        '\s*(\d+\.\d+\.\d+\.\d+)\.(\d+) > \d+\.\d+\.\d+\.\d+\.\d+.*?q: A\? (([^\.\s]+\.)+) ',\
        header)
    if match:
        if match.group(3)[:-1] in blockedDomains:
            #grabs everything before nameservers (if any are returned)
            ns = re.match('.*?ns', header)
            if ns:
                relevantString = ns.group()
            else:
                relevantString = header
            ips = re.findall('A (\d+\.\d+\.\d+\.\d+)', relevantString)
            # Sort numerically by packed 32-bit address, not as strings.
            ips = sorted(ips, key=lambda ip: struct.unpack('!L', inet_aton(ip))[0])
            print '[Censored domain name lookup]: domain:%s, src:%s:%s, IP(s):' \
                %(fixCaps(match.group(3)[:-1]),match.group(1),match.group(2)) + ', '.join(ips)
            return True
    #check if it's a DNS request, if so don't filter further
    match = re.match(\
        '\s*(\d+\.\d+\.\d+\.\d+)\.(\d+) > \d+\.\d+\.\d+\.\d+\.\d+.*?A\? (([^\.\s]+\.)+) ',\
        header)
    if match:
        return True
    else: return False
def processPacket(header,payload):
    """Feed one reassembled packet through the censor checks q1..q4.

    The checks short-circuit: the first one that reports a match stops
    further inspection of this packet.  Empty packets are ignored.
    """
    if header == '' and payload == '':
        return
    (q1(header) or q2(header, payload) or q3(payload)
        or q4(header, payload))
#parse out the header and payload from the input, and process once have entire packet
# Input is expected to be tcpdump output on stdin: a timestamp line
# (carrying the packet length), a header line (src > dst), then hex-dump
# payload rows.
# NOTE(review): 'length' is only bound by a timestamp line -- assumes one
# always precedes the first payload row; confirm for the capture format.
while True:
    line = stdin.readline()
    if not line:
        break
    # print line
    ts = re.match('\d+:\d+:\d+.\d+.*length (\d+)', line)
    header = re.match('\s*\d+\.\d+\.\d+\.\d+\.\d+ > \d+\.\d+\.\d+\.\d+\.\d+', line)
    payload = re.match('\s*(0x.*?):', line)
    if ts:
        # New packet: remember its length and reset the accumulators.
        length = ts.group(1)
        headerString = ''
        payloadString = ''
    if header:
        headerString = line
    if payload:
        # Column 51 onward holds the ASCII pane of the hex dump.
        payloadString += line[51:-1]
        # The row's hex offset; once it reaches the advertised packet
        # length, the packet is complete and can be processed.
        value = int(payload.group(1), 0)
        if value >= int(length)-2:
            #print "GOT:" + payloadString
            processPacket(headerString, payloadString)
            length = 0
|
# Collect unique integers from the user until they answer 'n' to the
# continue prompt, then display them in ascending order.
l = list()
while True:
    cont = ' '
    n = int(input('Digite um valor: '))
    if n not in l:
        l.append(n)
        print('Valor adicionado com sucesso!')
    else:
        print('Esse número já está no banco de dados.')
    # Re-prompt until the lower-cased first letter is 's' or 'n'.
    # NOTE(review): an empty answer raises IndexError on [0] -- confirm
    # that is acceptable for this exercise.
    while cont not in 'ns':
        cont = str(input('Quer continuar? [S/N] ')).lower()[0]
    if cont == 'n': break
l.sort()
print(f'Você digitou os valores {l}')
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-03 21:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: renames Resource.org_type to resource_type and
    points ResourceType.created_by at the configured user model."""

    dependencies = [
        ('resources', '0002_auto_20180302_1403'),
    ]

    operations = [
        migrations.RenameField(
            model_name='resource',
            old_name='org_type',
            new_name='resource_type',
        ),
        migrations.AlterField(
            model_name='resourcetype',
            name='created_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resource_types', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
题目:有一分数序列:2/1,3/2,5/3,8/5,13/8,21/13...求出这个数列的前20项之和。
程序分析:请抓住分子与分母的变化规律。
"""
# First approach: build the running list of ratios iteratively.
# NOTE(review): despite its name, 'denominador' holds the NUMERATOR of
# each fraction (2/1, 3/2, 5/3, ...) and 'molecule' the denominator.
denominador = 2.0
molecule = 1.0
# 'str' shadows the built-in str type for the rest of this script.
str = [denominador/molecule]
print str
for i in range(2, 21):
    # Fibonacci-style step: next numerator = old numerator + denominator,
    # next denominator = old numerator.
    molecule, denominador = denominador, denominador + molecule
    # print "%3d , %3d , %3d" % (i, denominador, molecule)
    str.append(denominador/molecule)
    print str
print sum(str)
# Second approach: same 20-term series, summed via reduce() (Python 2).
a = 2.0
b = 1.0
l = []
l.append(a / b)
for n in range(1,20):
    b,a = a,a + b
    l.append(a / b)
print reduce(lambda x,y: x + y,l)
|
def fn_sum(args):
    """Return a zero-argument closure that sums *args* when called.

    The summation is deferred: nothing is computed until the returned
    function is invoked.
    """
    def fn():
        return sum(args)
    return fn
x = fn_sum([1, 3, 4, 5, 6])
# print(x())
# Each fn_sum call builds a brand-new function object, so the two
# closures below are distinct and compare unequal: prints False.
a = fn_sum(1)
b = fn_sum(1)
print(a == b)
# Returning closures from a function
def wrapper():
    """Build three closures that all close over the SAME loop variable.

    Classic late-binding pitfall: each inner() reads i when CALLED, after
    the loop has ended with i == 3, so every closure returns 9.
    """
    _list = []
    for i in range(1, 4):
        def inner():
            return i * i
        _list.append(inner)
    return _list
f1, f2, f3 = wrapper()
# All three closures share one cell for i (final value 3): prints 9.
print(f1())
# range() is a lazy sequence type in Python 3: prints <class 'range'>.
print(type(range(4)))
def wrapper():
    """Return three closures, each bound to its OWN value of the index.

    The inner factory gives every closure a private cell for j, avoiding
    the late-binding pitfall: calling them yields 0, 1 and 4.
    """
    def inner(j):
        def out():
            return j * j
        return out
    return [inner(k) for k in range(3)]
f1, f2, f3 = wrapper()
# Each closure received its own j via the factory: prints 0 1 4.
print(f1(), f2(), f3())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pykeyboard import PyKeyboard
from pymouse import PyMouse
import time
from page.login_page import LoginPge
from page.login_page import LoginPge
from selenium import webdriver
driver = webdriver.Firefox()
a = LoginPge(driver)
a.login()
# Open the bug-creation page directly (skipping menu navigation).
driver.get("http://127.0.0.1:82/zentao/bug-create-1-0-moduleID=0.html")
# The page can load slowly and cause errors, so sleep before interacting.
time.sleep(3)
# Open the editor's image-insertion dialog.
driver.find_element_by_css_selector(".ke-toolbar-icon.ke-toolbar-icon-url.ke-icon-image").click()
time.sleep(3)
# Switch to the "local upload" tab of the dialog.
driver.find_element_by_xpath("html/body/div[3]/div[1]/div[2]/div/div[1]/ul/li[2]").click()
# driver.find_element_by_css_selector(".ke-tabs-li.ke-tabs-li-on").click()
time.sleep(2)
# Click the Browse (file upload) button.
driver.find_element_by_css_selector(".ke-inline-block.ke-upload-button").click()
time.sleep(2)
# The native file dialog opens focused on Cancel; Selenium cannot drive
# native widgets, so send OS-level keystrokes instead.
k = PyKeyboard()
path="e:\hello.xlsx"
# Type the file path one character at a time.
for i in path:
    k.tap_key(i)
time.sleep(2)
k.tap_key(k.enter_key)
time.sleep(2)
k.tap_key(k.enter_key)
time.sleep(2)
k.tap_key(k.enter_key)
time.sleep(1)
# Confirm the dialog's OK button.
driver.find_element_by_xpath("html/body/div[3]/div[1]/div[3]/span[1]/input").click()
# Tap (press then release) the Enter key
# k.tap_key(k.enter_key)
# Send Tab
# k.press_key(k.tap_key)
# k.release_key(k.tap_key)
# Send Enter
|
# coding: utf-8
# In[54]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import imutils
import cv2
import numpy as np
# In[55]:
class SimplePreprocessor:
    """Resize images to a fixed width x height, ignoring aspect ratio."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target size and the OpenCV interpolation method to use.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        """Return *image* resized to (width, height).

        Fix: the configured interpolation is now actually passed to
        cv2.resize -- previously self.inter was stored but never used,
        so the inter parameter silently had no effect.
        """
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
# In[56]:
class SimpleDatasetLoader:
    """Load images from disk, run them through a preprocessor chain, and
    derive each label from the image's parent directory name."""

    def __init__(self, preprocessors=None):
        # Default to an empty chain (avoids the mutable-default pitfall).
        self.preprocessors = preprocessors
        if self.preprocessors is None:
            self.preprocessors=[]

    def load(self, imagePaths, verbose = -1):
        """Return (data, labels) as numpy arrays.

        verbose > 0 prints progress every `verbose` images; the default
        of -1 disables progress output.
        """
        # Fix: 'os' is used below but was never imported at file level,
        # which raised NameError on the first call.
        import os
        data = []
        labels = []
        for (i, imagePath) in enumerate(imagePaths):
            image = cv2.imread(imagePath)
            # Label comes from the parent directory: .../{label}/{file}.jpg
            label = imagePath.split(os.path.sep)[-2]
            if self.preprocessors is not None:
                for p in self.preprocessors:
                    image = p.preprocess(image)
            #print(image.shape)
            data.append(image)
            labels.append(label)
            if verbose > 0 and i>0 and (i+1)%verbose==0:
                print("[INFO] processed {}/{}".format(i+1, len(imagePaths)))
        return(np.array(data), np.array(labels))
# Load the raw images, resize each to 32x32, and flatten to 3072-dim vectors.
image_paths = list(paths.list_images('/home/padmach/data/DogVsCats/train'))

resizer = SimplePreprocessor(32, 32)
loader = SimpleDatasetLoader(preprocessors=[resizer])
(data, labels) = loader.load(image_paths, verbose=500)

# Flatten each 32x32x3 image into a single feature vector (32*32*3 = 3072).
data = data.reshape((data.shape[0], 3072))

# Encode the directory-name labels as integers for the classifier.
le = LabelEncoder()
labels = le.fit_transform(labels)

# Fixed-seed 75/25 train/test split for reproducibility.
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)

# Fit a 2-nearest-neighbour classifier and report test-set metrics.
model = KNeighborsClassifier(n_neighbors=2)
model.fit(trainX, trainY)
print(classification_report(testY, model.predict(testX), target_names=le.classes_))
|
"""
netcdf utilities
"""
|
#!/usr/bin/python
#####################
#DEFAULT WHITE LIGHT#
#Hue: 8731 #
#Brightness: 254 #
#Saturation: 106 #
#####################
import time as t
from phue import Bridge
from datetime import *
from astral import Astral
#establish location for sunset and sunrise times
city_name = 'Buffalo'
a = Astral()
# 'civil' solar depression: dawn/dusk are when the sun is 6 degrees below the horizon.
a.solar_depression = 'civil'
city = a[city_name]
print('Information for %s/%s\n' % (city_name, city.region))
timezone = city.timezone
print('Timezone: %s' % timezone)
print('Latitude: %.02f; Longitude: %.02f\n' % \
(city.latitude, city.longitude))
# Today's sun event times, in local time.
# NOTE(review): computed once at startup and never refreshed, so the schedule
# drifts if the script keeps running across multiple days - confirm intended.
sun = city.sun(date = datetime.now() ,local = True)
print('Dawn: %s' % str(sun['dawn']))
print('Sunrise: %s' % str(sun['sunrise']))
print('Noon: %s' % str(sun['noon']))
print('Sunset: %s' % str(sun['sunset']))
print('Dusk: %s' % str(sun['dusk']))
#establish connection to bridge
b=Bridge('10.10.22.213')
b.connect()
#create array of light objects
lights = b.lights
def startColorLoop():
    """Turn group 1 on at full brightness/saturation and start the colour loop."""
    for attribute, value in (('on', True),
                             ('bri', 254),
                             ('hue', 255),
                             ('sat', 255),
                             ('effect', 'colorloop')):
        b.set_group(1, attribute, value)
def stopColorLoop():
    """Stop the colour loop on group 1 and restore the default white settings."""
    for attribute, value in (('effect', 'none'),
                             ('bri', 254),
                             ('hue', 8731),
                             ('sat', 106)):
        b.set_group(1, attribute, value)
def setDayScene():
    """Run the 'Outdoor' scene named after today's weekday."""
    weekdays = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")
    today = weekdays[datetime.today().weekday()]
    b.run_scene('Outdoor', today, transition_time=1)
#startColorLoop()
#t.sleep(30)
#stopColorLoop()
#setDayScene()
# Main scheduler: poll the clock and trigger light changes around sun events.
now = datetime.now()
sunset = sun['sunset']
dusk = sun['dusk']
sunrise = sun['sunrise']
# NOTE(review): minute arithmetic (e.g. sunset.minute - 15) goes negative when
# the event falls in the first 15 minutes of an hour, so that trigger would
# never fire on such days - consider timedelta math instead.
while True:
    now = datetime.now()
    if (now.hour == sunset.hour and now.minute == (sunset.minute - 15)):
        startColorLoop()
        # BUG FIX: the time module is imported as 't'; 'time.sleep(60)'
        # raised NameError.  Sleep past the trigger minute to avoid refiring.
        t.sleep(60)
    if (now.hour == dusk.hour and now.minute == (dusk.minute - 15)):
        stopColorLoop()
        setDayScene()
    if (now.hour == 0 and now.minute == 1):
        setDayScene()
    if (now.hour == sunrise.hour and now.minute == (sunrise.minute + 15)):
        b.set_group(1, 'on', False)
    # FIX: avoid a 100% CPU busy-wait; one-second resolution is ample for
    # minute-granularity checks.
    t.sleep(1)
|
# -*- coding: utf-8 -*-
import random
import string
from timeit import default_timer as timer
from selection_sort import SelectionSort
print "-"*10 + "sorting numbers" + "_"*10
items = []
for i in range(0,10):
items.append(random.randint(2,999))
print "original items: %r" % items
ssort = SelectionSort(items)
# calculate execution time for our selection sort method
start = timer()
ssort.sort()
end = timer()
duration1 = end - start
# calculate execution time for python built-in sort method
start = timer()
items.sort()
end = timer()
duration2 = end - start
assert ssort.items == items
print "sorted items: %r" % ssort.items
print "Duration: our selection sort method - %ds, python builtin sort - %ds" % (duration1, duration2)
print "-"*10 + "sorting alpha characters" + "_"*10
items=[]
for i in range(0,10):
items.append(random.choice(string.ascii_letters))
print "original items: %r" % items
ssort = SelectionSort(items)
ssort.sort()
items.sort()
assert ssort.items == items
print "sorted items: %r" % ssort.items
print "-"*10 + "sorting strings" + "_"*10
items=[]
for i in range(0,10):
items.append("".join(random.choice(string.ascii_letters+string.digits) for s in range(0,10) ))
print "original items: %r" % items
ssort = SelectionSort(items)
ssort.sort()
items.sort()
assert ssort.items == items
print "sorted items: %r" % ssort.items |
# Sum the first g terms of an arithmetic progression with first term a
# and common difference d, read as three space-separated integers.
g, a, d = map(int, input().split())
total = 0
term = a
for _ in range(g):
    total += term
    term += d
print(total)
|
from django.shortcuts import render
from django.http import HttpRequest,HttpResponseRedirect
from PIL import Image
from io import BytesIO
from django.core.files import File
from .forms import ImageForm
from .models import ImgUpload
from login.decorators import login_required
@login_required
def upload(request):
    """Image upload page.

    GET: render an empty upload form.
    POST: validate and save the image, then render the success page with the
    human-readable file size and the bare file name.
    """
    userid = request.session['userid']
    if request.method == 'GET':
        form = ImageForm()
        return render(request, 'upload/upload.html', {'form': form, 'userid': userid})
    elif request.method == 'POST':
        form = ImageForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            img_obj = form.instance
            file_size = convert(img_obj.imgupload_file.size)
            # NOTE(review): file_name[6] assumes a fixed URL depth
            # (/media/uploads/YYYY/MM/DD/<name>) - confirm against MEDIA_URL.
            file_name = (img_obj.imgupload_file.url).split('/')
            # compress(img_obj.imgupload_file.url)
            print(file_name)
            return render(request, 'upload/success.html', {'form': form, 'img_obj': img_obj, 'file_size': file_size, 'file_name': file_name[6]})
        # BUG FIX: an invalid form previously fell through and returned None,
        # making Django raise a 500; re-render the form with its errors.
        return render(request, 'upload/upload.html', {'form': form, 'userid': userid})
# def compress(image):
# im = Image.open("../media/uploads/2020/11/10/sample.jpg")
# # create a BytesIO object
# im_io = BytesIO()
# # save image to BytesIO object
# im.save(im_io, 'JPEG', quality=70)
# # create a django-friendly Files object
# new_image = File(im_io, name=image.name)
# return new_image
def convert(byte):
    """Convert a byte count into a human-readable size string.

    Args:
        byte: number of bytes (anything float() accepts).

    Returns:
        str such as '1.0 Byte', '500.0 Bytes', '1.50 KB', ... up to TB.
        Values below 1 KB keep the float formatting of the input.
    """
    B = float(byte)
    KB = float(1024)
    MB = float(1024 ** 2)  # 1,048,576
    GB = float(1024 ** 3)  # 1,073,741,824
    TB = float(1024 ** 4)  # 1,099,511,627,776

    if B < KB:
        # BUG FIX: '0 == B > 1' chains to (0 == B and B > 1), which is always
        # False, so the original always printed 'Byte'.  Pluralize properly.
        return '{0} {1}'.format(B, 'Byte' if B == 1 else 'Bytes')
    elif KB <= B < MB:
        return '{0:.2f} KB'.format(B / KB)
    elif MB <= B < GB:
        return '{0:.2f} MB'.format(B / MB)
    elif GB <= B < TB:
        return '{0:.2f} GB'.format(B / GB)
    else:
        # TB <= B: everything at or above a terabyte.
        return '{0:.2f} TB'.format(B / TB)
#From the console, run the following
#pip install numpy
#pip install scipy
#pip install scikit-learn
#pip install matplotlib
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as mp
from pylab import show
# Visualize a 1-NN decision region for two hand-picked pixel features of the
# digits '1' and '5' from data.csv (row format: label followed by features).
data = np.loadtxt("data.csv")
#shuffle the data and select training and test data
np.random.seed(100)
np.random.shuffle(data)
features = []
digits = []
# Keep only the rows labelled 1 or 5; labels stored as strings ("1.0"/"5.0").
for row in data:
    if(row[0]==1 or row[0]==5):
        features.append(row[1:])
        digits.append(str(row[0]))
#select the proportion of data to use for training
numTrain = int(len(features)*.2)
trainFeatures = features[:numTrain]
testFeatures = features[numTrain:]
trainDigits = digits[:numTrain]
testDigits = digits[numTrain:]
#create the model
#https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
# Reduce each sample to two features (pixel indices 72 and 88) for 2-D plotting.
X = []
Y = []
simpleTrain = []
colors = []
for index in range(len(trainFeatures)):
    X.append(trainFeatures[index][72])
    Y.append(trainFeatures[index][88])
    simpleTrain.append([trainFeatures[index][72],trainFeatures[index][88]])
    if(trainDigits[index]=="1.0"):
        colors.append("b")
    else:
        colors.append("r")
#https://matplotlib.org/api/_as_gen/matplotlib.pyplot.scatter.html
#this just shows the points
#mp.scatter(X,Y,s=3,c=colors)
# show()
model = KNeighborsClassifier(n_neighbors=1)
model.fit(simpleTrain,trainDigits)
# Sample a 200x200 grid over [-1, 1) x [-1, 1) and colour each cell by the
# predicted class to draw the decision regions.
xPred = []
yPred = []
cPred = []
for xP in range(-100,100):
    xP = xP/100.0
    for yP in range(-100,100):
        yP = yP/100.0
        xPred.append(xP)
        yPred.append(yP)
        # NOTE(review): region colours are the inverse of the training-point
        # colours above ("1.0" -> "r" here, "b" above).  Possibly intentional
        # for contrast, but confirm it is not a copy/paste swap.
        if(model.predict([[xP,yP]])=="1.0"):
            cPred.append("r")
        else:
            cPred.append("b")
mp.scatter(xPred,yPred,s=3,c=cPred,alpha=.2)
show()
|
# Write a Python program to generate groups of five consecutive numbers in a list
def groupOfFive():
    """Print the numbers 0..24 as five consecutive groups of five."""
    numbers = [value for value in range(0, 25)]
    for begin in range(0, 25, 5):
        print(numbers[begin:begin + 5])


groupOfFive()
import random
def generate_verification_code_two():
    """Return a 6-character code: (digit, uppercase, lowercase) twice.

    Positions 0/3 are digits, 1/4 uppercase letters, 2/5 lowercase letters.
    """
    code_list = []
    for i in range(1, 3):
        # Random digit 0-9.
        random_num = random.randint(0, 9)
        # Random uppercase letter: ASCII 65..90 is 'A'..'Z'.
        a = random.randint(65, 90)
        # BUG FIX: the lowercase range stopped at 120 ('x'), silently
        # excluding 'y' and 'z' even though the comment promised 'a'..'z'
        # (ASCII 97..122).
        b = random.randint(97, 122)
        random_uppercase_letter = chr(a)
        random_lowercase_letter = chr(b)
        code_list.append(str(random_num))
        code_list.append(random_uppercase_letter)
        code_list.append(random_lowercase_letter)
    verification_code = ''.join(code_list)
    return verification_code
print(generate_verification_code_two())
def generate_verification_code(len=6):
    """Return a random code of *len* distinct characters from 0-9, A-Z, a-z."""
    # Build the candidate pool explicitly: digits, then upper, then lower case.
    pool = [str(digit) for digit in range(10)]
    pool.extend(chr(code) for code in range(65, 91))   # 'A'..'Z'
    pool.extend(chr(code) for code in range(97, 123))  # 'a'..'z'
    # sample() draws without replacement, so characters never repeat.
    return ''.join(random.sample(pool, len))
print(generate_verification_code())
|
import numpy as np
import sys
import os
import re
import json
sys.path.append(os.path.dirname(os.getcwd()))
from django.shortcuts import render
from scrapy.cmdline import execute
from django.http import HttpResponse
from news.function import *
cwd = os.getcwd()
# Create your views here.
def test():
    """Smoke check: print the result of the imported hello() helper."""
    greeting = hello()
    print(greeting)
def get_content(filepath):
    """Read the JSON file at *filepath* and return the parsed word list."""
    with open(filepath, 'r') as handle:
        return json.load(handle)
def get_article_info(request):
    """Return the basic info for every article (no content) as a JSON array.

    news.json is one JSON object per line, located in res/ next to the
    working directory's parent (cwd is where manage.py lives).
    """
    parent = os.path.dirname(cwd)
    datapath = parent + "\\seprojext\\res\\news.json"
    articles = []
    with open(datapath, 'r') as f:
        for line in f:
            articles.append(json.loads(line))
    return HttpResponse(json.dumps(articles), content_type="application/json")
def get_article(request):
    """Return every article, content included, as a JSON array.

    Each line of news.json holds one article whose 'content' field is a file
    path; the path is replaced here with the file's actual word list.
    """
    parent = os.path.dirname(cwd)
    datapath = parent + "\\seprojext\\res\\news.json"
    article = []
    with open(datapath, 'r') as f:
        for line in f:
            data = json.loads(line)
            # Swap the content path for the real content.
            data['content'] = get_content(data['content'])
            print(data['content'])
            article.append(data)
    return HttpResponse(json.dumps(article), content_type="application/json")
# TODO 获取读者词汇掌握程度,推荐难度最适合的文章
def get_similiar(request, paramList):
    """Recommend the ten articles whose difficulty best fits the reader.

    Stub: always returns True for now.  paramList is the reader's
    vocabulary coverage ratio.
    """
    return True


# TODO: also support returning articles bucketed by difficulty level.
# filepath = "F:\\seprojext\\tutorial\\tutorial\\spiders\\res\\news\\5 questions about Iran's nuclear deal announcement.json"
# get_content(filepath)
test()
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Tan Chao"
__email__ = "tanchao_bupt@163.com"
__version__ = "1.0.0"
import time
import numpy as np
import matplotlib.pyplot as plt
'''BezierCurve
1. 实现贝塞尔曲线
2. 支持随意阶数
3. 点支持多维
4. 相机跟随示例
5. 测试用例,并画图对比结果
'''
class BezierCurve(object):
    """Evaluate a Bezier curve of arbitrary degree and point dimension
    using De Casteljau's recursive construction.
    """
    def __init__(self, points):
        super(BezierCurve, self).__init__()
        self.resetPoints(points)

    def resetPoints(self, points):
        """Replace the curve's control points.

        Note:
            degree: len(points); dimension: len(points[0]).
        """
        self.points = points or []

    def getPointAt(self, t=0.5):
        """Return the curve point at parameter t.

        Args:
            t (float): curve parameter in [0, 1].
        """
        return self.computePointAt(0, len(self.points) - 1, t)

    def computePointAt(self, start, end, t):
        """Recursively blend control points[start..end] at parameter t.

        t is essentially the weight of the 'end' side:
        t == 0 gives the start point all the weight, t == 1 the end point.
        """
        if start == end:
            return self.points[start]
        left = self.computePointAt(start, end - 1, t)
        right = self.computePointAt(start + 1, end, t)
        return [(1 - t) * u + t * v for u, v in zip(left, right)]
class BezierCurveImprove(BezierCurve):
    """BezierCurve that memoizes intermediate De Casteljau results,
    trading memory for speed on repeated evaluations.

    self.cache (map): memo of intermediate points, keyed "start:end:t".
    """
    def __init__(self, points):
        super(BezierCurveImprove, self).__init__(points)

    def resetPoints(self, points):
        super(BezierCurveImprove, self).resetPoints(points)
        self.cache = {}  # {"start:end:t": point}

    def computePointAt(self, start, end, t):
        key = '%s:%s:%s' % (start, end, t)
        if key in self.cache:
            return self.cache[key]
        value = super(BezierCurveImprove, self).computePointAt(start, end, t)
        self.cache[key] = value
        return value
class CameraFollow(object):
    """Smooth camera tracking: keep the last few target positions and
    return the midpoint of the Bezier curve through them."""
    def __init__(self, delayFrame=5):
        super(CameraFollow, self).__init__()
        self.targetPath = []          # recent target positions the camera follows
        self.delayFrame = delayFrame  # number of frames the camera lags behind
        self.bezierCurve = BezierCurve([])

    def updateTarget(self, point):
        """Record the newest target position and return the smoothed camera point.

        The camera target may be updated on a different cadence than the
        player's own position updates.
        """
        self.targetPath.append(point)
        while len(self.targetPath) > self.delayFrame:
            self.targetPath.pop(0)
        self.bezierCurve.resetPoints(self.targetPath)
        return self.bezierCurve.getPointAt(0.5)
class Plotting(object):
    """Thin wrapper around matplotlib.pyplot for drawing 2-D point paths."""
    def __init__(self, title):
        super(Plotting, self).__init__()
        self.reset(title)

    def reset(self, title):
        """(Re)apply the figure title and axis labels."""
        plt.title(title)
        plt.xlabel('x-coordinate')
        plt.ylabel('y-coordinate')

    def draw(self, points, style='.-', label=''):
        """Plot *points* with the given line style and optional legend label."""
        xs = [p[0] for p in points]
        ys = [p[1] for p in points]
        plt.plot(xs, ys, style, label=label)
        if label:
            plt.legend(loc='upper left')
        plt.tight_layout()  # auto-adjust the layout

    def show(self):
        plt.show()
class PlottingSubplot(Plotting):
    """Plotting variant that targets one cell of a row x col subplot grid."""
    def __init__(self, title, row, col):
        super(PlottingSubplot, self).__init__(title)
        self.row = row
        self.col = col

    def draw(self, points, plotNumber, title='', style='.-'):
        """Draw *points* into subplot *plotNumber*, titling that axis."""
        axis = plt.subplot(self.row, self.col, plotNumber)
        axis.set_title(title)
        super(PlottingSubplot, self).draw(points, style)
# ----------------------------------------------------------------
# --------------------------Test----------------------------------
def testDegreeImpl(points, count, ps, plotNumber):
    """Sample the curve over *points* at count+1 parameters and plot both
    the control polygon and the sampled curve into subplot *plotNumber*."""
    curve = BezierCurve(points)
    samples = [curve.getPointAt(1.0 * step / count) for step in range(0, count + 1)]
    title = 'Bezier Curve: Degree %s' % len(points)
    ps.draw(points, plotNumber, title=title, style='.-')
    ps.draw(samples, plotNumber, title=title, style='.-r')
def testDegreeN():
    """Demo: draw degree-3/4/5/10 Bezier curves in a 2x2 subplot grid."""
    ps = PlottingSubplot('Bezier Curve', 2, 2)
    cases = [
        ([(1, 1), (2, 3), (3, 1)], 10),
        ([(1, 1), (0.5, 3), (3, 4), (4, 1)], 10),
        ([(1, 1), (0.5, 3), (3, 4), (4, 1), (5, 5)], 10),
        ([(1, 1), (0.5, 3), (3, 4), (4, 1), (5, 10), (6, 5), (6, 8), (7, 4), (8, 2), (9, 6)], 20),
    ]
    for plotNumber, (points, count) in enumerate(cases, start=1):
        testDegreeImpl(points, count, ps, plotNumber)
    ps.show()
def testCamera():
    """Demo of camera smoothing at several delay settings.

    More delayed frames give a smoother follow, but too many delay frames
    makes the camera lag noticeably and becomes expensive to compute.
    """
    avatarPos = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (3, 2), (1, 4), (2, 4), (3, 4), (4, 4), (4, 5), (5, 1)]
    avatarPos += [(6, 2)] * 10  # avatar parks here; the camera still converges to it

    def follow(delay):
        """Run CameraFollow over the whole path with the given frame delay."""
        camera = CameraFollow(delay)
        return [camera.updateTarget(target) for target in avatarPos]

    followPos3 = follow(3)
    followPos5 = follow(5)
    followPos10 = follow(10)

    p = Plotting('Bezier Curve: Camera Follow')
    p.draw(avatarPos, '.-', label='avatar')
    p.draw(followPos3, '.-r', label='delay 3')
    p.draw(followPos5, '.-g', label='delay 5')
    p.draw(followPos10, '.-y', label='delay 10')
    p.show()
def countTimeCost(points, cls):
    """Instantiate *cls* over *points*, sample 101 curve values, and
    return (samples, elapsed_seconds)."""
    begin = time.time()
    curve = cls(points)
    samples = [curve.getPointAt(step / 100.0) for step in range(0, 101)]
    return samples, time.time() - begin
def compareTwoPlan(points):
    """Time both implementations on the same control points and print the
    point count, both durations, and the speed-up ratio."""
    curvePoints, timeNormal = countTimeCost(points, BezierCurve)
    curvePointsImprove, timeImprove = countTimeCost(points, BezierCurveImprove)
    # Python 2 print statement: count, plain time, memoized time, ratio.
    print 'point', len(points), timeNormal, timeImprove, timeNormal/timeImprove
    # print curvePoints, curvePointsImprove
def testPerformance():
    """Compare plain vs memoized curve evaluation at degrees 5, 10 and 15.

    Measured results from the original run:
        degree 5 : 0.0020s vs 0.0040s  (x0.5 - caching actually slower)
        degree 10: 0.0670s vs 0.0160s  (x4.2)
        degree 15: 2.1350s vs 0.0350s  (x61)
    The cache pays off increasingly as the degree grows.
    """
    points5 = [(1, 1), (0.5, 3), (3, 4), (4, 1), (5, 10)]
    points10 = [(6, 5), (6, 8), (7, 4), (8, 2), (9, 6), (10, 1), (11, 2), (12, 3), (13, 4), (14, 10)]
    for batch in (points5, points10, points5 + points10):
        compareTwoPlan(batch)
if __name__ == '__main__':
    # Only the degree demo runs by default; uncomment the others to try them.
    testDegreeN()
    # testCamera()
    # testPerformance()
|
from django.urls import path
from . import views
# URL routes for the listings app: CRUD, search, favourites, and a set of
# event-type-specific search views.
urlpatterns =[
    # Listings index.
    path('', views.index, name='listings'),
    # path('create', views.create, name='create'),
    path('addSpace', views.addspace, name='addspace'),
    path('rating', views.myindex, name='rating'),
    # Single listing detail by primary key.
    path('<int:id>', views.listing, name='listing'),
    #path('<int:id>', views.listing1, name='listing1'),
    path('mylisting', views.mylisting, name='mylisting'),
    path('search', views.search , name='search'),
    # path('edit/<int:id>',views.edit, name='edit'),
    path('update/<int:id>',views.update, name='update'),
    path('delete/<int:id>',views.delete , name='delete'),
    #path('<int:id>/comment/',views.add_comment_to_listing , name='add_comment_to_listing'),
    # NOTE(review): '<id>' is an untyped converter (string), unlike the
    # '<int:id>' routes above - confirm views.favourite expects a string.
    path('<id>/favourite_listing', views.favourite, name='favourite'),
    path('board/', views.board, name='board'),
    # Event-type-specific searches.
    path('photoshoot/', views.photoShoot_Search , name='photoshoot'),
    path('birthdayParty/', views.birthday_Search , name='birthday'),
    path('workshop/', views.workshop_Search , name='workshop'),
    path('bridalShower/', views.bridalShower_Search , name='bridal'),
    path('meeting/', views.meeting_Search , name='meeting'),
    path('weddingDestination/', views.wedding_Search , name='wedding'),
]
|
# Project Euler #2: sum the even-valued Fibonacci terms not exceeding lim.
ans = 0
a, b = 1, 1
c = 0
lim = int(4 * 1e6)
while True:
    c = a + b
    if c > lim:
        break
    if c % 2 == 0:
        ans += c
    a, b = b, c
print(ans)
from rest_framework import serializers
from project_apps.expiring_url.models import ExpiringUrl
class ExpiringUrlSerializer(serializers.ModelSerializer):
    """Serializer for ExpiringUrl; clients may only supply time_to_expiry."""

    time_to_expiry = serializers.IntegerField()

    class Meta:
        model = ExpiringUrl
        read_only_fields = ("uuid", "created_at", "expires_at", "image")
        fields = ("uuid", "created_at", "expires_at", "image", "time_to_expiry")

    def validate_time_to_expiry(self, value):
        """Reject expiry windows outside the allowed 300-30000 range."""
        if value < 300 or value > 30000:
            # BUG FIX: the message claimed an upper bound of 300000 while the
            # check enforces 30000; make the message match the code.
            # NOTE(review): confirm whether the intended cap was 30000 or
            # 300000 and adjust the comparison if it is the latter.
            raise serializers.ValidationError(
                "Time to expiry has to be between 300 and 30000."
            )
        return value
|
from collections.abc import Iterable
from typing import TypeVar, overload
from _typeshed import Incomplete
from networkx.classes.graph import Graph
_T = TypeVar("_T")
# Stub signatures for networkx's generic shortest-path helpers.
def has_path(G: Incomplete, source: Incomplete, target: Incomplete) -> Incomplete: ...

# shortest_path overloads: with both endpoints it returns a single path; with
# only one endpoint it returns a dict mapping the free endpoint to its path.
@overload
def shortest_path(
    G: Graph[_T],
    source: _T,
    target: _T,
    weight: Incomplete | None = ...,
    method: str = ...,
) -> list[_T]: ...
@overload
def shortest_path(
    G: Graph[_T], target: _T, method: str = ...
) -> dict[_T, list[_T]]: ...
@overload
def shortest_path(
    G: Graph[_T], source: _T, method: str = ...
) -> dict[_T, list[_T]]: ...
def shortest_path_length(
    G: Incomplete,
    source: Incomplete | None = ...,
    target: Incomplete | None = ...,
    weight: Incomplete | None = ...,
    method: str = ...,
) -> Incomplete: ...
def average_shortest_path_length(
    G: Incomplete, weight: Incomplete | None = ..., method: str = ...
) -> Incomplete: ...
# Lazily yields every shortest path between source and target.
def all_shortest_paths(
    G: Graph[_T],
    source: _T,
    target: _T,
    weight: Incomplete | None = ...,
    method: str = ...,
) -> Iterable[list[_T]]: ...
|
class Solution:
    def rotateString(self, s: str, goal: str) -> bool:
        """Return True if some left-rotation of s equals goal.

        FIXES over the original O(n^2) scan:
        - ("", "") now correctly returns True (zero rotations needed);
          the old loop never ran and reported False.
        - Uses the classic O(n) observation that every rotation of s is a
          substring of s + s.
        """
        if len(s) != len(goal):
            return False
        return goal in s + s
# Write a program that computes the fraction of students who received an A.
# Grading uses a five-point scale with marks A, B, C, D, F.
# Input format:
# A single line of space-separated student grades (always at least one grade).
# Output format:
# A fraction with exactly two digits after the decimal point.
# Sample Input 1:
# F B A A B C A D
# Sample Output 1:
# 0.38
# Sample Input 2:
# B C B
# Sample Output 2:
# 0.00
# Sample Input 3:
# A D
# Sample Output 3:
# 0.50
# Read the grades and print the fraction of 'A' marks with two decimals.
# BUG FIX: the original rounded 100*count/len to an integer and then
# formatted it with '{:.2f}', printing e.g. '38.00' instead of '0.38'
# (and '100.00' instead of '1.00' when every grade was an A).
grades = input().split()
print('{0:.2f}'.format(grades.count('A') / len(grades)))
import asyncio
from aiokafka import AIOKafkaProducer
import functools
import time
def on_done(i):
    """Report that message number *i* was sent."""
    parts = ('Send msg n°', i)
    print(*parts)
async def produce(mloop):
    """Send 100 demo messages to 'my_favorite_topic', one every 0.4s.

    Args:
        mloop: asyncio event loop handed to the AIOKafkaProducer.
    """
    producer = AIOKafkaProducer(
        loop=mloop, bootstrap_servers='infra-cp-kafka')
    await producer.start()
    try:
        # Produce message
        for i in range(0, 100):
            record = await producer.send_and_wait("my_favorite_topic", b"Super message")
            print(record)
            # BUG FIX: time.sleep() blocks the whole event loop inside a
            # coroutine; use the non-blocking asyncio sleep instead.
            await asyncio.sleep(0.4)
    finally:
        # Wait for all pending messages to be delivered or expire.
        await producer.stop()
if __name__ == '__main__':
    # NOTE(review): get_event_loop() for this pattern is deprecated since
    # Python 3.10; asyncio.run(produce(...)) is the modern form, but produce()
    # takes the loop as a parameter - confirm before switching.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(produce(loop))
import torch
import torch.nn as nn
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#MODEL
"""
note: I often get confused by the dimensions of data going through the neural net; it might be caused by the fact that Pytorch build in neworks
(nn.RNN) always expect batch_size to be one dimension of the data
"""
class RNN(nn.Module):
    """Many-to-one GRU regressor: encodes a feature sequence and maps the
    final hidden state through a linear layer to `output_size` values.

    NOTE(review): predict() hard-codes 11 input features, of which the first
    8 are externally known and the last 3 are predicted targets - confirm
    these match the dataset before reuse.
    """
    def __init__(self,n_features,n_layers,hidden_size,output_size,sequence_length):
        super(RNN,self).__init__()
        self.sequence_length = sequence_length
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.rnn = nn.GRU(n_features, hidden_size, n_layers, batch_first=True) #expects input of shape --> batch_size, sequence_length, n_features
        self.fc = nn.Linear(hidden_size, output_size) #fully connected layer
    def forward(self,x):
        """forward pass of the data during training"""
        #do not confuse HIDDEN state with WEIGHT matrix!
        h0 = torch.zeros(self.n_layers,x.size(0),self.hidden_size).to(device) #h0 shape --> n_layers, batch_size, hidden_size
        out, _ = self.rnn(x, h0) #returns hidden states for all time steps
        out = out[:,-1,:] #I need only the last hidden state (many to one RNN, see README.md)
        out = self.fc(out) #out shape --> batch_size, output_size
        return out
    #function predict accepts know_sequence as first input and future_sequence that must DIRECTLY follow the known_sequence
    def predict(self,known_sequence, future_sequence):
        """function for predicting future values for arbitrary number of future time steps

        Autoregressive rollout: each step predicts the target variables,
        splices them onto the known features of the next time step, and
        slides the window forward by one.
        """
        with torch.no_grad():
            self.eval() #setting the model to evaluation mode and turning off the gradient computation
            # NOTE(review): returning an error string instead of raising makes
            # failures easy to miss - consider ValueError.
            if len(known_sequence)<self.sequence_length:
                return "known_sequence must be longer than sequence length"
            known_sequence = known_sequence.to(device)
            future_sequence = future_sequence.to(device) #future sequence can be of arbitrary length
            #x is going to be updated at each future time step
            x = known_sequence[-self.sequence_length:]
            x = torch.reshape(x,(1,-1,11)) #x must be of shape --> batch_size, sequence_length, n_features = 1, 100, 11
            outputs = [] #list where outputs of future timesteps will be stored
            for i in range(len(future_sequence)): #future_sequence shape --> future_sequence_length, n_features
                """calculating outputs and updating the x with the newly predicted target variables"""
                #more or less repeating forward pass with batch_size = 1
                h0 = torch.zeros(self.n_layers,1,self.hidden_size).to(device)
                out, _ = self.rnn(x, h0) #out shape --> 1,batch_size,hidden_size
                out = out[:,-1,:] #out shape --> 1,hidden_size
                out = self.fc(out) #out shape --> 1,output_size = 1,3
                out = out[0] #out shape --> output_size = 3
                outputs.append(out)
                #preparing the new data point new_t to be added to x
                new_x = future_sequence[i,:8] #known data of the future time step
                new_t = torch.cat((new_x,out),-1) #concatenating the 8 known features with 3 predicted
                x = torch.roll(x,-1,1) #shifting the elements in the tensor by -1 (the first element becomes last)
                x[0,-1] = new_t #replacing the last element with the newly made data point
        return outputs
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from . import forms
from .lib import integer_to_roman
def home(request, integer=None):
    """Render a form that converts integers into Roman numerals.

    A valid POST redirects back to this view with the submitted integer in
    the URL; the numeral is then computed on the resulting GET.
    """
    form = forms.IntegerToRomanForm(request.POST or None)
    if request.POST and form.is_valid():
        return redirect(home, form.cleaned_data['integer'])
    if integer is None:
        numeral = None
    else:
        numeral = integer_to_roman(int(integer))
    context = RequestContext(request, {
        'form': form,
        'integer': integer,
        'numeral': numeral
    })
    return render_to_response('home.html', context)
|
import random
import time
# Game state shared by the Hangaroo functions below.
words=['rice','chicken','park','skateboard','ball','computer','inductor','transformer']  # secret-word pool
secretWord = random.choice(words)  # module-level pick (Hangaroo() re-picks its own)
wl = len(secretWord)  # secret word length
lettersGuessed =[]  # letters the player has tried so far
alp='abcdefghijklmnopqrstuvwxyz'  # full alphabet for tracking unused letters
word=""  # revealed-so-far display string
string=""  # scratch buffer of remaining letters
def start():
    """Show the welcome banner and give the player a moment to read it."""
    for line in ("Welcome to Hangaroo!", "Let's play together!"):
        print(line)
    time.sleep(2)
start()
def secw():
    """Pick a fresh secret word and announce its length and the try budget."""
    chosen = random.choice(words)
    letters = len(chosen)
    tries = 2 * letters
    print("Guess the word with ", letters, "letters")
    print("You have ", tries, "tries!")
    time.sleep(2)
def Hangaroo():
    """Main guessing loop.

    NOTE(review): the control flow is broken in several ways and the game
    cannot be completed as written - see the inline notes below.
    """
    secretWord=random.choice(words)
    wl=len(secretWord)
    ch=2*wl  # total tries allowed (2x word length)
    word=""
    string=""
    i=0
    alp='abcdefghijklmnopqrstuvwxyz'
    # NOTE(review): secretWord is chosen twice (here and above); the second
    # pick replaces the first, and neither matches the word secw() announced.
    secretWord = random.choice(words)
    lettersGuessed =[]
    while i<ch:
        x = input("Enter a letter: ").lower()
        # NOTE(review): the loop variable x shadows the guess just read, and
        # lettersGuessed is never appended to, so every letter shows as "_".
        for x in secretWord:
            if x not in lettersGuessed:
                word+=" _ "
                print("Oh no!")
            else:
                word+=x
                print("Nice")
        # NOTE(review): this return exits on the first guess, making all of
        # the code below (alphabet tracking, try counting) unreachable.
        return word
        for x in alp:
            if x in lettersGuessed:
                alp.replace(x," ")  # NOTE(review): result discarded; str is immutable
            else:
                string+=x
        return string
        ch+=1  # NOTE(review): unreachable, and i is never incremented either
secw()
Hangaroo()
print("You lose!")
# Generated by Django 3.1.3 on 2021-06-05 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (2021-06-05); avoid hand-editing once applied.

    dependencies = [
        ('Game', '0003_game_price'),
    ]

    operations = [
        # Drop the two image columns from Game.
        migrations.RemoveField(
            model_name='game',
            name='Gameplay_images',
        ),
        migrations.RemoveField(
            model_name='game',
            name='poster',
        ),
        # New required boolean; preserve_default=False means the default (1)
        # was only used to backfill existing rows.
        migrations.AddField(
            model_name='game',
            name='Popular',
            field=models.BooleanField(default=1),
            preserve_default=False,
        ),
        # Normalize the primary key to an explicit AutoField.
        migrations.AlterField(
            model_name='game',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
import time, unittest, os, sys
from selenium import webdriver
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_inbox_talk import *
from main.activity.desktop_v3.activity_product import *
from main.page.desktop_v3.inbox_talk.pe_inbox_talk import *
from utils.function.setup import *
from utils.lib.user_data import *
class TestDeletetalk(unittest.TestCase):
    """End-to-end check that deleting a talk removes it from both the
    sender's inbox and the product page (desktop v3 UI, Firefox)."""

    _site = "live"  # target environment for the page/activity helpers

    def setUp(self):
        # Fresh Firefox session plus the login/inbox/product activity helpers.
        self.driver = tsetup("firefox")
        self.user = user7
        self.login = loginActivity()
        self.inbox_talk = inboxTalkActivity()
        self.product = ProductActivity()

    def test_delete_talk(self):
        """Delete a random talk, then verify it is gone from inbox and product."""
        print('============================================')
        print('TEST Talk/004 : DELETE TALK FROM INBOX TALK ')
        print('============================================')
        self.login.do_login(self.driver, self.user, self.user['email'], self.user['password'], self._site)
        self.inbox_talk.setObject(self.driver)
        # Deleting returns the talk id, the product it belonged to, and the
        # remaining talk count used for the inbox assertion.
        deleted_talk_ID, product_link, total_list_talk = self.inbox_talk.delete_random_talk(self._site)
        time.sleep(2)
        self.inbox_talk.is_talk_deleted_from_sender_inbox(deleted_talk_ID, total_list_talk)
        self.driver.close()
        print('***Check if the talk is deleted from product***')
        # Second, fresh browser session checks the public product page.
        self.driver = tsetup("firefox")
        self.driver.get(product_link)
        self.product.setObject(self.driver)
        self.product.is_talk_deleted_from_product(deleted_talk_ID)
        print('')
        print('=============================================')

    def tearDown(self):
        # Brief pause so the final browser state is visible before quitting.
        print("Test will be terminated in a second...")
        time.sleep(5)
        self.driver.quit()
# Entry point: run the test case directly.
if __name__ == "__main__":
    unittest.main()
import sqlite3
import pandas
# Ratio of total land area to total water area over every row in facts,
# ignoring empty values.
conn = sqlite3.connect("factbook.db")
query = 'SELECT SUM(area_land) FROM facts WHERE area_land != "" ;'
query2 = 'SELECT SUM(area_water) FROM facts WHERE area_water != "" ;'
land_total = pandas.read_sql_query(query, conn)['SUM(area_land)'][0]
water_total = pandas.read_sql_query(query2, conn)['SUM(area_water)'][0]
print(land_total / water_total)
import random
from numpy import *
def linear_recom(vector1, vector2):
    """Blend two bug vectors with a random convex combination.

    Picks r uniformly in [0.3, 0.7], forms r*vector1 + (1-r)*vector2, and
    lets judge.judge_lable() fix up the label of the blended vector.

    Returns:
        The labelled blended vector.
    """
    r = random.uniform(0.3, 0.7)
    # FIX: removed the no-op 'array(new_bug)' call - its result was discarded
    # and new_bug was immediately rebound on the next line anyway.
    new_bug = r * vector1 + (1.0 - r) * vector2
    import judge
    new_bug = judge.judge_lable(vector1, vector2, new_bug)
    return new_bug
'''
from basic_method import MORPH
vec1 = MORPH.all_inputs_vector[2]
print "type(vec1):\n",type(vec1)
vec2 = MORPH.bug_inputs_vector[2]
print "type(vec2):\n",type(vec2)
vec3 = linear_recom(vec1,vec2)
print vec3
print "type(vec3):\n",type(vec3)
''' |
# Tunable constants for the consuming module.
# NOTE(review): semantics are not visible from this file alone - the names
# suggest three roughly equal mixing weights, a topic-similarity threshold,
# and a tau parameter; confirm at the call sites before changing values.
alpha = 0.33
beta = 0.33
gamma = 0.33
topic_similarity = 0.5
tau = 2
|
# Print the contents of the example data file.
# FIX: use a context manager so the file handle is closed deterministically;
# the original opened the file and never closed it.
with open('exampledir/data.txt') as file:
    print(file.read())
|
"""Treadmill context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
from treadmill import plugin_manager
_LOGGER = logging.getLogger(__name__)
class ContextError(Exception):
    """Raised when unable to connect to LDAP or Zookeeper."""
def required(msg):
    """Decorator factory: raise ContextError(msg) when the wrapped function
    returns None.
    """
    def _decorator(func):
        """Wrap *func* with a None-result check."""
        @functools.wraps(func)
        def decorated_function(*args, **kwargs):
            """Call *func* and reject a None result."""
            value = func(*args, **kwargs)
            if value is None:
                raise ContextError(msg)
            return value
        return decorated_function
    return _decorator
class DnsContext(object):
    """DNS context.

    Lazily loads the 'dns' context plugin and exposes SRV lookups for the
    Treadmill API endpoints.
    """

    __slots__ = (
        '_context',
        '_dns',
    )

    def __init__(self, ctx):
        self._context = ctx
        self._dns = None

    @property
    def _resolver(self):
        """Load, init and cache the DNS resolver plugin on first use."""
        if self._dns is None:
            resolver = plugin_manager.load('treadmill.context', 'dns')
            resolver.init(self._context)
            self._dns = resolver
        return self._dns

    def _scoped_srv(self, api, cell):
        """Resolve *api*'s SRV record within *cell*'s DNS scope."""
        (srv_entry, _proto) = self._resolver.lookup(
            self._context,
            api,
            scope=self._resolver.cell_scope(cell)
        )
        return srv_entry

    def admin_api_srv(self):
        """Get Admin API SRV record data."""
        (srv_entry, _proto) = self._resolver.lookup(
            self._context,
            'admin_api'
        )
        return srv_entry

    def state_api_srv(self, cell):
        """Get State API SRV record data."""
        return self._scoped_srv('state_api', cell)

    def cell_api_srv(self, cell):
        """Get Cell API SRV record data."""
        return self._scoped_srv('cell_api', cell)

    def ws_api_srv(self, cell):
        """Get Websocket API SRV record data."""
        return self._scoped_srv('ws_api', cell)
class AdminContext(object):
    """Ldap context.

    Credentials and URL live in the shared context object; the LDAP
    connection itself is created lazily and invalidated whenever the
    user or URL changes.
    """

    __slots__ = (
        '_context',
        '_conn',
    )

    def __init__(self, ctx):
        self._context = ctx
        self._conn = None

    @property
    @required('Cannot resolve LDAP suffix.')
    def ldap_suffix(self):
        """LDAP suffix getter.

        Raises ContextError (via @required) when the suffix is unset.
        """
        return self._context.get('ldap_suffix', resolve=False)

    @property
    def user(self):
        """User, getter.
        """
        return self._context.get('ldap_user', resolve=False)

    @user.setter
    def user(self, value):
        """User, setter.

        Changing the user invalidates any cached connection.
        """
        if value != self._context.get('ldap_user', resolve=False):
            self._conn = None
        self._context.set('ldap_user', value)

    @property
    def password(self):
        """Password, getter.
        """
        return self._context.get('ldap_pwd', resolve=False)

    @password.setter
    def password(self, value):
        """Password, setter.

        Side effect: if no user is set yet, defaults it to the directory
        manager DN under the configured suffix.
        """
        self._context.set('ldap_pwd', value)
        if self.user is None:
            self.user = 'cn=Manager,%s' % self.ldap_suffix

    @property
    @required('Cannot resolve LDAP url.')
    def url(self):
        """URL, getter.

        Raises ContextError (via @required) when the URL cannot be resolved.
        """
        return self._context.get('ldap_url', resolve=True)

    @url.setter
    def url(self, value):
        """Set URL, then nullify the connection.
        """
        self._context.set('ldap_url', value)
        self._conn = None

    @property
    def conn(self):
        """Lazily establishes connection to admin LDAP.
        """
        if self._conn:
            return self._conn

        # Connection is delegated to the 'admin' context plugin.
        plugin = plugin_manager.load('treadmill.context', 'admin')
        self._conn = plugin.connect(self.url, self.ldap_suffix,
                                    self.user, self.password)
        return self._conn
class ZkContext(object):
    """Holds the Zookeeper connection state for a context.

    The client is created lazily on first ``conn`` access; listeners added
    before that point are attached to the new client when it connects.
    """

    __slots__ = (
        'proid',
        '_context',
        '_conn',
        '_listeners',
    )

    def __init__(self, ctx):
        self.proid = None
        self._context = ctx
        self._conn = None
        self._listeners = []

    def add_listener(self, listener):
        """Register a connection state listener."""
        self._listeners.append(listener)

    @property
    @required('Cannot resolve Zookeeper connection string.')
    def url(self):
        """Zookeeper connection string, resolved through the context."""
        return self._context.get('zk_url', resolve=True)

    @url.setter
    def url(self, value):
        """Store the Zookeeper connection string on the context."""
        self._context.set('zk_url', value)

    @property
    def conn(self):
        """Zookeeper client, connected lazily on first access."""
        if not self._conn:
            _LOGGER.debug('Connecting to Zookeeper %s', self.url)
            # The proid is the user part of zookeeper://<proid>@<hosts>.
            self.proid, _ = self.url[len('zookeeper://'):].split('@')
            plugin = plugin_manager.load('treadmill.context', 'zookeeper')
            self._conn = plugin.connect(self.url)
            for listener in self._listeners:
                self._conn.add_listener(listener)
        return self._conn

    @conn.setter
    def conn(self, zkclient):
        """Explicitly install an already-created Zookeeper client."""
        self._conn = zkclient
class Context(object):
    """Global connection context.

    Resolves configuration attributes in order: explicit profile values,
    resolver plugins (DNS/LDAP, loaded lazily), then profile defaults.
    Also exposes lazy Zookeeper, LDAP and DNS connection contexts via
    ``zk``, ``ldap`` and ``dns``.
    """
    __slots__ = (
        'ldap',
        'zk',
        'dns',
        # NOTE(review): '_resolvers' appears unused in this class — confirm.
        '_resolvers',
        '_plugins',
        '_profile',
        '_profile_name',
        '_defaults',
        '_stack',
    )

    def __init__(self):
        self._profile_name = None
        self._profile = {}
        # None means "not loaded yet" — see _load_profile().
        self._defaults = None
        self._plugins = []
        # Protect against recursive gets: attributes currently being
        # resolved, so a plugin's own get() calls do not recurse forever.
        self._stack = set()
        # Lazy connections to Zookeeper, LDAP and DNS.
        self.zk = ZkContext(self)
        self.ldap = AdminContext(self)
        self.dns = DnsContext(self)

    def _load_profile(self):
        """Load profile defaults from the named profile plugin (at most once).
        """
        if not self._profile_name:
            return
        # Load once.
        if self._defaults is not None:
            return
        self._defaults = {}
        try:
            profile_mod = plugin_manager.load('treadmill.profiles',
                                              self._profile_name)
            self._defaults = profile_mod.PROFILE
        except KeyError:
            _LOGGER.warning('Profile not found: %s', self._profile_name)

    def _init_plugins(self):
        """Initialize resolver plugins (at most once).
        """
        if self._plugins:
            return
        _LOGGER.debug('Loading plugins.')
        # TODO: This is a hack, need a better way to determine if plugin
        #       should be loaded.
        if self.get('dns_domain', resolve=False):
            _LOGGER.debug('Loading dns plugin.')
            dns = plugin_manager.load('treadmill.context', 'dns')
            dns.init(self)
            self._plugins.append(dns)
        if self.get('ldap_url', resolve=False):
            _LOGGER.debug('Loading admin plugin.')
            ldap = plugin_manager.load('treadmill.context', 'admin')
            ldap.init(self)
            self._plugins.append(ldap)

    def get(self, attr, default=None, resolve=True, volatile=False):
        """Get attribute from profile, resolver plugins or defaults.

        :param attr: attribute name.
        :param default: value returned (and, for non-volatile attributes,
            cached in the profile) when the attribute cannot be found.
        :param resolve: when True, ask resolver plugins for the attribute
            if it is not already present in the profile.
        :param volatile: when True, the value is removed from the profile
            after being read, so it is re-evaluated on every call.
        """
        if attr in self._profile:
            return self._profile[attr]
        self._load_profile()
        # The _stack membership test breaks resolution cycles: a plugin's
        # resolve() may call back into get() for the same attribute.
        if resolve and attr not in self._stack:
            self._stack.add(attr)
            try:
                self._init_plugins()
                for plugin in self._plugins:
                    try:
                        self._profile[attr] = plugin.resolve(self, attr)
                    except ContextError:
                        _LOGGER.warning('Error resolving attribute - %s: %s',
                                        plugin, attr)
                    except KeyError:
                        # Plugin is not responsible for the attribute.
                        pass
            finally:
                self._stack.discard(attr)
        if attr not in self._profile:
            # Attr was not found, look for it in _defaults.
            if (self._defaults is not None and
                    self._defaults.get(attr) is not None):
                self._profile[attr] = self._defaults[attr]
        if attr not in self._profile and default is not None:
            self._profile[attr] = default
        # Non-volatile attributes stick: once recorded in the profile they
        # are never evaluated again.
        #
        # Volatile attributes are popped, so they are re-evaluated on every
        # call.
        if volatile:
            return self._profile.pop(attr, default)
        else:
            return self._profile.get(attr, default)

    def set(self, attr, value):
        """Set profile attribute.
        """
        self._profile[attr] = value

    def set_profile_name(self, profile_name):
        """Sets current profile name.
        """
        self._profile_name = profile_name

    def get_profile_name(self):
        """Returns profile name.
        """
        return self._profile_name

    @property
    def profile(self):
        """Returns the profile dict (defaults loaded on first access).
        """
        self._load_profile()
        return self._profile

    @property
    @required('Cannot resolve cell.')
    def cell(self):
        """Returns cell name.
        """
        return self.get('cell', resolve=False)

    @cell.setter
    def cell(self, value):
        """Sets cell name.
        """
        self.set('cell', value)

    @property
    @required('Cannot resolve DNS domain.')
    def dns_domain(self):
        """Returns DNS domain.
        """
        return self.get('dns_domain', resolve=False)

    @dns_domain.setter
    def dns_domain(self, value):
        """Sets DNS domain.
        """
        self.set('dns_domain', value)

    @property
    def dns_server(self):
        """Returns DNS server.
        """
        return self.get('dns_server')

    @dns_server.setter
    def dns_server(self, value):
        """Sets DNS server.
        """
        return self.set('dns_server', value)

    @property
    @required('Cannot resolve LDAP suffix.')
    def ldap_suffix(self):
        """Returns LDAP suffix.
        """
        return self.get('ldap_suffix')

    @ldap_suffix.setter
    def ldap_suffix(self, value):
        """Sets LDAP suffix.
        """
        return self.set('ldap_suffix', value)

    def scopes(self):
        """Returns supported scopes ('cell' when not configured).
        """
        return self.get('scopes', ['cell'])

    @required('Cannot resolve admin api.')
    def admin_api(self, api=None):
        """Returns admin API endpoints (or [api] when explicitly given).
        """
        if api:
            return [api]
        # volatile: re-resolved on every call rather than cached.
        return self.get('admin_api', volatile=True)

    @required('Cannot resolve cell api.')
    def cell_api(self, api=None):
        """Returns cell API endpoints (or [api] when explicitly given).
        """
        if api:
            return [api]
        return self.get('cell_api', volatile=True)

    @required('Cannot resolve websocket api.')
    def ws_api(self, api=None):
        """Returns websocket API endpoints (or [api] when explicitly given).
        """
        if api:
            return [api]
        return self.get('ws_api', volatile=True)

    @required('Cannot resolve state api.')
    def state_api(self, api=None):
        """Returns state API endpoints (or [api] when explicitly given).
        """
        if api:
            return [api]
        return self.get('state_api', volatile=True)
GLOBAL = Context()
|
from waitlist.storage.database import AccountNote
from waitlist.utility.constants import account_notes
from waitlist.utility.eve_id_utils import get_character_by_id
import json
import logging
logger = logging.getLogger(__name__)
def render_note_text(note: AccountNote) -> str:
    """Render an AccountNote as human-readable display text.

    Dispatches on ``note.type``; structured note types read their details
    from ``note.jsonPayload``.  Unknown types are logged and rendered with
    a placeholder message.

    :param note: the account note to render.
    :return: display text for the note (possibly empty, never None).
    """
    if note.type == account_notes.TYPE_HUMAN:
        # Free-form operator note; may be unset.
        if note.note is None:
            return ''
        return note.note
    elif note.type == account_notes.TYPE_ACCOUNT_ACTIVE_CHANGED:
        if note.jsonPayload['new_disabled']:
            return "Account Disabled"
        else:
            return "Account Activated"
    elif note.type == account_notes.TYPE_ACCOUNT_CREATED:
        return f'Account created. Usernote: {note.note}'
    elif note.type == account_notes.TYPE_ACCOUNT_ROLES_CHANGED:
        if note.note is None:
            return ''
        return note.note
    elif note.type == account_notes.TYPE_GOT_ACCOUNT_MAIL:
        # Mail received by the account's active character; any missing
        # payload field is rendered as '<Unknown>'.
        if note.jsonPayload['sender_character_id'] is None:
            sender = '<Unknown>'
        else:
            sender = get_character_by_id(
                note.jsonPayload['sender_character_id']).get_eve_name()
        if note.jsonPayload['target_character_id'] is None:
            target = '<Unknown>'
        else:
            target = get_character_by_id(
                note.jsonPayload['target_character_id']).get_eve_name()
        body = note.jsonPayload['mail_body']
        subject = note.jsonPayload['subject']
        if body is None:
            body = '<Unknown>'
        if subject is None:
            subject = '<Unknown>'
        return (f'Character {sender} sent mail to Character {target} which '
                f'was the active char of this account. '
                f'Mail Subject: {subject} Mail Body: {body}')
    elif note.type == account_notes.TYPE_ROLE_CREATED:
        return (f'Created Role with name={note.jsonPayload["role_name"]} '
                f'and display name={note.jsonPayload["role_display_name"]}')
    elif note.type == account_notes.TYPE_ROLE_REMOVED:
        return (f'Removed Role with name={note.jsonPayload["role_name"]} '
                f'and display name={note.jsonPayload["role_display_name"]}')
    elif note.type == account_notes.TYPE_SENT_ACCOUNT_MAIL:
        # Dropped the original ``Character`` annotation: that name is never
        # imported in this module.
        character = get_character_by_id(
            note.jsonPayload['sender_character_id'])
        recipients_json = json.dumps(note.jsonPayload['recipients'])
        return (f'Account sent mail using character={character.eve_name} '
                f'to={recipients_json} '
                f'with subject={note.jsonPayload["subject"]} '
                f'and body={note.jsonPayload["body"]}')
    elif note.type == account_notes.TYPE_ACCOUNT_NAME_CHANGED:
        return (f'Changed name from "{note.jsonPayload["old_name"]}" '
                f'to "{note.jsonPayload["new_name"]}"')
    else:
        # Log message fixed: it previously read 'Unhandler', disagreeing
        # with the returned placeholder text.
        logger.error('Unhandled AccountNote type: %s', note.type)
        return f'Unhandled AccountNote type: {note.type}'
|
import time

# Demonstrate set difference: colours present in set 1 but absent from set 2.
# Set literals replace the redundant set([...]) constructor calls, and the
# difference already yields a set, so the extra set(...) wrapper is gone.
color_list_1 = {"White", "Black", "Red"}
color_list_2 = {"Red", "Green"}
print("Checking colours which are there in set 1 but not in set 2 ...")
time.sleep(1)  # purely cosmetic pause before showing the result
final_set = color_list_1 - color_list_2
print("Final set :", final_set)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.