content stringlengths 5 1.05M |
|---|
# Quarter name -> single-character quarter code.
# NOTE(review): no 'Summer' entry and no code '3' — presumably intentional;
# confirm against whatever consumes this table.
quarterCodes = {
    'Winter': '1',
    'Spring': '2',
    'Fall': '4'
}
subjectCodes = {
'ANTH': 'ANTH ',
'ART': 'ART ',
'ARTHI': 'ARTHI',
'ARTST': 'ARTST',
'ASAM': 'AS AM',
'ASTRO': 'ASTRO',
'BIO': 'BIOL ',
'BMSE': 'BMSE ',
'BLST': 'BL ST',
'CHEMENG': 'CH E ',
'CHEMBIO': 'CHEM ',
'CHST': 'CH ST',
'CHIN': 'CHIN ',
'CLASS': 'CLASS',
'COMM': 'COMM ',
'COMLIT': 'C LIT',
'CMPSC': 'CMPSC',
'CMPTG': 'CMPTG',
'CNCSP': 'CNCSP',
'DANCE': 'DANCE',
'DYNS': 'DYNS ',
'EARTH': 'EARTH',
'EACS': 'EACS ',
'EEMB': 'EEMB ',
'ECON': 'ECON ',
'EDU': 'ED ',
'ECE': 'ECE ',
'ENGR': 'ENGR ',
'ENGL': 'ENGL ',
'EDS': 'EDS ',
'ESM': 'ESM ',
'ENVST': 'ENV S',
'ESS': 'ESS ',
'ES': 'ES ',
'FEMST': 'FEMST',
'FAMST': 'FAMST',
'FRENCH': 'FR ',
'GENST': 'GEN S',
'GEOG': 'GEOG ',
'GER': 'GER ',
'GPS': 'GPS ',
'GLOBL': 'GLOBL',
'GRAD': 'GRAD ',
'GREEK': 'GREEK',
'HEB': 'HEB ',
'HIST': 'HIST ',
'INT': 'INT ',
'ITAL': 'ITAL ',
'JAPAN': 'JAPAN',
'KOR': 'KOR ',
'LATIN': 'LATIN',
'LATST': 'LAIS ',
'LING': 'LING ',
'LIT': 'LIT ',
'MARSC': 'MARSC',
'MATRL': 'MATRL',
'MATH': 'MATH ',
'MENG': 'ME ',
'MAT': 'MAT ',
'MST': 'ME ST',
'MEST': 'MES ',
'MS': 'MS ',
'MCDB': 'MCDB ',
'MUS': 'MUS ',
'MPL': 'MUS A',
'PHIL': 'PHIL ',
'PHYS': 'PHYS ',
'POLS': 'POL S',
'PORTU': 'PORT ',
'PSY': 'PSY ',
'RST': 'RG ST',
'RENST': 'RENST',
'RUSS': 'RUSS ',
'SLAV': 'SLAV ',
'SOC': 'SOC ',
'SPAN': 'SPAN ',
'SHS': 'SHS ',
'PSTAT': 'PSTAT',
'TMP': 'TMP ',
'THTR': 'THTR ',
'WRIT': 'WRIT ',
'WL': 'W&L '
}
|
import signal
class Tools():
    """Helpers for string sanitising and for translating exchange codes
    between Barbucket notation and the TradingView / Interactive Brokers
    notations."""

    def __init__(self):
        # BUG FIX: was `def __init(self,)` (missing trailing underscores),
        # so Python never called it as the constructor.
        pass

    @staticmethod
    def remove_special_chars(input_string):
        """Return `input_string` with special characters removed.

        Currently only the apostrophe is stripped; extend `special_chars`
        as needed.
        """
        special_chars = ["'"]
        result = input_string
        for char in special_chars:
            result = result.replace(char, '')
        return result

    # Known exchanges:
    # 'ISLAND'    # NASDAQ / Island
    # 'NASDAQ'    # NASDAQ / Island
    # 'NYSE'      # NYSE
    # 'NYSE ARCA' # Archipelago
    # 'AMEX'      # American Stock Exchange
    # 'BATS'      # Better Alternative Trading System
    # 'VSE'       # Vancouver Stock Exchange
    # 'FWB'       # Frankfurter Wertpapierbörse
    # 'IBIS'      # XETRA
    # 'SWB'       # Stuttgarter Wertpapierbörse
    # 'LSE'       # London Stock Exchange
    # 'LSEETF'    # London Stock Exchange: ETF
    # 'SBF'       # Euronext France
    # 'ENEXT.BE'  #
    # 'AEB'       #

    def encode_exchange_tv(self, exchange):
        """Encode from Barbucket-notation to TradingView-notation.

        Unknown exchanges pass through unchanged.
        """
        exchange_codes = {
            'NASDAQ': "NASDAQ",     # NASDAQ / Island
            'ARCA': "NYSE ARCA",    # Archipelago
            'IBIS': "XETR"}         # XETRA
        return exchange_codes.get(exchange, exchange)

    def decode_exchange_tv(self, exchange):
        """Decode from TradingView-notation to Barbucket-notation.

        (Comment previously said "IB-notation" — the mapping is the inverse
        of encode_exchange_tv.)  Unknown exchanges pass through unchanged.
        """
        exchange_codes = {
            'ISLAND': "NASDAQ",     # NASDAQ / Island
            'NYSE ARCA': "ARCA",    # Archipelago
            'XETR': "IBIS"}         # XETRA
        return exchange_codes.get(exchange, exchange)

    def encode_exchange_ib(self, exchange):
        """Encode from Barbucket-notation to IB-notation.

        Unknown exchanges pass through unchanged.
        """
        exchange_codes = {
            'NASDAQ': "ISLAND",     # NASDAQ / Island
            'ARCA': "NYSE ARCA"}    # Archipelago
        return exchange_codes.get(exchange, exchange)

    def decode_exchange_ib(self, exchange):
        """Decode from IB-notation to Barbucket-notation.

        Unknown exchanges pass through unchanged.
        """
        exchange_codes = {
            'ISLAND': "NASDAQ",     # NASDAQ / Island
            'NYSE ARCA': "ARCA"}    # Archipelago
        return exchange_codes.get(exchange, exchange)
class GracefulExiter():
    """Cooperative Ctrl-C handling for long-running loops.

    The first SIGINT only raises a flag (poll it via ``exit()``) and
    restores the default handler, so a second SIGINT kills the process
    immediately.
    """

    def __init__(self):
        self.state = False
        # Route Ctrl-C to our own handler.
        signal.signal(signal.SIGINT, self.change_state)

    def change_state(self, signum, frame):
        """SIGINT handler: warn, re-arm the default handler, set the flag."""
        print("Gracefully stopping. Repeat to stop immediately.")
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        self.state = True

    def exit(self):
        """Return True once a SIGINT has been received."""
        return self.state
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import h5py
import pandas as pd
import numpy as np
def write_profile( profile_array, lengths_array, hdf5_handle, metadata ):
    # NOTE(review): this function appears unfinished — `merge_metagene()` is
    # not defined in this file, none of the parameters are used yet, and
    # nothing is actually written to `hdf5_handle`.  Confirm against the
    # original module before relying on it.
    start_site_coverage = merge_metagene()
    stop_site_coverage = merge_metagene()
    # Do this in array - pythonic way
    #UTR3_counts = merge_region_counts()
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: analyzer module reading the pre-identification
# value map produced by the tracker-driven electron seeding, together with the
# general track collection.
preIdAnalyzer = cms.EDAnalyzer("PreIdAnalyzer",
    PreIdMap=cms.InputTag("trackerDrivenElectronSeeds:preid"),
    TrackCollection=cms.InputTag("generalTracks"),
)
|
from invoke import task
@task
def test(ctx):
    """Invoke task: run the pytest suite over the ``src`` tree."""
    ctx.run("pytest src")
# ... |
import numpy as np
import manga_tools as m
from astropy import wcs, nddata
from astropy import units as u, constants as c
def get_emline_qty(maps, qty, key, sn_th=3., maskbits=list(range(64))):
    '''
    return a masked array for a particular emission-line
    '''
    qty = qty.upper()
    base = 'EMLINE_{}'.format(qty)
    qty_hdu = maps[base]
    ivar_hdu = maps['{}_IVAR'.format(base)]
    mask_hdu = maps['{}_MASK'.format(base)]
    # invert the header: channel value (line name) -> header card name
    value_to_card = {value: card for (card, value) in qty_hdu.header.items()}
    # card names look like 'C<n>' with 1-based channel numbers
    card = value_to_card[key]
    ix = int(card[1:]) - 1
    meas = qty_hdu.data[ix, ...]
    ivar = ivar_hdu.data[ix, ...]
    # mask pixels below the S/N threshold or flagged by the DAP mask plane
    low_snr = (meas * np.sqrt(ivar)) < sn_th
    flagged = m.mask_from_maskbits(mask_hdu.data[ix, ...])
    return np.ma.array(meas, mask=np.logical_or(low_snr, flagged))
class MaNGAElines(object):
    """Accessor for emission-line measurements in a MaNGA DAP MAPS HDU list."""

    # Physical units of each supported emission-line quantity.
    units = {'GFLUX': '1e-17 erg s-1 cm-2',
             'SFLUX': '1e-17 erg s-1 cm-2',
             'GEW': 'AA', 'SEW': 'AA'}

    def __init__(self, hdulist, *args, **kwargs):
        self.hdulist = hdulist
        # WCS taken from the BIN_AREA extension header.
        self.wcs = wcs.WCS(hdulist['BIN_AREA'].header)

    def get_qty(self, qty, key, sn_th, maskbits=[30]):
        """Return emission line `key` of quantity `qty` as an NDDataRef.

        Pixels are masked where ivar == 0, where the DAP mask plane flags
        them, or where S/N < `sn_th`.
        NOTE(review): `maskbits` is currently unused in this method.
        """
        maps = self.hdulist
        qty = qty.upper()
        qty_extname = 'EMLINE_{}'.format(qty)
        qty_ivar_extname = 'EMLINE_{}_IVAR'.format(qty)
        qty_mask_extname = 'EMLINE_{}_MASK'.format(qty)
        qty_hdu = maps[qty_extname]
        qty_ivar_hdu = maps[qty_ivar_extname]
        qty_mask_hdu = maps[qty_mask_extname]
        # get a mapping from eline key to channel key
        v2k = {v: k for (k, v) in qty_hdu.header.items()}
        # get a mapping from channel key to channel ('C<n>' -> n-1)
        cstring2ix = lambda s: int(s[1:]) - 1
        ix = cstring2ix(v2k[key])
        meas = qty_hdu.data[ix, ...]
        ivar = qty_ivar_hdu.data[ix, ...]
        ivar_zero = (ivar == 0.)
        # clip avoids divide-by-zero; zero-ivar pixels get std = 0 and are
        # masked below anyway
        std = 1. / np.sqrt(ivar.clip(min=1.0e-20))
        std[ivar_zero] = 0.
        snr_mask = ((meas * np.sqrt(ivar)) < sn_th)
        map_mask = m.mask_from_maskbits(qty_mask_hdu.data[ix, ...])
        mask = np.logical_or.reduce((ivar_zero, map_mask, snr_mask))
        unit = self.units[qty]
        # BUG FIX: previously `wcs=wcs` passed the astropy.wcs *module*
        # instead of the instance WCS built in __init__.
        data = nddata.NDDataRef(
            meas, uncertainty=nddata.StdDevUncertainty(std, unit=unit),
            unit=unit, mask=mask, wcs=self.wcs)
        return data

    @classmethod
    def DAP_from_plateifu(cls, plate, ifu, mpl_v, kind, *args, **kwargs):
        """Alternate constructor: load the DAP MAPS file for (plate, ifu)."""
        hdulist = m.load_dap_maps(plate, ifu, mpl_v, kind)
        return cls(hdulist, *args, **kwargs)
|
from entity.queueEntity.QueueEntity import QueueEntity
class Decide(QueueEntity):
    # Two-way branching entity: routes each incoming customer to the "True"
    # output (outputPointer[0]) or the "False" output (outputPointer[1])
    # depending on the boolean result of evaluating `expression`.
    # NOTE: this module is Python 2 (print statements).
    def __init__(self, simSystem, Type, id, inputPointer, outputPointer, name, expression):
        super(Decide, self).__init__(simSystem, Type, id, inputPointer, outputPointer, name)
        # Python expression string evaluated per customer; see calculate().
        self.expression = expression
    def takeCustomer(self):
        # Customers pass straight through: accept and immediately release.
        print "Decide Entity: " + self.name + " takes one customer"
        self.releaseCustomer()
    def releaseCustomer(self):
        # Route to the True branch (index 0) or the False branch (index 1).
        if self.calculate():
            self.outputPointer[0].takeCustomer()
            print "Decide Entity: " + self.name + " released one customer in True flow"
        else:
            self.outputPointer[1].takeCustomer()
            print "Decide Entity: " + self.name + " released one customer in False flow"
    def calculate(self):
        # `random` is imported locally, presumably so that expressions may
        # reference it (eval sees this function's scope) — confirm with the
        # model definitions that use this entity.
        import random
        # SECURITY: eval() on an arbitrary string — acceptable only if
        # `expression` comes from a trusted model file, never from
        # untrusted input.
        return bool(eval(self.expression))
    def connect(self, other):
        # other is array [YesEntity , NoEntity]
        self.outputPointer.append(other[0])
        self.outputPointer.append(other[1])
        other[0].inputPointer.append(self)
        other[1].inputPointer.append(self)
|
#!/usr/bin/python
#Title : vmc.py
#Usage : python vmc.py -h
#Author : pmorvalho
#Date : February 11, 2022
#Description : Encodes the problem of Virtual Machines Consolidation (VMC) into MaxSAT\
#Notes :
#Python Version: 3.8.5
# (C) Copyright 2022 Pedro Orvalho.
#==============================================================================
import sys
from sys import argv
import argparse
from pysat.pb import *
# --- encoding state (mutated by parse_input / encoding) ---
parts = 1        # current PWCNF partition; partition 1 holds hard clauses
clauses = 0      # number of clauses emitted so far
formula = ""     # buffered formula text (only used with --complete_pwcnf)
num_vars = 0     # highest Boolean variable index allocated so far
# --- instance data (filled by parse_input) ---
# Per-entity lists are 1-indexed; index 0 is unused.
n_servers = None
n_vms = None
vms = None       # vms[v][s] -> variable "VM v is placed on server s"
servers = None   # servers[s] -> variable "server s is active"
soft = None
hard = None      # weight of a hard clause (strictly > any soft cost)
vms_host = []    # current host server of each VM
vms_memory = []  # memory demand of each VM
vms_cpu = []     # CPU demand of each VM
servers_memory = []  # memory capacity of each server
servers_cpu = []     # CPU capacity of each server
def parse_input(f):
    """Parse a VMC instance file into the module-level globals.

    Expected layout (after optional leading lines containing "Total"):
        n_servers
        <memory cpu>        x n_servers
        n_vms
        <memory cpu host>   x n_vms
        k                   (max number of simultaneously active servers)

    Per-server / per-VM lists are 1-indexed; index 0 stays None.
    """
    global n_servers, n_vms, n_colors, vms, servers, parts, hard, args, k, vms_memory, vms_cpu, vms_host, servers_memory, servers_cpu
    # BUG FIX: the file handle was previously opened and never closed.
    with open(f, "r") as fh:
        lines = fh.readlines()
    # skip any header lines (e.g. totals) preceding the instance data
    while "Total" in lines[0]:
        lines.pop(0)
    # getting servers' info
    n_servers = int(lines.pop(0))
    servers_memory = [None] * (n_servers + 1)
    servers_cpu = [None] * (n_servers + 1)
    for s in range(1, n_servers + 1):
        mem, cpu = lines.pop(0).split()
        servers_memory[s] = int(mem)
        servers_cpu[s] = int(cpu)
    # getting VMs' info
    n_vms = int(lines.pop(0))
    vms_host = [None] * (n_vms + 1)
    vms_memory = [None] * (n_vms + 1)
    vms_cpu = [None] * (n_vms + 1)
    for v in range(1, n_vms + 1):
        mem, cpu, host = lines.pop(0).split()
        vms_host[v] = int(host)
        vms_memory[v] = int(mem)
        vms_cpu[v] = int(cpu)
    # hard-clause weight: strictly larger than any possible soft cost
    hard = (max(servers_memory[1:]) * 2) + 1
    k = int(lines.pop(0))
def add_lessEqualK_constraint(lits, k, weights=None):
    """Emit hard clauses enforcing  sum(weights[i] * lits[i]) <= k.

    The pseudo-Boolean constraint is compiled to CNF with PySAT's PBEnc;
    each resulting clause is printed immediately (streamed mode) or
    appended to the module-global `formula` (--complete_pwcnf mode).
    With `weights=None` PBEnc treats all literals as weight 1.
    """
    global formula, clauses, num_vars
    cnf = PBEnc.leq(lits=lits, top_id=num_vars, weights=weights, bound=k)
    # PBEnc may allocate fresh auxiliary variables above top_id.
    num_vars = cnf.nv
    for cl in cnf.clauses:
        clauses += 1
        if not args.complete_pwcnf:
            print(str(parts)+" "+str(hard)+" "+" ".join([str(c) for c in cl])+" 0")
        else:
            formula = formula+str(parts)+" "+str(hard)+" "+" ".join([str(c) for c in cl])+" 0\n"
def encoding():
    """Emit the PWCNF MaxSAT encoding of the parsed VMC instance.

    Clauses are printed as they are generated unless --complete_pwcnf is
    set, in which case they are buffered in the module-global `formula`
    (slower, but allows a complete header).  Variables: one X_{v,s} per
    (VM, server) pair, plus one activity indicator per server.
    """
    global formula, clauses
    global n_servers, n_vms, n_colors, vms, servers, parts, hard, args, k, num_vars, vms_host, vms_memory, vms_cpu
    # initializing variables for VMs (X_{v,s}: VM v placed on server s)
    v = 1
    vms = [[[]] for _ in range(n_vms+1)]
    for _ in range(1,n_servers+1):
        for v in range(1,n_vms+1):
            num_vars += 1
            vms[v].append(num_vars)
    if args.debbug:
        for v in range(1, n_vms+1):
            print("VM {v}:".format(v=v), vms[v][1:], file=sys.stderr)
    # one indicator variable per server: "server s is active"
    servers = [None for _ in range(n_servers+1)]
    for s in range(1,n_servers+1):
        num_vars += 1
        servers[s] = num_vars
    if args.debbug:
        for s in range(1, n_servers+1):
            print("Server {s}: ".format(s=s), servers[s], file=sys.stderr)
    # Header; variable and clause counts are printed at the end instead,
    # for efficiency (streamed mode only).
    if not args.complete_pwcnf:
        print("p pwcnf XX {h} {p}".format(h=hard,p=n_vms+1 if args.pwcnf_vms else n_servers + 1))
    # Each VM must be assigned to at least one physical server
    for v in range(1,n_vms+1):
        c = ""
        for s in range(1,n_servers+1):
            c = c+" "+str(vms[v][s])
        clauses += 1
        if not args.complete_pwcnf:
            print(str(parts)+" "+str(hard)+c+" 0")
        else:
            formula = formula +str(parts)+" "+str(hard)+c+" 0\n"
    # Each VM must be assigned to at most one server (pairwise encoding)
    for v in range(1,n_vms+1):
        c = ""
        for s in range(1,n_servers+1):
            for s2 in range(s+1,n_servers+1):
                clauses += 1
                if not args.complete_pwcnf:
                    print(str(parts)+" "+str(hard)+" "+ str(-vms[v][s])+" "+ str(-vms[v][s2])+" 0")
                else:
                    formula = formula +str(parts)+" "+str(hard)+" "+ str(-vms[v][s])+" "+ str(-vms[v][s2])+" 0\n"
    # If at least one VM is assigned to a server, then that server is active
    for s in range(1,n_servers+1):
        for v in range(1,n_vms+1):
            clauses += 1
            if not args.complete_pwcnf:
                print(str(parts)+" "+str(hard)+" "+ str(-vms[v][s])+" "+ str(servers[s])+" 0")
            else:
                formula = formula +str(parts)+" "+str(hard)+" "+ str(-vms[v][s])+" "+ str(servers[s])+" 0\n"
    # If no VM is assigned to a server, then that server is inactive
    for s in range(1,n_servers+1):
        c = ""
        for v in range(1,n_vms+1):
            c = c+" "+str(vms[v][s])
        clauses += 1
        if not args.complete_pwcnf:
            print(str(parts)+" "+str(hard)+c+" "+str(-servers[s])+" 0")
        else:
            formula = formula +str(parts)+" "+str(hard)+c+" "+str(-servers[s])+" 0\n"
    # The memory and CPU used by the VMs on a server must not exceed that
    # server's memory and CPU capacity
    for s in range(1,n_servers+1):
        lits = []
        for v in range(1,n_vms+1):
            lits.append(vms[v][s])
        # Memory constraint
        if args.debbug:
            print("VMs literals:", lits, file=sys.stderr)
            print("Server {s}\nMemory {m} VMs:{w}".format(s=s, m=servers_memory[s], w=vms_memory[1:]), file=sys.stderr)
        add_lessEqualK_constraint(lits, servers_memory[s], weights=vms_memory[1:])
        # CPU constraint
        if args.debbug:
            print("CPU {c} VMs:{w}".format(c=servers_cpu[s], w=vms_cpu[1:]), file=sys.stderr)
        add_lessEqualK_constraint(lits, servers_cpu[s], weights=vms_cpu[1:])
    # A maximum of K servers (K < n) are to be active
    add_lessEqualK_constraint(servers[1:], k)
    # Soft clauses: penalise migrating a VM away from its current host.
    # Partition per VM (--pwcnf_vms) or per server (default).
    parts = n_vms+1 if args.pwcnf_vms else n_servers + 1
    for v in range(1,n_vms+1):
        for s in range(1,n_servers+1):
            if vms_host[v] != s:
                cost = vms_memory[v] if not args.unweighted else 1 # unweighted version (cost=1)
                p = v+1 if args.pwcnf_vms else s+1
                clauses += 1
                if not args.complete_pwcnf:
                    print(str(p)+" "+str(cost)+" "+ str(-vms[v][s]) + " 0")
                else:
                    formula = formula + str(p)+" "+str(cost)+" "+ str(-vms[v][s]) + " 0\n"
    # Streamed mode: final variable and clause counts go at the end of the
    # output (the header above used XX as a placeholder).
    if not args.complete_pwcnf:
        print(num_vars)
        print(clauses)
def parser():
    """Build the vmc.py command-line interface and parse sys.argv."""
    ap = argparse.ArgumentParser(prog='vmc.py', formatter_class=argparse.RawTextHelpFormatter)
    ap.add_argument('-f', '--input_file', nargs='?', help='input file')
    ap.add_argument('-d', '--debbug', action='store_true', default=False, help='Debug info')
    ap.add_argument('-cpwcnf', '--complete_pwcnf', action='store_true', default=False, help='Writes the complete PWCNF with the value of variables and clauses in the header and not in the end of the file. This approach is quite slower (SLOW).')
    ap.add_argument('-ps', '--pwcnf_servers', action='store_true', default=False, help='Output format: PWCNF (colors) (DEFAULT)')
    ap.add_argument('-pv', '--pwcnf_vms', action='store_true', default=False, help='Output format: PWCNF (nodes)')
    ap.add_argument('-uw', '--unweighted', action='store_true', default=False, help='Unweighted Version of the VMC problem. If this flag is not used then the weighted version is used.')
    return ap.parse_args(argv[1:])
def main():
    """Parse the instance file, emit the encoding, and — in
    --complete_pwcnf mode — print the full header plus buffered formula."""
    parse_input(args.input_file)
    encoding()
    if args.complete_pwcnf:
        header = "p pwcnf {v} {c} {h} {p}".format(v=num_vars, c=clauses, h=hard, p=parts)
        print(header)
        print(formula, end="")
# Script entry point.  `args` is intentionally module-global: parse_input(),
# encoding() and add_lessEqualK_constraint() all read it.
if __name__ == "__main__":
    args = parser()
    main()
|
from django.apps import AppConfig
class IrradiationsConfig(AppConfig):
    """Django application configuration for the `irradiations` app."""
    default_auto_field = 'django.db.models.AutoField'
    name = 'irradiations'
|
'''test_jutil.py - test the high-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
import gc
import os
import numpy as np
import threading
import unittest
import sys
import javabridge
# Monkey patch some half-corrent implementations of methods that only
# appeared in Python 2.7.
# Monkey patch some half-correct implementations of methods that only
# appeared in Python 2.7 (no-ops on any modern Python).
if not hasattr(unittest.TestCase, 'assertIn'):
    unittest.TestCase.assertIn = lambda self, a, b: self.assertTrue(a in b)
if not hasattr(unittest.TestCase, 'assertNotIn'):
    unittest.TestCase.assertNotIn = lambda self, a, b: self.assertTrue(a not in b)
if not hasattr(unittest.TestCase, 'assertSequenceEqual'):
    # BUG FIX: the original asserted on the list comprehension itself, and a
    # non-empty list is always truthy — unequal sequences still passed.
    # Compare lengths and every element pair instead.
    unittest.TestCase.assertSequenceEqual = lambda self, a, b: self.assertTrue(
        len(a) == len(b) and all(aa == bb for aa, bb in zip(a, b)))
class TestJutil(unittest.TestCase):
def setUp(self):
self.env = javabridge.attach()
def tearDown(self):
javabridge.detach()
def test_01_01_to_string(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.to_string(jstring), "Hello, world")
def test_01_02_make_instance(self):
jobject = javabridge.make_instance("java/lang/Object", "()V")
self.assertTrue(javabridge.to_string(jobject).startswith("java.lang.Object"))
def test_01_03_call(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.call(jstring, "charAt", "(I)C", 0), "H")
def test_01_03_01_static_call(self):
result = javabridge.static_call("Ljava/lang/String;", "valueOf",
"(I)Ljava/lang/String;",123)
self.assertEqual(result, "123")
def test_01_04_make_method(self):
env = self.env
class String(object):
def __init__(self):
self.o = env.new_string_utf("Hello, world")
charAt = javabridge.make_method("charAt", "(I)C", "My documentation")
s = String()
self.assertEqual(s.charAt.__doc__, "My documentation")
self.assertEqual(s.charAt(0), "H")
def test_01_05_00_get_static_field(self):
klass = self.env.find_class("java/lang/Short")
self.assertEqual(javabridge.get_static_field(klass, "MAX_VALUE", "S"), 2**15 - 1)
def test_01_05_01_no_field_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'java/lang/Object', "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_02_no_class_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'no/such/class', "field", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_03_set_static_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
test_cases = (
("fs_char", "C", "A"),
("fs_byte", "B", 3),
("fs_short", "S", 15),
("fs_int", "I", 392),
("fs_long", "J", -14),
("fs_float", "F", 1.03),
("fs_double", "D", -889.1),
("fs_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("fs_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_static_field(class_name, field_name, signature, value)
v = javabridge.get_static_field(class_name, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_01_05_04_no_field_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'java/lang/Object', "NoSuchField", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_05_no_class_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'no/such/class', "field", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_06_get_enumeration_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
keys = javabridge.call(properties, "keys", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(keys)
has_java_vm_name = False
while(enum.hasMoreElements()):
key = javabridge.to_string(enum.nextElement())
if key == "java.vm.name":
has_java_vm_name = True
self.assertTrue(has_java_vm_name)
def test_01_07_get_dictionary_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
self.assertTrue(d.size() > 10)
self.assertFalse(d.isEmpty())
keys = javabridge.get_enumeration_wrapper(d.keys())
values = javabridge.get_enumeration_wrapper(d.elements())
n_elems = d.size()
for i in range(n_elems):
self.assertTrue(keys.hasMoreElements())
key = javabridge.to_string(keys.nextElement())
self.assertTrue(values.hasMoreElements())
value = javabridge.to_string(values.nextElement())
self.assertEqual(javabridge.to_string(d.get(key)), value)
self.assertFalse(keys.hasMoreElements())
self.assertFalse(values.hasMoreElements())
def test_01_08_jenumeration_to_string_list(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
enum = javabridge.get_enumeration_wrapper(d.keys())
for i in range(d.size()):
key = javabridge.to_string(enum.nextElement())
self.assertEqual(key, keys[i])
def test_01_09_jdictionary_to_string_dictionary(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
pyd = javabridge.jdictionary_to_string_dictionary(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
for key in keys:
value = javabridge.to_string(d.get(key))
self.assertEqual(pyd[key], value)
def test_01_10_make_new(self):
env = self.env
class MyClass:
new_fn = javabridge.make_new("java/lang/Object", '()V')
def __init__(self):
self.new_fn()
my_instance = MyClass()
def test_01_11_class_for_name(self):
c = javabridge.class_for_name('java.lang.String')
name = javabridge.call(c, 'getCanonicalName', '()Ljava/lang/String;')
self.assertEqual(name, 'java.lang.String')
def test_02_01_access_object_across_environments(self):
#
# Create an object in one environment, close the environment,
# open a second environment, then use it and delete it.
#
env = self.env
self.assertTrue(isinstance(env,javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_02_delete_in_environment(self):
env = self.env
self.assertTrue(isinstance(env, javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
del my_integer
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_03_death_and_resurrection(self):
'''Put an object into another in Java, delete it in Python and recover it'''
np.random.seed(24)
my_value = np.random.randint(0, 1000)
jobj = javabridge.make_instance("java/lang/Integer", "(I)V", my_value)
integer_klass = self.env.find_class("java/lang/Integer")
jcontainer = self.env.make_object_array(1, integer_klass)
self.env.set_object_array_element(jcontainer, 0, jobj)
del jobj
gc.collect()
jobjs = self.env.get_object_array_elements(jcontainer)
jobj = jobjs[0]
self.assertEqual(javabridge.call(jobj, "intValue", "()I"), my_value)
def test_02_04_non_java_thread_deletes_it(self):
'''Delete a Java object on a not-Java thread'''
refs = [javabridge.make_instance("java/lang/Integer", "(I)V", 5)]
def run():
del refs[0]
gc.collect()
t = threading.Thread(target = run)
t.start()
t.join()
def test_03_01_cw_from_class(self):
'''Get a class wrapper from a class'''
c = javabridge.get_class_wrapper(javabridge.make_instance('java/lang/Integer', '(I)V',
14))
def test_03_02_cw_from_string(self):
'''Get a class wrapper from a string'''
c = javabridge.get_class_wrapper("java.lang.Number")
def test_03_03_cw_get_classes(self):
c = javabridge.get_class_wrapper('java.lang.Number')
classes = c.getClasses()
self.assertEqual(len(javabridge.get_env().get_object_array_elements(classes)), 0)
def test_03_04_cw_get_annotation(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotation = c.getAnnotation(javabridge.class_for_name('java.lang.Deprecated'))
self.assertTrue(annotation is not None)
def test_03_05_cw_get_annotations(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotations = c.getAnnotations()
annotations = javabridge.get_env().get_object_array_elements(annotations)
self.assertEqual(len(annotations), 1)
self.assertTrue(javabridge.to_string(annotations[0]).startswith('@java.lang.Deprecated'))
def test_03_06_cw_get_constructors(self):
c = javabridge.get_class_wrapper('java.lang.String')
constructors = c.getConstructors()
constructors = javabridge.get_env().get_object_array_elements(constructors)
self.assertEqual(len(constructors), 15)
def test_03_07_cw_get_fields(self):
c = javabridge.get_class_wrapper('java.lang.String')
fields = c.getFields()
fields = javabridge.get_env().get_object_array_elements(fields)
self.assertEqual(len(fields), 1)
self.assertEqual(javabridge.call(fields[0], 'getName', '()Ljava/lang/String;'),
"CASE_INSENSITIVE_ORDER")
def test_03_08_cw_get_field(self):
c = javabridge.get_class_wrapper('java.lang.String')
field = c.getField('CASE_INSENSITIVE_ORDER')
modifiers = javabridge.call(field, 'getModifiers', '()I')
static = javabridge.get_static_field('java/lang/reflect/Modifier','STATIC','I')
self.assertEqual((modifiers & static), static)
def test_03_09_cw_get_method(self):
sclass = javabridge.class_for_name('java.lang.String')
iclass = javabridge.get_static_field('java/lang/Integer', 'TYPE',
'Ljava/lang/Class;')
c = javabridge.get_class_wrapper('java.lang.String')
m = c.getMethod('charAt', [ iclass ])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')), 'char')
m = c.getMethod('concat', [ sclass])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')),
'class java.lang.String')
def test_03_10_cw_get_methods(self):
c = javabridge.get_class_wrapper('java.lang.String')
mmm = javabridge.get_env().get_object_array_elements(c.getMethods())
self.assertTrue(any([javabridge.call(m, 'getName', '()Ljava/lang/String;') == 'concat'
for m in mmm]))
def test_03_11_cw_get_constructor(self):
c = javabridge.get_class_wrapper('java.lang.String')
sclass = javabridge.class_for_name('java.lang.String')
constructor = c.getConstructor([sclass])
self.assertEqual(javabridge.call(constructor, 'getName', '()Ljava/lang/String;'),
'java.lang.String')
def test_04_01_field_get(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
v = f.get(None)
self.assertEqual(javabridge.to_string(v), '127')
def test_04_02_field_name(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
self.assertEqual(f.getName(), 'MAX_VALUE')
def test_04_03_field_type(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
t = f.getType()
self.assertEqual(javabridge.to_string(t), 'byte')
def test_05_01_run_script(self):
self.assertEqual(javabridge.run_script("2+2"), 4)
def test_05_02_run_script_with_inputs(self):
self.assertEqual(javabridge.run_script("a+b", bindings_in={"a":2, "b":3}), 5)
def test_05_03_run_script_with_outputs(self):
outputs = { "result": None}
javabridge.run_script("var result = 2+2;", bindings_out=outputs)
self.assertEqual(outputs["result"], 4)
def test_06_01_execute_asynch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"))
def test_06_02_execute_synch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"), True)
def test_06_03_future_main(self):
c = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
result = javabridge.execute_future_in_main_thread(
javabridge.make_future_task(c, fn_post_process=javabridge.unwrap_javascript))
self.assertEqual(result, 4)
def test_07_01_wrap_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
self.assertFalse(wfuture.isDone())
self.assertFalse(wfuture.isCancelled())
wfuture.run()
self.assertTrue(wfuture.isDone())
self.assertEqual(wfuture.get(), 4)
def test_07_02_cancel_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
wfuture.cancel(True)
self.assertTrue(wfuture.isCancelled())
self.assertRaises(javabridge.JavaException, wfuture.get)
def test_07_03_make_future_task_from_runnable(self):
future = javabridge.make_future_task(
javabridge.run_script("new java.lang.Runnable() { run: function() {}};"),
11)
future.run()
self.assertEqual(javabridge.call(future.get(), "intValue", "()I"), 11)
def test_07_04_make_future_task_from_callable(self):
call_able = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
future = javabridge.make_future_task(
call_able, fn_post_process=javabridge.unwrap_javascript)
future.run()
self.assertEqual(future.get(), 4)
def test_08_01_wrap_collection(self):
c = javabridge.make_instance("java/util/HashSet", "()V")
w = javabridge.get_collection_wrapper(c)
self.assertFalse(hasattr(w, "addI"))
self.assertEqual(w.size(), 0)
self.assertEqual(len(w), 0)
self.assertTrue(w.isEmpty())
def test_08_02_add(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
self.assertTrue(c.add("Foo"))
self.assertEqual(len(c), 1)
self.assertFalse(c.isEmpty())
def test_08_03_contains(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c.add("Foo")
self.assertTrue(c.contains("Foo"))
self.assertFalse(c.contains("Bar"))
self.assertIn("Foo", c)
self.assertNotIn("Bar", c)
def test_08_04_addAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2.addAll(c1.o)
self.assertIn("Foo", c2)
def test_08_05__add__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c3 = c1 + c2
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c3)
c4 = c3 + ["Hello", "World"]
self.assertIn("Hello", c4)
self.assertIn("World", c4)
def test_08_06__iadd__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2 += c1
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c2)
c2 += ["Hello", "World"]
self.assertIn("Hello", c2)
self.assertIn("World", c2)
def test_08_07_contains_all(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
self.assertFalse(c2.containsAll(c1.o))
c2 += c1
self.assertTrue(c2.containsAll(c1.o))
def test_08_08_remove(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c1.remove("Foo")
self.assertNotIn("Foo", c1)
def test_08_09_removeAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.removeAll(c2)
self.assertNotIn("Foo", c1)
def test_08_10_retainAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.retainAll(c2)
self.assertIn("Foo", c1)
self.assertNotIn("Bar", c1)
def test_08_11_toArray(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
result = [javabridge.to_string(x) for x in c1.toArray()]
self.assertIn("Foo", result)
self.assertIn("Bar", result)
def test_08_12_make_list(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertSequenceEqual(l, ["Foo", "Bar"])
self.assertTrue(hasattr(l, "addI"))
def test_08_13_addI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addI(1, "Baz")
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_14_addAllI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addAllI(1, javabridge.make_list(["Baz"]))
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_15_indexOf(self):
    """indexOf()/lastIndexOf() report element positions like java.util.List."""
    l = javabridge.make_list(["Foo", "Bar"])
    self.assertEqual(l.indexOf("Bar"), 1)
    self.assertEqual(l.lastIndexOf("Foo"), 0)
def test_08_16_get(self):
    """get() retrieves the element at an index."""
    l = javabridge.make_list(["Foo", "Bar"])
    self.assertEqual(l.get(1), "Bar")
def test_08_17_set(self):
    """set() replaces the element at an index."""
    l = javabridge.make_list(["Foo", "Bar"])
    l.set(1, "Baz")
    self.assertEqual(l.get(1), "Baz")
def test_08_18_subList(self):
    """subList() returns the half-open [start, end) slice of the Java list."""
    l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
    self.assertSequenceEqual(l.subList(1, 3), ["Bar", "Baz"])
def test_08_19__getitem__(self):
    """__getitem__ supports Python indexing, negative indices, slices and steps."""
    l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
    self.assertEqual(l[1], "Bar")
    self.assertEqual(l[-2], "Hello")
    self.assertSequenceEqual(l[1:3], ["Bar", "Baz"])
    self.assertSequenceEqual(l[::3], ["Foo", "Hello"])
def test_08_20__setitem__(self):
    """__setitem__ assigns through to the underlying Java list."""
    l = javabridge.make_list(["Foo", "Bar"])
    l[1] = "Baz"
    self.assertEqual(l.get(1), "Baz")
def test_08_21__delitem__(self):
    """__delitem__ removes the element at an index."""
    l = javabridge.make_list(["Foo", "Bar", "Baz"])
    del l[1]
    self.assertSequenceEqual(l, ["Foo", "Baz"])
def test_09_01_00_get_field(self):
    """get_field() reads a double instance field via its JNI signature 'D'."""
    o = javabridge.make_instance("org/cellprofiler/javabridge/test/RealRect", "(DDDD)V", 1, 2, 3, 4)
    self.assertEqual(javabridge.get_field(o, "x", "D"), 1)
def test_09_02_get_field_no_such_field(self):
    """get_field() on a nonexistent field raises JavaException."""
    def fn():
        o = javabridge.make_instance("java/lang/Object", "()V")
        javabridge.get_field(o, "NoSuchField", "I")
    self.assertRaises(javabridge.JavaException, fn)
def test_09_03_set_field(self):
    """set_field()/get_field() round-trip every primitive JNI type plus objects and null."""
    class_name = "org/cellprofiler/javabridge/test/RealRect"
    o = javabridge.make_instance(class_name, "()V")
    # (field name, JNI type signature, value to store) for each supported type.
    test_cases = (
        ("f_char", "C", "A"),
        ("f_byte", "B", 3),
        ("f_short", "S", 15),
        ("f_int", "I", 392),
        ("f_long", "J", -14),
        ("f_float", "F", 1.03),
        ("f_double", "D", -889.1),
        ("f_object", "Ljava/lang/Object;",
         javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
        ("f_object", "Ljava/lang/Object;", None))
    for field_name, signature, value in test_cases:
        javabridge.set_field(o, field_name, signature, value)
        v = javabridge.get_field(o, field_name, signature)
        if isinstance(value, float):
            # Floats lose precision crossing the JNI boundary (e.g. 'F').
            self.assertAlmostEqual(v, value)
        elif isinstance(value, javabridge.JB_Object):
            # Java objects compare via their own equals(), not Python ==.
            self.assertTrue(javabridge.call(
                value, "equals", "(Ljava/lang/Object;)Z", v))
        else:
            self.assertEqual(v, value)
def test_09_04_set_field_no_such_field(self):
    """set_field() on a nonexistent field raises JavaException."""
    def fn():
        o = javabridge.make_instance("java/lang/Object", "()V")
        javabridge.set_field(o, "NoSuchField", "I", 1)
    self.assertRaises(javabridge.JavaException, fn)
def test_10_01_iterate_java_on_non_iterator(self):
    """iterate_java() on a non-Iterator raises JavaError instead of crashing."""
    #
    # Regression test of issue #11: the expression below segfaulted
    #
    def fn():
        list(javabridge.iterate_java(javabridge.make_list(range(10)).o))
    self.assertRaises(javabridge.JavaError, fn)
def test_10_01_class_path(self):
    """start_vm() rejects classpath-style args that must be set another way."""
    # NOTE(review): the "10_01" number duplicates the previous test's; the
    # method names still differ, so both are discovered and run.
    for arg in ['-cp', '-classpath', '-Djava.class.path=foo']:
        self.assertRaises(ValueError, lambda: javabridge.start_vm([arg]))
def test_11_01_make_run_dictionary(self):
    """make_run_dictionary() converts a java.util.Hashtable into a Python dict,
    turning String values into str and passing other objects through."""
    from javabridge.jutil import make_run_dictionary
    o = javabridge.make_instance("java/util/Hashtable", "()V")
    a = javabridge.make_instance("java/util/ArrayList", "()V")
    javabridge.call(
        o, "put",
        "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
        "foo", "bar")
    javabridge.call(
        o, "put",
        "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
        "baz", a)
    d = make_run_dictionary(o)
    self.assertIn("foo", d)
    self.assertEqual(d["foo"], "bar")
    self.assertIn("baz", d)
    # The ArrayList value must be the same Java object (equals() is true).
    self.assertTrue(javabridge.call(d["baz"], "equals",
                                    "(Ljava/lang/Object;)Z", a))
def test_12_01_jref(self):
    """redeem_jref() returns the same live Python object, not a copy."""
    o = dict(foo="bar", baz="2")
    ref_id, ref = javabridge.create_jref(o)
    alt = javabridge.redeem_jref(ref_id)
    # Mutating the original must be visible through the redeemed reference.
    o["bar"] = "bunny"
    for key in o:
        self.assertTrue(key in alt)
        self.assertEqual(o[key], alt[key])
def test_12_02_jref_lost(self):
    """Dropping the jref handle invalidates the id (refs are weakly held)."""
    o = dict(foo="bar", baz="2")
    ref_id, ref = javabridge.create_jref(o)
    del ref
    self.assertRaises(KeyError, javabridge.redeem_jref, ref_id)
def test_12_03_jref_create_and_lock(self):
    """create_and_lock_jref() keeps a ref alive across embedded-CPython calls
    until unlock_jref() releases it."""
    cpython = javabridge.JClassWrapper(
        'org.cellprofiler.javabridge.CPython')()
    d = javabridge.JClassWrapper('java.util.Hashtable')()
    result = javabridge.JClassWrapper('java.util.ArrayList')()
    d.put("result", result)
    ref_self = javabridge.create_and_lock_jref(self)
    d.put("self", ref_self)
    # First embedded interpreter call: create a locked ref and hand its id back.
    cpython.execute(
        'import javabridge\n'
        'x = { "foo":"bar"}\n'
        'ref_id = javabridge.create_and_lock_jref(x)\n'
        'javabridge.JWrapper(result).add(ref_id)', d, d)
    # Second call: redeem both refs and unlock the inner one.
    cpython.execute(
        'import javabridge\n'
        'ref_id = javabridge.JWrapper(result).get(0)\n'
        'self = javabridge.redeem_jref(javabridge.to_string(self))\n'
        'self.assertEqual(javabridge.redeem_jref(ref_id)["foo"], "bar")\n'
        'javabridge.unlock_jref(ref_id)', d, d)
    javabridge.unlock_jref(ref_self)
    # Once unlocked, the ref id must no longer be redeemable.
    self.assertRaises(KeyError, javabridge.redeem_jref, ref_self)
def test_13_01_unicode_arg(self):
    """Non-ASCII string arguments survive the trip through a Java call."""
    # On 2.x, check that a unicode argument is properly prepared
    s = u"Hola ni\u00F1os"
    s1, s2 = s.split(" ")
    if sys.version_info.major == 2:
        s2 = s2.encode("utf-8")
    env = javabridge.get_env()
    js1 = env.new_string(s1+" ")
    result = javabridge.call(
        js1, "concat", "(Ljava/lang/String;)Ljava/lang/String;", s2)
    self.assertEqual(s, result)
if __name__ == "__main__":
    # Run this test module directly.
    unittest.main()
|
import os
from lxml import etree
class Toc:
    """Reader for a DAISY/NCX table-of-contents document.

    Wraps an lxml-parsed NCX file and exposes its navMap as nested
    (label, source-file-name, children) tuples.
    """

    _TOC_NS = "http://www.daisy.org/z3986/2005/ncx/"

    def __init__(self, toc):
        # ``toc`` is any source lxml.etree.parse accepts (path, file object).
        self._toc = etree.parse(toc)

    def getTitles(self):
        """Return the nested title tree: [(text, src basename, children), ...]."""
        def collect(parent_nav_point):
            entries = []
            for nav_point in self._xpath(parent_nav_point, "toc:navPoint"):
                label = self._xpath(nav_point, "toc:navLabel/toc:text/text()")[0]
                src_ref = self._xpath(nav_point, "toc:content/@src")[0]
                src_name = os.path.split(src_ref)[1]
                entries.append((label, src_name, collect(nav_point)))
            return entries

        nav_map = self._xpath(self._toc, "/toc:ncx/toc:navMap")[0]
        return collect(nav_map)

    def _xpath(self, element, xpath):
        # All queries use the NCX namespace under the "toc" prefix.
        return element.xpath(xpath, namespaces={"toc": Toc._TOC_NS})
# -*- coding: utf-8 -*-
import re
# Filter a crawler URL dump: drop every line that matches a known-uninteresting
# pattern (forum pages, article/news sections, login/payment redirects, ...)
# and write the survivors to a second file.
#
# BUG FIX: the original opened both files in binary mode ('rb'/'wb') while
# searching with str regex patterns; on Python 3, re.search(str_pattern, bytes)
# raises TypeError. Text mode with UTF-8 fixes that.
_PATTERNS = [
    # --- forum (the forum was not part of the site redesign) ---
    r'bbs\.6711\.com\/thread-\d+-\d+-\d+.html',  # thread comment pagination
    r'bbs\.6711\.com\/viewthread\.php\?.+',  # thread
    r'bbs\.6711\.com\/viewthread\.php\?extra=',  # thread
    r'bbs\.6711\.com\/viewthread\.php\?authorid=\d+',  # thread
    r'bbs\.6711\.com\/viewthread\.php\?tid=\d+',  # forum thread
    r'bbs\.6711\.com\/viewthread\.php\?action=printable&tid=\d+',  # printable view
    r'bbs\.6711\.com\/redirect.php\?goto=lastpost&tid=\d+',
    r'bbs\.6711\.com\/misc\.php\?action',  # redirect helper?
    r'bbs\.6711\.com\/post\.php\?action=reply',  # reply to topic
    r'bbs\.6711\.com\/post\.php\?action=newthread&fid=\d+',  # new topic
    r'bbs\.6711\.com\/index.html\?.+',
    r'bbs\.6711\.com\/forum-\d+-\d+.html',  # per-game forum pagination
    r'bbs\.6711\.com\/forumdisplay\.php\?fid=',  # dynamic forum pagination, replaced post-redesign
    r'bbs\.6711\.com\/my\.php\?buddysubmit=yes&item=buddylist&newbuddyid=\d+',
    r'bbs\.6711\.com\/space\.php\?.+',  # user profile
    r'bbs\.6711\.com\/pm\.php\?action=new&uid=\d+',  # send message
    r'bbs\.6711\.com\/faq.php\?action=grouppermission',
    r'bbs\.6711\.com\/faq.php\?action=faq',
    r'bbs\.6711\.com\/faq.php\?action=credits',
    r'bbs\.6711\.com\/attachment\.php\?aid=',  # attachments, mostly images
    r'bbs\.6711\.com\/admincp\.php\?action=settings',  # admin backend
    r'bbs\.6711\.com\/redirect\.php\?goto=',
    # ================== after the redesign ==========================
    # post-redesign article URLs: all game-site articles are managed here
    r'www\.6711\.com\/ziliao',
    r'www\.6711\.com\/servers_list_\w+.html',  # game server list
    r'www\.6711\.com\/huodong\/new_server\/\w+.php\?.+',
    r'www\.6711\.com\/news',
    r'www\.6711\.com\/open_server',
    r'www\.6711\.com\/yxgl',
    r'www\.6711\.com\/gamedata',  # game data
    r'www\.6711\.com\/hdzx',  # event news
    r'www\.6711\.com\/wenxuan',  # player writings
    r'www\.6711\.com\/gonglue',  # guides
    r'www\.6711\.com\/yxjt',  # game screenshots
    r'www\.6711\.com\/yxzl',  # game reference
    r'www\.6711\.com\/youxigonglue',  # guides
    r'www\.6711\.com\/youxiziliao',  # guides
    r'www\.6711\.com\/67phone\/',
    # ================== before the redesign ==========================
    # pre-redesign article URLs: each game site had its own directory
    r'www\.6711\.com\/\w+\/news',
    r'www\.6711\.com\/\w+\/open_server',
    r'www\.6711\.com\/\w+\/yxgl',
    r'www\.6711\.com\/\w+\/gamedata',  # game data
    r'www\.6711\.com\/\w+\/hdzx',  # event news
    r'www\.6711\.com\/\w+\/wenxuan',  # player writings
    r'www\.6711\.com\/\w+\/gonglue',  # guides
    r'www\.6711\.com\/\w+\/huodong',  # events
    r'www\.6711\.com\/\w+\/yxjt',  # game screenshots
    r'www\.6711\.com\/\w+\/yxzl',  # game reference
    r'www\.6711\.com\/\w+\/youxigonglue',  # guides
    r'www\.6711\.com\/\w+\/youxiziliao',  # guides
    r'www\.6711\.com\/\w+\/jietu',  # screenshots
    r'www\.6711\.com\/\w+\/zonghe',  # misc
    r'www\.6711\.com\/getcard_content\.html\?game=\w+',  # gift card, redirects to gift.6711
    r'www\.6711\.com\/6711_logout\.php\?local=',  # logout, redirects to my.6711.com
    r'www\.6711\.com\/game_login.html\?game=\w+&server=S\d+',  # enter game, redirects to game.67
    # post-redesign game login link [game server] [game name] [fid?]
    r's\d+\.\w+\.6711\.com\/\?fid=',
    # post-redesign game payment link [game id] [game server id]
    r'pay\.6711\.com\/index\.php\?.+',
    r'pay\.6711\.com\/vpaymap\.html',  # mobile top-up
    r'www\.6711\.com\/users',  # pre-redesign user link
    r'my\.6711\.com\/',  # post-redesign user link
    r'gift\.6711\.com\/\?a=giftDetail&c=index&game_id=\d+&gift_id=\d+',
    r'game\.6711\.com\/\?c=play&game_id',
    # ================== other sites ==========================
    # Baidu OAuth endpoint
    r'openapi\.baidu\.com\/oauth',
    # QQ chat
    r'wpa\.qq\.com\/msgrd\?Menu=yes',
]


def filter_urls(source_path='urls_6711', dest_path='urls_6711_filtered'):
    """Copy lines from *source_path* to *dest_path*, dropping blacklisted URLs.

    A line is dropped when any pattern in ``_PATTERNS`` matches anywhere in it.
    Prints and returns the number of lines kept.
    """
    # Compile once, outside the per-line loop.
    compiled = [re.compile(p) for p in _PATTERNS]
    kept = 0
    with open(source_path, encoding='utf-8') as src, \
            open(dest_path, 'w', encoding='utf-8') as dst:
        for line in src:
            if any(p.search(line) for p in compiled):
                continue
            dst.write(line)
            kept += 1
    print(kept)
    return kept


if __name__ == '__main__':
    # Guarded so importing this module no longer runs the filter as a side
    # effect; running the script directly behaves as before.
    filter_urls()
|
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.ios.show_ntp import ShowNtpAssociations, \
ShowNtpStatus, \
ShowNtpConfig, \
ShowNtpAssociationsDetail
#=========================================================
# Unit test for show ntp associations
#=========================================================
class test_show_ntp_associations(unittest.TestCase):
    """Unit tests for the IOS 'show ntp associations' parser (ShowNtpAssociations).

    Each golden_output_N is raw CLI text fed to a mocked device; the matching
    golden_parsed_output_N is the exact structure the parser must return.
    """
    device = Device(name='aDevice')
    # Empty CLI output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Expected result for golden_output_1: synchronized sys.peer 10.16.2.2.
    golden_parsed_output_1 = {
        'clock_state':
            {'system_status':
                {'associations_address': '10.16.2.2',
                 'associations_local_mode': 'client',
                 'clock_offset': 27.027,
                 'clock_refid': '127.127.1.1',
                 'clock_state': 'synchronized',
                 'clock_stratum': 3,
                 'root_delay': 5.61}
             },
        'peer':
            {'10.16.2.2':
                {'local_mode':
                    {'client':
                        {'delay': 5.61,
                         'jitter': 3.342,
                         'mode': 'synchronized',
                         'offset': 27.027,
                         'poll': 64,
                         'reach': 7,
                         'receive_time': 25,
                         'refid': '127.127.1.1',
                         'remote': '10.16.2.2',
                         'stratum': 3,
                         'configured': True,
                         'local_mode': 'client'}
                     }
                 },
             '10.36.3.3':
                {'local_mode':
                    {'client':
                        {'delay': 0.0,
                         'jitter': 15937.0,
                         'mode': 'unsynchronized',
                         'offset': 0.0,
                         'poll': 512,
                         'reach': 0,
                         'receive_time': '-',
                         'refid': '.STEP.',
                         'remote': '10.36.3.3',
                         'stratum': 16,
                         'configured': True,
                         'local_mode': 'client'}
                     }
                 }
             }
    }
    golden_output_1 = {'execute.return_value': '''
iosv-1#show ntp associations
address ref clock st when poll reach delay offset disp
*~10.16.2.2 127.127.1.1 3 25 64 7 5.610 27.027 3.342
~10.36.3.3 .STEP. 16 - 512 0 0.000 0.000 15937.
* sys.peer, # selected, + candidate, - outlyer, x falseticker, ~ configured
'''}
    # Expected result for golden_output_2: no peer synchronized.
    golden_parsed_output_2 = {
        'clock_state': {
            'system_status': {
                'clock_state': 'unsynchronized'
            }
        },
        'peer': {
            '10.16.2.2': {
                'local_mode': {
                    'client': {
                        'delay': 0.0,
                        'jitter': 15937.0,
                        'local_mode': 'client',
                        'mode': 'unsynchronized',
                        'offset': 0.0,
                        'poll': 64,
                        'reach': 0,
                        'receive_time': 41,
                        'refid': '127.127.1.1',
                        'remote': '10.16.2.2',
                        'configured': True,
                        'stratum': 3}
                }
            },
            '10.36.3.3': {
                'local_mode': {
                    'client': {
                        'delay': 0.0,
                        'jitter': 15937.0,
                        'local_mode': 'client',
                        'mode': 'unsynchronized',
                        'offset': 0.0,
                        'poll': 64,
                        'reach': 0,
                        'receive_time': '-',
                        'refid': '.INIT.',
                        'remote': '10.36.3.3',
                        'configured': True,
                        'stratum': 16}
                }
            }
        }
    }
    golden_output_2 = {'execute.return_value': '''
show ntp associations
address ref clock st when poll reach delay offset disp
~10.16.2.2 127.127.1.1 3 41 64 0 0.000 0.000 15937.
~10.36.3.3 .INIT. 16 - 64 0 0.000 0.000 15937.
* sys.peer, # selected, + candidate, - outlyer, x falseticker, ~ configured
'''}
    # Expected result for golden_output_3: master/selected flags ('*'/'+').
    golden_parsed_output_3 = {
        'clock_state': {
            'system_status': {
                'associations_address': '192.168.13.57',
                'associations_local_mode': 'client',
                'clock_offset': 11.18,
                'clock_refid': '192.168.1.111',
                'clock_state': 'synchronized',
                'clock_stratum': 3,
                'root_delay': 7.9}},
        'peer': {
            '172.31.32.2': {
                'local_mode': {
                    'client': {
                        'configured': True,
                        'delay': 4.2,
                        'jitter': 1.6,
                        'local_mode': 'client',
                        'mode': 'None',
                        'offset': -8.59,
                        'poll': 1024,
                        'reach': 377,
                        'receive_time': 29,
                        'refid': '172.31.32.1',
                        'remote': '172.31.32.2',
                        'stratum': 5
                    }
                }
            },
            '192.168.13.33': {
                'local_mode': {
                    'client': {
                        'configured': True,
                        'delay': 4.1,
                        'jitter': 2.3,
                        'local_mode': 'client',
                        'mode': 'selected',
                        'offset': 3.48,
                        'poll': 128,
                        'reach': 377,
                        'receive_time': 69,
                        'refid': '192.168.1.111',
                        'remote': '192.168.13.33',
                        'stratum': 3
                    }
                }
            },
            '192.168.13.57': {
                'local_mode': {
                    'client': {
                        'configured': True,
                        'delay': 7.9,
                        'jitter': 3.6,
                        'local_mode': 'client',
                        'mode': 'synchronized',
                        'offset': 11.18,
                        'poll': 128,
                        'reach': 377,
                        'receive_time': 32,
                        'refid': '192.168.1.111',
                        'remote': '192.168.13.57',
                        'stratum': 3
                    }
                }
            }
        }
    }
    golden_output_3 = {'execute.return_value': '''
Router#show ntp associations
address ref clock st when poll reach delay offset disp
*~192.168.13.57 192.168.1.111 3 32 128 377 7.9 11.18 3.6
~172.31.32.2 172.31.32.1 5 29 1024 377 4.2 -8.59 1.6
+~192.168.13.33 192.168.1.111 3 69 128 377 4.1 3.48 2.3
* master (synced), # master (unsynced), + selected, - candidate, ~ configured
'''}

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowNtpAssociations(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden_1(self):
        """Synchronized-peer output parses to golden_parsed_output_1."""
        self.maxDiff = None
        self.device = Mock(**self.golden_output_1)
        obj = ShowNtpAssociations(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output_1)

    def test_golden_2(self):
        """Unsynchronized output parses to golden_parsed_output_2."""
        self.maxDiff = None
        self.device = Mock(**self.golden_output_2)
        obj = ShowNtpAssociations(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output_2)

    def test_golden_3(self):
        """Master/selected-flag output parses to golden_parsed_output_3."""
        self.maxDiff = None
        self.device = Mock(**self.golden_output_3)
        obj = ShowNtpAssociations(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output_3)
# ==============================================
# Unit test for 'show ntp status'
# ==============================================
class test_show_ntp_status(unittest.TestCase):
    """Unit tests for the IOS 'show ntp status' parser (ShowNtpStatus)."""
    device = Device(name='aDevice')
    # Empty CLI output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Expected result for golden_output_1: clock synchronized at stratum 4.
    golden_parsed_output_1 = {
        'clock_state': {
            'system_status': {
                'act_freq': 1000.4589,
                'last_update': '182 sec ago',
                'nom_freq': 1000.0003,
                'offset': 27.0279,
                'peerdispersion': 3.34,
                'poll': 64,
                'precision': '2**14',
                'refid': '10.16.2.2',
                'reftime': 'DFA02517.D2F7B9F6 '
                           '(13:40:23.824 EST Wed Nov '
                           '21 2018)',
                'resolution': 1000,
                'rootdelay': 5.61,
                'rootdispersion': 273.61,
                'status': 'synchronized',
                'stratum': 4,
                'uptime': '239700 (1/100 of seconds)'}
        }
    }
    golden_output_1 = {'execute.return_value': '''\
iosv-1#show ntp status
Clock is synchronized, stratum 4, reference is 10.16.2.2
nominal freq is 1000.0003 Hz, actual freq is 1000.4589 Hz, precision is 2**14
ntp uptime is 239700 (1/100 of seconds), resolution is 1000
reference time is DFA02517.D2F7B9F6 (13:40:23.824 EST Wed Nov 21 2018)
clock offset is 27.0279 msec, root delay is 5.61 msec
root dispersion is 273.61 msec, peer dispersion is 3.34 msec
loopfilter state is 'CTRL' (Normal Controlled Loop), drift is -0.000458631 s/s
system poll interval is 64, last update was 182 sec ago.
'''}
    # Expected result for golden_output_2: unsynchronized, no reference clock.
    golden_parsed_output_2 = {
        'clock_state': {
            'system_status': {
                'act_freq': 1000.4923,
                'last_update': '1301 sec ago',
                'nom_freq': 1000.0003,
                'offset': 0.0,
                'peerdispersion': 0.0,
                'poll': 64,
                'precision': '2**13',
                'reftime': 'DFA98D6B.F2F229A7 '
                           '(16:55:55.949 EST Wed Nov '
                           '28 2018)',
                'resolution': 1000,
                'rootdelay': 0.0,
                'rootdispersion': 18.84,
                'status': 'unsynchronized',
                'stratum': 16,
                'uptime': '1938800 (1/100 of seconds)'}
        }
    }
    golden_output_2 = {'execute.return_value': '''\
show ntp status
Clock is unsynchronized, stratum 16, no reference clock
nominal freq is 1000.0003 Hz, actual freq is 1000.4923 Hz, precision is 2**13
ntp uptime is 1938800 (1/100 of seconds), resolution is 1000
reference time is DFA98D6B.F2F229A7 (16:55:55.949 EST Wed Nov 28 2018)
clock offset is 0.0000 msec, root delay is 0.00 msec
root dispersion is 18.84 msec, peer dispersion is 0.00 msec
loopfilter state is 'SPIK' (Spike), drift is -0.000491998 s/s
system poll interval is 64, last update was 1301 sec ago.
iosv-1
'''}

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowNtpStatus(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden_1(self):
        """Synchronized status output parses to golden_parsed_output_1."""
        self.device = Mock(**self.golden_output_1)
        obj = ShowNtpStatus(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_1)

    def test_golden_2(self):
        """Unsynchronized status output parses to golden_parsed_output_2."""
        self.device = Mock(**self.golden_output_2)
        obj = ShowNtpStatus(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_2)
# ===========================================================
# Unit test for 'show ntp config'
# ===========================================================
class test_show_ntp_config(unittest.TestCase):
    """Unit tests for the IOS 'show ntp config' parser (ShowNtpConfig)."""
    device = Device(name='aDevice')
    # Empty CLI output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Two configured servers in the default VRF; one with a source interface.
    golden_parsed_output_1 = {
        'vrf': {
            'default': {
                'address': {
                    '10.16.2.2': {
                        'isconfigured': {
                            'True': {
                                'address': '10.16.2.2',
                                'isconfigured': True}
                        },
                        'type': {
                            'server': {
                                'address': '10.16.2.2',
                                'source': 'Loopback0',
                                'type': 'server',
                                'vrf': 'default'}
                        }
                    },
                    '10.36.3.3': {
                        'isconfigured': {
                            'True': {
                                'address': '10.36.3.3',
                                'isconfigured': True}
                        },
                        'type': {
                            'server': {
                                'address': '10.36.3.3',
                                'type': 'server',
                                'vrf': 'default'}
                        }
                    }
                }
            }
        }
    }
    golden_output_1 = {'execute.return_value': '''\
iosv-1#show ntp config
ntp server 10.16.2.2 source Loopback0
ntp server 10.36.3.3
'''}

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowNtpConfig(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden_1(self):
        """Two-server config output parses to golden_parsed_output_1."""
        self.device = Mock(**self.golden_output_1)
        obj = ShowNtpConfig(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_1)
class test_show_ntp_associations_detail(unittest.TestCase):
    """Unit tests for the IOS 'show ntp associations detail' parser
    (ShowNtpAssociationsDetail)."""
    device = Device(name='aDevice')
    # Empty CLI output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Three peers: insane/invalid, selected/sane/valid, and our_master.
    golden_output = {'execute.return_value': '''
Device> show ntp associations detail
172.31.32.2 configured, insane, invalid, stratum 5
ref ID 172.31.32.1, time AFE252C1.6DBDDFF2 (00:12:01.428 PDT Mon Jul 5 1993)
our mode active, peer mode active, our poll intvl 1024, peer poll intvl 64
root delay 137.77 msec, root disp 142.75, reach 376, sync dist 215.363
delay 4.23 msec, offset -8.587 msec, dispersion 1.62
precision 2**19, version 4
assoc ID 1, assoc name 192.168.1.55,
assoc in packets 60, assoc out packets 60, assoc error packets 0
org time AFE252E2.3AC0E887 (00:12:34.229 PDT Tue Oct 4 2011)
rcv time AFE252E2.3D7E464D (00:12:34.240 PDT Mon Jan 1 1900)
xmt time AFE25301.6F83E753 (00:13:05.435 PDT Tue Oct 4 2011)
filtdelay = 4.23 4.14 2.41 5.95 2.37 2.33 4.26 4.33
filtoffset = -8.59 -8.82 -9.91 -8.42 -10.51 -10.77 -10.13 -10.11
filterror = 0.50 1.48 2.46 3.43 4.41 5.39 6.36 7.34
192.168.13.33 configured, selected, sane, valid, stratum 3
ref ID 192.168.1.111, time AFE24F0E.14283000 (23:56:14.078 PDT Sun Jul 4 1993)
our mode client, peer mode server, our poll intvl 128, peer poll intvl 128
root delay 83.72 msec, root disp 217.77, reach 377, sync dist 264.633
delay 4.07 msec, offset 3.483 msec, dispersion 2.33
precision 2**6, version 3
assoc ID 2, assoc name myserver
assoc in packets 0, assoc out packets 0, assoc error packets 0
org time AFE252B9.713E9000 (00:11:53.442 PDT Tue Oct 4 2011)
rcv time AFE252B9.7124E14A (00:11:53.441 PDT Mon Jan 1 1900)
xmt time AFE252B9.6F625195 (00:11:53.435 PDT Mon Jan 1 1900)
filtdelay = 6.47 4.07 3.94 3.86 7.31 7.20 9.52 8.71
filtoffset = 3.63 3.48 3.06 2.82 4.51 4.57 4.28 4.59
filterror = 0.00 1.95 3.91 4.88 5.84 6.82 7.80 8.77
192.168.13.57 configured, our_master, sane, valid, stratum 3
ref ID 192.168.1.111, time AFE252DC.1F2B3000 (00:12:28.121 PDT Mon Jul 5 1993)
our mode client, peer mode server, our poll intvl 128, peer poll intvl 128
root delay 125.50 msec, root disp 115.80, reach 377, sync dist 186.157
delay 7.86 msec, offset 11.176 msec, dispersion 3.62
precision 2**6, version 2
assoc ID 2, assoc name myserver
assoc in packets 0, assoc out packets 0, assoc error packets 0
org time AFE252DE.77C29000 (00:12:30.467 PDT Tue Oct 4 2011)
rcv time AFE252DE.7B2AE40B (00:12:30.481 PDT Mon Jan 1 1900)
xmt time AFE252DE.6E6D12E4 (00:12:30.431 PDT Mon Jan 1 1900)
filtdelay = 49.21 7.86 8.18 8.80 4.30 4.24 7.58 6.42
filtoffset = 11.30 11.18 11.13 11.28 8.91 9.09 9.27 9.57
filterror = 0.00 1.95 3.91 4.88 5.78 6.76 7.74 8.71
'''}
    # Expected structure for golden_output above.
    golden_parsed_output = {
        "vrf": {
            "default": {
                "associations": {
                    "address": {
                        "172.31.32.2": {
                            "local_mode": {
                                "active": {
                                    "isconfigured": {
                                        "True": {
                                            "selected": False,
                                            "unsynced": False,
                                            "address": "172.31.32.2",
                                            "isconfigured": True,
                                            "authenticated": False,
                                            "sane": False,
                                            "valid": False,
                                            "master": False,
                                            "stratum": 5,
                                            "refid": "172.31.32.1",
                                            "input_time": "AFE252C1.6DBDDFF2 (00:12:01.428 PDT Mon Jul 5 1993)",
                                            "peer_interface": "172.31.32.1",
                                            "poll": "1024",
                                            "vrf": "default",
                                            "local_mode": "active",
                                            "peer": {
                                                "172.31.32.1": {
                                                    "local_mode": {
                                                        "active": {
                                                            "poll": 64,
                                                            "local_mode": "active"
                                                        }
                                                    }
                                                }
                                            },
                                            "root_delay_msec": "137.77",
                                            "root_disp": "142.75",
                                            "reach": "376",
                                            "sync_dist": "215.363",
                                            "delay_msec": "4.23",
                                            "offset_msec": "-8.587",
                                            "dispersion": "1.62",
                                            "jitter_msec": "None",
                                            "precision": "2**19",
                                            "version": 4,
                                            "assoc_name": "192.168.1.55",
                                            "assoc_id": 1,
                                            "ntp_statistics": {
                                                "packet_received": 60,
                                                "packet_sent": 60,
                                                "packet_dropped": 0
                                            },
                                            "originate_time": "AFE252E2.3AC0E887 (00:12:34.229 PDT Tue Oct 4 2011)",
                                            "receive_time": "AFE252E2.3D7E464D (00:12:34.240 PDT Mon Jan 1 1900)",
                                            "transmit_time": "AFE25301.6F83E753 (00:13:05.435 PDT Tue Oct 4 2011)",
                                            "filtdelay": "4.23 4.14 2.41 5.95 2.37 2.33 4.26 4.33",
                                            "filtoffset": "-8.59 -8.82 -9.91 -8.42 -10.51 -10.77 -10.13 -10.11",
                                            "filterror": "0.50 1.48 2.46 3.43 4.41 5.39 6.36 7.34"
                                        }
                                    }
                                }
                            }
                        },
                        "192.168.13.33": {
                            "local_mode": {
                                "client": {
                                    "isconfigured": {
                                        "True": {
                                            "selected": True,
                                            "unsynced": False,
                                            "address": "192.168.13.33",
                                            "isconfigured": True,
                                            "authenticated": False,
                                            "sane": True,
                                            "valid": True,
                                            "master": False,
                                            "stratum": 3,
                                            "refid": "192.168.1.111",
                                            "input_time": "AFE24F0E.14283000 (23:56:14.078 PDT Sun Jul 4 1993)",
                                            "peer_interface": "192.168.1.111",
                                            "poll": "128",
                                            "vrf": "default",
                                            "local_mode": "client",
                                            "peer": {
                                                "192.168.1.111": {
                                                    "local_mode": {
                                                        "server": {
                                                            "poll": 128,
                                                            "local_mode": "server"
                                                        }
                                                    }
                                                }
                                            },
                                            "root_delay_msec": "83.72",
                                            "root_disp": "217.77",
                                            "reach": "377",
                                            "sync_dist": "264.633",
                                            "delay_msec": "4.07",
                                            "offset_msec": "3.483",
                                            "dispersion": "2.33",
                                            "jitter_msec": "None",
                                            "precision": "2**6",
                                            "version": 3,
                                            "assoc_name": "myserver",
                                            "assoc_id": 2,
                                            "ntp_statistics": {
                                                "packet_received": 0,
                                                "packet_sent": 0,
                                                "packet_dropped": 0
                                            },
                                            "originate_time": "AFE252B9.713E9000 (00:11:53.442 PDT Tue Oct 4 2011)",
                                            "receive_time": "AFE252B9.7124E14A (00:11:53.441 PDT Mon Jan 1 1900)",
                                            "transmit_time": "AFE252B9.6F625195 (00:11:53.435 PDT Mon Jan 1 1900)",
                                            "filtdelay": "6.47 4.07 3.94 3.86 7.31 7.20 9.52 8.71",
                                            "filtoffset": "3.63 3.48 3.06 2.82 4.51 4.57 4.28 4.59",
                                            "filterror": "0.00 1.95 3.91 4.88 5.84 6.82 7.80 8.77"
                                        }
                                    }
                                }
                            }
                        },
                        "192.168.13.57": {
                            "local_mode": {
                                "client": {
                                    "isconfigured": {
                                        "True": {
                                            "selected": False,
                                            "unsynced": False,
                                            "address": "192.168.13.57",
                                            "isconfigured": True,
                                            "authenticated": False,
                                            "sane": True,
                                            "valid": True,
                                            "master": True,
                                            "stratum": 3,
                                            "refid": "192.168.1.111",
                                            "input_time": "AFE252DC.1F2B3000 (00:12:28.121 PDT Mon Jul 5 1993)",
                                            "peer_interface": "192.168.1.111",
                                            "poll": "128",
                                            "vrf": "default",
                                            "local_mode": "client",
                                            "peer": {
                                                "192.168.1.111": {
                                                    "local_mode": {
                                                        "server": {
                                                            "poll": 128,
                                                            "local_mode": "server"
                                                        }
                                                    }
                                                }
                                            },
                                            "root_delay_msec": "125.50",
                                            "root_disp": "115.80",
                                            "reach": "377",
                                            "sync_dist": "186.157",
                                            "delay_msec": "7.86",
                                            "offset_msec": "11.176",
                                            "dispersion": "3.62",
                                            "jitter_msec": "None",
                                            "precision": "2**6",
                                            "version": 2,
                                            "assoc_name": "myserver",
                                            "assoc_id": 2,
                                            "ntp_statistics": {
                                                "packet_received": 0,
                                                "packet_sent": 0,
                                                "packet_dropped": 0
                                            },
                                            "originate_time": "AFE252DE.77C29000 (00:12:30.467 PDT Tue Oct 4 2011)",
                                            "receive_time": "AFE252DE.7B2AE40B (00:12:30.481 PDT Mon Jan 1 1900)",
                                            "transmit_time": "AFE252DE.6E6D12E4 (00:12:30.431 PDT Mon Jan 1 1900)",
                                            "filtdelay": "49.21 7.86 8.18 8.80 4.30 4.24 7.58 6.42",
                                            "filtoffset": "11.30 11.18 11.13 11.28 8.91 9.09 9.27 9.57",
                                            "filterror": "0.00 1.95 3.91 4.88 5.78 6.76 7.74 8.71"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    def test_golden(self):
        """Three-peer detail output parses to golden_parsed_output."""
        self.maxDiff = None
        self.device = Mock(**self.golden_output)
        obj = ShowNtpAssociationsDetail(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowNtpAssociationsDetail(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
if __name__ == '__main__':
    # Run this test module directly.
    unittest.main()
|
import pandas as pd
import click
from astropy.table import Table
@click.command()
@click.argument('source_file_path',
                type=click.Path(exists=True))
@click.argument('store_file_path',
                type=click.Path(exists=False))
def main(source_file_path: str, store_file_path: str):
    """Convert columns 'a', 'b', 'c' of an HDF5 DataFrame into a FITS table.

    SOURCE_FILE_PATH: input HDF5 file readable by pandas.read_hdf.
    STORE_FILE_PATH: output path for the astropy Table.
    """
    # BUG FIX: the body referenced an undefined argparse `parser`/`args`;
    # click already injects the two path arguments, so use them directly.
    df = pd.read_hdf(source_file_path)
    # Stable ordering so downstream consumers can index by (pixel, cell).
    df.sort_values(['pixel', 'cell'], inplace=True)
    t = Table()
    for key in ['a', 'b', 'c']:
        # float32 halves the output size; calibration data fits in it.
        t[key] = df[key].astype('float32').values
    t.write(store_file_path)


if __name__ == '__main__':
    main()
|
import bz2
import json
import os
import re
import shutil
import zipfile
def unzip(path_to_zip_file):
    """Extract *path_to_zip_file* into the current working directory.

    Does nothing when the path is not a regular file. Any macOS resource-fork
    folder (__MACOSX) left by the archive is removed after extraction.
    """
    if os.path.isfile(path_to_zip_file):
        # Context manager closes the archive handle (the original leaked it).
        with zipfile.ZipFile(path_to_zip_file, "r") as zip_ref:
            zip_ref.extractall(".")
        # ignore_errors: archives not built on macOS have no __MACOSX folder,
        # and the bare rmtree used to crash on them.
        shutil.rmtree("./__MACOSX", ignore_errors=True)
def unbz2(path):
    """Recursively decompress every ``*.bz2`` file found under *path*.

    Each archive is written next to the original with the ``.bz2`` suffix
    stripped. Returns the list of created file paths.
    """
    filesList = list()
    for (dirPath, dirNames, files) in os.walk(path):
        for filename in files:
            filepath = os.path.join(dirPath, filename)
            if filepath.endswith(".bz2"):
                newFilepath = filepath[:-4]
                # Context managers close both handles; the original leaked
                # the BZ2File and the output file object.
                with bz2.BZ2File(filepath) as zipFile, \
                        open(newFilepath, "wb") as out:
                    out.write(zipFile.read())
                filesList.append(newFilepath)
    return filesList
def readFiles(path_to_file):
    """Read a file of line-delimited JSON tweets.

    Returns a list of ``(screen_name, text)`` tuples for tweets whose user
    language is Spanish (``"es"``).
    """
    result = []
    # Context manager closes the file (the original leaked the handle).
    with open(path_to_file, "r") as json_file:
        for line in json_file:
            data = json.loads(line)
            # .get() chain tolerates records with "text" but no "user"
            # object, which previously raised KeyError.
            if "text" in data and data.get("user", {}).get("lang") == "es":
                user = data["user"]["screen_name"]
                tweet = data["text"]
                result.append((user, tweet))
    return result
def quitaNoAlfa(s):
    """Lowercase *s* and drop everything except word characters, whitespace
    and the accented Spanish letters/ñ (underscores are dropped too)."""
    lowered = s.lower()
    return re.sub(r'([^\s\wñáéíóú]|_)+', '', lowered)
def rmSpaces(s):
    """Return *s* with leading and trailing whitespace removed."""
    trimmed = s.strip()
    return trimmed
def pr(s):
    """Print *s*; trivial wrapper kept for use as a callback."""
    print(s)
def sacaHashtags(tweet):
    """Return every hashtag body in *tweet*, lowercased and without '#'."""
    lowered = tweet.lower()
    return re.findall(r"#(\w+)", lowered)
|
# Given two strings word1 and word2, return the minimum number of operations required to convert word1 to word2.
# You have the following three operations permitted on a word:
# Insert a character
# Delete a character
# Replace a character
class Solution:
    """Levenshtein edit distance (insert / delete / replace) via DP."""

    def minDistance(self, word1: str, word2: str) -> int:
        """Classic (m+1) x (n+1) DP table; O(m*n) time and space."""
        m, n = len(word1), len(word2)
        # When either word is empty the distance is the other's length
        # (0 when both are empty).
        if m == 0 or n == 0:
            return m or n
        table: list[list[int]] = [[0] * (n + 1) for _ in range(m + 1)]
        # Base rows: converting to/from the empty prefix costs its length.
        for r in range(1, m + 1):
            table[r][0] = r
        for c in range(1, n + 1):
            table[0][c] = c
        for r, ch1 in enumerate(word1, 1):
            for c, ch2 in enumerate(word2, 1):
                if ch1 == ch2:
                    table[r][c] = table[r - 1][c - 1]
                else:
                    # replace, insert, delete (on word1), cheapest + 1
                    table[r][c] = 1 + min(table[r - 1][c - 1],
                                          table[r][c - 1],
                                          table[r - 1][c])
        return table[m][n]

    def minDistance2(self, word1: str, word2: str) -> int:
        """Space-optimized variant keeping a single DP row; O(n) space."""
        m, n = len(word1), len(word2)
        if m == 0 or n == 0:
            return m or n
        row: list[int] = list(range(n + 1))
        for r, ch1 in enumerate(word1, 1):
            diag = row[0]  # value of table[r-1][c-1] before overwriting
            row[0] = r
            for c, ch2 in enumerate(word2, 1):
                saved = row[c]
                if ch1 == ch2:
                    row[c] = diag
                else:
                    # replace, insert, delete (on word1)
                    row[c] = 1 + min(diag, row[c - 1], row[c])
                diag = saved
        return row[n]
|
from z3 import *
import cairo
import math
# Problem data for a Z3-based rectangle-placement model.
# Chip is a chip_size[0] x chip_size[1] grid of unit cells.
chip_size = (30, 30)
# Minimum |dx|/|dy| between power-component centers (see the constraint below).
pow_min_offset = (18, 18)
# (width, height) of each power component, in grid cells.
pow_sizes = [
    (4, 3),
    (4, 3),
]
# (width, height) of each normal component, in grid cells.
com_sizes = [
    (4, 5),
    (4, 6),
    (5, 20),
    (6, 9),
    (6, 10),
    (6, 11),
    (7, 8),
    (7, 12),
    (10, 10),
    (10, 20),
]
# One (x, y) integer position and one 90-degree-rotation flag per component.
pow_positions = [(Int(f"pow{i}_pos_x"), Int(f"pow{i}_pos_y")) for i in range(len(pow_sizes))]
pow_rotated = [Bool(f"pow{i}_rot") for i in range(len(pow_sizes))]
com_positions = [(Int(f"com{i}_pos_x"), Int(f"com{i}_pos_y")) for i in range(len(com_sizes))]
com_rotated = [Bool(f"com{i}_rot") for i in range(len(com_sizes))]
# Combined views used by constraints that apply to every component.
all_sizes = pow_sizes + com_sizes
all_positions = pow_positions + com_positions
all_rotated = pow_rotated + com_rotated
s = Solver()
def z3_abs(x):
    """Symbolic absolute value of a Z3 arithmetic expression."""
    return If(x < 0, -x, x)
def draw_solution(model):
    """Render a satisfying placement model to '2.svg' via cairo.

    Draws the chip grid, then normal components in blue and power
    components in red, each labeled C<i>/P<i>.
    """
    with cairo.SVGSurface("2.svg", 1000, 1000) as surface:
        context = cairo.Context(surface)
        # Scale so the whole chip grid fits the 1000x1000 surface.
        context.scale(chip_size[0] / 3, chip_size[1] / 3)
        context.set_line_width(0.03)
        # Grid lines (vertical then horizontal).
        for i in range(chip_size[0] + 1):
            context.move_to(i, 0)
            context.line_to(i, chip_size[0])
            context.stroke()
        for j in range(chip_size[1] + 1):
            context.move_to(0, j)
            context.line_to(chip_size[1], j)
            context.stroke()
        # draw normal components
        context.set_font_size(2)
        context.set_line_width(0.1)
        for i in range(len(com_sizes)):
            sx, sy = com_sizes[i]
            r = is_true(model[com_rotated[i]])
            x = model[com_positions[i][0]].as_long()
            y = model[com_positions[i][1]].as_long()
            # A rotated component swaps width and height.
            w = sy if r else sx
            h = sx if r else sy
            context.rectangle(x, y, w, h)
            context.set_source_rgba(0, 0, 1, 1)
            context.stroke_preserve()
            context.set_source_rgba(0, 0, 1, 0.5)
            context.fill()
            context.set_source_rgba(1, 1, 1, 1)
            context.move_to(x + 0.05, y + 2.0)
            context.show_text(f"C{i}")
        # draw power components
        context.set_line_width(0.1)
        for i in range(len(pow_sizes)):
            sx, sy = pow_sizes[i]
            r = is_true(model[pow_rotated[i]])
            x = model[pow_positions[i][0]].as_long()
            y = model[pow_positions[i][1]].as_long()
            w = sy if r else sx
            h = sx if r else sy
            context.rectangle(x, y, w, h)
            context.set_source_rgba(1, 0, 0, 1)
            context.stroke_preserve()
            context.set_source_rgba(1, 0, 0, 0.5)
            context.fill()
            context.set_source_rgba(1, 1, 1, 1)
            context.move_to(x, y + 2.0)
            context.show_text(f"P{i}")
# ensure all components are placed in the chip
for i in range(len(all_sizes)):
    sx, sy = all_sizes[i]
    x, y = all_positions[i]
    r = all_rotated[i]
    s.add(x >= 0)
    s.add(y >= 0)
    # Rotation swaps the footprint's width/height.
    s.add(x + If(r, sy, sx) <= chip_size[0])
    s.add(y + If(r, sx, sy) <= chip_size[1])

# Create unique pairs of all components and ensure they don't overlap
for i in range(len(all_sizes)):
    for j in range(i + 1, len(all_sizes)):
        s1x, s1y = all_sizes[i]
        s2x, s2y = all_sizes[j]
        p1x1, p1y1 = all_positions[i]
        p2x1, p2y1 = all_positions[j]
        r1 = all_rotated[i]
        r2 = all_rotated[j]
        # Bottom-right corners, accounting for rotation.
        p1x2 = If(r1, p1x1 + s1y, p1x1 + s1x)
        p1y2 = If(r1, p1y1 + s1x, p1y1 + s1y)
        p2x2 = If(r2, p2x1 + s2y, p2x1 + s2x)
        p2y2 = If(r2, p2y1 + s2x, p2y1 + s2y)
        # Two axis-aligned rectangles are disjoint iff they are separated
        # along at least one axis.
        not_x_overlap = Or(p1x2 <= p2x1, p2x2 <= p1x1)
        not_y_overlap = Or(p1y2 <= p2y1, p2y2 <= p1y1)
        s.add(Or(not_x_overlap, not_y_overlap))

# create unique pairs of power components and ensure
# their centers are far enough away from eachother
for i in range(len(pow_sizes)):
    for j in range(i + 1, len(pow_sizes)):
        sx, sy = pow_sizes[i]
        p1x, p1y = pow_positions[i]
        p2x, p2y = pow_positions[j]
        r1 = pow_rotated[i]
        r2 = pow_rotated[j]
        # to check whether a power component is placed close enough we
        # create a virtual grid twice as large as the original
        # where the components are also twice as large
        # This way the center points of the power components
        # always align with a grid point and we stay
        # in the nice and comfy domain of integers
        cp1x = If(r1, 2 * p1x + sy, 2 * p1x + sx)
        cp1y = If(r1, 2 * p1y + sx, 2 * p1y + sy)
        cp2x = If(r2, 2 * p2x + sy, 2 * p2x + sx)
        cp2y = If(r2, 2 * p2y + sx, 2 * p2y + sy)
        dx = z3_abs(cp1x - cp2x) >= 2 * pow_min_offset[0]
        dy = z3_abs(cp1y - cp2y) >= 2 * pow_min_offset[1]
        s.add(Or(dx, dy))

# Ensure every normal component has at least a single edge
# in common with a power componnent
for i in range(len(com_sizes)):
    pow_edges = []
    for j in range(len(pow_sizes)):
        s1x, s1y = com_sizes[i]
        s2x, s2y = pow_sizes[j]
        p1x1, p1y1 = com_positions[i]
        p2x1, p2y1 = pow_positions[j]
        r1 = com_rotated[i]
        r2 = pow_rotated[j]
        p1x2 = If(r1, p1x1 + s1y, p1x1 + s1x)
        p1y2 = If(r1, p1y1 + s1x, p1y1 + s1y)
        p2x2 = If(r2, p2x1 + s2y, p2x1 + s2x)
        p2y2 = If(r2, p2y1 + s2x, p2y1 + s2y)
        # Touching along a horizontal edge: x-ranges overlap and the top of
        # one rectangle is the bottom of the other (and vice versa for
        # vertical edges).
        x_edge_match = And(p1x2 > p2x1, p2x2 > p1x1, Or(p1y2 == p2y1, p2y2 == p1y1))
        y_edge_match = And(p1y2 > p2y1, p2y2 > p1y1, Or(p1x2 == p2x1, p2x2 == p1x1))
        connected = Or(x_edge_match, y_edge_match)
        pow_edges.append(connected)
    s.add(Or(*pow_edges))

res = s.check()
if res == unsat:
    print("unsat")
    # BUG FIX: the original called sys.exit(1) without importing sys,
    # raising NameError exactly on the unsat path. SystemExit(1) has the
    # same effect and needs no import.
    raise SystemExit(1)
model = s.model()
print(model)
draw_solution(model)
#!/usr/bin/env python
from cgi import parse_qs, escape
from time import sleep
import json
import os
import random
import re
from poem import *
# Page template rendered with %-formatting (placeholders: %(poem)s,
# %(url)s, %(lines)s).  Two fixes applied to the embedded page:
#   * the jQuery <script> tag was missing the opening quote on src=
#   * the speech onend handler logged event.elapsedTime (the outer
#     DOMContentLoaded event, which has no elapsedTime) instead of
#     e.elapsedTime (the SpeechSynthesis event)
html_template = """
<html>
<head>
<title>A simple poem generator</title>
<link href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
<script src="https://code.jquery.com/jquery-1.12.0.min.js"></script>
<script src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-62257429-1', 'auto');
ga('send', 'pageview');
</script>
<style>
body {
background: #FFF;
color: #111;
font: 18px Baskerville, "Palatino Linotype", "Times New Roman", Times, serif;
text-align: center;
}
#poem div, h1, h2, p {
margin: 0;
padding: 0;
}
#poem {
margin: auto;
padding: 20px 0;
text-align: left;
width: 650px;
}
#poem h1, h2 {
font-weight: normal;
text-align: center;
}
#poem h1 {
font-size: 34px;
line-height: 1.2;
margin-bottom: 10px;
}
#poem h2 {
color: #666;
font-size: 18px;
font-style: italic;
margin-bottom: 30px;
}
#poem p {
line-height: 1.5;
margin-bottom: 15px;
}
/* The magic of selectors begins... */
#poem h2:before {
content: '- ';
}
#poem h2:after {
content: ' -';
}
#poem h2 + p:first-letter {
float: left;
font-size: 38px;
line-height: 1;
margin: 2px 5px 0 0;
}
#poem p:first-line {
font-variant: small-caps;
letter-spacing: 1px;
}
#poem p:last-child {
margin-bottom: 30px;
padding-bottom: 30px;
}
#footer {
position: fixed;
bottom: 0;
}
</style>
<script>
var msg = new SpeechSynthesisUtterance();
var voices = window.speechSynthesis.getVoices();
</script>
</head>
<body>
<h1>Poetry generator</h1>
<div class="well center-block" style="max-width: 600px;">
<form method="POST" action="/poem">
<div class="row">
<div class="col-md-6"><button type="submit" class="btn btn-primary btn-lg btn-block" name='poemtype' value='poem'>Regular Poem</button></div>
<div class="col-md-6"><button type="submit" class="btn btn-default btn-lg btn-block" name='poemtype' value='mushypoem'>Mushy poem</button></div>
</div>
</form>
</div>
<div id="poem">%(poem)s <h2>%(url)s</h2></div>
<footer class="footer">
<div class="container">
<p class="text-muted">See how this code passed the turing test <a href="http://rpiai.com/2015/01/24/turing-test-passed-using-computer-generated-poetry/">here</a> and <a href="http://motherboard.vice.com/read/the-poem-that-passed-the-turing-test">here</a>. Also check out the <a href="https://github.com/schollz/poetry-generator">source code!</a></p>
</div>
</footer>
<script src='https://code.responsivevoice.org/responsivevoice.js'></script>
<input onclick='partA();' type='button' value='Play' />
<script>
document.addEventListener("DOMContentLoaded", function(event) {
msg = new SpeechSynthesisUtterance();
voices = window.speechSynthesis.getVoices();
setTimeout(function () {
msg.voice = voices[10]; // Note: some voices don't support altering params
msg.voiceURI = 'native';
msg.volume = 1; // 0 to 1
msg.rate = 1; // 0.1 to 10
msg.pitch = 0; //0 to 2
msg.lang = 'en-US';
messages = %(lines)s;
var messageNumber = 0;
msg.text = messages[messageNumber];
msg.onend = function(e) {
console.log('Finished in ' + e.elapsedTime + ' seconds.');
messageNumber = messageNumber + 1;
msg.text = messages[messageNumber];
msg.voice = voices[10]; // Note: some voices don't support altering params
msg.voiceURI = 'native';
msg.rate = 1; // 0.1 to 10
msg.pitch = 0; //0 to 2
msg.lang = 'en-US';
msg.volume = 1; // 0 to 1
if (msg.text=="pause") {
msg.volume = 0;
} else {
}
if (messageNumber < messages.length) {
speechSynthesis.speak(msg);
}
};
speechSynthesis.speak(msg);
}, 1000);
});
</script>
</body>
</html>
"""
# Page name -> %-format HTML template (placeholders: %(poem)s, %(url)s, %(lines)s).
pages = {
    'index' : html_template,
}
class Router():
    """Minimal URL router: match a regex against the request path."""

    def __init__(self, url):
        self.url = url

    def match(self, pattern):
        """Return True and store the pattern's named groups in self.params.

        On no match, returns False and leaves self.params untouched.
        """
        found = re.search(pattern, self.url)
        if not found:
            return False
        self.params = found.groupdict()
        return True
def application(environ, start_response):
    """WSGI entry point: show a saved poem or redirect to a fresh one."""
    router = Router(environ['PATH_INFO'])
    if router.match('^/(?P<type>poem|mushypoem)/(?P<seed>[0-9a-zA-Z]+)$'):
        return show_poem(environ, start_response, router)
    # '/', '/poem' or anything else: generate a new seed and redirect.
    return redirect_to_poem(environ, start_response)
def redirect_to_poem(environ, start_response):
    """Issue a 302 to a freshly seeded poem URL.

    Reads an optional POST body of the form ``poemtype=poem|mushypoem``;
    anything other than 'mushypoem' falls back to 'poem'.
    """
    import binascii
    # The environment variable CONTENT_LENGTH may be empty or missing.
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError, TypeError):
        request_body_size = 0
    # Read and parse the HTTP request body which is passed by the WSGI server.
    request_body = environ['wsgi.input'].read(request_body_size)
    if isinstance(request_body, bytes):
        # WSGI input is a byte stream; parse_qs wants text.
        request_body = request_body.decode('utf-8', 'replace')
    poemtype = None
    qs = parse_qs(request_body)
    if qs:
        # BUG FIX: qs.get('poemtype') is None when the key is absent, so the
        # original's unconditional [0] raised TypeError on such POSTs.
        values = qs.get('poemtype')
        if values:
            poemtype = values[0]
    if poemtype != 'mushypoem':
        poemtype = 'poem'
    # hexlify works on Python 2 and 3 alike (str.encode('hex') was 2-only).
    seed = binascii.hexlify(os.urandom(8)).decode('ascii')
    start_response('302 Found', [
        ('Location', '/' + poemtype + '/' + seed)
    ])
    return []
def show_poem(environ, start_response, router):
    """Render the poem identified by the URL's type and seed as HTML."""
    # Seeded generation ensures that we can always get back to a given poem.
    poem_text, str_seed = bnf.generatePretty(
        '<' + router.params['type'] + '>', router.params['seed'])
    # Strip markup and collect lines for speech synthesis; blank lines
    # become explicit "pause" markers.
    filtered = []
    for raw_line in re.sub("<.*?>", " ", poem_text).split("\n"):
        stripped = raw_line.strip()
        filtered.append(stripped if stripped else "pause")
    response_body = pages['index'] % {
        'poem': poem_text,
        'url': router.url,
        'lines': json.dumps(filtered)
    }
    start_response('200 OK', [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(response_body)))
    ])
    return [response_body]
|
from enum import Enum
import pytest
from bs4 import BeautifulSoup
from django.contrib.auth.models import Group, User
from django_sql_dashboard.models import Dashboard
def test_anonymous_user_redirected_to_login(client):
    """Unauthenticated requests bounce to admin login, keeping the query."""
    resp = client.get("/dashboard/?sql=select+1")
    assert resp.status_code == 302
    assert resp.url == "/admin/login/?next=/dashboard/%3Fsql%3Dselect%2B1"
def test_superusers_allowed(admin_client, dashboard_db):
    """Superusers can always open the dashboard page."""
    resp = admin_client.get("/dashboard/")
    assert resp.status_code == 200
    assert b"<title>SQL Dashboard</title>" in resp.content
def test_must_have_execute_sql_permission(
    client, django_user_model, dashboard_db, execute_sql_permission
):
    """Only staff holding execute_sql may view the dashboard."""
    not_staff = django_user_model.objects.create(username="not_staff")
    staff_without = django_user_model.objects.create(
        username="staff_no_permission", is_staff=True
    )
    staff_with = django_user_model.objects.create(
        username="staff_with_permission", is_staff=True
    )
    staff_with.user_permissions.add(execute_sql_permission)
    assert staff_with.has_perm("django_sql_dashboard.execute_sql")
    # Same request, three users, three expected outcomes.
    for user, expected_status in (
        (not_staff, 403),
        (staff_without, 403),
        (staff_with, 200),
    ):
        client.force_login(user)
        assert client.get("/dashboard/").status_code == expected_status
def test_user_without_execute_sql_permission_does_not_see_count_links_on_saved_dashboard(
    client, django_user_model, execute_sql_permission, dashboard_db
):
    """Count links appear only for users with the execute_sql permission."""
    dashboard = Dashboard.objects.create(slug="test", view_policy="public")
    dashboard.queries.create(sql="select 11 + 34")
    user = django_user_model.objects.create(username="regular")
    client.force_login(user)
    # Without execute_sql the count links are hidden...
    first = client.get("/dashboard/test/")
    assert first.status_code == 200
    assert "data-count-url=" not in first.content.decode("utf-8")
    # ...and granting the permission makes them appear.
    user.user_permissions.add(execute_sql_permission)
    second = client.get("/dashboard/test/")
    assert "data-count-url=" in second.content.decode("utf-8")
def test_saved_dashboard_anonymous_users_denied_by_default(client, dashboard_db):
    """A dashboard created with the default view policy is private."""
    board = Dashboard.objects.create(slug="test")
    board.queries.create(sql="select 11 + 34")
    assert client.get("/dashboard/test/").status_code == 403
# The kinds of actor exercised by the permission-matrix tests; the
# functional Enum API assigns values 1..6 in declaration order, exactly
# matching the original explicit numbering.
UserType = Enum(
    "UserType",
    ["owner", "anon", "loggedin", "groupmember", "staff", "superuser"],
)
# Every user type, in declaration order.
all_user_types = tuple(UserType)
@pytest.mark.parametrize(
    "view_policy,user_types_who_can_see",
    (
        ("private", (UserType.owner,)),
        ("public", all_user_types),
        ("unlisted", all_user_types),
        (
            "loggedin",
            (
                UserType.owner,
                UserType.loggedin,
                UserType.groupmember,
                UserType.staff,
                UserType.superuser,
            ),
        ),
        ("group", (UserType.owner, UserType.groupmember)),
        ("staff", (UserType.owner, UserType.staff, UserType.superuser)),
        ("superuser", (UserType.owner, UserType.superuser)),
    ),
)
def test_saved_dashboard_view_permissions(
    client,
    dashboard_db,
    view_policy,
    user_types_who_can_see,
    django_user_model,
):
    """Each view policy admits exactly the expected user types."""
    create_user = django_user_model.objects.create
    users = {
        UserType.owner: create_user(username="owner"),
        UserType.anon: None,
        UserType.loggedin: create_user(username="loggedin"),
        UserType.groupmember: create_user(username="groupmember"),
        UserType.staff: create_user(username="staff", is_staff=True),
        UserType.superuser: create_user(
            username="superuser", is_staff=True, is_superuser=True
        ),
    }
    group = Group.objects.create(name="view-group")
    users[UserType.groupmember].groups.add(group)
    Dashboard.objects.create(
        slug="dash",
        owned_by=users[UserType.owner],
        view_policy=view_policy,
        view_group=group,
    )
    for user_type, user in users.items():
        if user is None:
            client.logout()
        else:
            client.force_login(user)
        response = client.get("/dashboard/dash/")
        expected = 200 if user_type in user_types_who_can_see else 403
        assert response.status_code == expected
        # Responses for authenticated users must never be shared-cached.
        if user is not None:
            assert response["cache-control"] == "private"
def test_unlisted_dashboard_has_meta_robots(client, dashboard_db):
    """Unlisted dashboards carry a noindex robots tag; public ones do not."""
    board = Dashboard.objects.create(slug="unlisted", view_policy="unlisted")
    board.queries.create(sql="select 11 + 34")
    noindex = b'<meta name="robots" content="noindex">'
    unlisted_response = client.get("/dashboard/unlisted/")
    assert unlisted_response.status_code == 200
    assert noindex in unlisted_response.content
    # Flipping the policy to public removes the tag.
    board.view_policy = "public"
    board.save()
    public_response = client.get("/dashboard/unlisted/")
    assert public_response.status_code == 200
    assert noindex not in public_response.content
def _assert_visibility(dashboard, user, expected, description):
    """Assert whether `dashboard` is in the set of slugs visible to `user`."""
    visible_dashboards = set(
        Dashboard.get_visible_to_user(user).values_list("slug", flat=True)
    )
    if expected:
        assert (
            dashboard in visible_dashboards
        ), "Expected {} to be able to see {}".format(description, dashboard)
    else:
        assert (
            dashboard not in visible_dashboards
        ), "Expected {} not to be able to see {}".format(description, dashboard)


@pytest.mark.parametrize(
    "dashboard,expected,expected_if_staff,expected_if_superuser",
    (
        ("owned_by_user", True, True, True),
        ("owned_by_other_private", False, False, False),
        ("owned_by_other_public", True, True, True),
        ("owned_by_other_unlisted", False, False, False),
        ("owned_by_other_loggedin", True, True, True),
        ("owned_by_other_group_not_member", False, False, False),
        ("owned_by_other_group_member", True, True, True),
        ("owned_by_other_staff", False, True, True),
        ("owned_by_other_superuser", False, False, True),
    ),
)
def test_get_visible_to_user(
    db, dashboard, expected, expected_if_staff, expected_if_superuser
):
    """get_visible_to_user honours every view policy as the user's role grows.

    The triple-duplicated membership assertion of the original is factored
    into _assert_visibility; behavior and messages are unchanged.
    """
    user = User.objects.create(username="test")
    other = User.objects.create(username="other")
    group_member = Group.objects.create(name="group_member")
    user.groups.add(group_member)
    group_not_member = Group.objects.create(name="group_not_member")
    Dashboard.objects.create(slug="owned_by_user", owned_by=user, view_policy="private")
    # All remaining dashboards are owned by the other user; view_group is
    # only meaningful for the "group" policy (None otherwise).
    for slug, policy, view_group in (
        ("owned_by_other_private", "private", None),
        ("owned_by_other_public", "public", None),
        ("owned_by_other_unlisted", "unlisted", None),
        ("owned_by_other_loggedin", "loggedin", None),
        ("owned_by_other_group_not_member", "group", group_not_member),
        ("owned_by_other_group_member", "group", group_member),
        ("owned_by_other_staff", "staff", None),
        ("owned_by_other_superuser", "superuser", None),
    ):
        Dashboard.objects.create(
            slug=slug, owned_by=other, view_policy=policy, view_group=view_group
        )
    # Regular user, then promoted to staff, then to superuser.
    _assert_visibility(dashboard, user, expected, "user")
    user.is_staff = True
    user.save()
    _assert_visibility(dashboard, user, expected_if_staff, "staff user")
    user.is_superuser = True
    user.save()
    _assert_visibility(dashboard, user, expected_if_superuser, "super user")
def test_get_visible_to_user_no_dupes(db):
    """A dashboard visible through several routes is listed exactly once.

    Regression test: this used to return ["example", "example", "example"]
    until https://github.com/simonw/django-sql-dashboard/issues/90 was fixed.
    """
    owner = User.objects.create(username="owner", is_staff=True)
    group = Group.objects.create(name="group")
    for i in range(3):
        group.user_set.add(User.objects.create(username="user{}".format(i)))
    Dashboard.objects.create(
        owned_by=owner,
        slug="example",
        view_policy="public",
        view_group=group,
    )
    slugs = list(Dashboard.get_visible_to_user(owner).values_list("slug", flat=True))
    assert slugs == ["example"]
@pytest.mark.parametrize(
    "dashboard,expected,expected_if_staff,expected_if_superuser",
    (
        ("owned_by_user", True, True, True),
        ("owned_by_other_private", False, False, False),
        ("owned_by_other_loggedin", True, True, True),
        ("owned_by_other_group_not_member", False, False, False),
        ("owned_by_other_group_member", True, True, True),
        ("owned_by_other_staff", False, True, True),
        ("owned_by_other_superuser", False, False, True),
    ),
)
def test_user_can_edit(
    db, client, dashboard, expected, expected_if_staff, expected_if_superuser
):
    """Edit policies are enforced both by user_can_edit() and the admin UI."""
    # `user` is the actor under test; `other` owns all dashboards but one.
    user = User.objects.create(username="test")
    other = User.objects.create(username="other")
    group_member = Group.objects.create(name="group_member")
    user.groups.add(group_member)
    group_not_member = Group.objects.create(name="group_not_member")
    Dashboard.objects.create(slug="owned_by_user", owned_by=user, edit_policy="private")
    Dashboard.objects.create(
        slug="owned_by_other_private", owned_by=other, edit_policy="private"
    )
    Dashboard.objects.create(
        slug="owned_by_other_loggedin", owned_by=other, edit_policy="loggedin"
    )
    Dashboard.objects.create(
        slug="owned_by_other_group_not_member",
        owned_by=other,
        edit_policy="group",
        edit_group=group_not_member,
    )
    Dashboard.objects.create(
        slug="owned_by_other_group_member",
        owned_by=other,
        edit_policy="group",
        edit_group=group_member,
    )
    Dashboard.objects.create(
        slug="owned_by_other_staff", owned_by=other, edit_policy="staff"
    )
    Dashboard.objects.create(
        slug="owned_by_other_superuser", owned_by=other, edit_policy="superuser"
    )
    dashboard_obj = Dashboard.objects.get(slug=dashboard)
    dashboard_obj.queries.create(sql="select 1 + 1")
    # Phase 1: plain (non-staff) user.
    assert dashboard_obj.user_can_edit(user) == expected
    if dashboard != "owned_by_other_staff":
        # This test doesn't make sense for the 'staff' one, they cannot access admin
        # https://github.com/simonw/django-sql-dashboard/issues/44#issuecomment-835653787
        can_edit_using_admin = can_user_edit_using_admin(client, user, dashboard_obj)
        assert can_edit_using_admin == expected
        if can_edit_using_admin:
            # Check that they cannot edit the SQL queries, because they do not
            # have the execute_sql permission (queries render read-only).
            assert not user.has_perm("django_sql_dashboard.execute_sql")
            html = get_admin_change_form_html(client, user, dashboard_obj)
            soup = BeautifulSoup(html, "html5lib")
            assert soup.select("td.field-sql p")[0].text == "select 1 + 1"
    # Phase 2: promote to staff; both checks must agree with the matrix.
    user.is_staff = True
    user.save()
    assert dashboard_obj.user_can_edit(user) == expected_if_staff
    assert can_user_edit_using_admin(client, user, dashboard_obj) == expected_if_staff
    # Confirm that staff user can see the correct dashboards listed
    client.force_login(user)
    dashboard_change_list_response = client.get(
        "/admin/django_sql_dashboard/dashboard/"
    )
    change_list_soup = BeautifulSoup(dashboard_change_list_response.content, "html5lib")
    visible_in_change_list = [
        a.text for a in change_list_soup.select("th.field-slug a")
    ]
    assert set(visible_in_change_list) == {
        "owned_by_other_staff",
        "owned_by_other_group_member",
        "owned_by_other_loggedin",
        "owned_by_user",
    }
    # Phase 3: promote to superuser - can edit everything in the matrix.
    user.is_superuser = True
    user.save()
    assert dashboard_obj.user_can_edit(user) == expected_if_superuser
    assert can_user_edit_using_admin(client, user, dashboard_obj)
def get_admin_change_form_html(client, user, dashboard):
    """Fetch the admin change form for `dashboard` as `user`; return its HTML.

    Temporarily grants is_staff (only staff can access the admin) and
    restores the original flag afterwards.
    """
    was_staff = user.is_staff
    user.is_staff = True
    user.save()
    client.force_login(user)
    response = client.get(dashboard.get_edit_url())
    if not was_staff:
        user.is_staff = False
        user.save()
    return response.content.decode("utf-8")
def can_user_edit_using_admin(client, user, dashboard):
    """True if the admin change form renders an editable title field."""
    title_input = (
        '<input type="text" name="title" class="vTextField"'
        ' maxlength="128" id="id_title">'
    )
    return title_input in get_admin_change_form_html(client, user, dashboard)
def test_superuser_can_reassign_ownership(client, db):
    """Only superusers get a writable owned_by field on the change form."""
    user = User.objects.create(username="test", is_staff=True)
    board = Dashboard.objects.create(
        slug="dashboard", owned_by=user, view_policy="private", edit_policy="private"
    )
    client.force_login(user)
    owned_by_input = b'<input type="text" name="owned_by" value="'
    staff_response = client.get(board.get_edit_url())
    # Staff see the owner read-only (plain text or a link, depending on
    # the Django version).
    assert (
        b'<div class="readonly">test</div>' in staff_response.content
        or b'<div class="readonly"><a href="/admin/auth/user/' in staff_response.content
    )
    assert owned_by_input not in staff_response.content
    # A superuser gets the editable input.
    user.is_superuser = True
    user.save()
    super_response = client.get(board.get_edit_url())
    assert owned_by_input in super_response.content
def test_no_link_to_index_on_saved_dashboard_for_logged_out_user(client, db):
    """Anonymous visitors to a public dashboard get no link back to the index."""
    board = Dashboard.objects.create(
        slug="dashboard",
        owned_by=User.objects.create(username="test", is_staff=True),
        view_policy="public",
    )
    response = client.get(board.get_absolute_url())
    assert b'<a href="/dashboard/">' not in response.content
|
# Author: Takanori Nakane
# License: BSD-2 Clause
# Version: 0.3.20120830
'''
Dynamic Mesh
This script was tested on PyMOL 1.2 and 1.5.
Example:
run dynamic_mesh.py
fetch 1hwk, async=0
fetch 1hwk, 1hwk_map, type=2fofc, async=0
dynamic_mesh 1hwk_map, sym_source=1hwk
show sticks, resn 117
show ribbon
zoom chain A and resn 117
Note: On PyMOL <= 1.4, you have to download the electron density
map from the Uppsala Electron Density Server manually.
'''
from __future__ import print_function
from pymol.callback import Callback
from pymol import cmd
from chempy import cpv
class DynamicMesh(Callback):
    """PyMOL callback that re-carves an isomesh around the current view center.

    Registered via load(); PyMOL invokes __call__ on every redraw, and the
    mesh is recomputed whenever the view center has moved far enough.
    """

    def __init__(self, map_name, level, radius, name, sym_source):
        # Contour level, carve radius and object names for the mesh.
        self.level = level
        self.radius = radius
        self.map_name = map_name
        self.name = name
        self.callback_name = cmd.get_unused_name('_cb')
        # Keep the camera still when the mesh object is (re)created.
        cmd.set("auto_zoom", 0)
        # Enable symmetry expansion only when symmetry info is available
        # (from sym_source if given, otherwise from the map itself).
        symmetry = cmd.get_symmetry(sym_source or map_name)
        if symmetry:
            cmd.set("map_auto_expand_sym", 1)
        # PageUp/PageDown adjust the contour level interactively.
        cmd.set_key("pgup", self.contour_plus)
        cmd.set_key("pgdn", self.contour_minus)
        self.update()

    def load(self):
        # Register self as a callback object so __call__ fires per redraw.
        cmd.load_callback(self, self.callback_name)

    def contour_plus(self, d=0.1):
        # Raise (or lower, for negative d) the contour level and redraw.
        self.level += d
        print("Map level: " + str(self.level))
        self.update()

    def contour_minus(self):
        # Refuse to go to zero or below: 0.15 keeps the next level >= 0.05.
        if self.level < 0.15:
            return
        self.contour_plus(-0.1)

    def update(self):
        # Remember the center we drew at, then carve the mesh around it.
        self.center = cmd.get_position()
        cmd.isomesh(self.name, self.map_name, self.level, "center", buffer=0.0, carve=self.radius)

    def __call__(self):
        # Called by PyMOL on every redraw.  If the mesh object was deleted
        # by the user, unregister ourselves and release the key bindings.
        if self.name not in cmd.get_names('objects'):
            cmd.delete(self.callback_name)
            cmd.set_key("pgup", lambda: None)
            cmd.set_key("pgdn", lambda: None)
            return
        # Redraw only when the view center moved noticeably.
        tmp = cmd.get_position()
        r = cpv.distance_sq(self.center, tmp)
        if (r > 0.01):  # increase this number if it is too slow
            self.update()

    def get_extent(self):
        # Bounding box reported to PyMOL: a cube of +/- radius around center.
        tmp = cmd.get_position()
        return [[i - self.radius for i in tmp], [i + self.radius for i in tmp]]
def dynamic_mesh(map_name, level=1.0, radius=8, name='dynamic_mesh', sym_source=None):
    '''
DESCRIPTION

    Make 'dynamic' mesh from volumetric data such as electron density map.
    The mesh will dynamically follow the center of the view.
    Contour level of isomesh can be changed by PageDown and PageUp keys.

    NOTE: Crystallographic operations can be applied to the map.

USAGE

    dynamic_mesh map_name [, level [, radius [, name [, sym_source ]]]]

ARGUMENTS

    map_name = string: name of volumetric object(map) to display
    level = float: contour level of isomesh {default: 1.0}
    radius = float: radius of isomesh around the center of the view {default: 8}
    name = string: name of mesh object {default: dynamic_mesh}
    sym_source = string: name of object from which symmetry
                 information is derived {default: map_name}

EXAMPLE

    fetch 1hwk, async=0
    fetch 1hwk, 1hwk_map, type=2fofc, async=0
    dynamic_mesh 1hwk_map

SEE ALSO

    isomesh
    '''
    # Replace any existing mesh of the same name, then install the callback.
    # level/radius arrive as strings from the PyMOL command line, hence float().
    cmd.delete(name)
    callback = DynamicMesh(map_name, float(level), float(radius), name, sym_source)
    callback.load()

# Expose as a PyMOL command, reusing isomesh's tab-completion for the map name.
cmd.extend('dynamic_mesh', dynamic_mesh)
cmd.auto_arg[0]['dynamic_mesh'] = cmd.auto_arg[1]['isomesh']
|
"""Tests for alpacka.training_data.replay_buffers."""
import collections
import numpy as np
import pytest
from alpacka.trainers import replay_buffers
# Transition type shared by all tests below: a single scalar field.
_TestTransition = collections.namedtuple('_TestTransition', ['test_field'])
# Keep _TestTransitions with a single number in the buffer.
_test_datapoint_spec = _TestTransition(test_field=())
def test_uniform_samples_added_transition():
    """A single stored transition is exactly what sampling returns."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=10)
    transition = _TestTransition(np.array([123]))
    buffer.add(transition)
    assert buffer.sample(batch_size=1) == transition
def test_uniform_raises_when_sampling_from_an_empty_buffer():
    """Sampling from an empty buffer is an error."""
    empty_buffer = replay_buffers.UniformReplayBuffer(
        _test_datapoint_spec, capacity=10
    )
    with pytest.raises(ValueError):
        empty_buffer.sample(batch_size=1)
def test_uniform_samples_all_transitions_eventually_one_add():
    """Repeated sampling eventually sees every stored value (single add)."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=10)
    buffer.add(_TestTransition(np.array([0, 1])))
    seen = {
        buffer.sample(batch_size=1).test_field.item() for _ in range(100)
    }
    assert seen == {0, 1}
def test_uniform_samples_all_transitions_eventually_two_adds():
    """Repeated sampling eventually sees values from both adds."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=10)
    buffer.add(_TestTransition(np.array([0, 1])))
    buffer.add(_TestTransition(np.array([2, 3])))
    seen = {
        buffer.sample(batch_size=1).test_field.item() for _ in range(100)
    }
    assert seen == {0, 1, 2, 3}
def test_uniform_samples_different_transitions():
    """A batch sampled from many stored transitions is not all one value."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=100)
    buffer.add(_TestTransition(np.arange(100)))
    sampled_values = buffer.sample(batch_size=3).test_field
    assert len(set(sampled_values)) > 1
def test_uniform_oversamples_transitions():
    """A batch larger than the buffer resamples the stored transitions."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=10)
    buffer.add(_TestTransition(np.array([0, 1])))
    big_batch = buffer.sample(batch_size=100).test_field
    assert set(big_batch) == {0, 1}
def test_uniform_overwrites_old_transitions():
    """Past capacity, the oldest transitions (0, 1) get overwritten."""
    buffer = replay_buffers.UniformReplayBuffer(_test_datapoint_spec, capacity=4)
    buffer.add(_TestTransition(np.arange(3)))
    buffer.add(_TestTransition(np.arange(3, 6)))
    surviving = set(buffer.sample(batch_size=100).test_field)
    assert surviving == {2, 3, 4, 5}
@pytest.mark.parametrize('hierarchy_depth', [0, 1, 2])
def test_hierarchical_samples_added_transitions(hierarchy_depth):
    """A stored transition is retrievable at any hierarchy depth."""
    buffer = replay_buffers.HierarchicalReplayBuffer(
        _test_datapoint_spec, capacity=10, hierarchy_depth=hierarchy_depth
    )
    transition = _TestTransition(np.array([123]))
    buffer.add(transition, [0] * hierarchy_depth)
    assert buffer.sample(batch_size=1) == transition
def test_hierarchical_samples_buckets_uniformly():
    """Buckets are sampled uniformly regardless of how full they are."""
    buffer = replay_buffers.HierarchicalReplayBuffer(
        _test_datapoint_spec, capacity=10, hierarchy_depth=1
    )
    # Add zeros and ones at a 10:1 ratio.
    buffer.add(_TestTransition(np.zeros(10)), [0])
    buffer.add(_TestTransition(np.ones(1)), [1])
    # Uniform bucket sampling gives a mean of ~0.5 despite the imbalance.
    sampled_mean = np.mean(buffer.sample(batch_size=1000).test_field)
    np.testing.assert_allclose(sampled_mean, 0.5, atol=0.1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: vizfilter.py
# Author: Qian Ge <geqian1001@gmail.com>
import os
import scipy
import argparse
import tensorflow as tf
import config_path as config
import sys
sys.path.append('../')
from lib.nets.googlenet import BaseGoogLeNet
import lib.utils.viz as viz
import lib.utils.normalize as normlize
def get_parse():
    """Parse the command-line options for the visualization script."""
    parser = argparse.ArgumentParser()
    # (flag, keyword arguments) pairs, registered in order.
    for flag, kwargs in (
        ('--filter', dict(action='store_true', help='Visualize filters')),
        ('--feature', dict(action='store_true', help='Visualize feature maps')),
        ('--im', dict(type=str, help='Image file name')),
    ):
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    FLAGES = get_parse()
    # Inception blocks whose feature maps can be visualized.
    map_list = ['inception4a', 'inception4b', 'inception4c',
                'inception4d', 'inception4e', 'inception3a',
                'inception3b', 'inception5a', 'inception5b']
    model = BaseGoogLeNet(config.googlenet_path)
    # First-layer 7x7 stride-2 convolution weights of GoogLeNet.
    filters = tf.get_default_graph().get_tensor_by_name(
        'conv1_7x7_s2/weights:0')
    if FLAGES.feature:
        # Collect the requested activation tensors and load the input image.
        feature_map = []
        for c_map in map_list:
            feature_map.append(model.conv_layer[c_map])
        assert FLAGES.im is not None, 'File name cannot be None!'
        file_path = os.path.join(config.im_path, FLAGES.im)
        assert os.path.isfile(file_path),\
            'File does not exist! {}'.format(file_path)
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 and
        # needs an explicit `import scipy.misc` on older versions (the file
        # only does `import scipy`) - confirm against the pinned release.
        im = scipy.misc.imread(file_path)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if FLAGES.filter:
            # Dump the learned first-layer filters as an 8x8 grid image.
            learned_filter = sess.run(filters)
            viz.viz_filters(
                learned_filter,
                [8, 8],
                os.path.join(config.save_path, 'GoogLeNet_filter.png'),
                gap=2,
                nf=normlize.norm_std)
        if FLAGES.feature:
            # Feed a single image and dump each block's activation grid.
            maps = sess.run(feature_map, feed_dict={model.inputs: [im]})
            for key, c_map in zip(map_list, maps):
                viz.viz_filters(
                    c_map[0],
                    [10, 10],
                    os.path.join(config.save_path, 'GoogLeNet_{}.png'.format(key)),
                    gap=2,
                    gap_color=10,
                    # nf=normlize.norm_range
                    )
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-14 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Version.documentation_type (auto-generated by Django 1.11.27).

    Records which documentation builder (sphinx variant or mkdocs) a
    version was built with; defaults to 'sphinx'.
    """

    dependencies = [
        ('builds', '0012_add-predefined-match-arg-field'),
    ]

    operations = [
        migrations.AddField(
            model_name='version',
            name='documentation_type',
            field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation the version was built with.', max_length=20, verbose_name='Documentation type'),
        ),
    ]
|
import telnetlib as tn
from influxdb import InfluxDBClient
import time as t
import datetime as dt
import os
import sys
import logging
from logging.handlers import TimedRotatingFileHandler
import sdnotify
from configparser import ConfigParser
# InfluxDB connection settings; populated from the config file at startup.
influx_ip = None
influx_port = None
influx_username = None
influx_password = None
influx_database = None
# Modem telnet address and credentials; populated from the config file.
modem_ip = None
modem_username = None
modem_password = None
class ParsedStats:
    """Parses raw telnet output of `xdslcmd info --stats` and /proc/uptime.

    NOTE(review): parsing is positional - it assumes the exact 176-line
    output of this modem's firmware; any other line count marks the
    connection as down.  Confirm against the target firmware version.
    """

    def __init__(self, conn_stats_output, system_uptime):
        conn_stats_output_split = conn_stats_output.decode().split("\r\n")
        # 176 lines is the shape of a full stats dump while the link is up.
        if len(conn_stats_output_split) == 176:
            self.connection_up = True
            # Line 5: maximum attainable rates (Kbps), upstream then downstream.
            max_line = conn_stats_output_split[5].replace("Max:\tUpstream rate = ", "")
            max_split = max_line.split(", Downstream rate = ")
            self.max_up = int(max_split[0].replace(" Kbps", ""))
            self.max_down = int(max_split[1].replace(" Kbps", ""))
            # Line 6: currently negotiated bearer rates (Kbps).
            current_line = conn_stats_output_split[6].replace("Bearer:\t0, Upstream rate = ", "")
            current_split = current_line.split(", Downstream rate = ")
            self.current_up = int(current_split[0].replace(" Kbps", ""))
            self.current_down = int(current_split[1].replace(" Kbps", ""))
            # Lines 16-18: SNR margin (dB), attenuation (dB) and power (dBm),
            # each given as "down<TAB>up".
            snr_line = conn_stats_output_split[16].replace("SNR (dB):\t ", "")
            snr_split = snr_line.split("\t\t ")
            self.snr_down = float(snr_split[0])
            self.snr_up = float(snr_split[1])
            attn_line = conn_stats_output_split[17].replace("Attn(dB):\t ", "")
            attn_split = attn_line.split("\t\t ")
            self.attn_down = float(attn_split[0])
            self.attn_up = float(attn_split[1])
            pwr_line = conn_stats_output_split[18].replace("Pwr(dBm):\t ", "")
            pwr_split = pwr_line.split("\t\t ")
            self.pwr_down = float(pwr_split[0])
            self.pwr_up = float(pwr_split[1])
            # Lines 98-101: errored seconds (ES), severely errored seconds
            # (SES), unavailable seconds (UAS) and available seconds (AS);
            # up column first, then down.
            err_secs_line = conn_stats_output_split[98].replace("ES:\t\t", "")
            err_secs_split = err_secs_line.split("\t\t")
            self.err_secs_up = int(err_secs_split[0])
            self.err_secs_down = int(err_secs_split[1])
            serious_err_secs_line = conn_stats_output_split[99].replace("SES:\t\t", "")
            serious_err_secs_split = serious_err_secs_line.split("\t\t")
            self.serious_err_secs_up = int(serious_err_secs_split[0])
            self.serious_err_secs_down = int(serious_err_secs_split[1])
            unavailable_secs_line = conn_stats_output_split[100].replace("UAS:\t\t", "")
            unavailable_secs_split = unavailable_secs_line.split("\t\t")
            self.unavailable_secs_up = int(unavailable_secs_split[0])
            self.unavailable_secs_down = int(unavailable_secs_split[1])
            self.available_secs = int(conn_stats_output_split[101].replace("AS:\t\t", ""))
        else:
            # Anything else means the link is down; only uptime is parsed.
            self.connection_up = False
        # /proc/uptime's first field (seconds) sits on the second output line
        # (the first line is the echoed command).
        system_uptime_split = system_uptime.decode().split("\r\n")
        self.system_uptime = float(system_uptime_split[1].split(" ")[0])
def main():
    """Poll the modem every 60 seconds and push its stats to InfluxDB, forever."""
    while True:
        timestamp = dt.datetime.fromtimestamp(t.time()).strftime("%Y-%m-%dT%H:%M:%S")
        try:
            parsed_stats = retrieve_stats()
            send_stats_to_influxdb(parsed_stats, timestamp)
        except Exception as ex:
            # Log file and line of the failure, then keep polling.
            ex_type, value, traceback = sys.exc_info()
            filename = os.path.split(traceback.tb_frame.f_code.co_filename)[1]
            logger.error("{0}, {1}: {2}".format(filename, traceback.tb_lineno, ex))
        t.sleep(60)
def retrieve_stats():
    """Log in to the modem over telnet and fetch DSL stats plus uptime.

    Returns a ParsedStats built from the output of `xdslcmd info --stats`
    and `cat /proc/uptime`.  Exceptions propagate to the caller (the
    original's `except Exception: raise` was a no-op and was removed).
    """
    tnconn = tn.Telnet(modem_ip)
    try:
        # Authenticate, then drop from the ATP shell into a real shell.
        tnconn.read_until(b"Login:")
        tnconn.write("{0}\n".format(modem_username).encode())
        tnconn.read_until(b"Password:")
        tnconn.write("{0}\n".format(modem_password).encode())
        tnconn.read_until(b"ATP>")
        tnconn.write(b"sh\n")
        tnconn.read_until(b"#")
        # Everything up to the next prompt is the command's output.
        tnconn.write(b"xdslcmd info --stats\n")
        stats_output = tnconn.read_until(b"#")
        tnconn.write(b"cat /proc/uptime\n")
        system_uptime = tnconn.read_until(b"#")
        return ParsedStats(stats_output, system_uptime)
    finally:
        # Always close the session; the original leaked the connection.
        tnconn.close()
# Field names reported as -1 when the DSL link is down (everything except
# SystemUptime, which is always real).
_DOWN_FIELDS = (
    "AttDown", "AttnUp", "AvailableSecs", "CurrDown", "CurrUp",
    "ErrSecsDown", "ErrSecsUp", "MaxDown", "MaxUp", "PwrDown", "PwrUp",
    "SeriousErrSecsDown", "SeriousErrSecsUp", "SNRDown", "SNRUp",
    "UnavailableSecsDown", "UnavailableSecsUp",
)


def format_json(parsedStats, timestamp):
    """Build the InfluxDB points payload for one measurement interval.

    When the connection is down every metric is -1 except SystemUptime.
    (The original's `except Exception: raise` wrapper was a no-op and
    was removed.)
    """
    if parsedStats.connection_up:
        fields = {
            "AttDown": parsedStats.attn_down,
            "AttnUp": parsedStats.attn_up,
            "AvailableSecs": parsedStats.available_secs,
            "CurrDown": parsedStats.current_down,
            "CurrUp": parsedStats.current_up,
            "ErrSecsDown": parsedStats.err_secs_down,
            "ErrSecsUp": parsedStats.err_secs_up,
            "MaxDown": parsedStats.max_down,
            "MaxUp": parsedStats.max_up,
            "PwrDown": parsedStats.pwr_down,
            "PwrUp": parsedStats.pwr_up,
            "SeriousErrSecsDown": parsedStats.serious_err_secs_down,
            "SeriousErrSecsUp": parsedStats.serious_err_secs_up,
            "SNRDown": parsedStats.snr_down,
            "SNRUp": parsedStats.snr_up,
            "SystemUptime": parsedStats.system_uptime,
            "UnavailableSecsDown": parsedStats.unavailable_secs_down,
            "UnavailableSecsUp": parsedStats.unavailable_secs_up,
        }
    else:
        fields = {name: -1 for name in _DOWN_FIELDS}
        fields["SystemUptime"] = parsedStats.system_uptime
    return [{"measurement": "connection", "time": timestamp, "fields": fields}]
def send_stats_to_influxdb(parsed_stats, timestamp):
    """Write one set of parsed stats to InfluxDB, creating the DB on first use.

    Relies on the module-level ``influx_*`` settings read from the config.
    The original no-op ``try/except Exception: raise`` wrapper was removed.
    """
    db_client = InfluxDBClient(influx_ip, influx_port, influx_username, influx_password, influx_database)
    # First run only: create the database and a 52-week retention policy.
    # NOTE(review): create_retention_policy is issued without naming the
    # database, so it applies to the client's current database — confirm
    # this matches "dslstats" when influx_database differs.
    if {u'name': u'dslstats'} not in db_client.get_list_database():
        db_client.create_database("dslstats")
        db_client.create_retention_policy("dslstats-retention-policy", "52w", "1", default=True)
    json = format_json(parsed_stats, timestamp)
    db_client.write_points(json)
# --- Service startup: notify systemd, load config, configure logging, run ---
n = sdnotify.SystemdNotifier()
n.notify("READY=1")
# Config path is relative to the working directory the service starts in.
config_path = "dsl-stats-to-influxdb-3_config.ini"
config = ConfigParser()
config.read(config_path)
# InfluxDB connection settings (all required).
if "InfluxDB" in config:
    influx_ip = config["InfluxDB"].get("ip-address")
    influx_port = config["InfluxDB"].get("port")
    influx_username = config["InfluxDB"].get("username")
    influx_password = config["InfluxDB"].get("password")
    influx_database = config["InfluxDB"].get("database")
    # Port arrives as a string from ConfigParser; convert when present.
    if influx_port is not None:
        influx_port = int(influx_port)
else:
    raise Exception("Wasn't able to find the 'InfluxDB' section in the config")
if influx_ip is None or influx_port is None or influx_username is None or influx_password is None or influx_database is None:
    raise Exception("At least one piece of Influx connection information is missing from the config")
# Modem telnet settings (all required).
if "Modem" in config:
    modem_ip = config["Modem"].get("ip-address")
    modem_username = config["Modem"].get("username")
    modem_password = config["Modem"].get("password")
else:
    raise Exception("Wasn't able to find the 'Modem' section in the config")
if modem_ip is None or modem_username is None or modem_password is None:
    raise Exception("At least one piece of Modem connection information is missing from the config")
# Error-level log, rotated nightly, keeping five days of history.
logger = logging.getLogger("Rotating Error Log")
logger.setLevel(logging.ERROR)
handler = TimedRotatingFileHandler("dsl-stats-to-influxdb-3.log", when="midnight", backupCount=5)
formatter = logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
main()
|
import math
import numpy as np
import nibabel as nib
from scipy.special import gammaln
import warnings
warnings.filterwarnings("ignore")
def spm_vol(input_file):
    """Map an image file and return its nibabel volume (header + data proxy)."""
    return nib.load(input_file)
def spm_read_vols(mapped_image_volume):
    """Read an entire mapped image volume as a flat 1-D array.

    Flattening uses Fortran (column-major) order to match MATLAB/SPM's
    spm_read_vols ordering.
    """
    # BUG FIX: get_data() was deprecated and then removed in modern nibabel;
    # get_fdata() is the supported accessor (always returns floating point).
    try:
        data = mapped_image_volume.get_fdata()
    except AttributeError:
        # Very old nibabel without get_fdata(); keep the legacy path working.
        data = mapped_image_volume.get_data()
    return data.flatten(order='F')
def spm_orth(X, OPT='pad'):
    """Recursive Gram-Schmidt orthogonalisation of basis functions.

    Args:
        X: 2-D matrix whose columns are orthogonalised.
        OPT: 'norm' for Euclidean normalisation,
             'pad' for zero padding of null space (default).
             Any other value returns X unchanged.

    Returns:
        The (column-)orthogonalised matrix, same shape as X.
    """
    # The original defined three extra helpers (gs_cofficient, multiply,
    # proj) that were never used by gs(): `multiply` returned a lazy map
    # object under Python 3 and the `proj` name was shadowed by a local
    # inside gs().  They were dead, broken code and have been removed.
    def _gs(X, row_vecs=True, norm=True):
        """Classical Gram-Schmidt over the rows (or columns) of X."""
        if not row_vecs:
            X = X.T
        Y = X[0:1, :].copy()
        for i in range(1, X.shape[0]):
            # Subtract the projection of row i onto the span of prior rows.
            proj = np.diag((X[i, :].dot(Y.T) /
                            np.linalg.norm(Y, axis=1) ** 2).flat).dot(Y)
            Y = np.vstack((Y, X[i, :] - proj.sum(0)))
        if norm:
            Y = np.diag(1 / np.linalg.norm(Y, axis=1)).dot(Y)
        return Y if row_vecs else Y.T

    if OPT == 'norm':
        return _gs(X, row_vecs=False, norm=True)
    elif OPT == 'pad':
        return _gs(X, row_vecs=False, norm=False)
    return X
def spm_hrf(RT, P=None, fMRI_T=16):
    """Return a canonical haemodynamic response function sampled every RT s.

    Args:
        RT: scan repeat time (seconds).
        P: optional override of the response-function parameters
           (two gamma functions); defaults, in seconds:
           P[0] delay of response (rel. onset)   6
           P[1] delay of undershoot (rel. onset) 16
           P[2] dispersion of response           1
           P[3] dispersion of undershoot         1
           P[4] ratio of response to undershoot  6
           P[5] onset (seconds)                  0
           P[6] length of kernel (seconds)       32
        fMRI_T: microtime resolution (bins per RT).

    Returns:
        hrf: the haemodynamic response function, normalised to sum to 1.
    """
    params = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float)
    if P is not None:
        params[0:len(P)] = P

    def _gamma_pdf(x, shape, rate):
        # Gamma pdf evaluated in log space for numerical stability.
        return np.exp(shape * np.log(rate) + (shape - 1) * np.log(x)
                      - (rate * x) - gammaln(shape))

    # Fine time grid at microtime resolution, shifted by the onset.
    dt = RT / float(fMRI_T)
    u = np.arange(0, int(params[6] / dt + 1)) - params[5] / dt
    with np.errstate(divide='ignore'):  # log(0) at u == 0 is expected
        hrf = (_gamma_pdf(u, params[0] / params[2], dt / params[2])
               - _gamma_pdf(u, params[1] / params[3], dt / params[3]) / params[4])
    # Downsample from the fine grid back to one sample per RT.
    idx = np.arange(0, int((params[6] / RT) + 1)) * fMRI_T
    hrf = np.nan_to_num(hrf[idx])
    return hrf / np.sum(hrf)
def spm_detrend(x, p=0):
    """Polynomial detrending over columns.

    Removes polynomial trends of order ``p`` from each column of ``x``.

    Args:
        x: 2-D data matrix (rows are observations).
        p: order of polynomial [default: 0 -> mean removal].

    Returns:
        Detrended data matrix, or [] if x has a zero-length dimension
        (preserving the historical empty-input contract).
    """
    m, n = x.shape
    if (not m) or (not n):
        return []
    if not p:
        # Order 0: subtract each column's mean.
        return x - np.ones((m, 1), dtype='int') * x.mean(axis=0)
    # Polynomial basis 1, t, t^2, ..., t^p over row indices 1..m.
    G = np.zeros((m, p + 1))
    for i in range(0, p + 1):
        # BUG FIX: the original called ndarray.flatten(1); modern NumPy
        # requires a string order ('C'/'F'/...), and the arange result is
        # already 1-D, so no flatten is needed at all.
        G[:, i] = np.arange(1, m + 1) ** i
    # Project out the polynomial subspace via the pseudo-inverse.
    return x - G.dot(np.linalg.pinv(G).dot(x))
def spm_write_vol(image_volume_info, image_voxels, image_name, file_type):
    """Write an image volume to disk.

    Args:
        image_volume_info: structure containing image volume information
            (see spm_vol); only its affine is used for NIfTI output.
        image_voxels: one-, two- or three-dimensional matrix of voxels.
        image_name: output file name without extension.
        file_type: '.nii' or '.nii.gz' writes NIfTI; anything else is
            written as GIfTI ('.gii').
    """
    if file_type == ".nii" or file_type == ".nii.gz":
        # Reuse the source volume's affine so the output stays registered.
        # (The original's `data = image_voxels` copies were unused noise.)
        nifti_image = nib.Nifti1Image(image_voxels, image_volume_info.affine)
        nib.save(nifti_image, image_name + file_type)
    else:
        file_type = '.gii'
        gi = nib.GiftiImage()
        gi.add_gifti_data_array(nib.gifti.GiftiDataArray(image_voxels))
        # NOTE(review): nib.gifti.giftiio.write is deprecated in newer
        # nibabel (nib.save is the replacement) — confirm the installed
        # version before switching.
        nib.gifti.giftiio.write(gi, image_name + file_type)
|
from django.test import TestCase
from django.test.client import RequestFactory
from rest_framework.compat import patterns, url
from rest_framework.reverse import reverse
# Shared request factory so tests can build requests without the test client.
factory = RequestFactory()
def null_view(request):
    # Placeholder view: only its URL name is needed by the reverse() tests.
    pass
# Minimal URLconf exposing the placeholder view under the name 'view'.
urlpatterns = patterns('',
    url(r'^view$', null_view, name='view'),
)
class ReverseTests(TestCase):
    """
    Tests for fully qualified URLs when using `reverse`.
    """
    # Point Django at this module's urlpatterns for the duration of the test.
    urls = 'rest_framework.tests.reverse'
    def test_reversed_urls_are_fully_qualified(self):
        # With a request passed in, reverse() should return an absolute URL
        # built from the request's scheme and host.
        request = factory.get('/view')
        url = reverse('view', request=request)
        self.assertEqual(url, 'http://testserver/view')
|
from .gradient_descent import GradientDescent
from .least_squares import LeastSquares
|
from aql_testcase import AqlTestCase
from aql.util_types import UniqueList, split_list_type, List, value_list_type
# ==============================================================================
class TestListTypes(AqlTestCase):
    """Unit tests for aql's list-like container types.

    Covers UniqueList (ordered, duplicate-free list), split_list_type
    (string-splitting list wrappers), value_list_type (element coercion)
    and the basic List type.  self_test() checks internal invariants.
    """
    def test_unique_list(self):
        """UniqueList keeps first-insertion order and drops duplicates."""
        ul = UniqueList([1, 2, 3, 2, 1, 3])
        ul.self_test()
        # NOTE(review): equality here appears order-insensitive while
        # list(ul) exposes insertion order — confirm UniqueList.__eq__.
        self.assertEqual(ul, [2, 3, 1])
        self.assertEqual(list(ul), [1, 2, 3])
        ul = UniqueList()
        ul.append(1)
        ul.self_test()
        ul.append(3)
        ul.self_test()
        ul.append(1)
        ul.self_test()
        ul.append(2)
        ul.self_test()
        ul.append(3)
        ul.self_test()
        ul.append(1)
        ul.self_test()
        self.assertEqual(list(ul), [1, 3, 2])
        ul.append_front(2)
        ul.self_test()
        self.assertEqual(list(ul), [2, 1, 3])
        ul.extend([4, 1, 2, 2, 5])
        ul.self_test()
        self.assertEqual(list(ul), [2, 1, 3, 4, 5])
        ul.extend_front([1, 2, 2, 3, 1, 1, 5, 5])
        ul.self_test()
        self.assertEqual(list(ul), [1, 2, 3, 5, 4])
        self.assertEqual(list(ul), [1, 2, 3, 5, 4])
        ul.remove(1)
        ul.self_test()
        self.assertEqual(list(ul), [2, 3, 5, 4])
        ul.remove(5)
        ul.self_test()
        self.assertEqual(list(ul), [2, 3, 4])
        # Removing a missing value is a silent no-op.
        ul.remove(55)
        ul.self_test()
        self.assertEqual(list(ul), [2, 3, 4])
        self.assertEqual(ul.pop(), 4)
        ul.self_test()
        self.assertEqual(ul.pop_front(), 2)
        ul.self_test()
        self.assertEqual(ul.pop_front(), 3)
        ul.self_test()
        # In-place += / -= behave like extend / bulk-remove.
        ul += [1, 2, 2, 2, 3, 1, 2, 4, 3, 3, 5, 4, 5, 5]
        ul.self_test()
        self.assertEqual(list(ul), [1, 2, 3, 4, 5])
        ul -= [2, 2, 2, 4, 33]
        ul.self_test()
        self.assertEqual(list(ul), [1, 3, 5])
        self.assertEqual(ul[0], 1)
        self.assertEqual(ul[2], 5)
        self.assertEqual(ul[1], 3)
        self.assertIn(1, ul)
        self.assertEqual(list(reversed(ul)), [5, 3, 1])
        ul.reverse()
        ul.self_test()
        self.assertEqual(ul, [5, 3, 1])
        ul.reverse()
        ul.self_test()
        self.assertEqual(str(ul), "[1, 3, 5]")
        self.assertEqual(ul, UniqueList([1, 3, 5]))
        self.assertEqual(ul, UniqueList(ul))
        self.assertLess(
            UniqueList([1, 2, 2, 2, 3]), UniqueList([1, 2, 1, 1, 1, 4]))
        self.assertLess(UniqueList([1, 2, 2, 2, 3]), [1, 2, 1, 1, 1, 4])
    # ==============================================================================
    def test_splitlist(self):
        """split_list_type splits strings on any of the given separators."""
        l = split_list_type(List, ", \t\n\r")("1,2, 3,,, \n\r\t4")
        self.assertEqual(l, ['1', '2', '3', '4'])
        # Comparison against strings re-splits the right-hand side.
        self.assertEqual(l, "1,2,3,4")
        self.assertEqual(l, "1 2 3 4")
        self.assertEqual(str(l), "1,2,3,4")
        l += "7, 8"
        self.assertEqual(l, ['1', '2', '3', '4', '7', '8'])
        l -= "2, 3"
        self.assertEqual(l, ['1', '4', '7', '8'])
        l -= "5"
        self.assertEqual(l, ['1', '4', '7', '8'])
        l.extend_front("10,12")
        self.assertEqual(l, ['10', '12', '1', '4', '7', '8'])
        l.extend("0,-1")
        self.assertEqual(l, ['10', '12', '1', '4', '7', '8', '0', '-1'])
    # ==============================================================================
    def test_valuelist(self):
        """value_list_type coerces split elements to the value type (int)."""
        l = split_list_type(value_list_type(List, int), ", \t\n\r")(
            "1,2, 3,,, \n\r\t4")
        self.assertEqual(l, [1, 2, 3, 4])
        self.assertEqual(l, "1,2,3,4")
        self.assertEqual(l, "1 2 3 4")
        self.assertEqual(str(l), "1,2,3,4")
        l += [7, 8]
        self.assertEqual(l, ['1', '2', '3', '4', '7', '8'])
        l += 78
        self.assertEqual(l, ['1', '2', '3', '4', '7', '8', 78])
        l -= 78
        self.assertEqual(l, ['1', '2', '3', '4', '7', '8'])
        l -= "2, 3"
        self.assertEqual(l, ['1', '4', '7', '8'])
        l -= "5"
        self.assertEqual(l, ['1', '4', '7', '8'])
        l.extend_front("10,12")
        self.assertEqual(l, ['10', '12', '1', '4', '7', '8'])
        l.extend("0,-1")
        self.assertEqual(l, [10, 12, 1, 4, 7, 8, 0, -1])
        # Item assignment coerces too.
        l[0] = "5"
        self.assertEqual(l, [5, 12, 1, 4, 7, 8, 0, -1])
    # ==============================================================================
    def test_list(self):
        """Basic List supports +=, -= and front/back extension."""
        l = List([1, 2, 3, 4])
        self.assertEqual(l, [1, 2, 3, 4])
        l += [7, 8]
        self.assertEqual(l, [1, 2, 3, 4, 7, 8])
        l += 78
        self.assertEqual(l, [1, 2, 3, 4, 7, 8, 78])
        l -= 78
        self.assertEqual(l, [1, 2, 3, 4, 7, 8])
        l -= [2, 3]
        self.assertEqual(l, [1, 4, 7, 8])
        l -= 5
        self.assertEqual(l, [1, 4, 7, 8])
        l.extend_front([10, 12])
        self.assertEqual(l, [10, 12, 1, 4, 7, 8])
        l.extend([0, -1])
        self.assertEqual(l, [10, 12, 1, 4, 7, 8, 0, -1])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, session, redirect, url_for, jsonify
# Blueprint holding the admin-panel routes; registered by the app factory.
admin_bp = Blueprint('admin_bp', __name__)
@admin_bp.route('/')
def admin_index():
    """Render the iframe base layout, forwarding the requested sub-page."""
    # Renamed the local from `next` to avoid shadowing the builtin.
    target = request.args.get('next', 'welcome')
    return render_template('layout/base_iframe.html', next=target)
@admin_bp.route('/admin/login', methods=['GET', 'POST'])
def admin_login():
    """Render the login page (GET) or process a login attempt (POST -> JSON)."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # NOTE(review): `error` is never set, so ANY username/password is
        # accepted and logged in as admin — confirm where credential
        # validation is supposed to happen before shipping this.
        error = None
        data = dict()
        if error is None:
            # Successful login: reset the session, remember the admin name.
            session.clear()
            session['admin_name'] = username
            data['status_code'] = 1
            data['url'] = url_for('admin_bp.admin_index')
        else:
            data['status_code'] = 0
            data['msg'] = error
        return jsonify(data)
    return render_template('admin/login.html')
@admin_bp.route('/admin/logout')
def admin_logout():
    """Drop the whole session and send the user back to the admin index."""
    session.clear()
    index_url = url_for('admin_bp.admin_index')
    return redirect(index_url)
|
import argparse
import os
import unittest
from typing import Optional
from mock import MagicMock
from prometheus_client import REGISTRY
from bxcommon import node_runner
from bxcommon.models.node_type import NodeType
from bxcommon.services import http_service
from bxcommon.test_utils import helpers
from bxcommon.utils import cli
from bxcommon.utils.stats.memory_statistics_service import memory_statistics
from bxutils.common import url_helper
from bxutils.logging import log_config
from bxutils.logging.log_level import LogLevel
from bxutils.logging.log_record_type import LogRecordType
from bxutils.services.node_ssl_service import NodeSSLService
from bxutils.ssl.data import ssl_data_factory
from bxutils.ssl.data.ssl_certificate_info import SSLCertificateInfo
from bxutils.ssl.data.ssl_file_info import SSLFileInfo
from bxutils.ssl.data.ssl_storage_info import SSLStorageInfo
RELATIVE_PATH_SSL_FILES = "bxcommon/test/ssl_files"
def get_ssl_test_files(abs_path: str, relative_path_ssl_files: str) -> str:
    """Walk up from ``abs_path`` towards the filesystem root until an
    ancestor containing ``relative_path_ssl_files`` is found.

    Returns:
        The joined path ``<ancestor>/<relative_path_ssl_files>`` for the
        first ancestor that contains it (or the path under the root if it
        is never found, instead of looping forever).
    """
    # walk backwards to root till find the "ssl_files" folder
    while abs_path:
        parent, _tail = os.path.split(abs_path)
        # BUG FIX: os.path.split("/") == ("/", ""), so the original loop
        # spun forever when the folder was missing; stop once the path no
        # longer shrinks (we reached the root).
        if parent == abs_path:
            break
        abs_path = parent
        if os.path.exists(os.path.join(abs_path, relative_path_ssl_files)):
            break
    return os.path.join(abs_path, relative_path_ssl_files)
class AbstractTestCase(unittest.TestCase):
    """Common base for bx test cases.

    Configures logging once per test class, stubs out HTTP/metrics side
    effects, and provides helpers for building NodeSSLService fixtures
    from the checked-in ssl test files.
    """
    # Populated by set_ssl_folder() before SSL fixtures are built.
    ssl_folder_path: str = ""
    ssl_folder_url: str = ""
    @classmethod
    def setUpClass(cls):
        """Configure logging and mock out external services for all tests."""
        arg_parser = argparse.ArgumentParser(add_help=False)
        cli.add_argument_parser_logging(arg_parser, default_log_level=LogLevel.DEBUG)
        opts = arg_parser.parse_args(args=[])
        log_config.setup_logging(
            opts.log_format,
            opts.log_level,
            default_logger_names=["bxcommon", "bxgateway", "bxrelay", "bxgateway_internal", "bxapi"],
            log_level_overrides=opts.log_level_overrides,
            enable_fluent_logger=opts.log_fluentd_enable,
            fluentd_host=opts.log_fluentd_host,
            fluentd_queue_size=opts.log_fluentd_queue_size,
            third_party_loggers=node_runner.THIRD_PARTY_LOGGERS,
            fluent_log_level=opts.log_level_fluentd,
            stdout_log_level=opts.log_level_stdout,
        )
        log_config.set_level([LogRecordType.Config.value], LogLevel.WARNING)
        # Prevent tests from making real HTTP calls or recording metrics.
        http_service.get_json = MagicMock()
        http_service.post_json = MagicMock()
        http_service.patch_json = MagicMock()
        http_service.delete_json = MagicMock()
        memory_statistics.start_recording = MagicMock()
        REGISTRY.register = MagicMock()
        helpers.set_extensions_parallelism()
    def set_ssl_folder(self) -> None:
        """Locate the ssl test-file folder; cache its path and file:// URL."""
        self.ssl_folder_path = get_ssl_test_files(
            os.path.abspath(__file__), RELATIVE_PATH_SSL_FILES
        )
        self.ssl_folder_url = url_helper.url_join("file:", self.ssl_folder_path)
    def create_ssl_service(
        self, node_type: NodeType, ca_folder: str = "ca", node_folder: Optional[str] = None
    ) -> NodeSSLService:
        """Build and load a NodeSSLService backed by the on-disk test certs.

        Args:
            node_type: node flavour whose cert/key file names are used.
            ca_folder: sub-folder containing the CA cert and key.
            node_folder: node sub-folder; defaults to the node type's name.

        Returns:
            A NodeSSLService that has already performed blocking_load().
        """
        if node_folder is None:
            node_folder = node_type.name.lower()
        self.set_ssl_folder()
        cert_file_name = ssl_data_factory.get_cert_file_name(node_type)
        key_file_name = ssl_data_factory.get_key_file_name(node_type)
        ca_base_url = url_helper.url_join(
            self.ssl_folder_url, ca_folder
        )
        registration_base_url = url_helper.url_join(
            self.ssl_folder_url, node_folder, "registration_only"
        )
        # Three cert sets: CA, the node's private certs (no URL — local
        # files only), and the registration-only certs.
        node_ssl_service = NodeSSLService(
            node_type,
            SSLStorageInfo(
                self.ssl_folder_path,
                SSLCertificateInfo(
                    SSLFileInfo(
                        ca_folder,
                        "ca_cert.pem",
                        url_helper.url_join(ca_base_url, "ca_cert.pem"),
                    ),
                    SSLFileInfo(
                        ca_folder,
                        "ca_key.pem",
                        url_helper.url_join(ca_base_url, "ca_key.pem"),
                    )
                ),
                SSLCertificateInfo(
                    SSLFileInfo(
                        f"{node_folder}/private",
                        cert_file_name
                    ),
                    SSLFileInfo(
                        f"{node_folder}/private",
                        key_file_name
                    )
                ),
                SSLCertificateInfo(
                    SSLFileInfo(
                        f"{node_folder}/registration_only",
                        cert_file_name,
                        url_helper.url_join(registration_base_url, cert_file_name),
                    ),
                    SSLFileInfo(
                        f"{node_folder}/registration_only",
                        key_file_name,
                        url_helper.url_join(registration_base_url, key_file_name),
                    )
                )
            )
        )
        node_ssl_service.blocking_load()
        return node_ssl_service
    def assert_almost_equal(
        self,
        expected_value: float,
        actual_value: float,
        allowed_error: float = 0.001
    ):
        """Assert two floats differ by at most ``allowed_error``."""
        self.assertTrue(
            abs(expected_value - actual_value) <= allowed_error,
            msg=f"Expected {expected_value}, but "
            f"{actual_value} was more than {allowed_error} away."
        )
|
class UnboundFormException(Exception):
    """Raised when an unbound form (no request data) calls a member
    function that requires request data."""
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
# Local libraries
from cdp import PressureV2
from network_objects import *
from settings import *
class PlotPressureV2(QtGui.QMainWindow):
    """Window listing every device that has sent Pressure V2 packets.

    Shows per-sender packet counts and frequencies, and feeds the pressure
    samples of checked senders into a shared plot sub-window.
    NOTE(review): relies on names star-imported from network_objects /
    settings (UwbNetwork, deque, time, QPLOT_FREQUENCY, TRAIL_LENGTH,
    FREQUENCY_CALCULATION_DEQUE_LENGTH) — confirm those modules export them.
    """
    # CDP packet type this window consumes.
    type = PressureV2.type
    def __init__(self, serial):
        """Build the per-sender table UI for the node with the given serial."""
        QtGui.QMainWindow.__init__(self)
        self.central = QtGui.QScrollArea()
        self.central_inner_widget = QtGui.QWidget()
        self.serial = serial
        self.setWindowTitle('CUWB Monitor - Pressure V2 Devices ID: 0x{:08X}'.format(serial))
        self.grid_layout = QtGui.QGridLayout()
        self.running = True
        self.sub_window = PlotPressureV2SubWindow(self.serial, self)
        # Per-sender bookkeeping, keyed by row index (labels/checks) or by
        # sender id (deques/counters).
        self.id_total = 0
        self.from_id_id_labels = dict()
        self.from_id_count_labels = dict()
        self.from_id_freq_labels = dict()
        self.from_id_enable_checks = dict()
        self.from_id_frequency_deques = dict()
        self.from_id_times = dict()
        self.from_id_count = dict()
        self.from_id_p_data = dict()
        self.from_ids = np.array([])
        # Start counting from packets already buffered for this node.
        self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] - len(UwbNetwork.nodes[self.serial].cdp_pkts[self.type])
        self.grid_layout.addWidget(QtGui.QLabel("Serial#"), 0, 0)
        self.grid_layout.addWidget(QtGui.QLabel("Packet Count"), 0, 1)
        self.grid_layout.addWidget(QtGui.QLabel("Frequency"), 0, 2)
        self.grid_layout.addWidget(QtGui.QLabel("Enable"), 0, 3)
        self.update_labels()
        self.central_inner_widget.setLayout(self.grid_layout)
        self.central.setWidget(self.central_inner_widget)
        self.setCentralWidget(self.central)
        self.resize(400, 50)
        self.timer = self.startTimer(QPLOT_FREQUENCY)
    def timerEvent(self, e):
        """Periodic refresh; closes the window when the network stops."""
        if not UwbNetwork.running:
            self.close()
            return
        self.update_labels()
    def closeEvent(self, e):
        """Stop the refresh timer and mark the window as no longer running."""
        self.killTimer(self.timer)
        self.running = False
        self.close()
    def update_labels(self):
        """Consume newly arrived packets and refresh the per-sender rows."""
        _current_size = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] - self.previous_count
        # Cap the backlog processed per tick at the buffer size.
        if _current_size > 1000: _current_size = 1000
        self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]
        for idx in range(_current_size):
            # Negative indexing walks the newest _current_size packets.
            _target_id = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].serial_number.as_int
            if not (_target_id in self.from_ids):
                # First packet from this sender: allocate widgets and deques.
                self.from_id_id_labels.update([(self.id_total, QtGui.QLabel())])
                self.from_id_count_labels.update([(self.id_total, QtGui.QLabel())])
                self.from_id_freq_labels.update([(self.id_total, QtGui.QLabel())])
                self.from_id_enable_checks.update([(self.id_total, QtGui.QCheckBox())])
                self.from_id_frequency_deques.update([(_target_id, deque([], FREQUENCY_CALCULATION_DEQUE_LENGTH))])
                self.from_id_times.update([(_target_id, deque([], TRAIL_LENGTH))])
                self.from_id_count.update([(_target_id, 0)])
                self.from_id_p_data.update([(_target_id, deque([], TRAIL_LENGTH))])
                self.from_ids = np.sort(np.append(self.from_ids, _target_id))
                _row = self.id_total
                _column = 0
                self.grid_layout.addWidget(self.from_id_id_labels[self.id_total], _row + 1, _column + 0)
                self.grid_layout.addWidget(self.from_id_count_labels[self.id_total], _row + 1, _column + 1)
                self.grid_layout.addWidget(self.from_id_freq_labels[self.id_total], _row + 1, _column + 2)
                self.grid_layout.addWidget(self.from_id_enable_checks[self.id_total], _row + 1, _column + 3)
                # NOTE(review): _column is always 0 here, so this header
                # re-add branch appears unreachable — confirm intent.
                if _column > 0:
                    _row = 2
                    self.grid_layout.addWidget(QtGui.QLabel("Serial#"), _row, _column + 0)
                    self.grid_layout.addWidget(QtGui.QLabel("Packet Count"), _row, _column + 1)
                    self.grid_layout.addWidget(QtGui.QLabel("Frequency"), _row, _column + 2)
                    self.grid_layout.addWidget(QtGui.QLabel("Enable"), _row, _column + 3)
                self.id_total += 1
            self.from_id_times[_target_id].append(UwbNetwork.nodes[self.serial].cdp_pkts_time[self.type][idx - _current_size])
            self.from_id_count[_target_id] += 1
            # Scale factor converts the raw fixed-point pressure reading.
            _scale = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].scale / 2147483647.0
            self.from_id_p_data[_target_id].append(UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].pressure * _scale)
        for _target_id in self.from_ids:
            self.from_id_frequency_deques[_target_id].append((self.from_id_count[_target_id], time.time()))
        for _row in range(self.id_total):
            # Rows follow the sorted sender-id order.
            _target_id = int(self.from_ids[_row])
            if self.from_id_id_labels[_row].text() != '0x{:08X}'.format(_target_id):
                self.from_id_id_labels[_row].setText('0x{:08X}'.format(_target_id))
            if len(self.from_id_times[_target_id]) == 0: continue
            _freq = UwbNetwork.nodes[self.serial].calculate_frequency(self.from_id_frequency_deques[_target_id])
            self.from_id_count_labels[_row].setText('{:5d}'.format(self.from_id_count[_target_id]))
            self.from_id_freq_labels[_row].setText('{:5.1f}Hz'.format(_freq))
            if self.from_id_enable_checks[_row].isChecked():
                # Checked: push this sender's trail into the plot window.
                self.sub_window.update_data('pressure', '0x{:08X}'.format(_target_id),
                                            np.array(self.from_id_p_data[_target_id]),
                                            np.array(self.from_id_times[_target_id]))
            else:
                # Unchecked: empty arrays tell the plot to drop the curve.
                self.sub_window.update_data('pressure', '0x{:08X}'.format(_target_id),
                                            np.array([]),
                                            np.array([]))
    def reset(self):
        """Clear all per-sender counters, trails and curves."""
        for target_id in self.from_ids:
            self.from_id_count[target_id] = 0
            self.from_id_frequency_deques[target_id] = deque([], FREQUENCY_CALCULATION_DEQUE_LENGTH)
            self.from_id_p_data[target_id] = deque([], TRAIL_LENGTH)
            self.from_id_times[target_id] = deque([], TRAIL_LENGTH)
        self.sub_window.color_offset = 0
        for row in range(self.id_total):
            target_id = int(self.from_ids[row])
            self.sub_window.update_data('pressure', '0x{:08X}'.format(target_id),
                                        np.array([]),
                                        np.array([]))
        self.previous_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]
class PlotPressureV2SubWindow(pg.GraphicsWindow):
    """Plot window showing pressure trails for the senders enabled in the
    parent PlotPressureV2 table."""
    def __init__(self, serial, parent):
        """Create the pressure plot plus a separate legend plot."""
        pg.GraphicsWindow.__init__(self)
        self.setWindowTitle('CUWB Monitor - Pressure V2 Plot ID: 0x{:08X}'.format(serial))
        self.serial = serial
        self.resize(1200, 1200)
        self.parent = parent
        # Curve colours are assigned round-robin from this palette.
        self.colors = ['r', 'g', 'b', 'c', 'm', 'y', 'w']
        self.color_offset = 0
        self.p_graph = self.addPlot(title='Pressure', row=0, col=0)
        self.legend_graph = self.addPlot(title='Legend', row=1, col=0)
        #self.p_graph.setYRange(-5, 5)
        self.p_graph.showGrid(x=True, y=True)
        self.legend = self.legend_graph.addLegend()
        self.p_data = dict()
        self.legend_data = dict()
        self.timer = self.startTimer(300)
        self.running = True
    def timerEvent(self, e):
        """Close this window when the network or the parent window stops."""
        if not UwbNetwork.running or not self.parent.running:
            self.close()
    def update_data(self, plot_type, serial, data, time):
        """Update (or remove) the curve for one sender.

        Empty ``data`` removes the sender's curve and legend entry.
        """
        if data.size == 0:
            if serial in self.p_data:
                self.p_data[serial].clear()
                self.legend_data[serial].clear()
                del self.p_data[serial]
                del self.legend_data[serial]
            return
        if not (serial in self.p_data):
            # Rebuild the legend from scratch whenever a new curve appears.
            # NOTE(review): broad except with print() silently papers over
            # legend-removal failures — confirm this is intentional.
            try:
                self.legend_graph.legend.scene().removeItem(self.legend_graph.legend)
            except Exception as e: print(e)
            self.p_data.update([(serial, self.p_graph.plot(name=serial, pen=pg.mkPen(self.colors[self.color_offset % len(self.colors)], width=2)))])
            self.legend_data.update([(serial, self.p_graph.plot(name=serial, pen=pg.mkPen(self.colors[self.color_offset % len(self.colors)], width=2)))])
            self.color_offset += 1
            self.legend = self.legend_graph.addLegend()
            for _curve_serials in self.p_data.keys():
                self.legend.addItem(self.legend_data[_curve_serials], _curve_serials)
        if plot_type == 'pressure': self.p_data[serial].setData(time, data)
    def closeEvent(self, e):
        """Stop the refresh timer and mark the window as closed."""
        self.killTimer(self.timer)
        self.running = False
|
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import traceback
import time
import sys
import os
NEXT_BUTTON = "//div[@class='h0']/div[2]"
# Keep looping until the next one is reached
def reachNextElement(i):
    """Click through to the next email, retrying while the DOM is unstable.

    Args:
        i: index of the email being processed; at 50 we pause so Chrome
           can lazily load more of the inbox.
    """
    # If 50 is reached, wait to allow Chrome to load more
    if i == 50:
        time.sleep(5)
    # Retry until the click lands: Gmail re-renders frequently, which
    # raises transient stale-element / intercepted-click errors.
    while True:
        try:
            time.sleep(0.1)
            driver.find_element_by_xpath(NEXT_BUTTON).click()
            return
        except (StaleElementReferenceException, ElementClickInterceptedException):
            continue
# Write out the emails to emails.txt
def writeEmails(email_senders):
    """Write out a list of email senders to a text file.

    Args:
        email_senders: iterable of email sender names (one per line).
    """
    # BUG FIX: use a context manager so the handle is closed even if a
    # write raises (the original also shadowed the builtin `file`).
    with open("emails.txt", "w") as out:
        for name in email_senders:
            out.write(name)
            out.write('\n')
# Read in the login information from the user
def getLogin():
    """Retrieve the login information for the user.

    Reads credentials.txt: first line is the email, second the password.

    Returns:
        tuple: (email, password) with trailing whitespace stripped.
    """
    # BUG FIX: the original never closed the file handle.
    with open("credentials.txt", "r") as cred:
        email = cred.readline().rstrip()
        password = cred.readline().rstrip()
    return email, password
if __name__ == "__main__":
    # Define constants: login link and the path to the chromedriver.exe file
    GMAIL_LOGIN = "https://accounts.google.com/ServiceLogin?service=mail"
    CHROMEDRIVER_PATH = os.getcwd()
    GMAIL, PASSWORD = getLogin()
    TRACEBACK = "\n [LOGGING] Clean exit... goodbye"
    try:
        # Run the window without it being open
        # NOTE(review): `options` is built but never passed to
        # webdriver.Chrome below, so the browser will NOT actually run
        # headless — confirm intent.
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        ######################################
        # Asssumes that chromedriver.exe is placed in the current working directory
        driver = webdriver.Chrome(CHROMEDRIVER_PATH + "/chromedriver.exe")
        time.sleep(1)
        # Tell the driver to open up the webpage
        driver.get(GMAIL_LOGIN)
        #driver.minimize_window()
        border = " ===================================================================="
        print(border)
        print(" ==== PROGRAM ACTIVE, DO NOT USE WINDOW UNTIL PROGRAM COMPLETION ====")
        print(border + '\n')
        # Send the email to the login page
        # NOTE(review): find_element_by_* is removed in Selenium 4; this
        # script requires Selenium 3.x as written.
        driver.find_element_by_xpath('//*[@id="identifierId"]').send_keys(GMAIL)
        driver.find_element_by_xpath('//*[@id="identifierNext"]').click()
        time.sleep(1)
        # Send the password to the login page
        driver.find_element_by_xpath('//input[@name="password"]').send_keys(PASSWORD)
        driver.find_element_by_xpath('//*[@id="passwordNext"]').click()
        time.sleep(1)
        # Accept automated browsing --- this still needs to be tested
        """
        try:
            driver.find_element_by_xpath('//*[@id="identifierDone"]').click()
        except:
            print("Did not prompt user to accept automated browsing...")
        try:
            driver.find_element_by_xpath("//div[@role='link']").click()
        except:
            print("Did not prompt user for confirm email...")
        """
        # Total number of emails, read from the inbox counter widget.
        email_count = driver.find_element_by_xpath("//span[@class='Dj']/span[2]").text
        email_count = int(email_count)
        print(" [LOGGING] Found", email_count, "emails!")
        # Load up all of the emails
        driver.implicitly_wait(5)
        emails = driver.find_elements_by_xpath("//*[@class='yW']/span")
        # Access the first email
        if email_count > 0:
            driver.implicitly_wait(5)
            emails[0].click()
        # Create a dictionary to hold the emails to unsubscribe from
        email_dict = {}
        for i in range(email_count):
            # Find the name of the sender
            driver.implicitly_wait(5)
            sender = driver.find_element_by_xpath("//span[@class='gD']").get_attribute("name")
            # Continue if an unsubscribe link has already been found
            if sender in email_dict:
                print(" [LOGGING] Already processed \"" + sender + "\"...")
            # If a new email is found, see if we can unsubscribe from it
            else:
                try:
                    driver.implicitly_wait(5)
                    if (driver.find_element_by_class_name("Ca") != None):
                        print(" [LOGGING] Can unsubscribe from \"" + sender + "\"...")
                        email_dict[sender] = 1
                except NoSuchElementException:
                    print(" [LOGGING] \"" +sender+ "\": Already unsubscribed or unable to...")
            reachNextElement(i)
        writeEmails(email_dict)
    except Exception as e:
        print("\n" + border, border, sep="\n")
        print(e)
        TRACEBACK = traceback.format_exc()
    finally:
        print(TRACEBACK)
        # NOTE(review): if an exception fires before email_dict (or border)
        # is assigned, this finally block raises NameError — consider
        # initialising them before the try.
        writeEmails(email_dict)
        #driver.close()
        sys.exit(0)
|
# Generated by Django 3.1.3 on 2020-12-30 15:35
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Student.Phone_Number.

    NOTE: generated by Django 3.1.3; avoid hand-editing the operations.
    """
    dependencies = [
        ('authentication', '0012_auto_20201230_1730'),
    ]
    operations = [
        migrations.AddField(
            model_name='student',
            name='Phone_Number',
            # max_length=10 combined with MinLengthValidator(10) forces
            # exactly 10 characters.
            field=models.CharField(default=0, max_length=10, validators=[django.core.validators.MinLengthValidator(10)]),
            preserve_default=False,
        ),
    ]
|
'''1.Faça um programa em Python que leia uma palavra pelo teclado e faça a impressão
conforme o exemplo a seguir para a palavra AMOR:
A
AM
AMO
AMOR'''
'''2.Faça um programa em Python que leia uma palavra pelo teclado e faça a impressão
conforme o exemplo a seguir para a palavra AMOR
AMOR
AMO
AM
A'''
#######################################################################################
def ex01():
    """Exercise 1: print the word's prefixes in growing order (A, AM, ...)."""
    word = input("Digite uma palavra: ")
    # Prefix of length 0 prints a blank line first, matching the original.
    for n in range(len(word) + 1):
        print(word[:n])
def ex02():
    """Exercise 2: print the word's prefixes in shrinking order (AMOR, AMO, ...).

    BUG FIX: the original used ``range(0, len(string) + 1, -1)``, which is
    an empty range, so nothing was ever printed.  Counting down from the
    full length to 1 produces the output the exercise asks for.
    """
    string = input("Digite uma palavra: ")
    for i in range(len(string), 0, -1):
        print(string[0:i])
def main():
    """Simple text menu dispatching to exercise 1 or 2."""
    # NOTE(review): int() raises ValueError on non-numeric input, and any
    # option other than 1/2 silently does nothing.
    op = int(input(f"1 > exercicio 1"
                   "\n2 > exercicio 2"
                   "\nOpcao>> "))
    if op == 1:
        ex01()
    elif op == 2:
        ex02()
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":` guard.
main()
|
from decaylanguage.decay.ampgen2goofit import ampgen2goofit
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
# Directory containing this test file (works under pathlib or pathlib2).
DIR = Path(__file__).parent.resolve()
def test_full_convert():
    """AmpGen -> GooFit conversion matches the checked-in reference output.

    Compares stripped line *sets* (ignoring the 'Generated on' timestamp
    line), so ordering differences do not fail the test.
    """
    text = ampgen2goofit(DIR / '../models/DtoKpipipi_v2.txt', ret_output=True)
    with (DIR / 'output/DtoKpipipi_v2.cu').open() as f:
        assert (set(x.strip() for x in text.splitlines() if 'Generated on' not in x)
                == set(x.strip() for x in f.readlines() if 'Generated on' not in x))
|
import torch
import torch.nn as nn

# Two samples over three classes; targets are the class indices 1 and 2.
target = torch.tensor([1, 2], dtype=torch.long)
# Per-class scores for each of the two samples.
output = torch.tensor([[0.5, 0.2, 0.4],
                       [0.7, 0.5, 0.1]])
# Mean-reduced multi-class margin loss.
criterion = nn.MultiMarginLoss(p=1, weight=torch.ones(3), margin=0.1)
print(criterion(output, target))
# tensor(0.3167)
# Same loss without reduction: one value per sample.
criterion = nn.MultiMarginLoss(p=1, weight=torch.ones(3), margin=0.1, reduction='none')
print(criterion(output, target))
# tensor([0.2333, 0.4000])
# -*- coding: utf-8 -*-
import scrapy
from ..items import ZomatoItem
class ZomSpideySpider(scrapy.Spider):
    """Scrapy spider that scrapes restaurant search cards from Zomato Mumbai.

    parse() extracts card-level fields into a ZomatoItem, then follows the
    restaurant link so second_parser() can finish and yield the item.
    """
    name = 'zom_spidey'
    allowed_domains = ['zomato.com']
    start_urls = ['https://www.zomato.com/mumbai/restaurants']
    page_num = 2
    res_link= 'nikal gaya'
    city='mumbai'
    def parse(self, response):
        """Parse one listing page of restaurant search cards."""
        # NOTE(review): a single ZomatoItem is mutated across loop
        # iterations; consider creating one per card.
        items = ZomatoItem()
        div = response.css(".search-card")
        for c in div:
            items['rest_name'] = c.css('.fontsize0::text').extract()
            # NOTE(review): res_link can be None when a card has no link,
            # which would make scrapy.Request below raise — consider a guard.
            res_link = c.css('.fontsize0').css("::attr('href')").get()
            items['res_link'] = c.css('.fontsize0').css("::attr('href')").get()
            items['rating'] = c.css('.rating-value').css("::text").extract()
            items['area'] = c.css('b').css("::text").extract()
            items['address'] = c.css(".ln22").css("::text").extract()
            div2 = c.css(".clearfix.row")
            for d in div2:
                items['cuisine'] = d.css(".col-s-11.col-m-12.nowrap.pl0").css("::text").extract()
                items['cost_for_two'] = d.css(".res-cost .pl0").css("::text").extract()
            link = c.css(".item.result-menu").css("::attr('href')").get()
            order_link = c.css(".result-menu+ a").css("::attr('href')").get()
            if order_link is None:
                items['can_order_via_zomato'] = "No"
            else:
                items['can_order_via_zomato'] = "Yes"
            request= scrapy.Request(res_link, callback=self.second_parser)
            request.meta['items'] = items
            yield request
            # menu_url = "https://www.zomato.com"+str(link)
            # if menu_url is not None:
            #     yield response.follow(menu_url, callback=self.second_parser, meta={'items':items})
            # print("I came here!!!!!!\n\n")
        # NOTE(review): page_num starts at 2, so `page_num < 2` is never
        # true and pagination is dead code — confirm the intended limit.
        if self.page_num < 2:
            self.page_num+=1
            next_url = "https://www.zomato.com/mumbai/restaurants?page="+str(self.page_num)
            print("\n\n\n",next_url)
            if next_url is not None:
                yield response.follow(next_url, callback=self.parse)
    def second_parser(self, response):
        """Finish the item from the restaurant detail page and yield it."""
        items = response.meta["items"]
        # NOTE(review): 'asc-cCbPEh.gNiJfC' looks like a class selector
        # missing its leading dot ('.asc-cCbPEh.gNiJfC') — confirm.
        sec = response.css('asc-cCbPEh.gNiJfC')
        print("sec\n\n",sec)
        return items
    def third_parser(self, response):
        # Placeholder callback; never referenced by the spider.
        print("Yes!")
# coding: utf-8
# # Preamble
# In[3]:
# Notebook-exported setup cell: plotting config, pretrained VGG16, dataset paths.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
import seaborn as sns
sns.set(style="white")
# Allows for interactive shell - outputs all non variable statements
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
# Full VGG16 with the ImageNet classifier head (downloads weights on first run).
model = VGG16(weights='imagenet', include_top=True)
# In[2]:
import os
import shutil
from glob import glob
np.random.seed(10)
# Directory layout used throughout the notebook.
current_dir = os.getcwd()
DATASET_DIR=os.path.join(current_dir, 'dataset')
CROSSVALID_DIR=os.path.join(DATASET_DIR, 'cross_valid')
TRAIN_DIR = os.path.join(DATASET_DIR, 'train')
TEST_DIR = os.path.join(DATASET_DIR, 'test')
# NOTE(review): duplicate of the CROSSVALID_DIR assignment above.
CROSSVALID_DIR = os.path.join(DATASET_DIR, 'cross_valid')
SAMPLE_DIR = os.path.join(DATASET_DIR, 'sample')
WEIGHTS_DIR = os.path.join(current_dir, 'weights')
# # Use Keras Vgg16 to get the predictions
# * Download the dataset in the current directory.
# ```
# kg download -c 'dogs-vs-cats-redux-kernels-edition'
# ```
# * Inspect the data
# * Prepare a single image
# * Feed it into pretrained vgg16
# ## Inspect the data
#
# Graph the image
# In[6]:
# Unzip a single file to test on the pretrained model
#!unzip -oj "test.zip" "test/1.jpg" -d "/tmp/cats_dogs"
# Load the image
# img_path = '/tmp/cats_dogs/1.jpg'
# Load one sample image at VGG16's expected 224x224 input size.
img_path = 'dataset/train/cat/cat.1.jpg'
img = image.load_img(img_path, target_size=(224, 224))
# Plot the single image
f = plt.figure(figsize=(10, 5))
sp = f.add_subplot(1, 1, 1) ## (rows, cols, index)
sp.axis('On')
sp.set_title(img_path, fontsize=16)
plt.imshow(img)
# ## Predict using Keras Vgg16
# In[ ]:
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add batch dimension: (1, 224, 224, 3)
# NOTE(review): this cell predicts WITHOUT preprocess_input, apparently to
# compare against the properly preprocessed prediction in the next cell.
preds = model.predict(x)
decode_predictions(preds)
# In[ ]:
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)  # VGG16-specific channel ordering / mean subtraction
preds = model.predict(x)
decode_predictions(preds)
# # Kaggle Competition
# 1. Prepare dataset
# 1. Download the dataset
# 1. Unzip training and test dataset
# 1. Create the training, validation, sample batch dataset
# 1. Create the labels
# 1. Model preparation
# 1. Finetune the keras model
# 1. Pop the last layer, freeze all layers, add a softmax layer and update set of classes
# 1. Fit the keras model
# 1. Train the updated keras model
# 1. Save and load the model after couple of epochs
# 1. Perform predictions
# 1. Debug
# 1. View the confusion matrix
# 1. Visual Inspection
# 1. Inspect correct labels
# 1. Inspect incorrect labels
# 1. Inspect correct labels with high probability
# 1. Inspect incorrect label with high probability
# 1. Inspect correct labels with medium probability
# 1. Kaggle Submission
# 1. Prepare csv file
# 1. Submit
# ## Prepare dataset
# See `lesson1_catsdogs-prepare_dataset.ipynb` which will download and create the various labeled datasets.
# ## Perform predictions
# In[ ]:
# NOTE(review): 'intial_sample_run_2.h5' looks like a typo for 'initial...';
# it must match the file name actually written during training.
model.load_weights(os.path.join(WEIGHTS_DIR, 'intial_sample_run_2.h5'))
def get_data_as_np(path, batch_size=5):
    """Load every image under *path* into a single numpy array.

    Args:
        path: directory laid out for Keras ``flow_from_directory``
            (one subdirectory per class).
        batch_size: images pulled per generator step.

    Returns:
        np.ndarray of shape (num_images, 224, 224, 3).

    NOTE(review): relies on a module-level ``datagen`` (an ImageDataGenerator)
    that is not defined in this script -- confirm it is created elsewhere.
    """
    batches = datagen.flow_from_directory(
        path,
        target_size=(224, 224),
        # Bug fix: batch_size was hard-coded to 10, silently ignoring the
        # function's own parameter.
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
    return np.concatenate([batches.next() for i in range(len(batches))])
# NOTE(review): 'crossvalid_dir' is undefined (CROSSVALID_DIR above is the
# likely intended name) -- this line raises NameError as written.
model.predict(get_data_as_np(crossvalid_dir, 5), batch_size=5)
# In[ ]:
# NOTE(review): the next two lines were pasted from a class method -- 'self',
# 'path' and 'batch_size' do not exist at module level, and 'preds' below is
# never assigned here; this whole cell fails at runtime as written.
test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
test_batches, self.model.predict_generator(test_batches, test_batches.nb_sample)
preds[1:4]
preds.shape
|
#!/usr/bin/env python
import socket
import sys
# Create a UDP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = ('0.0.0.0', 10000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
while True:
print >>sys.stderr, '\nwaiting to receive message'
data, address = sock.recvfrom(4096)
print >>sys.stderr, 'received %s bytes from %s' % (len(data), address)
print >>sys.stderr, data
responselines = data.splitlines()
src_ip = responselines[1]
udp_ip = address[0]
if src_ip is udp_ip:
print "udp src IP %s matches packet contents ip %s" % (udp_ip, src_ip)
else:
print "udp src IP %s DOES NOT MATCH packet contents ip %s" % (udp_ip, src_ip)
# if data:
# sent = sock.sendto(data, address)
# print >>sys.stderr, 'sent %s bytes back to %s' % (sent, address)
|
from beyonic.apis.abstract_api import AbstractAPI
class Payment(AbstractAPI):
    """
    Payment api wrapper class.

    Thin resource wrapper: all request behavior is inherited from
    AbstractAPI; this class only points it at the payments endpoint.
    """
    # Resource path segment used by AbstractAPI when building request URLs.
    _method_path = 'payments'
|
"""
SPDX-FileCopyrightText: 2019 oemof developer group <contact@oemof.org>
SPDX-License-Identifier: MIT
"""
import filecmp
import os
from shutil import copyfile
import pandas as pd
import pytest
import requests
from windpowerlib.data import (
check_data_integrity,
check_turbine_data,
get_turbine_types,
restore_default_turbine_data,
store_turbine_data_from_oedb,
)
class TestDataCheck:
    """Tests for windpowerlib.data: integrity checks, oedb download and
    restoring the default turbine data files.

    ``setup_class`` prepares filename *templates* (note the ``{0}``
    placeholder) for local fixtures under tests/oedb and for the package's
    own data directory; ``teardown_class`` removes generated files and
    restores the shipped default data.
    """
    @classmethod
    def setup_class(cls):
        # Local fixture directory next to this test file.
        cls.path = os.path.join(os.path.dirname(__file__), "oedb")
        cls.filename = os.path.join(cls.path, "{0}.csv")
        cls.df = pd.read_csv(cls.filename.format("turbine_data"), index_col=0)
        cls.broken_fn = os.path.join(cls.path, "{0}_broken.csv")
        cls.backup_fn = os.path.join(cls.path, "{0}_backup.csv")
        cls.tmp_fn = os.path.join(cls.path, "{0}_tmp.csv")
        # The package's live data directory that some tests overwrite.
        cls.orig_path = os.path.join(
            os.path.dirname(__file__), os.pardir, "windpowerlib", "oedb"
        )
        cls.orig_fn = os.path.join(cls.orig_path, "{0}.csv")
    @classmethod
    def teardown_class(cls):
        # Remove generated artifacts and restore the shipped data files.
        cls.path = os.path.join(os.path.dirname(__file__), "oedb")
        for f in os.listdir(cls.path):
            if "error" in f or "backup" in f or "tmp" in f:
                os.remove(os.path.join(cls.path, f))
        restore_default_turbine_data()
    def test_normal_data_check(self):
        """Check data which is fine."""
        check_turbine_data(self.filename.format("turbine_data"))
    def test_data_check_logging_warnings(self, caplog):
        """Check logging warnings about the checked data."""
        # Flip two flags so the data claims curves that do not exist.
        self.df.loc["GE158/4800", "has_power_curve"] = True
        self.df.loc["GE100/2750", "has_cp_curve"] = True
        self.df.to_csv(self.tmp_fn.format("turbine_data"))
        # NOTE(review): the unformatted '{0}_tmp.csv' template is passed here;
        # presumably check_data_integrity formats it per dataset -- confirm.
        check_data_integrity(self.tmp_fn, min_pc_length=26)
        assert "E48/800: power_curve is too short (25 values)" in caplog.text
        assert "GE158/4800: No power curve" in caplog.text
        assert "GE100/2750: No cp-curve but has_cp_curve" in caplog.text
    def test_global_error(self):
        """Check Error message if turbine data is corrupt."""
        msg = r"could not convert string to*"
        name = "turbine_data"
        # Swap the broken fixture into the package, check, then restore.
        copyfile(self.orig_fn.format(name), self.backup_fn.format(name))
        copyfile(self.broken_fn.format(name), self.orig_fn.format(name))
        with pytest.raises(ValueError, match=msg):
            # NOTE(review): passes the unformatted filename template -- see
            # the note in test_data_check_logging_warnings.
            check_turbine_data(self.orig_fn)
        copyfile(self.backup_fn.format(name), self.orig_fn.format(name))
    def test_broken_pwr_curve(self):
        """Check Error message if power_curves data is corrupt."""
        name = "power_curves"
        copyfile(self.orig_fn.format(name), self.backup_fn.format(name))
        copyfile(self.broken_fn.format(name), self.orig_fn.format(name))
        msg = "could not convert string to float"
        with pytest.raises(ValueError, match=msg):
            check_turbine_data(self.orig_fn)
        copyfile(self.backup_fn.format(name), self.orig_fn.format(name))
    def test_get_turbine_types(self, capsys):
        """Test the `get_turbine_types` function."""
        get_turbine_types()
        captured = capsys.readouterr()
        assert "Enercon" in captured.out
        get_turbine_types("oedb", print_out=False, filter_=False)
        msg = "`turbine_library` is 'wrong' but must be 'local' or 'oedb'."
        with pytest.raises(ValueError, match=msg):
            get_turbine_types("wrong")
    def test_store_turbine_data_from_oedb(self):
        """Test `store_turbine_data_from_oedb` function."""
        # Record file mtimes, download, then assert every file was rewritten.
        t = {}
        for fn in os.listdir(self.orig_path):
            t[fn] = os.path.getmtime(os.path.join(self.orig_path, fn))
        store_turbine_data_from_oedb()
        for fn in os.listdir(self.orig_path):
            assert t[fn] < os.path.getmtime(os.path.join(self.orig_path, fn))
    def test_wrong_url_load_turbine_data(self):
        """Load turbine data from oedb with a wrong schema."""
        with pytest.raises(
            ConnectionError,
            match=r"Database \(oep\) connection not successful*",
        ):
            store_turbine_data_from_oedb("wrong_schema")
    @pytest.mark.skip(reason="Use it to check a persistent ssl error")
    def test_wrong_ssl_connection(self):
        """Test failing ssl connection. To avoid this error in data.py the in
        the function fetch_turbine_data_from_oedb in data.py verify was set
        to False to ignore the exception in the requests statement.
        If this test fails you can set verify to True and remove this test if
        all the other tests work fine.
        """
        schema = "supply"
        table = "wind_turbine_library"
        oep_url = "https://oep.iks.cs.ovgu.de/"
        url = oep_url + "/api/v0/schema/{}/tables/{}/rows/?".format(
            schema, table
        )
        with pytest.raises(requests.exceptions.SSLError):
            requests.get(url, verify=True)
    def test_restore_default_data(self):
        """Test the clean recovery of the data files."""
        names = ["turbine_data", "power_curves", "power_coefficient_curves"]
        default_path = os.path.join(
            self.orig_path, os.pardir, "data", "default_turbine_data"
        )
        # Corrupt every live data file and verify it differs from the default.
        for name in names:
            copyfile(self.broken_fn.format(name), self.orig_fn.format(name))
            file = self.orig_fn.format(name)
            default_file = os.path.join(
                default_path, os.path.basename(self.orig_fn.format(name))
            )
            assert not filecmp.cmp(file, default_file)
        restore_default_turbine_data()
        # After restore every file must again match the shipped default.
        for name in names:
            file = self.orig_fn.format(name)
            default_file = os.path.join(
                default_path, os.path.basename(self.orig_fn.format(name))
            )
            assert filecmp.cmp(file, default_file)
|
# coding:utf-8
import re
import torch
import torch.nn as nn
from torch import optim
from torch.nn import functional as F
import random
import codecs
import pathlib
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from pathlib import Path
from functools import partial
SOS = '<sos>'  # start-of-sentence token
EOS = '<eos>'  # end-of-sentence token
PAD = '<pad>'  # padding token
UNK = '<unk>'  # out-of-vocabulary token
# Character class stripped during cleaning: ASCII punctuation plus common
# full-width Chinese punctuation.
PUNCTUATIONS = r"[!\"#$%&'()\*\+,-./:;<=>\?@\[\]^_`{\|}~“”?,!【】()、。:;’‘……¥·]"
### Preprocess the data ###
class DataProcess(object):
    """Load a tab-separated English/Chinese parallel corpus, build the
    vocabularies and serve padded index batches for training.

    English lines are tokenized on spaces; Chinese lines are treated as
    character sequences (empty split tag).
    """
    def __init__(self, data_path: pathlib.PosixPath) -> None:
        # read the data: one "english\tchinese" pair per line
        with codecs.open(data_path, 'r', 'utf-8') as file:
            self.datas = file.read().split('\n')[:-1]
        self.data_length = len(self.datas)
        print('The data contains {} lines.'.format(self.data_length))
        # preprocess the data
        self.eng_datas = []
        self.chs_datas = []
        for line in self.datas:
            line = line.split('\t')
            self.eng_datas.append(self._cleanLine(line[0]))
            self.chs_datas.append(self._cleanLine(line[1]))
        # build vocab dict (word->index and index->word for each language)
        self.eng_vocab_idx, self.eng_idx_vocab = self._buildDict(self.eng_datas, ' ')
        self.chs_vocab_idx, self.chs_idx_vocab = self._buildDict(self.chs_datas, '')
        print('English Words Number: {}\t Chinese Words Number: {}'\
            .format(len(self.eng_vocab_idx), len(self.chs_vocab_idx)))
    def _cleanLine(self, line: str) -> str:
        """Lower-case *line*, strip whitespace and remove punctuation."""
        line = re.sub(PUNCTUATIONS , '', line.lower().strip())
        return line
    def _buildDict(self, datas: list, split_tag: str) -> tuple:
        """Build vocab for datas.

        split_tag '' means per-character iteration (Chinese); any other
        tag splits the line (e.g. ' ' for English words).
        """
        vocab_idx = {SOS: 0, EOS: 1, PAD: 2, UNK: 3}
        idx_vocab = {0: SOS, 1: EOS, 2: PAD, 3: UNK}
        for line in datas:
            for word in line if split_tag == '' else line.split(split_tag):
                if word not in vocab_idx:
                    vocab_idx[word] = len(vocab_idx)
                    idx_vocab[len(idx_vocab)] = word
        print('There are {} words.'.format(len(vocab_idx)))
        return vocab_idx, idx_vocab
    def _createBatchIndices(self, data_length, batch_size):
        """Create batch indices according to the whole data.

        Yields (start, end) slice bounds; the last batch may be short.
        """
        batch_number = data_length // batch_size
        reminder = data_length % batch_size
        batch_number = batch_number if reminder == 0 else batch_number + 1
        for idx in range(batch_number):
            yield (idx * batch_size, idx * batch_size + batch_size)
    def _padding(self, data, pad_idx):
        # Right-pad each index list to the batch's maximum length and also
        # return the original (unpadded) lengths.
        seq_length = [len(line) for line in data]
        max_length = max(seq_length)
        padding_func = lambda line : line + [pad_idx for _ in range(max_length - len(line))]
        data_padded = list(map(padding_func, data))
        return data_padded, seq_length
    def _convertToIdx(self, line, vocab_idx, split_tag):
        # Map tokens to vocab indices; unknown tokens fall back to UNK.
        line = line if split_tag == '' else line.split(split_tag)
        return [vocab_idx[word] if word in vocab_idx else vocab_idx[UNK] for word in line]
    def convertToStr(self, line, idx_vocab):
        # Inverse of _convertToIdx; accepts a tensor or a plain list of ints.
        if type(line) == torch.Tensor:
            return [idx_vocab[idx.item()] if idx.item() in idx_vocab else idx_vocab[3] for idx in line]
        else:
            return [idx_vocab[idx] if idx in idx_vocab else idx_vocab[3] for idx in line]
    def _add_tag(self, line, tag, is_front):
        # Prepend or append a control token (SOS/EOS) to an index list.
        return [tag] + line if is_front else line + [tag]
    def dataBatch(self, batch_size: int, is_shuffle: bool=True) -> dict:
        """Provide the batch data.

        Generator yielding dicts of padded LongTensors:
        src_input_data, src_seq_length, tgt_input_data (SOS-prefixed),
        tgt_output_data (EOS-suffixed), tgt_seq_length, actual_bs.
        """
        data_pairs = list(zip(self.eng_datas, self.chs_datas))
        if is_shuffle:
            random.shuffle(data_pairs)
        eng_datas, chs_datas = zip(*data_pairs)
        for start, end in self._createBatchIndices(self.data_length, batch_size):
            # 'temp' indicates that '<sos>', '<eos>' and '<pad>' will be added.
            scr_temp_data = eng_datas[start : end]
            tgt_temp_data = chs_datas[start : end]
            # convert str to idx
            convert_src_func = partial(self._convertToIdx, vocab_idx=self.eng_vocab_idx, split_tag=' ')
            scr_temp_data = list(map(convert_src_func, scr_temp_data))
            convert_tgt_func = partial(self._convertToIdx, vocab_idx=self.chs_vocab_idx, split_tag='')
            tgt_temp_data = list(map(convert_tgt_func, tgt_temp_data))
            # add <sos>, <eos> to target data
            add_sos_func = partial(self._add_tag, tag=self.chs_vocab_idx[SOS], is_front=True)
            tgt_temp_input_data = list(map(add_sos_func, tgt_temp_data))
            add_eos_func = partial(self._add_tag, tag=self.chs_vocab_idx[EOS], is_front=False)
            tgt_temp_output_data = list(map(add_eos_func, tgt_temp_data))
            # padding the idx
            scr_input_data, src_seq_length = self._padding(scr_temp_data, self.eng_vocab_idx[PAD])
            tgt_input_data, _ = self._padding(tgt_temp_input_data, self.chs_vocab_idx[PAD])
            tgt_output_data, tgt_seq_length = self._padding(tgt_temp_output_data, self.chs_vocab_idx[PAD])
            data_batch = {'src_input_data': torch.tensor(scr_input_data),
                          'src_seq_length': torch.tensor(src_seq_length),
                          'tgt_input_data': torch.tensor(tgt_input_data),
                          'tgt_output_data': torch.tensor(tgt_output_data),
                          'tgt_seq_length': torch.tensor(tgt_seq_length),
                          'actual_bs': len(scr_input_data)}
            yield data_batch
### Model ###
class Encoder(nn.Module):
    """Bi-directional GRU encoder over padded token-index batches.

    Embeds the source tokens, runs them through a bidirectional GRU using
    packed sequences (so padding is skipped), and projects the concatenated
    forward/backward states back down to ``hidden_size``.
    """

    def __init__(self, input_size, hidden_size, num_layers=2):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Submodules (embedding -> GRU -> projection).
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(input_size=hidden_size,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          bidirectional=True,
                          batch_first=True)
        self.out = nn.Linear(hidden_size * 2, hidden_size)

    def forward(self, src_input_data, src_seq_length, hidden):
        """Encode a batch of source sequences.

        Args:
            src_input_data: [batch_size, seq_length] token indices.
            src_seq_length: [batch_size] true (unpadded) lengths.
            hidden: [num_layers*2, batch_size, hidden_size] initial state.
        Returns:
            [batch_size, seq_length, hidden_size] encoder outputs.
        """
        # (b, s) -> (b, s, h)
        token_vectors = self.embedding(src_input_data)
        # Pack so the GRU ignores padded positions.
        packed = torch.nn.utils.rnn.pack_padded_sequence(token_vectors,
                                                         src_seq_length,
                                                         batch_first=True,
                                                         enforce_sorted=False)
        # gru_out: packed (b, s, 2h); hidden: (2n, b, h)
        gru_out, hidden = self.gru(packed, hidden)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(gru_out, batch_first=True)
        # Project (b, s, 2h) -> (b, s, h).
        return self.out(unpacked)

    def initHidden(self, batch_size):
        """Return a zero initial state [num_layers*2, batch_size, hidden]."""
        shape = (self.num_layers * 2, batch_size, self.hidden_size)
        return torch.zeros(*shape)
class Decoder(nn.Module):
    """Attention decoder: one GRU step per target token with dot-product
    attention over the encoder outputs.

    Fix over the original: ``self.dropout`` was first assigned the float
    probability and then immediately shadowed by the ``nn.Dropout`` module,
    leaving a dead, misleading assignment. The raw probability is now kept
    as ``self.dropout_p`` (a backward-compatible addition) and
    ``self.dropout`` is only ever the module.
    """
    def __init__(self, hidden_size, output_size, num_layers=2, dropout=0.1):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.dropout_p = dropout  # raw probability, kept under a distinct name
        # Submodule creation order is unchanged so parameter initialization
        # under a fixed RNG seed stays identical to the original.
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.query_matrix = nn.Linear(hidden_size, hidden_size)
        self.key_matrix = nn.Linear(hidden_size, hidden_size)
        self.value_matrix = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(input_size=hidden_size,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          bidirectional=True,
                          batch_first=True)
        self.out = nn.Linear(hidden_size*2, output_size)
    def forward(self, input_word, hidden, encoder_outputs, attention_mask):
        """Run a single decoding step.

        Args:
            input_word: [batch_size, 1] token indices.
            hidden: [num_layers*2, batch_size, hidden_size] GRU state.
            encoder_outputs: [batch_size, src_seq_length, hidden_size].
            attention_mask: [batch_size, 1, src_seq_length] additive mask
                (0 for valid positions, -10000 for padding).
        Returns:
            output: [batch_size, output_size] log-probabilities.
            hidden: updated GRU state.
        """
        # embedding: (b, 1) -> (b, 1, h)
        embedded = self.embedding(input_word)
        embedded = self.dropout(embedded)
        # query: (b, 1, h)
        query = self.query_matrix(embedded)
        # key: (b, s, h) -> (b, h, s) for the dot product
        key = self.key_matrix(encoder_outputs)
        key = key.permute(0, 2, 1)
        # value: (b, s, h)
        value = self.value_matrix(encoder_outputs)
        # attention_scores: (b, 1, s); the additive mask drives padded
        # positions to ~-10000 so softmax gives them ~0 weight
        attention_scores = torch.matmul(query, key)
        attention_scores += attention_mask
        attention_probs = F.softmax(attention_scores, dim=-1)
        # context_layer: (b, 1, h); the singleton dim is the GRU time step,
        # so no squeeze is needed before the recurrent call
        context_layer = F.relu(torch.matmul(attention_probs, value))
        output, hidden = self.gru(context_layer, hidden)
        # project (b, 1, 2h) -> (b, output_size) log-probabilities
        output = F.log_softmax(self.out(output.squeeze(1)), dim=1)
        return output, hidden
    def initHidden(self, batch_size):
        """Return a zero initial state [num_layers*2, batch_size, hidden]."""
        return torch.zeros(self.num_layers*2, batch_size, self.hidden_size)
def makeAttentionMask(src_seq_length):
    """Build an additive attention mask from true sequence lengths.

    Args:
        src_seq_length: [batch_size] tensor of unpadded lengths.
    Returns:
        [batch_size, 1, max_length] float tensor with 0 at valid positions
        and -10000 at padded positions (added to attention scores before
        softmax).
    """
    longest = int(torch.max(src_seq_length))
    positions = torch.arange(longest)
    # (b, s) boolean validity grid -> float 1/0.
    valid = (positions.unsqueeze(0) < src_seq_length.unsqueeze(1)).float()
    # Map valid -> 0, padded -> -10000, then insert the head/query dim.
    additive = (valid - 1) * 10000
    return additive.unsqueeze(1)
def train(datas, optimizers, is_predict=False, predict_path='prediciton.txt', sample_k=3):
    """Run one optimization step of the seq2seq model on a single batch.

    Relies on module-level globals: ``encoder``, ``decoder``, ``criterion``
    and ``data_process`` (set up in the ``__main__`` block).

    Args:
        datas: batch dict produced by DataProcess.dataBatch().
        optimizers: {'encoder': optimizer, 'decoder': optimizer}.
        is_predict: if True, append sample_k random (SRC, TGT, GOLDEN)
            triples to predict_path.
        predict_path: output file for sampled predictions.
            NOTE(review): the default 'prediciton.txt' is misspelled; left
            unchanged because callers may rely on the existing file name.
        sample_k: number of samples written when is_predict is True.
    Returns:
        float: batch loss normalized by the number of unmasked target tokens.
    """
    src_input_data = datas['src_input_data']
    src_seq_length = datas['src_seq_length']
    tgt_input_data = datas['tgt_input_data']
    tgt_output_data = datas['tgt_output_data']
    tgt_seq_length = datas['tgt_seq_length']
    batch_size = datas['actual_bs']
    # Human-readable versions of the batch, used only for prediction dumps.
    src_sentences = [data_process.convertToStr(line, data_process.eng_idx_vocab) for line in src_input_data]
    tgt_sentences = [data_process.convertToStr(line, data_process.chs_idx_vocab) for line in tgt_output_data]
    encoder_optimizer = optimizers['encoder']
    decoder_optimizer = optimizers['decoder']
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Encoder
    encoder_hidden = encoder.initHidden(batch_size)
    encoder_outputs = encoder(src_input_data, src_seq_length, encoder_hidden)
    # Decoder (teacher forcing: gold token fed at every step)
    attention_mask = makeAttentionMask(src_seq_length)
    decoder_hidden = decoder.initHidden(batch_size)
    seq_length = tgt_input_data.size()[1]
    # Mask so padded target positions contribute zero loss.
    tgt_loss_mask = torch.arange(torch.max(tgt_seq_length))[None, :] < tgt_seq_length[:, None]
    tgt_loss_mask = tgt_loss_mask.float()
    loss = 0
    predictions = []
    for t in range(seq_length):
        # (b, 1)
        input_t = tgt_input_data[:, t][:, None]
        golden_t = tgt_output_data[:, t]
        mask_t = tgt_loss_mask[:, t]
        output, decoder_hidden = decoder(input_t, decoder_hidden, encoder_outputs, attention_mask)
        topv, topi = output.topk(1)
        predictions.append(topi)
        loss += torch.sum(criterion(output, golden_t) * mask_t)
    predictions = [data_process.convertToStr(line, data_process.chs_idx_vocab) for line in torch.cat(predictions, dim=1)]
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    if is_predict:
        # Dump a few random samples from this batch for eyeballing progress.
        random_idx = random.sample(range(batch_size), k=sample_k)
        with codecs.open(predict_path, 'a', 'utf-8') as file:
            for idx in random_idx:
                to_write = 'SRC: {}\nTGT: {}\nGOLDEN: {}\n\n'.format(src_sentences[idx], predictions[idx], tgt_sentences[idx])
                file.write(to_write)
                file.flush()
    return (loss / torch.sum(tgt_loss_mask)).item()
if __name__ == '__main__':
    data_process = DataProcess('data/cmn-eng/cmn.txt')
    # NOTE(review): vocab sizes are hard-coded; they must match the sizes
    # DataProcess prints for this corpus or the embeddings will be misaligned.
    encoder = Encoder(input_size=6707, hidden_size=320)
    decoder = Decoder(hidden_size=320, output_size=3474)
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=5e-3)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=5e-3)
    # reduction='none' so train() can apply the per-token padding mask.
    criterion = nn.NLLLoss(reduction='none')
    train_steps = 1
    all_losses = []
    act_step = 1
    for _ in tqdm(range(train_steps)):
        for datas in data_process.dataBatch(100):
            loss = train(datas, {'encoder': encoder_optimizer, 'decoder': decoder_optimizer},
                        True, 'data/prediction.txt')
            all_losses.append(loss)
            print('Step: {}\t Loss: {:2f}'.format(act_step, loss))
            act_step +=1
    print('Finish Training!')
    # Persist weights, then plot the loss curve.
    torch.save(encoder.state_dict(), 'models/encoder.pth')
    torch.save(decoder.state_dict(), 'models/decoder.pth')
    plt.figure()
    plt.plot(all_losses)
    plt.show()
# step = 1
# all_losses = []
# for step in tqdm(range(train_steps)):
# for data_batch in data_process.dataBatch(10):
# src_input_data = data_batch['src_input_data']
# src_seq_length = data_batch['src_seq_length']
# tgt_input_data = data_batch['tgt_input_data']
# tgt_output_data = data_batch['tgt_output_data']
# tgt_seq_length = data_batch['tgt_seq_length']
# batch_size = data_batch['actual_bs']
# src_sentences = [data_process.convertToStr(line, data_process.eng_idx_vocab) for line in src_input_data]
# tgt_sentences = [data_process.convertToStr(line, data_process.chs_idx_vocab) for line in tgt_output_data]
# encoder_optimizer.zero_grad()
# decoder_optimizer.zero_grad()
# # Encoder
# encoder_hidden = encoder.initHidden(batch_size)
# encoder_outputs = encoder(src_input_data, src_seq_length, encoder_hidden)
# # attention mask
# max_length = torch.max(src_seq_length)
# attention_mask = torch.arange(max_length)[None, :] < src_seq_length[:, None]
# attention_mask = attention_mask.float()[:, None, :]
# attention_mask = (attention_mask - 1) * 10000
# # Decoder
# decoder_hidden = decoder.initHidden(batch_size)
# seq_length = tgt_input_data.size()[1]
# tgt_loss_mask = torch.arange(torch.max(tgt_seq_length))[None, :] < tgt_seq_length[:, None]
# tgt_loss_mask = tgt_loss_mask.float()
# loss = 0
# predictions = []
# for t in range(seq_length):
# # (b, 1)
# input_t = tgt_input_data[:, t][:, None]
# golden_t = tgt_output_data[:, t]
# mask_t = tgt_loss_mask[:, t]
# output, decoder_hidden = decoder(input_t, decoder_hidden, encoder_outputs, attention_mask)
# topv, topi = output.topk(1)
# predictions.append(topi)
# loss += torch.sum(criterion(output, golden_t) * mask_t)
# predictions = [data_process.convertToStr(line, data_process.chs_idx_vocab) for line in torch.cat(predictions, dim=1)]
# loss.backward()
# encoder_optimizer.step()
# decoder_optimizer.step()
# all_losses.append((loss / torch.sum(tgt_loss_mask).item()))
# print('Step: {}\t Loss: {:2f}'.format(step, loss / torch.sum(tgt_loss_mask)))
# predicted_idx = random.sample(range(batch_size), k=3)
# with codecs.open('data/cmn-eng/prediciton.txt', 'a', 'utf-8') as file:
# for idx in predicted_idx:
# to_write = 'SRC: {}\nTGT: {}\nGOLDEN: {}\n\n'.format(src_sentences[idx], predictions[idx], tgt_sentences[idx])
# file.write(to_write)
# file.flush()
# test_sentence = 'hug tom'
# input_data = data_process._convertToIdx(data_process._cleanLine(test_sentence), vocab_idx=data_process.eng_vocab_idx, split_tag=' ')
# seq_length = torch.tensor([len(input_data)])
# input_data = torch.tensor(input_data)[None, :]
# encoder.load_state_dict(torch.load('models/encoder.pth'))
# encoder_hidden = encoder.initHidden(1)
# encoder_outputs = encoder(input_data, seq_length, encoder_hidden)
# decoder.load_state_dict(torch.load('models/decoder.pth'))
# decoder_hidden = decoder.initHidden(1)
# # attention mask
# max_length = torch.max(seq_length)
# attention_mask = torch.arange(max_length)[None, :] < seq_length[:, None]
# attention_mask = attention_mask.float()[:, None, :]
# attention_mask = (attention_mask - 1) * 10000
# predict_t = torch.tensor([data_process.chs_vocab_idx[SOS]])[None, :]
# results = []
# while predict_t.item() != data_process.chs_vocab_idx[EOS]:
# output, decoder_hidden = decoder(predict_t, decoder_hidden, encoder_outputs, attention_mask)
# topv, topi = output.topk(1)
# predict_t = topi
# results.append(predict_t.item())
# print(data_process.convertToStr(results, data_process.chs_idx_vocab)) |
from nets.deeplab import Deeplabv3
from keras.utils.data_utils import get_file
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
import time
import keras
from keras import backend as K
import numpy as np
# MobileNetV2 width multiplier -- presumably consumed by the backbone; not
# referenced in the visible part of this script (TODO confirm).
ALPHA = 1.0
# Pretrained DeepLabV3+ (MobileNetV2 backbone) weights.
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
# Number of segmentation classes and the model's input resolution.
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
def generate_arrays_from_file(lines,batch_size):
    """Infinite generator yielding (image, one-hot mask) training batches.

    Args:
        lines: list of 'jpg_name;png_name' records read from train.txt.
        batch_size: number of samples per yielded batch.
    Yields:
        (X, Y): X of shape (batch, HEIGHT, WIDTH, 3) scaled to [0, 1];
        Y of shape (batch, HEIGHT*WIDTH, NCLASSES) one-hot labels.
    """
    # Total number of samples
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        # Collect one batch_size worth of data
        for _ in range(batch_size):
            # Reshuffle at the start of each pass over the data
            if i==0:
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            # Read the input image from file
            img = Image.open(r".\dataset2\jpg" + '/' + name)
            img = img.resize((WIDTH,HEIGHT))
            img = np.array(img)
            img = img/255
            X_train.append(img)
            name = (lines[i].split(';')[1]).replace("\n", "")
            # Read the label image from file
            img = Image.open(r".\dataset2\png" + '/' + name)
            img = img.resize((int(WIDTH),int(HEIGHT)))
            img = np.array(img)
            # One-hot encode the mask: channel c flags pixels of class c
            seg_labels = np.zeros((int(HEIGHT),int(WIDTH),NCLASSES))
            for c in range(NCLASSES):
                seg_labels[: , : , c ] = (img[:,:,0] == c ).astype(int)
            seg_labels = np.reshape(seg_labels, (-1,NCLASSES))
            Y_train.append(seg_labels)
            # Wrap around and restart after a full pass through the data
            i = (i+1) % n
        yield (np.array(X_train),np.array(Y_train))
def loss(y_true, y_pred):
    """Per-pixel categorical cross-entropy between labels and predictions."""
    return K.categorical_crossentropy(y_true,y_pred)
if __name__ == "__main__":
    log_dir = "logs/"
    # Build the model
    model = Deeplabv3(classes=2,input_shape=(HEIGHT,WIDTH,3))
    # model.summary()
    weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
                    WEIGHTS_PATH_MOBILE,
                    cache_subdir='models')
    # model.load_weights(weights_path,by_name=True,skip_mismatch=True)
    # Load pretrained weights; skip layers whose shapes do not match.
    model.load_weights(weights_path,by_name=True,skip_mismatch=True)
    # Open the dataset's txt index file
    with open(r".\dataset2\train.txt","r") as f:
        lines = f.readlines()
    # Shuffle the lines; this txt drives which samples are read for training.
    # Shuffled data trains better.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    # 90% for training, 10% for validation.
    num_val = int(len(lines)*0.1)
    num_train = len(lines) - num_val
    # Checkpointing: save once every 3 epochs
    checkpoint_period = ModelCheckpoint(
                                    log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                    monitor='val_loss',
                                    save_weights_only=True,
                                    save_best_only=True,
                                    period=3
                                )
    # LR schedule: halve the learning rate when val_loss has not improved
    # for 2 epochs, then keep training
    reduce_lr = ReduceLROnPlateau(
                            monitor='val_loss',
                            factor=0.5,
                            patience=2,
                            verbose=1
                        )
    # Early stopping: when val_loss stops improving the model is basically
    # trained, so stop
    early_stopping = EarlyStopping(
                            monitor='val_loss',
                            min_delta=0,
                            patience=6,
                            verbose=1
                        )
    # Cross-entropy
    model.compile(loss = loss,
            optimizer = Adam(lr=1e-4),
            metrics = ['accuracy'])
    batch_size = 4
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    # Start training
    model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
            validation_steps=max(1, num_val//batch_size),
            epochs=50,
            initial_epoch=0,
            callbacks=[checkpoint_period, reduce_lr, early_stopping])
    model.save_weights(log_dir+'last1.h5')
|
from constants import GAME_SPEED
from rlbot.utils.game_state_util import GameState, GameInfoState
from controller import *
from extra_math import *
from structs import *
# Base action class
class Action:
    """Base class for persistent bot behaviors; subclasses override update()."""
    def update(self, agent, packet):
        # Called every game tick; subclasses must implement.
        raise NotImplementedError
    # Idle is called when a maneuver has precedence over an action.
    def idle(self, agent, packet):
        pass
# Base maneuver class
class Maneuver:
    """Base class for short movement sequences that take precedence over
    actions; subclasses override update() (see Triple_Dash, which returns
    True when finished)."""
    def update(self, agent, packet):
        # Called every game tick; subclasses must implement.
        raise NotImplementedError
# This is the heart of the function. Tweaking these values should give one
# an idea of how tight the windows are for some of these movements.
class Triple_Dash(Maneuver):
    """Scripted wave-dash sequence: an initial jump/micro-jump setup followed
    by three timed dodge stages driven by elapsed time and ``self.stage``.

    The timing windows below are hand-tuned; changing them changes the move.
    """
    def __init__(self):
        # first_frame is reused as a "stage just started" latch for both
        # the micro-jump phase and the dash phase.
        self.first_frame = True
        self.has_done_micro_jump = False
        self.forward = Vec3()
        self.start_location = Vec3()
        self.stage = 0
        self.start = 0
    def update(self, agent, packet):
        """Advance the maneuver one tick; returns True when it is finished."""
        my_car = packet.game_cars[agent.index]
        # The first part of this function could probably be its own maneuver, but I'm too lazy to do that.
        # This performs the initial jump to set up the micro jump.
        if not self.has_done_micro_jump:
            if self.first_frame:
                self.first_frame = False
                self.start = packet.game_info.seconds_elapsed
                agent.controller_state.jump = True
                # Pin the game speed while the maneuver runs.
                game_info_state = GameInfoState(game_speed=GAME_SPEED)
                game_state = GameState(game_info=game_info_state)
                agent.set_game_state(game_state)
            delta = packet.game_info.seconds_elapsed - self.start
            # Wait until the car is back near the ground (z < 17) after 0.7s.
            if my_car.location.z < 17 and delta > 0.7:
                self.has_done_micro_jump = True
                self.first_frame = True
        else:
            if self.first_frame:
                self.first_frame = False
                self.start = packet.game_info.seconds_elapsed
                self.start_location = my_car.location
                # Flattened forward direction and its perpendicular.
                self.forward = Vec3(1).align_to(my_car.rotation).flatten().normal()
                self.side = Vec3(self.forward.y, -self.forward.x)
                # One frame jump
                agent.controller_state.jump = True
            # Calculate delta time
            delta = packet.game_info.seconds_elapsed - self.start
            # Boost through everything
            agent.controller_state.boost = True
            # Handbrake!
            agent.controller_state.handbrake = self.stage >= 2
            # The actual logic. More branches than an oak tree, but, ya know...
            if delta < 0.35:
                Align_Car_To(agent, packet, self.forward, Vec3(0, 0, 1))
            elif delta < 0.74:
                Align_Car_To(agent, packet, self.forward + self.side * 0.25 + Vec3(0, 0, 1), Vec3(0, 0, 1))
            elif self.stage < 1:
                # First dodge.
                self.stage += 1
                agent.controller_state.jump = True
                agent.controller_state.pitch = -1
                agent.controller_state.yaw = 0
                agent.controller_state.roll = -0.4
                agent.controller_state.steer = 0
            elif delta < 1:
                agent.controller_state.roll = -1
            elif self.stage < 2:
                # Second dodge.
                self.stage += 1
                agent.controller_state.jump = True
                agent.controller_state.pitch = -1
                agent.controller_state.yaw = 1
                agent.controller_state.roll = 1
                agent.controller_state.steer = 0
            elif delta < 1.2:
                agent.controller_state.roll = 1
            elif self.stage < 3:
                # Third dodge.
                self.stage += 1
                agent.controller_state.jump = True
                agent.controller_state.pitch = -1
                agent.controller_state.yaw = -1
                agent.controller_state.roll = -1
                agent.controller_state.steer = 0
            elif delta < 3:
                # Coast along the current velocity until the window closes.
                drive(agent, packet, my_car.location + my_car.velocity)
                agent.controller_state.boost = False
            else:
                # How to exit the maneuver
                return True
        return False
# Responsible for testing stuff
class Test_Maneuver(Action):
    """Test harness action: sets up and triggers the Triple_Dash maneuver."""
    def __init__(self):
        pass
    def update(self, agent, packet):
        # Drives to the middle of the field, stops, then runs the triple wave dash thingy.
        my_car = packet.game_cars[agent.index]
        drive(agent, packet, Vec3())
        # Facing roughly back toward the field center (dot < -0.95): stop.
        if Vec3(1).align_to(my_car.rotation).dot(my_car.location.normal()) < -0.95:
            agent.controller_state.boost = False
            agent.controller_state.throttle = 0
            # Once effectively stationary and the round is live, hand off.
            if my_car.velocity.length() <= 5 and packet.game_info.is_round_active:
                agent.set_maneuver(Triple_Dash())
        # Returning self keeps this action active on subsequent ticks.
        return self
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
from core import db
from sqlalchemy_utils import ArrowType
from workspaces.Access.types import SpaceAccessType, SpaceAccessRechargePeriod, SpaceAccessEntryAccounting
import arrow
# Define your database models here
class SpaceNodeMap(db.Model):
    """Association table linking access nodes to spaces (many-to-many)."""
    __tablename__ = 'spaceaccess_space_node_map'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign keys into the nodes and spaceaccess_spaces tables.
    node_id = db.Column(db.Integer(), db.ForeignKey('nodes.id'))
    space_id = db.Column(db.Integer(), db.ForeignKey('spaceaccess_spaces.id'))
class AccessgroupSpaceMap(db.Model):
    """Association table linking spaces to access groups (many-to-many)."""
    __tablename__ = 'spaceaccess_accessgroup_space_map'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign keys into the spaceaccess_spaces and spaceaccess_groups tables.
    space_id = db.Column(db.Integer(), db.ForeignKey('spaceaccess_spaces.id'))
    group_id = db.Column(db.Integer(), db.ForeignKey('spaceaccess_groups.id'))
class AccessgroupUserMap(db.Model):
    """Association table linking users to access groups (many-to-many)."""
    __tablename__ = 'spaceaccess_accessgroup_user_map'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign keys into the users and spaceaccess_groups tables.
    user_id = db.Column(db.Integer(), db.ForeignKey('users.id'))
    group_id = db.Column(db.Integer(), db.ForeignKey('spaceaccess_groups.id'))
class SpaceAccessSpace(db.Model):
    """A space (room/area) whose entrances are controlled by access nodes."""
    __tablename__ = 'spaceaccess_spaces'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), default="")
    description = db.Column(db.String(128), default="")
    # Nodes acting as entrances to this space (via the SpaceNodeMap table).
    entrance_nodes = db.relationship(
        "Node",
        backref=db.backref("spaceaccess_spaces", lazy=True),
        secondary=SpaceNodeMap.__tablename__,
        lazy='subquery',
    )
class SpaceAccessGroup(db.Model):
    """An access group bundling users, the spaces they may enter, and the
    access policy (budget, recharge, expiry and daily time-window rules)."""
    __tablename__ = 'spaceaccess_groups'
    id = db.Column(db.Integer, primary_key=True)
    # Members of this group (via AccessgroupUserMap). uselist=False on the
    # backref means a user is associated with at most one access group.
    users = db.relationship(
        "User",
        backref=db.backref("spaceaccess_accessgroup", uselist=False, lazy=True),
        secondary=AccessgroupUserMap.__tablename__,
        lazy='subquery',
    )
    # Spaces this group grants access to (via AccessgroupSpaceMap).
    spaces = db.relationship(
        "SpaceAccessSpace",
        backref=db.backref("spaceaccess_accessgroup", lazy=True),
        secondary=AccessgroupSpaceMap.__tablename__,
        lazy='subquery',
    )
    name = db.Column(db.String(120), default="")
    note = db.Column(db.String(120), default="")
    access_type = db.Column(db.Enum(SpaceAccessType), default=SpaceAccessType.NO_ACCESS)
    entry_accounting_type = db.Column(db.Enum(SpaceAccessEntryAccounting), default=SpaceAccessEntryAccounting.DAYS)
    access_need_budget = db.Column(db.Boolean, default=False)
    # Automatic budget recharge configuration.
    access_gets_recharged = db.Column(db.Boolean, default=False)
    access_recharge_budget_amount = db.Column(db.Integer, default=15)
    access_recharge_budget_period = db.Column(db.Enum(SpaceAccessRechargePeriod),
                                              default=SpaceAccessRechargePeriod.MONTHS)
    access_recharge_budget_every_periods = db.Column(db.Integer, default=4)
    access_recharge_budget_get_cutoff = db.Column(db.Boolean, default=True)
    access_recharge_budget_cutoff_max = db.Column(db.Integer, default=15)
    # Default expiry applied to members of this group.
    access_expires_as_default = db.Column(db.Boolean, default=False)
    access_expires_default_days = db.Column(db.Integer, default=365)
    access_use_group_budget = db.Column(db.Boolean, default=False)
    last_access_at = db.Column(ArrowType, default=None)
    group_budget = db.Column(db.Integer, default=0)
    # presumably a bitmask of allowed weekdays (127 = 0b1111111, all seven
    # days) — TODO confirm against the access-check logic
    day_access_mask = db.Column(db.Integer, default=127)
    # Daily time window in which access is granted.
    daily_access_start_time = db.Column(ArrowType, default=arrow.get('00:00', 'HH:mm'))
    daily_access_end_time = db.Column(ArrowType, default=arrow.get('23:59', 'HH:mm'))
class SpaceAccessProperties(db.Model):
    """Per-user access state (budget, validity window, last access time).

    The primary key doubles as a foreign key to users.id, making this a
    one-to-one extension of the User row.
    """
    __tablename__ = 'spaceaccess_properties'
    id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
    access_budget = db.Column(db.Integer, default=0)
    access_need_budget = db.Column(db.Boolean, default=False)
    access_starts = db.Column(db.Boolean, default=False)
    access_expires = db.Column(db.Boolean, default=False)
    access_start_date = db.Column(ArrowType, default=arrow.utcnow)
    access_expire_date = db.Column(ArrowType, default=arrow.utcnow)
    access_last_update_date = db.Column(ArrowType, default=arrow.utcnow)
    last_access_at = db.Column(ArrowType, default=None)
    # One-to-one backref "access" on User; the row follows the user's
    # lifecycle (delete-orphan cascade).
    user = db.relationship("User",
                           backref=db.backref("access",
                                              uselist=False,
                                              cascade="save-update, merge, delete, delete-orphan"))
|
# Generated by Django 3.0a1 on 2019-09-29 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the WebDevelopment table.

    Auto-generated by Django (3.0a1); avoid editing by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='WebDevelopment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('framework', models.CharField(choices=[('DJ', 'Django'), ('NG', 'Angular'), ('RE', 'React')], default='DJ', max_length=10)),
            ],
        ),
    ]
|
from django.db.models import signals
from django.contrib.auth.models import User
from guardian import models as guardian_app
from guardian.conf import settings as guardian_settings
def create_anonymous_user(sender, **kwargs):
    """
    Ensure the anonymous User (id taken from guardian settings) exists,
    creating it with the default username when missing.
    """
    User.objects.get_or_create(
        pk=guardian_settings.ANONYMOUS_USER_ID,
        defaults={'username': 'AnonymousUser'},
    )
# Run after every syncdb so the anonymous user is guaranteed to exist.
# NOTE(review): signals.post_syncdb only exists in old Django versions
# (replaced by post_migrate and removed in Django 1.9) — confirm the
# Django version this project targets.
signals.post_syncdb.connect(create_anonymous_user, sender=guardian_app,
    dispatch_uid="guardian.management.create_anonymous_user")
|
# Generated by Django 2.2.5 on 2020-04-07 14:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the has_jbrowse boolean flag to ExpressionData.

    Auto-generated by Django (2.2.5); avoid editing by hand.
    """
    dependencies = [
        ('studies', '0031_expressiondata_jbrowse_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='expressiondata',
            name='has_jbrowse',
            field=models.BooleanField(default=False),
        ),
    ]
|
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
import datetime as dt
from .forms import ImagePost, NewCommentForm, NewStatusForm
from .models import Images, Profile, Comments,Like
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
# Create your views here.
@login_required(login_url='/accounts/login/')
def timelines(request):
    """Render the main timeline.

    Shows all images, profiles ordered by most recent update, and
    comments newest-first. (Removed an unused ``current_user`` local.)
    """
    images = Images.objects.all()
    profiles = Profile.objects.order_by('-last_update')
    comments = Comments.objects.order_by('-time_comment')
    return render(request, 'timeline.html', {'images':images, 'profiles':profiles, 'comments':comments, 'user':request.user})
@login_required(login_url='/accounts/login/')
def profile(request):
    """Render the logged-in user's profile page with their images."""
    current_user = request.user
    # NOTE(review): despite the name, this fetches a User row, not a
    # Profile row — confirm the 'profile.html' template expects a User.
    profile = User.objects.get(id=current_user.id)
    images = Images.objects.all().filter(profile_id=current_user.id)
    return render(request, 'profile.html', {'images':images, 'profile':profile})
@login_required(login_url='/accounts/login/')
def new_status(request, username):
    """Create a new status/image post for the logged-in user.

    GET (or invalid POST) renders the form; a valid POST saves the post
    with the current user attached and redirects to 'myGram'.
    """
    if request.method == 'POST':
        form = NewStatusForm(request.POST, request.FILES)
        if form.is_valid():
            # commit=False so the owner is set before the single DB write.
            # The original saved first and then saved again, writing the
            # row twice and briefly persisting it without a user.
            image = form.save(commit=False)
            image.user = request.user
            image.save()
            return redirect('myGram')
    else:
        form = NewStatusForm()
    return render(request, 'new_status.html', {"form": form})
@login_required(login_url='/accounts/login')
def single_image(request, photo_id):
    """Render the detail page for a single image."""
    requested = Images.objects.get(id=photo_id)
    return render(request, 'single_image.html', {'image': requested})
def find_profile(request):
    """Search for profiles matching the ``name`` GET parameter."""
    search_name = request.GET.get("name")
    if search_name:
        found_users = Profile.find_profile(search_name)
        return render(request, 'all-grams/search_results.html',
                      {"message": f"{search_name}", "found_users": found_users})
    message = "Please enter a valid username"
    return render(request, 'all-grams/search.html', {"message": message})
@login_required(login_url='/accounts/login/')
def like(request,image_id):
    """Toggle the current user's like on an image.

    Adds a Like row and bumps the counter when the user has not liked the
    image yet; otherwise removes the user's Like row(s) and decrements the
    counter. Always redirects back to the timeline.
    """
    requested_image = Images.objects.get(id=image_id)
    current_user = request.user
    existing_likes = Like.objects.filter(image=requested_image, user=current_user)
    # exists() avoids a COUNT(*) when only presence matters.
    if not existing_likes.exists():
        requested_image.likes += 1
        requested_image.save_image()
        like = Like(user=current_user, image=requested_image)
        like.save_like()
        return redirect(timelines)
    else:
        requested_image.likes -= 1
        requested_image.save_image()
        for single_unlike in existing_likes:
            single_unlike.unlike()
        return redirect(timelines)
    # (removed an unreachable trailing render() that followed both returns)
@login_required(login_url='/accounts/login/')
def new_comment(request, username):
    """Post a new comment from the logged-in user, then return to the
    timeline. GET (or invalid POST) renders the comment form."""
    if request.method == 'POST':
        form = NewCommentForm(request.POST, request.FILES)
        if form.is_valid():
            # commit=False so the author is attached before the single DB
            # write (previously the comment was saved twice, the first
            # time without a user).
            comment = form.save(commit=False)
            comment.user = request.user
            comment.save()
            return redirect(timelines)
    else:
        form = NewCommentForm()
    return render(request, 'new_comment.html', {"form": form})
def post(request):
    """Create a new image post for the current user.

    NOTE(review): unlike the surrounding views, this one has no
    @login_required decorator, so request.user may be anonymous here —
    confirm whether that omission is intentional.
    """
    current_user = request.user
    form = ImagePost()
    if request.method == 'POST':
        form = ImagePost(request.POST, request.FILES)
        if form.is_valid():
            image = form.save(commit=False)
            image.user = current_user
            # (removed ``image.likes += 0`` — a no-op, since it only works
            # when the field already holds a numeric value)
            image.save()
            return redirect(timelines)
    else:
        form = ImagePost()
    return render(request, 'post.html', {"form": form})
@login_required
def view_profile(request, pk=None):
    """Render a profile page.

    NOTE(review): the user fetched by ``pk`` is immediately overwritten
    with ``request.user``, and images are filtered by the current user's
    id — so this always shows the logged-in user's own profile regardless
    of ``pk``. Confirm whether viewing other users' profiles was intended.
    """
    current_user = request.user
    user = User.objects.get(pk=pk)
    images = Images.objects.all().filter(profile_id=current_user.id)
    # (removed a leftover debug print(images))
    user = request.user
    args = {'user': user, 'images': images}
    return render(request, 'profile.html', args)
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import arvados
import contextlib
import errno
import hashlib
import http.client
import httplib2
import io
import mock
import os
import pycurl
import queue
import shutil
import sys
import tempfile
import unittest
if sys.version_info >= (3, 0):
from io import StringIO, BytesIO
else:
from cStringIO import StringIO
BytesIO = StringIO
# Use this hostname when you want to make sure the traffic will be
# instantly refused. 100::/64 is a dedicated black hole.
TEST_HOST = '100::'
# Patcher that turns time.sleep into a no-op so retry loops in the code
# under test run instantly.
skip_sleep = mock.patch('time.sleep', lambda n: None) # clown'll eat me
def queue_with(items):
    """Return a thread-safe callable that hands out the given items.

    *items* may be a sequence or an iterator; it is drained eagerly into
    a queue before this function returns. The returned callable ignores
    its arguments, returns one item per call, and raises queue.Empty once
    exhausted.
    """
    backing = queue.Queue()
    for item in items:
        backing.put(item)

    def _pop(*args, **kwargs):
        return backing.get(block=False)

    return _pop
# fake_httplib2_response and mock_responses
# mock calls to httplib2.Http.request()
def fake_httplib2_response(code, **headers):
    """Build an httplib2.Response carrying *code*, the matching HTTP
    reason phrase, and any extra header keyword arguments."""
    headers['status'] = str(code)
    headers['reason'] = http.client.responses.get(code, "Unknown Response")
    return httplib2.Response(headers)
def mock_responses(body, *codes, **headers):
    """Patch httplib2.Http.request to replay canned responses.

    Yields one fake response per status code in *codes*, each carrying
    *body* (encoded to bytes when needed) and the extra *headers*.
    Returns the mock.patch object for use as decorator/context manager.
    """
    if not isinstance(body, bytes) and hasattr(body, 'encode'):
        body = body.encode()
    return mock.patch('httplib2.Http.request', side_effect=queue_with((
        (fake_httplib2_response(code, **headers), body) for code in codes)))
def mock_api_responses(api_client, body, codes, headers={}):
    """Like mock_responses, but patches the request method of a specific
    api_client's underlying HTTP object instead of httplib2 globally."""
    if not isinstance(body, bytes) and hasattr(body, 'encode'):
        body = body.encode()
    return mock.patch.object(api_client._http, 'request', side_effect=queue_with((
        (fake_httplib2_response(code, **headers), body) for code in codes)))
def str_keep_locator(s):
    """Return a fake Keep locator ``<md5hex>+<len>`` for *s* (str or bytes)."""
    data = s if isinstance(s, bytes) else s.encode()
    return f"{hashlib.md5(data).hexdigest()}+{len(s)}"
@contextlib.contextmanager
def redirected_streams(stdout=None, stderr=None):
    """Temporarily replace sys.stdout and/or sys.stderr.

    Pass the StringIO *class* (not an instance) to have a fresh buffer
    created for you; pass None to leave that stream untouched. Yields the
    (stdout, stderr) replacements (None for untouched streams) and always
    restores the originals on exit.
    """
    if stdout is StringIO:
        stdout = StringIO()
    if stderr is StringIO:
        stderr = StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout = stdout or saved_out
    sys.stderr = stderr or saved_err
    try:
        yield (stdout, stderr)
    finally:
        sys.stdout = saved_out
        sys.stderr = saved_err
class VersionChecker(object):
    """TestCase mixin asserting that a --version invocation printed a
    semver-like version string and nothing else."""
    def assertVersionOutput(self, out, err):
        # Python 3 argparse prints version info to stdout.
        if sys.version_info >= (3, 0):
            self.assertEqual(err.getvalue(), '')
            v = out.getvalue()
        else:
            # Python 2 writes version info on stderr.
            self.assertEqual(out.getvalue(), '')
            v = err.getvalue()
        # Accepts e.g. "1.2.3" or "1.2.3.dev4" followed by a newline.
        self.assertRegex(v, r"[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+)?$\n")
class FakeCurl(object):
    """Stand-in for a pycurl.Curl handle that replays a canned response.

    Options set via setopt() are recorded; perform() then feeds the stored
    status line, headers, and body through the registered WRITEFUNCTION /
    HEADERFUNCTION callbacks the way a real transfer would.
    """
    @classmethod
    def make(cls, code, body=b'', headers={}):
        # Wrap a real instance in a Mock so tests can also assert on calls.
        if not isinstance(body, bytes) and hasattr(body, 'encode'):
            body = body.encode()
        return mock.Mock(spec=cls, wraps=cls(code, body, headers))
    def __init__(self, code=200, body=b'', headers={}):
        self._opt = {}
        self._got_url = None
        self._writer = None
        self._headerfunction = None
        self._resp_code = code
        self._resp_body = body
        self._resp_headers = headers
    def getopt(self, opt):
        return self._opt.get(str(opt), None)
    def setopt(self, opt, val):
        self._opt[str(opt)] = val
        if opt == pycurl.WRITEFUNCTION:
            self._writer = val
        elif opt == pycurl.HEADERFUNCTION:
            self._headerfunction = val
    def perform(self):
        # A non-int "code" is treated as an exception instance to raise,
        # simulating a transfer failure.
        if not isinstance(self._resp_code, int):
            raise self._resp_code
        if self.getopt(pycurl.URL) is None:
            raise ValueError
        if self._writer is None:
            raise ValueError
        if self._headerfunction:
            self._headerfunction("HTTP/1.1 {} Status".format(self._resp_code))
            for k, v in self._resp_headers.items():
                self._headerfunction(k + ': ' + str(v))
        # A bool body means "no body" — skip the write callback.
        if type(self._resp_body) is not bool:
            self._writer(self._resp_body)
    def close(self):
        pass
    def reset(self):
        """Prevent fake UAs from going back into the user agent pool."""
        raise Exception
    def getinfo(self, opt):
        if opt == pycurl.RESPONSE_CODE:
            return self._resp_code
        raise Exception
def mock_keep_responses(body, *codes, **headers):
    """Patch pycurl to return fake responses and raise exceptions.

    body can be a string to return as the response body; an exception
    to raise when perform() is called; or an iterable that returns a
    sequence of such values. Passing body as a tuple switches to
    (body, code) pair mode, with *codes* holding further pairs.
    Returns the mock.patch object; its ``responses`` attribute exposes
    the FakeCurl mocks for later assertions.
    """
    cm = mock.MagicMock()
    if isinstance(body, tuple):
        # Pair mode: every entry (including body) is a (body, code) tuple.
        codes = list(codes)
        codes.insert(0, body)
        responses = [
            FakeCurl.make(code=code, body=b, headers=headers)
            for b, code in codes
        ]
    else:
        responses = [
            FakeCurl.make(code=code, body=body, headers=headers)
            for code in codes
        ]
    cm.side_effect = queue_with(responses)
    cm.responses = responses
    return mock.patch('pycurl.Curl', cm)
class MockStreamReader(object):
    """In-memory stand-in for a stream reader: concatenates the given data
    chunks and records a fake Keep locator for each chunk."""

    def __init__(self, name='.', *data):
        self._name = name
        chunks = [c if isinstance(c, bytes) else c.encode() for c in data]
        self._data = b''.join(chunks)
        self._data_locators = [str_keep_locator(d) for d in data]
        self.num_retries = 0

    def name(self):
        return self._name

    def readfrom(self, start, size, num_retries=None):
        """Return up to *size* bytes starting at byte offset *start*."""
        return self._data[start:start + size]
class ApiClientMock(object):
    """Mixin providing mocked Arvados API clients for tests."""
    def api_client_mock(self):
        """Return a MagicMock API client whose config() reports a single
        default storage class."""
        api_mock = mock.MagicMock(name='api_client_mock')
        api_mock.config.return_value = {
            'StorageClasses': {
                'default': {'Default': True}
            }
        }
        return api_mock
    def mock_keep_services(self, api_mock=None, status=200, count=12,
                           service_type='disk',
                           service_host=None,
                           service_port=None,
                           service_ssl_flag=False,
                           additional_services=[],
                           read_only=False):
        """Stub keep_services().accessible() with *count* fabricated
        services (plus any *additional_services*), returning the mock.

        NOTE: additional_services=[] is a shared mutable default — callers
        must not mutate it.
        """
        if api_mock is None:
            api_mock = self.api_client_mock()
        body = {
            'items_available': count,
            'items': [{
                'uuid': 'zzzzz-bi6l4-{:015x}'.format(i),
                'owner_uuid': 'zzzzz-tpzed-000000000000000',
                'service_host': service_host or 'keep0x{:x}'.format(i),
                'service_port': service_port or 65535-i,
                'service_ssl_flag': service_ssl_flag,
                'service_type': service_type,
                'read_only': read_only,
            } for i in range(0, count)] + additional_services
        }
        self._mock_api_call(api_mock.keep_services().accessible, status, body)
        return api_mock
    def _mock_api_call(self, mock_method, code, body):
        """Make mock_method().execute() return *body* on 200, or raise an
        ApiError carrying *code* otherwise."""
        mock_method = mock_method().execute
        if code == 200:
            mock_method.return_value = body
        else:
            mock_method.side_effect = arvados.errors.ApiError(
                fake_httplib2_response(code), b"{}")
class ArvadosBaseTestCase(unittest.TestCase):
    """Base TestCase with temp-dir and fixture-file helpers for our tests."""

    def setUp(self):
        # Directories created via make_tmpdir; removed again in tearDown.
        self._tempdirs = []

    def tearDown(self):
        for path in self._tempdirs:
            shutil.rmtree(path, ignore_errors=True)

    def make_tmpdir(self):
        """Create and return a temp directory cleaned up in tearDown."""
        newdir = tempfile.mkdtemp()
        self._tempdirs.append(newdir)
        return newdir

    def data_file(self, filename):
        """Open *filename* from the ``data`` directory next to this module."""
        try:
            basedir = os.path.dirname(__file__)
        except NameError:
            basedir = '.'
        return open(os.path.join(basedir, 'data', filename))

    def build_directory_tree(self, tree):
        """Create each relative path in *tree* under a fresh temp dir,
        writing the path string itself as each file's content."""
        root = self.make_tmpdir()
        for leaf in tree:
            target = os.path.join(root, leaf)
            try:
                os.makedirs(os.path.dirname(target))
            except OSError as error:
                if error.errno != errno.EEXIST:
                    raise
            with open(target, 'w') as outfile:
                outfile.write(leaf)
        return root

    def make_test_file(self, text=b"test"):
        """Return a flushed NamedTemporaryFile containing *text*."""
        handle = tempfile.NamedTemporaryFile()
        handle.write(text)
        handle.flush()
        return handle
if sys.version_info < (3, 0):
    # There is no assert[Not]Regex that works in both Python 2 and 3,
    # so we backport Python 3 style to Python 2.
    def assertRegex(self, *args, **kwargs):
        return self.assertRegexpMatches(*args, **kwargs)
    def assertNotRegex(self, *args, **kwargs):
        return self.assertNotRegexpMatches(*args, **kwargs)
    # Monkey-patch TestCase itself so every test class in the process
    # (not just ours) gains the Python 3 names.
    unittest.TestCase.assertRegex = assertRegex
    unittest.TestCase.assertNotRegex = assertNotRegex
|
import sympy as sp
from src.equations.symbols import *
# Nomoto steering equation: K*delta = r + T_1*r' + T_2*r''
# (K, delta, r, r_1d, r_2d, T_1, T_2 come from src.equations.symbols).
# NOTE(review): with both T_1 and T_2 terms this looks like the
# second-order Nomoto model despite the name — confirm.
nomoto_first_order = sp.Eq(K*delta,
r + T_1*r_1d + T_2*r_2d)
from __future__ import annotations
import asyncio
import datetime
import logging
import dateutil
from aiohttp import ClientSession
from tenacity import retry, retry_if_exception_type
from .const import (
AUTH_ENDPOINT,
BASE_ENDPOINT,
BASE_URL,
CLIENT_HEADERS,
LATEST_OD_READ_ENDPOINT,
METER_ENDPOINT,
OD_READ_ENDPOINT,
OD_READ_RETRY_TIME,
TOKEN_EXPRIATION,
USER_AGENT,
)
from .exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
SmartMeterTexasAuthExpired,
)
__author__ = "Graham Wetzler"
__email__ = "graham@wetzler.dev"
__version__ = "0.4.1"
_LOGGER = logging.getLogger(__name__)
class Meter:
    """A single smart meter identified by meter number, ESIID and address."""

    def __init__(self, meter: str, esiid: str, address: str):
        self.meter = meter
        self.esiid = esiid
        self.address = address
        # Populated by read_meter() with the latest on-demand read payload.
        self.reading_data = None

    async def read_meter(self, client: Client):
        """Triggers an on-demand meter read and returns it when complete.

        Polls the latest-read endpoint until the status is COMPLETED,
        sleeping OD_READ_RETRY_TIME seconds between PENDING polls.
        Raises SmartMeterTexasAPIError on malformed or unknown responses.
        """
        _LOGGER.debug("Requesting meter reading")
        # Trigger an on-demand meter read.
        await client.request(
            OD_READ_ENDPOINT, json={"ESIID": self.esiid, "MeterNumber": self.meter},
        )
        # Occasionally check to see if on-demand meter reading is complete.
        while True:
            json_response = await client.request(
                LATEST_OD_READ_ENDPOINT, json={"ESIID": self.esiid},
            )
            try:
                data = json_response["data"]
                status = data["odrstatus"]
            except KeyError:
                # Fixed: the original passed json_response as a logging arg
                # with no %s placeholder, so the payload never appeared in
                # the log (logging reports a formatting error instead).
                _LOGGER.error("Error reading meter: %s", json_response)
                raise SmartMeterTexasAPIError(
                    f"Error parsing response: {json_response}"
                )
            else:
                if status == "COMPLETED":
                    self.reading_data = data
                    # Log after the assignment — previously this logged the
                    # stale pre-assignment value (None on the first read).
                    _LOGGER.debug("Reading completed: %s", self.reading_data)
                    return self.reading_data
                elif status == "PENDING":
                    _LOGGER.debug("Meter reading %s", status)
                    _LOGGER.debug("Sleeping for %s seconds", OD_READ_RETRY_TIME)
                    await asyncio.sleep(OD_READ_RETRY_TIME)
                else:
                    _LOGGER.error("Unknown meter reading status: %s", status)
                    raise SmartMeterTexasAPIError(f"Unknown meter status: {status}")

    @property
    def reading(self):
        """Returns the latest meter reading in kWh."""
        return float(self.reading_data["odrread"])

    @property
    def reading_datetime(self):
        """Returns the UTC datetime of the latest reading."""
        date = dateutil.parser.parse(self.reading_data["odrdate"])
        date_as_utc = date.astimezone(datetime.timezone.utc)
        return date_as_utc
class Account:
    """Credentials for a Smart Meter Texas account plus meter discovery."""

    def __init__(self, username: str, password: str):
        self.username = username
        self.password = password

    async def fetch_meters(self, client: "Client"):
        """Returns a list of the meters associated with the account"""
        json_response = await client.request(METER_ENDPOINT, json={"esiid": "*"})
        meters = []
        for record in json_response["data"]:
            meters.append(
                Meter(record["meterNumber"], record["esiid"], record["address"])
            )
        return meters
class Client:
    """Authenticated HTTP client for the Smart Meter Texas API.

    Wraps an aiohttp ClientSession, handling login, bearer-token caching,
    and transparent re-authentication when a request returns 401.
    """
    def __init__(self, websession: ClientSession, account: "Account"):
        self.websession = websession
        self.account = account
        self.token = None
        self.authenticated = False
        # Expired from the start so the first request() authenticates.
        self.token_expiration = datetime.datetime.now()
    async def _init_websession(self):
        """Make an initial GET request to initialize the session otherwise
        future POST requests will timeout."""
        await self.websession.get(BASE_URL, headers={"User-Agent": USER_AGENT})
    def _update_token_expiration(self):
        # TOKEN_EXPRIATION (sic — typo lives in const.py) is the token TTL.
        self.token_expiration = datetime.datetime.now() + TOKEN_EXPRIATION
    @retry(retry=retry_if_exception_type(SmartMeterTexasAuthExpired))
    async def request(
        self, path: str, method: str = "post", **kwargs,
    ):
        """Helper method to make API calls against the SMT API.

        On a 401 it re-authenticates and raises SmartMeterTexasAuthExpired,
        which the tenacity @retry decorator catches to replay the request.
        """
        await self.authenticate()
        resp = await self.websession.request(
            method, f"{BASE_ENDPOINT}{path}", headers=self.headers, **kwargs
        )
        if resp.status == 401:
            _LOGGER.debug("Authentication token expired; requesting new token")
            self.authenticated = False
            await self.authenticate()
            raise SmartMeterTexasAuthExpired
        # Since API call did not return a 400 code, update the token_expiration.
        # NOTE(review): the comment above says 400 but the check is 401 — confirm.
        self._update_token_expiration()
        json_response = await resp.json()
        return json_response
    async def authenticate(self):
        """Log in and cache the bearer token, unless one is still valid.

        Raises SmartMeterTexasAuthError on rejected credentials and
        SmartMeterTexasAPIError when the login payload has no token.
        """
        if not self.token_valid:
            _LOGGER.debug("Requesting login token")
            # Make an initial GET request otherwise subsequent calls will timeout.
            await self._init_websession()
            resp = await self.websession.request(
                "POST",
                f"{BASE_ENDPOINT}{AUTH_ENDPOINT}",
                json={
                    "username": self.account.username,
                    "password": self.account.password,
                    "rememberMe": "true",
                },
                headers=self.headers,
            )
            json_response = await resp.json()
            if resp.status == 400:
                raise SmartMeterTexasAuthError("Username or password was not accepted")
            try:
                self.token = json_response["token"]
            except KeyError:
                raise SmartMeterTexasAPIError(
                    "API returned unknown login json: %s", json_response
                )
            self._update_token_expiration()
            self.authenticated = True
            _LOGGER.debug("Successfully retrieved login token")
    @property
    def headers(self):
        # Base headers plus the bearer token once one has been obtained.
        headers = {**CLIENT_HEADERS}
        if self.token:
            headers["Authorization"] = f"Bearer {self.token}"
        return headers
    @property
    def token_valid(self):
        # NOTE(review): `authenticated` alone short-circuits the expiry
        # check, so an expired-but-flagged token counts as valid until a
        # 401 resets the flag — confirm this is intentional.
        if self.authenticated or (datetime.datetime.now() < self.token_expiration):
            return True
        return False
|
from sklearn.utils import shuffle
import pandas as pd
import os
import spams
from PIL import Image
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage import io
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
class TissueMaskException(Exception):
    """Raised when no tissue pixels survive the luminosity threshold."""
    pass
######################################################################################################
def is_uint8_image(I):
    """True when *I* is a 3-D numpy array with dtype uint8 (an RGB image)."""
    return is_image(I) and I.dtype == np.uint8
######################################################################################################
def is_image(I):
    """True when *I* is a numpy array with exactly three dimensions."""
    return isinstance(I, np.ndarray) and I.ndim == 3
######################################################################################################
# Obtain a mask of the main tissue region via a luminosity threshold.
def get_tissue_mask(I, luminosity_threshold=0.8):
    """Boolean mask of tissue pixels: True where the LAB L channel,
    scaled to [0, 1], is below *luminosity_threshold*.

    *I* is expected to be an RGB image (converted to LAB via cv2).
    Raises TissueMaskException when the resulting mask is empty.
    """
    I_LAB = cv2.cvtColor(I, cv2.COLOR_RGB2LAB)
    L = I_LAB[:, :, 0] / 255.0 # Convert to range [0,1].
    mask = L < luminosity_threshold
    # Check it's not empty
    if mask.sum() == 0:
        raise TissueMaskException("Empty tissue mask computed")
    return mask
######################################################################################################
# Convert an RGB image to optical density (OD) space.
def convert_RGB_to_OD(I):
    """Return OD = -log(I/255) with a floor of 0.1 per channel.

    Zero pixel values are treated as 1 to keep the log finite. Unlike the
    original, the caller's array is NOT modified in place (it previously
    wrote ``I[mask] = 1`` into the input image).
    """
    I = np.where(I == 0, 1, I)  # copy — avoids mutating the caller's array
    return np.maximum(-1 * np.log(I / 255), np.zeros(I.shape) + 0.1)
######################################################################################################
# Convert optical density values back to RGB.
def convert_OD_to_RGB(OD):
    """Inverse of the OD transform: RGB = 255 * exp(-OD), as uint8."""
    assert OD.min() >= 0, "Negative optical density."
    clipped = np.maximum(OD, 1e-6)
    return (255 * np.exp(-1 * clipped)).astype(np.uint8)
######################################################################################################
def normalize_matrix_rows(A):
    """Scale each row of *A* to unit Euclidean norm."""
    row_norms = np.linalg.norm(A, axis=1)
    return A / row_norms[:, None]
######################################################################################################
# Compute per-pixel stain concentrations.
def get_concentrations(I, stain_matrix, regularizer=0.01):
    """Solve for stain concentrations of RGB image *I* given a 2x3 stain
    matrix, via non-negative lasso (sparse optimization with spams).
    Returns an (H*W, 2) array."""
    OD = convert_RGB_to_OD(I).reshape((-1, 3))
    # Sparse optimization
    return spams.lasso(X=OD.T, D=stain_matrix.T, mode=2, lambda1=regularizer, pos=True).toarray().T
######################################################################################################
def get_stain_matrix(I, luminosity_threshold=0.8, angular_percentile=99):
    """Estimate the 2x3 H&E stain matrix of RGB image *I*.

    Projects tissue pixels into OD space, finds the plane spanned by the
    two dominant eigenvectors of the OD covariance, and takes the extreme
    angular directions within that plane as the two stain vectors
    (Macenko-style approach — confirm against the cited method). Rows are
    unit-normalized with H as the first row.
    """
    # assert is_uint8_image(I), "Image should be RGB uint8."
    # Convert to OD and ignore background pixels.
    tissue_mask = get_tissue_mask(I, luminosity_threshold=luminosity_threshold).reshape((-1,))
    OD = convert_RGB_to_OD(I).reshape((-1, 3))
    OD = OD[tissue_mask]
    # Eigenvectors of cov in OD space (orthogonal as cov symmetric)
    _, V = np.linalg.eigh(np.cov(OD, rowvar=False))
    # The two principal eigenvectors (eigh sorts eigenvalues ascending,
    # so columns 2 and 1 are the largest two).
    V = V[:, [2, 1]]
    # Make sure vectors are pointing the right way
    if V[0, 0] < 0: V[:, 0] *= -1
    if V[0, 1] < 0: V[:, 1] *= -1
    # Project on this basis.
    That = np.dot(OD, V)
    # Angular coordinates with respect to the principal, orthogonal eigenvectors
    phi = np.arctan2(That[:, 1], That[:, 0])
    # Min and max angles
    minPhi = np.percentile(phi, 100 - angular_percentile)
    maxPhi = np.percentile(phi, angular_percentile)
    # the two principal colors
    v1 = np.dot(V, np.array([np.cos(minPhi), np.sin(minPhi)]))
    v2 = np.dot(V, np.array([np.cos(maxPhi), np.sin(maxPhi)]))
    # Order of H and E.
    # H first row.
    if v1[0] > v2[0]:
        HE = np.array([v1, v2])
    else:
        HE = np.array([v2, v1])
    return normalize_matrix_rows(HE)
######################################################################################################
def mapping(target, source):
    """Stain-normalize *source* so its H&E statistics match *target*.

    Both images are RGB arrays. Source concentrations are rescaled by the
    ratio of 99th-percentile concentrations and re-rendered through the
    target's stain matrix. Returns a uint8 image shaped like *source*.
    """
    stain_matrix_target = get_stain_matrix(target)
    target_concentrations = get_concentrations(target, stain_matrix_target)
    maxC_target = np.percentile(target_concentrations, 99, axis=0).reshape((1, 2))
    # NOTE(review): stain_matrix_target_RGB is computed but never used.
    stain_matrix_target_RGB = convert_OD_to_RGB(stain_matrix_target)
    stain_matrix_source = get_stain_matrix(source)
    source_concentrations = get_concentrations(source, stain_matrix_source)
    maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))
    source_concentrations *= (maxC_target / maxC_source)
    tmp = 255 * np.exp(-1 * np.dot(source_concentrations, stain_matrix_target))
    return tmp.reshape(source.shape).astype(np.uint8)
# --- Batch stain normalization over a directory of test images ---
source_dir='/home/ty/docker/ty/chenkairun/an_img/test/'
target = cv2.imread('/home/ty/docker/ty/chenkairun/an_img/goal.png')
# Convert from cv2 standard of BGR to our convention of RGB.
target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
for fname in os.listdir(source_dir):
    fpath=os.path.join(source_dir,fname)
    source = cv2.imread(fpath)
    # Convert from cv2 standard of BGR to our convention of RGB.
    source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
    # Perform stain normalization
    transformed = mapping(target,source)
    io.imsave('/home/ty/docker/ty/chenkairun/an_img/test_result/'+fname,transformed)
# NOTE(review): no cv2 window is ever created, so this waitKey() appears
# to be a leftover no-op — confirm it can be removed.
cv2.waitKey()
|
# Generated by Django 3.1 on 2020-08-30 05:40
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds automatedtask.script_args, a nullable Postgres array of strings.

    Auto-generated by Django (3.1); avoid editing by hand.
    """
    dependencies = [
        ('autotasks', '0002_auto_20200701_1615'),
    ]
    operations = [
        migrations.AddField(
            model_name='automatedtask',
            name='script_args',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=255, null=True), blank=True, default=list, null=True, size=None),
        ),
    ]
|
import scrapy
import re
from locations.items import GeojsonPointItem
class DairyQueenSpider(scrapy.Spider):
    """Crawl dairyqueen.com's sitemap and emit a GeojsonPointItem per store."""
    name = "dairyqueen"
    allowed_domains = ["www.dairyqueen.com"]
    download_delay = 1.5
    start_urls = (
        'https://www.dairyqueen.com/us-en/Sitemap/?localechange=1&',
    )

    def parse_stores(self, response):
        """Extract one store's coordinates and address details."""
        google_lnk = response.xpath('//a[@title="Click here to view on Google"]//@href').extract_first()
        # Fixed: re.finditer always returns a (truthy) iterator, so the
        # old ``if matches:`` test guarded nothing, and next() could raise
        # StopIteration when fewer than two coordinates were present.
        # findall with an explicit length check handles both cases (the
        # ``or ''`` also guards against a missing link).
        coords = re.findall(r"([-0-9]+\.[0-9]+)", google_lnk or '')
        if len(coords) >= 2:
            lat, lng = float(coords[0]), float(coords[1])
            properties = {
                'addr_full': response.xpath('//hgroup[@class="store-address"]/h2/text()').extract_first(),
                'phone': response.xpath('//a[@class="telephone-cta"]/text()').extract_first(),
                'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
                'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
                'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
                'ref': response.url,
                'website': response.url,
                'lat': lat,
                'lon': lng
            }
            yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Follow every store link listed on the sitemap page."""
        stores = response.xpath('(//div[@class="center-960"]/ul/li/a/@href)').extract()
        for store in stores:
            yield scrapy.Request(
                response.urljoin(store),
                callback=self.parse_stores
            )
|
# Simple BRL -> USD converter using a fixed rate of R$5.65 per dollar.
print('$$$ CONVERSOR DE REAL PARA DÓLAR $$$')
reais = float(input('Digite o valor que deseja fazer a conversão: '))
dolares = reais / 5.65
print(f'O valor de R${reais} reais equivale a US${dolares:.2f} dólares')
|
"""
Fetch NWP files from NCEP NOMADS for select variables. Should primarily be used
as a CLI program.
The script fetches grib2 files from NOMADS as they are available using g2sub,
uses wgrib2 to convert the grib files to netCDF (and adds wind speed),
and optimizes the netCDF file for our expected usage accessing a time-series
for a single location.
Each possible model has an associated dictionary with parameters that are
passed in the HTTP request. Other keys in the dictionaries are used
to set the timing of the HTTP requests:
- update_freq: the time between full model runs
- valid_hr_gen: A generator to produce the valid forecast hours that the model
produces
- time_between_fcst_hrs: The time in seconds that it typically takes to produce
the grib2 file for the next forecasted hour
- delay_to_first_forecast: The time after the nominal initialization time
that the first forecast from the model is typically available
- avg_max_run_length: The average time it takes the model to produce the last
possible forecast file from the first forecast file.
Many of these parameters are inferred from
https://www.nco.ncep.noaa.gov/pmb/nwprod/prodstat/
This script uses features of asyncio that are likely not available in Windows.
"""
import asyncio
from itertools import chain
import logging
import os
from pathlib import Path
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import aiohttp
import pandas as pd
import xarray as xr
from solarforecastarbiter.io.fetch import (
make_session, run_in_executor, abort_all_on_exception)
from solarforecastarbiter.io.nwp import DOMAIN
logger = logging.getLogger(__name__)
CHECK_URL = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/{}/prod'
BASE_URL = 'https://nomads.ncep.noaa.gov/cgi-bin/'
GFS_0P25_1HR = {'endpoint': 'filter_gfs_0p25_1hr.pl',
'file': 'gfs.t{init_hr:02d}z.pgrb2.0p25.f{valid_hr:03d}',
# /atmos added to dir with March 22, 2021, 1200Z GFS upgrade
'dir': '/gfs.{init_date}/{init_hr}/atmos',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_TCDC': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'update_freq': '6h',
'valid_hr_gen': lambda x: chain(range(120), range(120, 240, 3),
range(240, 385, 12)),
'time_between_fcst_hrs': 60,
'delay_to_first_forecast': '200min',
'avg_max_run_length': '100min',
'filename': 'gfs_0p25.nc'}
NAM_CONUS = {'endpoint': 'filter_nam.pl',
'file': 'nam.t{init_hr:02d}z.awphys{valid_hr:02d}.tm00.grib2',
'dir': '/nam.{init_date}',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere_\\(considered_as_a_single_layer\\)': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_TCDC': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'update_freq': '6h',
'valid_hr_gen': lambda x: chain(range(36), range(36, 85, 3)),
'time_between_fcst_hrs': 60,
'delay_to_first_forecast': '90min',
'avg_max_run_length': '80min',
'filename': 'nam_12km.nc'}
# should be able to use RANGE requests and get data directly from grib files
# like https://www.cpc.ncep.noaa.gov/products/wesley/fast_downloading_grib.html
# so we can get DSWRF for RAP
RAP = {'endpoint': 'filter_rap.pl',
'file': 'rap.t{init_hr:02d}z.awp130pgrbf{valid_hr:02d}.grib2',
'dir': '/rap.{init_date}',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_TCDC': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'update_freq': '1h',
'valid_hr_gen': (
lambda x: range(40) if x in (3, 9, 15, 21) else range(22)),
'time_between_fcst_hrs': 60,
'delay_to_first_forecast': '50min',
'avg_max_run_length': '30min',
'filename': 'rap.nc'}
HRRR_HOURLY = {
'endpoint': 'filter_hrrr_2d.pl',
'file': 'hrrr.t{init_hr:02d}z.wrfsfcf{valid_hr:02d}.grib2',
'dir': '/hrrr.{init_date}/conus',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_VBDSF': 'on',
'var_VDDSF': 'on',
'var_TCDC': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'update_freq': '1h',
'valid_hr_gen': (
lambda x: range(37) if x in (0, 6, 12, 18) else range(19)),
'time_between_fcst_hrs': 120,
'delay_to_first_forecast': '45min',
'avg_max_run_length': '70min',
'filename': 'hrrr_hourly.nc'}
HRRR_SUBHOURLY = {
'endpoint': 'filter_hrrr_sub.pl',
'file': 'hrrr.t{init_hr:02d}z.wrfsubhf{valid_hr:02d}.grib2',
'dir': '/hrrr.{init_date}/conus',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_VBDSF': 'on',
'var_VDDSF': 'on',
'var_TMP': 'on',
'var_WIND': 'on',
'update_freq': '1h',
'valid_hr_gen': (lambda x: range(19)),
'time_between_fcst_hrs': 120,
'delay_to_first_forecast': '45min',
'avg_max_run_length': '50min',
'filename': 'hrrr_subhourly.nc'}
# each GEFS stat_or_member is treated separately
# really makes use of async capabilities
GEFS_0P50_RAW = {'endpoint': 'filter_gefs_atmos_0p50a.pl',
'file': 'ge{stat_or_member}.t{init_hr:02d}z.pgrb2a.0p50.f{valid_hr:03d}', # NOQA
'dir': '/gefs.{init_date}/{init_hr}/atmos/pgrb2ap5',
'lev_2_m_above_ground': 'on',
'lev_10_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_TCDC': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'update_freq': '6h',
'valid_hr_gen': lambda x: chain(range(0, 240, 3),
range(240, 385, 6)),
'time_between_fcst_hrs': 60,
'delay_to_first_forecast': '280min',
'avg_max_run_length': '60min',
'filename': 'gefs_{stat_or_member}.nc',
'members': (['avg', 'c00', 'spr'] +
[f'p{r:02d}' for r in range(1, 21)]),
'check_url_name': 'gens'}
# Keys consumed only by this script's scheduling/bookkeeping logic; they
# are stripped from the parameter dicts before the dicts are sent as HTTP
# query parameters (see _process_params).
EXTRA_KEYS = ['update_freq', 'valid_hr_gen', 'time_between_fcst_hrs',
              'delay_to_first_forecast', 'avg_max_run_length', 'filename',
              'check_url_name', 'member']
# CLI model name -> parameter dict defined above.
model_map = {'gfs_0p25': GFS_0P25_1HR, 'nam_12km': NAM_CONUS,
             'rap': RAP, 'hrrr_hourly': HRRR_HOURLY,
             'hrrr_subhourly': HRRR_SUBHOURLY,
             'gefs': GEFS_0P50_RAW}
# "GRIB var:level:netCDF name" table, presumably fed to wgrib2's netCDF
# conversion; "ignore" appears to drop fields only needed as inputs
# (e.g. the wind components used to derive speed) — confirm usage below.
NC_TBL = """
TMP:surface:ignore
TMP:2 m above ground:t2m
UGRD:10 m above ground:ignore
VGRD:10 m above ground:ignore
TCDC:entire atmosphere:tcdc
TCDC:entire atmosphere (considered as a single layer):tcdc
DSWRF:surface:dswrf
VBDSF:surface:vbdsf
VDDSF:surface:vddsf
WIND:10 m above ground:si10
"""
# netCDF4 per-variable compression settings applied on output.
COMPRESSION = {'zlib': True, 'complevel': 1, 'shuffle': True,
               'fletcher32': True}
DEFAULT_ENCODING = {
    # stores the time steps, not an actual time
    'time': {'dtype': 'int16'},
    'latitude': {'dtype': 'float32', 'least_significant_digit': 3},
    'longitude': {'dtype': 'float32', 'least_significant_digit': 3}
}
# Per-variable rounding precision for lossy netCDF packing.
LEAST_SIGNIFICANT_DIGITS = {
    't2m': 2,
    'tcdc': 1,
    'si10': 2,
    'dswrf': 1,
    'vbdsf': 1,
    'vddsf': 1
}
async def get_with_retries(get_func, *args, retries=5, **kwargs):
    """
    Call get_func and retry if the request fails.

    Parameters
    ----------
    get_func : function
        Function that performs an aiohttp call to be retried
    retries : int
        Number of retries before raising the error
    *args
        Passed to get_func
    **kwargs
        Passed to get_func

    Returns
    -------
    Result of get_func

    Raises
    ------
    aiohttp.ClientResponseError
        When get_func fails after retrying retries times
    """
    retried = 0
    while True:
        try:
            res = await get_func(*args, **kwargs)
        except aiohttp.ClientResponseError as e:
            # HTTP-level failure (status >= 400)
            logger.warning('Request to %s failed with code %s, retrying',
                           e.request_info.url, e.status)
            retried += 1
            if retried >= retries:
                raise
        except aiohttp.ClientError:
            # connection-level failure (DNS, reset, timeout, ...)
            logger.warning('Request failed in connection, retrying')
            retried += 1
            if retried >= retries:
                raise
        else:
            return res
        # fixed back-off: wait a minute between attempts
        await asyncio.sleep(60)
def _simple_model(model):
return model['dir'].split('.')[0][1:]
@abort_all_on_exception
async def get_available_dirs(session, model):
    """Get the available date/date+init_hr directories.

    Scrapes the NOMADS listing page for the model's endpoint and returns
    the set of date strings (YYYYMMDD) or date+hour strings (YYYYMMDDHH)
    that currently have directories on the server.
    """
    simple_model = _simple_model(model)
    # models whose 'dir' template uses init_date have daily directories
    # (8 digits); otherwise one directory per run (10 digits)
    is_init_date = 'init_date' in model['dir']
    model_url = BASE_URL + model['endpoint']
    async def _get(model_url):
        async with session.get(model_url, raise_for_status=True) as r:
            return await r.text()
    page = await get_with_retries(_get, model_url)
    if is_init_date:
        list_avail_days = set(
            re.findall(simple_model + '\\.([0-9]{8})', page))
    else:
        list_avail_days = set(
            re.findall(simple_model + '\\.([0-9]{10})', page))
    return list_avail_days
def _process_params(model, init_time):
    """Yield one request-parameter dict per valid forecast hour for
    ``model`` initialized at ``init_time``.

    Starts from the model definition merged with DOMAIN, pulls the
    valid-hour generator, strips the bookkeeping keys in EXTRA_KEYS,
    resolves the directory template once, then emits a copy per valid
    hour with the file template filled in.
    """
    base = {**model, **DOMAIN}
    # grab the generator before the bookkeeping keys are removed
    hours = base['valid_hr_gen'](init_time.hour)
    for extra in EXTRA_KEYS:
        base.pop(extra, None)
    base['dir'] = base['dir'].format(
        init_date=init_time.strftime('%Y%m%d'),
        init_dt=init_time.strftime('%Y%m%d%H'),
        init_hr=init_time.strftime('%H'))
    for valid_hr in hours:
        request = dict(base)
        request['file'] = request['file'].format(
            init_hr=init_time.hour,
            valid_hr=valid_hr)
        yield request
async def check_next_inittime(session, init_time, model):
    """Check if data from the next model initialization time is available.

    Sends a HEAD request for the first forecast file (valid_hr=0) of the
    run following ``init_time``; returns True only on HTTP 200.
    """
    next_inittime = init_time + pd.Timedelta(model['update_freq'])
    simple_model = _simple_model(model)
    next_init_url = (CHECK_URL.format(model.get('check_url_name',
                                                simple_model))
                     + model['dir'].format(
                         init_date=next_inittime.strftime('%Y%m%d'),
                         init_dt=next_inittime.strftime('%Y%m%d%H'),
                         init_hr=next_inittime.strftime('%H'))
                     + '/' + model['file'].format(init_hr=next_inittime.hour,
                                                  valid_hr=0))
    try:
        async with session.head(next_init_url) as r:
            if r.status == 200:
                logger.warning(
                    'Skipping to next init time at %s for %s %s',
                    next_inittime, simple_model, model.get('member', ''))
                return True
            else:
                return False
    except aiohttp.ClientOSError:
        # connection problem: treat as "not available yet"
        return False
def get_filename(basepath, init_time, params):
    """Build the local path for a grib file:
    ``basepath/YYYY/MM/DD/HH/<file>``, guaranteeing a .grib2 suffix."""
    target = basepath / init_time.strftime('%Y/%m/%d/%H') / params['file']
    if target.suffix != '.grib2':
        # keep the original suffix and append .grib2 after it
        target = target.with_suffix(target.suffix + '.grib2')
    return target
async def files_to_retrieve(session, model, modelpath, init_time):
    """Generator to return the parameters of the available files for download.

    Yields the request-parameter dict for each forecast file of the
    ``init_time`` run as it becomes available on the server, polling with
    HEAD requests.  Returns early (stops yielding) if the next model run
    appears before the current run finishes publishing.
    """
    possible_params = _process_params(model, init_time)
    simple_model = _simple_model(model)
    first_file_modified_at = None
    for next_params in possible_params:
        logger.debug('Checking if file is available for %s', next_params)
        filename = get_filename(modelpath, init_time, next_params)
        if filename.exists():
            # already downloaded previously; no need to poll the server
            yield next_params
            continue
        next_model_url = (CHECK_URL.format(model.get('check_url_name',
                                                     simple_model))
                          + next_params['dir'] + '/' + next_params['file'])
        while True:
            # is the next file ready?
            try:
                logger.debug('Calling HEAD %s', next_model_url)
                async with session.head(
                        next_model_url, raise_for_status=True) as r:
                    if first_file_modified_at is None:
                        # remember when the run's first file appeared to
                        # judge later whether the run has gone stale
                        first_file_modified_at = pd.Timestamp(
                            r.headers['Last-Modified'])
                        logger.debug('First file was available at %s %s',
                                     first_file_modified_at,
                                     model.get('member', ''))
                    logger.debug('HEAD returned %s', next_model_url)
            except aiohttp.ClientResponseError as e:
                if e.status == 404:  # Not found
                    logger.debug(
                        'Next file not ready yet for %s at %s %s\n%s %s',
                        simple_model, init_time, model.get('member', ''),
                        e.status, e.message)
                else:
                    logger.error(
                        'Error checking if next file is ready %s\n'
                        '%s %s', model.get('member', ''), e.status, e.message)
            except aiohttp.ClientError as e:
                logger.warning('Error in checking for next file %s %s',
                               model.get('member', ''), str(e))
            except Exception:
                logger.exception(
                    'Uncaught exception when checking for next file %s',
                    model.get('member', ''))
            else:
                logger.debug('%s/%s is ready for download',
                             next_params['dir'], next_params['file'])
                yield next_params
                break
            # if the current time is after 'avg_max_run_length' after the
            # first forecast was available, check if forecasts from the
            # next model run are available and if so, move on to that run
            if (
                first_file_modified_at is not None and
                pd.Timestamp.utcnow() > first_file_modified_at +
                pd.Timedelta(model['avg_max_run_length'])
            ):
                nextrun_available = await check_next_inittime(
                    session, init_time, model)
                if nextrun_available:
                    return
            await asyncio.sleep(model['time_between_fcst_hrs'])
async def _get_file(session, url, params, tmpfile, chunksize):
    """Stream the HTTP response for ``url`` into ``tmpfile`` in
    ``chunksize``-KB chunks; raises on HTTP errors (the retry wrapper
    handles them)."""
    # generous timeouts: grib downloads from NOMADS can be slow
    timeout = aiohttp.ClientTimeout(total=660, connect=60, sock_read=600)
    async with session.get(url, params=params, raise_for_status=True,
                           timeout=timeout) as r:
        with open(tmpfile, 'wb') as f:
            async for chunk in r.content.iter_chunked(chunksize * 1024):
                f.write(chunk)
@abort_all_on_exception
async def fetch_grib_files(session, params, basepath, init_time, chunksize):
    """
    Fetch the grib file referenced by params and save to the appropriate
    folder under basepath. Retrieves the files in chunks.

    Parameters
    ----------
    session : aiohttp.ClientSession
        The HTTP session to use to request the file
    params : dict
        Parameters to include in the GET query to params['endpoint']
    basepath : Path
        Path to the base directory where files will be saved. New directories
        under basepath of the form basepath / year / month / day / hour
        will be created as necessary.
    init_time : datetime
        Initialization time of the model we're trying to fetch
    chunksize : int
        Chunksize in KB to fetch and save at once

    Returns
    -------
    filename : Path
        Path of the successfully saved file

    Raises
    ------
    aiohttp.ClientResponseError
        When the HTTP request fails/returns a status code >= 400
    """
    endpoint = params.pop('endpoint')
    url = BASE_URL + endpoint
    filename = get_filename(basepath, init_time, params)
    if filename.exists():
        # already downloaded (e.g. on restart); nothing to do
        return filename
    if not filename.parent.is_dir():
        filename.parent.mkdir(parents=True)
    logger.info('Getting file %s', filename)
    # download to a hidden temp name and rename on success, so a
    # partially-written file is never mistaken for a complete one
    tmpfile = filename.with_name('.tmp_' + filename.name)
    await get_with_retries(_get_file, session, url, params, tmpfile, chunksize)
    tmpfile.rename(filename)
    # bug fix: was logging.debug(...) which hit the root logger instead
    # of this module's configured logger
    logger.debug('Successfully saved %s', filename)
    return filename
@abort_all_on_exception
async def process_grib_to_netcdf(folder, model):
    """Convert the downloaded GRIB files in ``folder`` into a temporary
    NetCDF file and return its path."""
    logger.info('Converting GRIB files to NetCDF with wgrib2 %s',
                model.get('member', ''))
    _handle, nctmp = tempfile.mkstemp()
    os.close(_handle)
    nctmp = Path(nctmp)
    # possible that this holds up processing on file io
    # so run in separate process
    grib_prefix = model['file'].split('.')[0]
    # NOTE(review): True means var_WIND was *not* requested, so wind
    # speed must be derived from U/V by wgrib2 — the name reads
    # inverted; confirm intent before renaming.
    wind_in_model = 'var_WIND' not in model
    try:
        await run_in_executor(_process_grib, folder, nctmp, grib_prefix,
                              wind_in_model)
    except Exception:
        # don't leave the temp file behind on failure
        nctmp.unlink()
        raise
    return nctmp
def _process_grib(folder, nctmp, grib_prefix, wind_in_model):
    """Convert the .grib2 files in ``folder`` matching ``grib_prefix``
    into a single NetCDF file at ``nctmp`` using wgrib2.

    Parameters
    ----------
    folder : Path
        Directory containing the downloaded .grib2 files.
    nctmp : Path
        Output NetCDF path (file already created by the caller).
    grib_prefix : str
        Filename prefix used to glob the grib files.
    wind_in_model : bool
        When True, first derive wind speed from the U/V components
        (the model did not provide a WIND variable directly).

    Raises
    ------
    OSError
        If any wgrib2 invocation fails; wgrib2's stderr is attached and
        the original CalledProcessError is chained.
    """
    if wind_in_model:
        # need to add wind to the grib files
        for grbfile in folder.glob(f'{grib_prefix}*.grib2'):
            path = str(grbfile.resolve())
            try:
                subprocess.run(
                    f'wgrib2 {path} -wind_speed - -match "(UGRD|VGRD)" | '
                    f'wgrib2 - -append -grib_out {path}',
                    shell=True, check=True, capture_output=True)
            except subprocess.CalledProcessError as e:
                logger.error('Error converting wind in file %s\n%s',
                             grbfile, e.stderr)
                # was a bare `raise OSError`; chain and attach stderr so
                # the failure cause is not lost
                raise OSError(e.stderr) from e
    if 'subhourly' in str(folder):
        # for hrrr subhourly, assume TMP and VDDSF have no average but others
        fmt = "-match 'ave|TMP|VDDSF'"
    else:
        fmt = ''
    with tempfile.NamedTemporaryFile(mode='w') as tmp_nc_tbl:
        tmp_nc_tbl.write(NC_TBL)
        tmp_nc_tbl.flush()
        try:
            subprocess.run(
                f'cat {str(folder)}/{grib_prefix}*.grib2 | '
                f'wgrib2 - -nc4 -nc_table {tmp_nc_tbl.name} {fmt} -append -netcdf {str(nctmp)}', # NOQA
                shell=True, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            logger.error('Error converting grib files %s*.grib2 to netCDF\n%s',
                         grib_prefix, e.stderr)
            raise OSError(e.stderr) from e
    return nctmp
def _optimize_netcdf(nctmpfile, out_path):
    """Optimizes the netcdf file for accessing by time slice.

    Rewrites the dataset with one chunk spanning the full time axis
    (50-element chunks on other dims), float32 storage with lossy
    least-significant-digit truncation, and zlib compression.
    """
    ds = xr.open_dataset(nctmpfile, engine='netcdf4',
                         backend_kwargs={'mode': 'r'})
    # time is likely unlimited
    if 'unlimited_dims' in ds.encoding:
        # drop the unlimited-dim flag so time can be stored contiguously
        ds.encoding = {}
    chunksizes = []
    for dim, size in ds.dims.items():
        if dim == 'time':
            # whole time series in one chunk -> fast time-slice reads
            chunksizes.append(size)
        else:
            chunksizes.append(50)
    encoding = DEFAULT_ENCODING.copy()
    encoding.update(
        {key: {'dtype': 'float32',
               'least_significant_digit': LEAST_SIGNIFICANT_DIGITS[key],
               'chunksizes': chunksizes,
               **COMPRESSION}
         for key in ds.keys()})
    ds.to_netcdf(out_path, format='NETCDF4',
                 mode='w', unlimited_dims=None,
                 encoding=encoding)
    ds.close()
async def optimize_netcdf(nctmpfile, final_path):
    """Compress the netcdf file and adjust the chunking for fast time-series
    access.

    Writes to a temp file beside ``final_path`` and renames it into
    place on success; the input ``nctmpfile`` is always deleted.
    """
    logger.info('Optimizing NetCDF file to save at %s', final_path)
    parent = Path(final_path.parent)
    if not parent.is_dir():
        parent.mkdir(parents=True)
    _handle, tmp_path = tempfile.mkstemp(dir=parent)
    os.close(_handle)
    tmp_path = Path(tmp_path)
    # possible that this leaks memory, so run in separate process
    # that is restarted after a number of jobs
    try:
        await run_in_executor(_optimize_netcdf, nctmpfile, tmp_path)
    except Exception:
        tmp_path.unlink()
        raise
    else:
        tmp_path.rename(final_path)
        # world-readable, owner-writable
        final_path.chmod(stat.S_IRGRP | stat.S_IRUSR | stat.S_IROTH |
                         stat.S_IWUSR)
        logger.info('Done optimizing NetCDF at %s', final_path)
    finally:
        nctmpfile.unlink()
async def sleep_until_inittime(inittime, model):
    """Wait until the first file of the ``inittime`` run could plausibly
    be published (init time + the model's typical delay); return
    immediately if that moment has already passed."""
    ready_at = inittime + pd.Timedelta(model['delay_to_first_forecast'])
    wait = (ready_at - pd.Timestamp.utcnow()).total_seconds()
    if wait > 0:
        logger.info('Sleeping %0.1fs for next model run %s', wait,
                    model.get('member', ''))
        await asyncio.sleep(wait)
async def startup_find_next_runtime(model_path, session, model):
    """Find the next model run to get based on what is available
    on NOMADS and what .nc files are present locally"""
    dirs = await get_available_dirs(session, model)
    # init times listed on the server that have no local .nc file yet
    no_file = []
    first = pd.Timestamp('20000101T0000Z')
    max_time = first
    for dir_ in dirs:
        if len(dir_) == 8:
            # daily directory (YYYYMMDD): check each init hour within it
            path = model_path / dir_[:4] / dir_[4:6] / dir_[6:8]
            for hr in range(0, 24, int(model['update_freq'].strip('h'))):
                hrpath = path / f'{hr:02d}'
                hrtime = pd.Timestamp(f'{dir_[:8]}T{hr:02d}00Z')
                if not (hrpath / model['filename']).exists():
                    no_file.append(hrtime)
                else:
                    max_time = max(max_time, hrtime)
        else:
            # per-run directory (YYYYMMDDHH)
            hrpath = model_path / dir_[:4] / dir_[4:6] / dir_[6:8] / dir_[8:10]
            hrtime = pd.Timestamp(f'{dir_[:8]}T{dir_[8:10]}00Z')
            if not (hrpath / model['filename']).exists():
                no_file.append(hrtime)
            else:
                max_time = max(max_time, hrtime)
    if len(no_file) == 0:
        if max_time > first:
            # everything on the server is done locally: wait for the
            # run after the newest one we have
            inittime = max_time + pd.Timedelta(model['update_freq'])
        else:  # No available dirs?
            raise ValueError(
                'Failed to find next available model from NOMADS %s' %
                model.get('member', ''))
    else:
        # fetch the oldest run that is still missing locally
        inittime = min(no_file)
    await sleep_until_inittime(inittime, model)
    return inittime
async def next_run_time(inittime, modelpath, model):
    """Advance past ``inittime`` to the next init time whose NetCDF
    output does not exist locally yet, sleep until that run should be
    published, and return the init time."""
    step = pd.Timedelta(model['update_freq'])
    candidate = inittime + step
    # skip over runs that were already fetched and processed
    while (modelpath / candidate.strftime('%Y/%m/%d/%H') /
           model['filename']).exists():
        candidate += step
    await sleep_until_inittime(candidate, model)
    return candidate
async def _run_loop(session, model, modelpath, chunksize, once, use_tmp):
    """Main fetch/process loop for one model (or one GEFS member).

    For each init time: download every available grib file, convert the
    set to a single optimized NetCDF at the final path, clean up the
    grib files, then advance to the next run (unless ``once``).
    """
    inittime = await startup_find_next_runtime(modelpath, session, model)
    while True:
        fetch_tasks = set()
        finalpath = (modelpath / inittime.strftime('%Y/%m/%d/%H') /
                     model['filename'])
        if use_tmp:
            # keep the grib files in a throwaway directory
            _tmpdir = tempfile.TemporaryDirectory()
            gribdir = Path(_tmpdir.name)
        else:
            gribdir = modelpath
        async for params in files_to_retrieve(session, model, gribdir,
                                              inittime):
            logger.debug('Processing parameters %s', params)
            fetch_tasks.add(asyncio.create_task(
                fetch_grib_files(session, params, gribdir, inittime,
                                 chunksize)))
        files = await asyncio.gather(*fetch_tasks)
        if len(files) != 0:  # skip to next inittime
            path_to_files = files[0].parent
            # (previously wrapped in a no-op ``try/except Exception:
            # raise``; exceptions propagate identically without it)
            nctmpfile = await process_grib_to_netcdf(path_to_files,
                                                     model)
            await optimize_netcdf(nctmpfile, finalpath)
            if use_tmp:
                _tmpdir.cleanup()
            else:
                # remove grib files now that the NetCDF is written
                for f in files:
                    f.unlink()
        if once:
            break
        else:
            logger.info('Moving on to next model run')
            inittime = await next_run_time(inittime, modelpath, model)
async def run(basepath, model_name, chunksize, once=False, use_tmp=False):
    """Entry point: run the fetch/process loop for ``model_name``,
    saving output under ``basepath / model_name``.

    GEFS is special-cased: each statistic/member from the model's
    'members' list gets its own concurrently running loop.
    """
    session = make_session()
    modelpath = basepath / model_name
    if model_name != 'gefs':
        model = model_map[model_name]
        await _run_loop(session, model, modelpath, chunksize, once, use_tmp)
    else:
        base_model = model_map[model_name].copy()
        members = base_model.pop('members')
        member_loops = set()
        for member in members:
            # specialize the file/filename templates for this member
            model = base_model.copy()
            model['member'] = member
            model['file'] = model['file'].replace('{stat_or_member}', member)
            model['filename'] = model['filename'].format(stat_or_member=member)
            member_loops.add(asyncio.create_task(
                _run_loop(session, model, modelpath, chunksize, once,
                          use_tmp)))
        await asyncio.wait(member_loops)
    await session.close()
async def optimize_only(path_to_files, model_name):
    """Convert already-downloaded grib files in ``path_to_files`` for
    ``model_name`` into an optimized NetCDF, then delete the grib files.

    The grib files are removed only if the conversion succeeds; any
    failure propagates and leaves them in place.
    """
    model = model_map[model_name]
    nctmpfile = await process_grib_to_netcdf(path_to_files, model)
    # (previously wrapped in a no-op ``try/except Exception: raise``
    # with an ``else``; behavior is identical without the wrapper)
    await optimize_netcdf(
        nctmpfile, path_to_files / f'{model_name}.nc')
    # remove grib files only after a successful conversion
    for f in path_to_files.glob(f'{model["file"].split(".")[0]}*.grib2'):
        f.unlink()
def check_wgrib2():
    """Exit the process with status 1 if the required ``wgrib2``
    binary cannot be found on PATH."""
    if shutil.which('wgrib2') is not None:
        return
    logger.error('wgrib2 was not found in PATH and is required')
    sys.exit(1)
|
'''
Functions to build choropleth maps showing admission counts grouped by
first letter of the ICD (CIE) code, municipality of the MMA and
epidemiological week.

MMA (AMM) = Monterrey Metropolitan Area (Área Metropolitana de Monterrey)
'''
import itertools
import plotly
import plotly.graph_objects as go
import pandas as pd
import epiweeks
from filter_geojson import read_amm_geojson
from readers import read_entries, read_amm_municipalities
def group_dates_by_epiweeks(entries_dates: pd.Series) -> list:
    '''
    Group admission dates by epidemiological week.

    :param entries_dates: Column of admission dates, sorted ascending
        (the single forward scan below relies on this ordering).
    :returns: list of `epiweeks.Week` objects, one per input date.
        (The annotation previously claimed ``pd.Series`` but a plain
        list has always been returned; callers assign it to a DataFrame
        column, which accepts either.)
    '''
    # minimum and maximum years present in the file
    minyear, maxyear = min(entries_dates).year, max(entries_dates).year
    # join the weeks of every year present in the file plus one extra
    # year, so dates near New Year still fall inside a known week
    weeks = list(itertools.chain.from_iterable(
        epiweeks.Year(year).iterweeks()
        for year in range(minyear, maxyear + 2)
    ))
    # admissions mapped onto their epidemiological week
    epidates = []
    # index of the week currently being scanned
    i = 0
    week = weeks[i]
    for dfdate in entries_dates.tolist():
        while True:
            # date falls inside the current week
            if week.startdate() <= dfdate <= week.enddate():
                epidates.append(week)
                # next admission
                break
            # otherwise advance to the next week, same admission
            i += 1
            week = weeks[i]
    return epidates
def count_grouped_entries(entries: pd.DataFrame) -> pd.DataFrame:
    '''
    Count admission cases grouped by municipality and admission week.

    :param entries: DataFrame already filtered by ICD code; must contain
        the columns [MUNIC, INGRE].  Note: mutated in place by adding
        a `CONT` column before grouping.
    :returns: `entries` grouped by (MUNIC, INGRE) with `CONT` holding
        the case count of each group.
    '''
    # seed the counter column; groupby().count() then fills it with the
    # per-group row counts
    entries['CONT'] = 0
    grouped = entries.groupby(['MUNIC', 'INGRE']).count()
    return grouped.reset_index()
def get_amm_entries(year: int, cie: str) -> pd.DataFrame:
    '''
    Keep only MMA admissions, group the admission dates by
    epidemiological week and count the ICD (CIE) cases.

    :param year: Year of the file to read (EGRESO_`year`.csv).
    :param cie: First letter of the ICD code.
    :returns: MMA admission records with the extra columns:
        [NOM_MUN, LETRA_CIE, CONT]
    '''
    entries = read_entries(year)
    amm_munics = read_amm_municipalities()
    # filter records by:
    entries_amm = entries[
        # Nuevo León
        (entries['ENTIDAD'] == '19') &
        # municipalities of the MMA
        entries['MUNIC'].isin(amm_munics['MUNIC']) &
        # first ICD letter matches the parameter
        entries['DIAG_INI'].str.startswith(cie)
        # the state and ICD columns are not needed any further
    ].drop(columns=['ENTIDAD', 'DIAG_INI'])
    # convert the admissions column to datetime
    entries_amm['INGRE'] = pd.to_datetime(entries_amm['INGRE'])
    # sort ascending by admission date (required by the week grouping)
    entries_amm.sort_values('INGRE', inplace=True)
    # group admission dates by epidemiological weeks
    entries_amm['INGRE'] = group_dates_by_epiweeks(entries_amm['INGRE'])
    entries_amm = count_grouped_entries(entries_amm)
    # add a column with the municipality names
    entries_amm = amm_munics.merge(entries_amm, on='MUNIC')
    return entries_amm
def plot_entries_choropleth(year: int, cie: str, output: str = '') -> None:
    '''
    Build an animated choropleth map of admission counts by
    municipality, ICD (CIE) letter and epidemiological week.

    :param year: Year of the file to read (EGRESO_`year`.csv).
    :param cie: First letter of the ICD code.
    :param output: Relative path of the HTML file to save the map to.
    '''
    # if no file name was given, generate one
    filepath = output if output else f'ingresos_{cie}_{year}.html'
    print(f'{filepath}: Preparando datos...', flush=True)
    entries_amm = get_amm_entries(year, cie)
    # case-count limits for the whole file (year), shared color scale
    mincount, maxcount = min(entries_amm['CONT']), max(entries_amm['CONT'])
    munics_geojson = read_amm_geojson()
    # for each initial ICD letter
    # for cie_letter in entries_amm['LETRA_CIE'].unique():
    # filter admissions by the ICD letter
    entries_amm = entries_amm.sort_values('INGRE')
    # frames/steps lists driving the map animation
    frames, steps = list(), list()
    print(f'{filepath}: Generando mapa coroplético...', flush=True)
    # for each week present in the current ICD's admissions
    for week in entries_amm['INGRE'].unique():
        # admissions for this week only
        entries_ingre = entries_amm[
            entries_amm['INGRE'] == week
        ].sort_values('MUNIC')
        label = f'{week.year}, semana {week.week}'
        name = f'frame_{week}'
        frames.append({
            'name': name,
            'data': [
                # choropleth layer for this week
                dict(
                    type='choroplethmapbox',
                    geojson=munics_geojson,
                    locations=entries_ingre['MUNIC'],
                    z=entries_ingre['CONT'],
                    zmin=mincount,
                    zmax=maxcount,
                    hoverinfo='z+text+name',
                    text=entries_ingre['NOM_MUN'],
                    name='Casos en',
                    colorscale='Viridis',
                    marker_opacity=0.5,
                    marker_line_width=0,
                )
            ]
        })
        steps.append({
            'label': label,
            'method': 'animate',
            'args': [
                [name],
                {
                    'mode': 'immediate',
                    'frame': {
                        'duration': 500,
                        'redraw': True
                    },
                    'transition': {'duration': 300}
                }
            ]
        })
    sliders = [{
        'transition': {'duration': 300},
        'x': 0.08,
        'len': 0.88,
        'currentvalue': {'xanchor': 'center'},
        'steps': steps
    }]
    playbtn = [{
        'type': 'buttons',
        'showactive': True,
        'x': 0.045, 'y': -0.08,
        'buttons': [{
            'label': 'Play',
            'method': 'animate',
            'args': [
                None,
                {
                    'mode': 'immediate',
                    'frame': {
                        'duration': 500,
                        'redraw': True
                    },
                    'transition': {'duration': 300},
                    'fromcurrent': True
                }
            ]
        }]
    }]
    layout = go.Layout(
        title=f'Archivo: EGRESOS_{year} CIE: {cie}',
        mapbox_style='carto-positron',
        mapbox_zoom=9.5,
        mapbox_center = {'lat': 25.680, 'lon': -100.249},
        sliders=sliders,
        updatemenus=playbtn
    )
    # first frame shown before the animation starts
    data = frames[0]['data']
    figure = go.Figure(data=data, layout=layout, frames=frames)
    print(f'{filepath}: Guardando mapa en archivo...', flush=True)
    # save the map to file
    plotly.offline.plot(figure, filename=filepath)
|
import hashlib
import logging
from tornado import gen
from redistest import RedisTestCase, async_test
class ScriptingTestCase(RedisTestCase):
    """Tests for the redis EVAL / EVALSHA scripting commands."""

    @async_test
    @gen.engine
    def test_eval(self):
        script = 'return 2'
        # bug fix: hashlib.sha1 requires bytes on Python 3; encoding an
        # ASCII script is a no-op on Python 2, so this stays compatible
        script_digest = hashlib.sha1(script.encode('utf-8')).hexdigest()
        results = yield gen.Task(self.client.eval, script)
        self.assertEqual(2, results)
        # test evalsha using the digest of the script loaded by eval
        results = yield gen.Task(self.client.evalsha, script_digest)
        self.assertEqual(2, results)
        self.stop()

    # TODO: script_exists, script_load, script_flush, script_kill
|
import numpy as np
import pandas as pd
import altair as alt
import pathlib
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from utils import Header, make_dash_table
# Allow large data set (altair caps rows by default)
alt.data_transformers.disable_max_rows()
# Get data
wine = pd.read_csv("data/processed/wine_quality.csv")
corr_df = pd.read_csv("data/processed/correlation.csv")
# Get a list of unique column names for the dropdown options
variables = corr_df["level_0"].unique()
variables = np.delete(variables, np.argwhere(variables == "Quality Factor"))
# Don't want this as an option in scatterplot
variables = np.delete(variables, np.argwhere(
    variables == "Quality Factor Numeric"))
# Matrix plot. I couldn't figure out how to make it work at the bottom without a callback input
def plot_matrix():
click = alt.selection_multi(fields=['type'], bind='legend')
chart = alt.Chart(corr_df, title="Correlation Plot for Numeric Features").mark_square().encode(
color=alt.Color('type', scale=alt.Scale(domain=['red', 'white'],
range=['darkred', 'blue'])),
x='level_0',
y='level_1',
size='abs',
opacity=alt.condition(click, alt.value(0.7), alt.value(0)),
tooltip=["type", "corr"]
).configure_title(fontSize=18).properties(height=250, width=250).add_selection(click)
return chart.to_html()
def create_layout(app):
    """Assemble the static 'Overview' page: header, motivation blurb and
    a long markdown description of the app, data and usage scenario."""
    # Page layouts
    layout = html.Div(
        [html.Div([Header(app)]),
         # page 1
         html.Div(
             [ # Row 3
                 html.Div(
                     [html.H3('Motivation'),
                      # html.Br([]),
                      html.H6(
                          "\
Wine is a multi billion dollar global industry. \
With 36 billion bottles of wine produced each year ,producers are constantly looking for ways to outperform \
the competition and create the best wines they can. \
Portugal in particular is second in the world for per-capita wine \
consumption and eleventh for wine production, creating over 600,000 \
litres per year .Physicochemical components are fundamental to a wine’s quality \
and those who understand this aspect of wine will have a greater edge into crafting an enjoyable \
and profitable product.Wine quality evaluation is the main part of the certification process to \
improve wine making. It is generally assessed by physicochemical tests and sensory analysis. \
The relationship between physicochemical structure and subjective quality is complex. \
No individual component can be used to accurately predict a wine’s quality, and interactions \
are as important as the components themselves. For example, perhaps higher alcohol content only \
improves a wine within a certain range of sulphate content, and wines outside this range are made \
worse by higher alcohol content. Trained wine tasting experts are able to reliably score wine on a \
scale ranging from 0 (very bad) to 10 (excellent), and those scores can be used to determine how these \
physicochemical properties affect quality.Our interactive dashboard will allow users to explore a number \
of physicochemical variables and how they interact to determine the subjective quality of a wine. \
Our visualizations will allow users to test and discover for themselves these relationships. \
Wine producers, wine enthusiasts, and curious individuals can all make use of this dashboard.",
                          style={'color': "#FFFFFF"},
                          className="row",
                      ),
                     ],
                     className="product"
                 ),
             ],
             className="twelve columns"),
         # long-form markdown description of the app, data and usage
         html.Div(
             dcc.Markdown('''
## Welcome!
##### Hello and thank you for stopping by the [Wine Vision App](https://wine-vision.herokuapp.com/)!
##### This page is our homebase providing you information on the project.

### The problem
Wine making has always been a traditional practice passed down for many generations; yet, some of wine's secrets are still a mystery to most people, even wine producers! So how are we supposed to craft the perfect wine without knowing what makes it perfect (speaking from both a consumer and business perspective)?
In general, wine quality evaluation is assessed by physicochemical tests and sensory analys. It's the roadmap to improving a wine. However the relationship between physicochemical structure and subjective quality is complex and no individual component can be used to accurately predict a wine’s quality. The interactions are as important as the components themselves.
From a business perspective, producers are constantly looking for ways to outperform the competition by creating the best wine they can. Those who understand the fundamental physiochemical aspects of wine will have a greater edge into crafting an enjoyable and profitable product. So, we introduce to you the *Wine Vision Dashboard*.
### The solution
**Our interactive dashboard will allow users to explore how a number of physicochemical variables interact and determine the subjective quality of a wine. Wine producers, wine enthusiasts, and curious individuals can all make use of this dashboard to discover these elusive relationships.**

## App Description
Our dashboard has three pages: an overview, a comparison of red and white wines, and a comparison of different wine quality levels.
The Interactive Graphics page contains a number of graphis to explore the effects of physicochemical properties on wine quality. On the left hand side users are able to select the wine type (eg. red wine, white wine) as well as the physicochemical features of interest, with subset ranges if they so desire. Some possible visualizations are as follows:
The “Overview” page provides information on how to use the app and includes some references and further information for those interested in wine aspects the dashboard does not cover.
The second page is “Wine Type”, primarily intended to demonstrate differences between red and white wines. This page has a good layout, without leaving any large white spaces. It also demonstrates a cohesive narrative, with users able to distinguish high correlation variables from the correlation matrix and then investigating deeper using the scatter plot and density plot.
The third page is “Quality Factors”, where users can explore features of wines in different quality groups. Users can subset the data range by selecting areas on the scatter plot, which immediately updates the other plots. The bar plot allows users to visualize quality factor proportions in their selections.. The “drag and drop” functionality makes this page particularly interactive.
## The Data
Portugal is second in the world for per-capita wine consumption [2](https://www.nationmaster.com/nmx/ranking/wine-consumption-per-capita) and eleventh for wine production [3](https://en.wikipedia.org/wiki/List_of_wine-producing_regions), so by no coincidence we built our dashboard on the famous Portuguese wine quality data set from Cortez et al., 2009.
Data was collected from Vinho Verde wines originating from the northwest regions of Portugal. These wines have a medium alcohol content, and are particularly sought for their freshness in summer months. Each wine sample was evaluated by at least three sensory assessors (using blind tastes) who graded the wine from 0 (worst) to 10 (best). The final quality score is given by the median of these evaluations.
The dataset consists of the physiochemical composition and sensory test results for 4898 white and 1599 red wine samples which were collected from May 2004 to February 2007. Each wine sample contains 12 variables that provide the acidity properties (fixed acidity, volatile acidity, citric acid, pH), sulphides contents (free sulfur dioxide, total sulfur dioxide, sulphates), density related properties (residual sugar, alcohol, density), and salt content (chlorides). It also contains quality as the response variable. In order to improve classification analyses, we define a new variable, quality_factor. Any wine with a quality score less than six is classified as “below average”, a score of 6 is “average”, and above 6 is “above average”.

## A Fun Usage Scenario
Alice is a winemaker in BC’s Okanagan Valley. She would like to create a new summer wine and hopes to take inspiration from the Vinho Verde wines, known for their refreshing qualities. Alice seeks our dashboard to better understand what wine attributes she should focus on to provide a tasting experience comparable to the very best Vinho Verde wines. However, there are some physicochemical properties she has little control over due to the soils and grape species available to her. Due to the above average alkalinity of Okanagan soil, she knows that her wines will typically be less acidic than true Vinho Verde wines, and the altitude means the chloride content will be lower as well. She wants to try to optimize the variables she has control over to make the best wine possible. She looks to our dashboard to see how Vinho Verde wines with higher pH and lower chloride content tend to fare. Looking at the interactive scatterplots, she sees that wines which have values within her possible ranges for these variables tend to be of very poor quality when they are also high in residual sugar, but less sweet wines are of good quality. She then consults the histograms and sees that there are not very many wines available that have these properties, so she knows that she will not have much direct competition should she go forward with this design. A few years later, she released this wine to broad critical acclaim and millions in profit.
## Get involved
If you think you can help in any of the areas listed above (and we bet you can) or in any of the many areas that we haven't yet thought of (and here we're *sure* you can) then please check out our [contributors' guidelines](https://github.com/ubco-mds-2020-labs/WineVision/blob/main/CONTRIBUTING.md) and our [roadmap](https://github.com/ubco-mds-2020-labs/WineVision/pull/1).
Please note that it's very important to us that we maintain a positive and supportive environment for everyone who wants to participate. When you join us we ask that you follow our [code of conduct](https://github.com/ubco-mds-2020-labs/WineVision/blob/main/CODE_OF_CONDUCT.md) in all interactions both on and offline.
## Contact us
If you want to report a problem or suggest an enhancement we'd love for you to [open an issue](https://github.com/ubco-mds-2020-labs/WineVision/issues) at this github repository because then we can get right on it.
## Data Citation
Paulo Cortez, University of Minho, Guimarães, Portugal, http://www3.dsi.uminho.pt/pcortez
A. Cerdeira, F. Almeida, T. Matos and J. Reis, Viticulture Commission of the Vinho Verde Region(CVRVV), Porto, Portugal
@2009
'''),
             style={"font-size": "1.925rem"}
         )
        ]
    )
    return layout
|
"""Mixin classes for Renderers that plot.
"""
import os, sys, re, math, tempfile
import itertools
import numpy
from SphinxReport.ResultBlock import ResultBlock, ResultBlocks
from SphinxReportPlugins.Renderer import Renderer, NumpyMatrix
from SphinxReport.DataTree import path2str
from collections import OrderedDict as odict
from SphinxReport import Utils
from SphinxReport import Stats
import pandas.rpy.common
try:
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects.lib.ggplot2 as ggplot2
except ImportError:
R = None
from docutils.parsers.rst import directives
def parseRanges(r):
    '''Parse a string of the form "x,y" into a list [x, y] of floats.

    An empty component becomes None; falsy input (None or "") is
    returned unchanged.  Extra components after the second are kept
    as-is without conversion.
    '''
    if not r:
        return r
    parts = [part.strip() for part in r.split(",")]
    for idx in (0, 1):
        parts[idx] = None if parts[idx] == "" else float(parts[idx])
    return parts
def getCurrentRDevice():
    '''return the numerical device id of the current device.

    Calls R's ``dev.cur()`` through rpy2; the first element of the
    returned vector is the device number.
    '''
    #R.dev_off()
    #return R.dev_cur().values()[0]
    return R["dev.cur"]()[0]
class Plotter(object):
    """Base class for Renderers that do simple 2D plotting.

    This mixin class provides convenience function for :class:`Renderer.Renderer`
    classes that do 2D plotting.

    The base class takes care of counting the plots created,
    provided that the methods :meth:`startPlot` and :meth:`endPlot`
    are called appropriately. It then inserts the appropriate place holders.

    This class adds the following options to the :term:`report` directive:

    :term:`logscale`: apply logscales one or more axes.
    :term:`xtitle`: add a label to the X axis
    :term:`ytitle`: add a label to the Y axis
    :term:`title`: title of the plot
    :term:`add-title`: add title to each plot
    :term:`legend-location`: specify the location of the legend
    :term:`as-lines`: do not plot symbols
    :term:`xrange`: restrict plot a part of the x-axis
    :term:`yrange`: restrict plot a part of the y-axis

    With some plots default layout options will result in plots
    that are misaligned (legends truncated, etc.). To fix this it might
    be necessary to increase plot size, reduce font size, or others.
    The following options will be passed on the matplotlib to permit
    this control.

    :term:`mpl-figure`: options for matplotlib ``figure`` calls().
    :term:`mpl-legend`: options for matplotlib ``legend`` calls().
    :term:`mpl-subplot`: options for matplotlib ``subplots_adjust`` calls().
    :term:`mpl-rc`: general environment settings for matplotlib.
       See the matplotlib documentation. Multiple options can be
       separated by ;, for example
       ``:mpl-rc: figure.figsize=(20,10);legend.fontsize=4``
    """

    # font size used when rendering legends
    mLegendFontSize = 8
    # number of chars to use to reduce legend font size
    mMaxLegendSize = 100
    ## maximum number of rows per column. If there are more,
    ## the legend is split into multiple columns
    mLegendMaxRowsPerColumn = 30

    # directive options understood by this mixin; values are the
    # docutils option validators
    options = (
        ('logscale', directives.unchanged),
        ('title', directives.unchanged),
        ('add-title', directives.flag),
        ('xtitle', directives.unchanged),
        ('ytitle', directives.unchanged),
        ('xrange', directives.unchanged),
        ('yrange', directives.unchanged),
        ('zrange', directives.unchanged),
        ('mpl-figure', directives.unchanged),
        ('mpl-legend', directives.unchanged),
        ('mpl-subplot', directives.unchanged),
        ('mpl-rc', directives.unchanged),
        ('as-lines', directives.flag),
        ('legend-location', directives.unchanged),
        )

    def __init__(self, *args, **kwargs ):
        """parse option arguments."""

        self.mFigure = 0

        # default palettes cycled through when plotting multiple series
        self.mColors = "bgrcmk"
        self.mSymbols = ["g-D","b-h","r-+","c-+","m-+","y-+","k-o","g-^","b-<","r->","c-D","m-h"]
        self.mMarkers = "so^>dph8+x"

        self.logscale = kwargs.get("logscale", None )
        self.title = kwargs.get("title", None )
        self.add_title = "add-title" in kwargs
        self.xlabel = kwargs.get("xtitle", None )
        self.ylabel = kwargs.get("ytitle", None )

        # substitute '-' in SphinxReport-speak for ' ' in matplotlib speak
        self.legend_location = re.sub("-", " ", kwargs.get("legend-location", "outer-top"))
        # NOTE(review): no 'width' option is declared in ``options`` above,
        # so this relies on callers passing 'width' directly -- confirm.
        self.width = kwargs.get("width", 0.50 )
        self.mAsLines = "as-lines" in kwargs
        self.xrange = parseRanges(kwargs.get("xrange", None ))
        self.yrange = parseRanges(kwargs.get("yrange", None ))
        self.zrange = parseRanges(kwargs.get("zrange", None ))

        if self.mAsLines:
            # replace the symbol palette with linestyle+colour combinations
            self.mSymbols = []
            for y in ("-",":","--"):
                for x in "gbrcmyk":
                    self.mSymbols.append( y+x )

    def startPlot( self ):
        """open a new R graphics device (X11)."""
        R.x11()

    def endPlot( self, work, path ):
        """finish the current plot and return a ResultBlocks container
        holding a ``#$rpl <device-id>$#`` placeholder for the R device.
        """
        # currently: collects only single plots.
        figid = getCurrentRDevice()
        blocks = ResultBlocks( ResultBlock( "\n".join( ("#$rpl %i$#" % (figid), "")),
                                            title = "/".join(map(str, path)) ) )
        return blocks
class LinePlot( Renderer, Plotter ):
    '''create a line plot.

    This :class:`Renderer` requires at least three levels:
    line / data / coords.

    This is a base class that provides several hooks for
    derived classes.

    initPlot()

    for line, data in work:
        initLine()

        for coords in data:
            xlabel, ylabels = initCoords()
            for ylabel in ylabels:
                addData( xlabel, ylabel )
            finishCoords()
        finishLine()

    finishPlot()
    '''
    nlevels = 3

    def __init__(self, *args, **kwargs):
        Renderer.__init__(self, *args, **kwargs )
        Plotter.__init__(self, *args, **kwargs )

    def initPlot(self, fig, work, path ):
        '''initialize plot state before any data is added.'''
        self.legend = []
        self.plots = []
        self.xlabels = []
        self.ylabels = []

    def addData( self,
                 line, label,
                 xlabel, ylabel,
                 xvals, yvals,
                 nplotted ):
        '''hook: plot one series and record its labels.

        NOTE(review): ``plt`` (matplotlib.pyplot) is not imported in this
        module; calling this hook as-is raises NameError. It appears to be
        matplotlib-era legacy -- confirm against derived classes.
        '''
        s = self.mSymbols[nplotted % len(self.mSymbols)]
        self.plots.append( plt.plot( xvals,
                                     yvals,
                                     s ) )
        self.ylabels.append(ylabel)
        self.xlabels.append(xlabel)

    def initCoords( self, label, coords ):
        '''hook for code working on a collection of coords.

        should return a single key for xvalues and
        one or more keys for y-values.
        '''
        keys = list(coords.keys())
        return keys[0], keys[1:]

    def render(self, dataframe, path ):
        '''plot consecutive (x, y) index-path pairs of *dataframe* with R
        and return result blocks referencing the R device.'''
        fig = self.startPlot()

        labels = dataframe.index.levels
        paths = list(itertools.product( *labels ) )

        # BUG FIX: the original referenced an undefined name ``dataseries``
        # throughout this method; the data argument is ``dataframe``.
        self.initPlot( fig, dataframe, path )

        nplotted = 0
        for idx in range( 0, len(paths), 2):
            self.initLine( path, dataframe )

            xpath = paths[idx]
            ypath = paths[idx + 1]
            xvalues, yvalues = dataframe.ix[xpath], dataframe.ix[ypath]
            if len(xvalues) != len(yvalues):
                # BUG FIX: the %-operands must be a single tuple; the
                # original applied % to len(xvalues) alone and passed
                # len(yvalues) as a second exception argument.
                raise ValueError(
                    "length of x,y tuples not consistent: %i != %i" %
                    (len(xvalues), len(yvalues)))

            R.plot( xvalues, yvalues )
            # NOTE(review): initCoords is declared as (label, coords) and
            # inspects coords.keys(); this call passes two value series and
            # ignores the result -- confirm intended hook usage.
            self.initCoords( xvalues, yvalues )
            nplotted += 1

        self.finishPlot( fig, dataframe, path )

        figid = getCurrentRDevice()
        blocks = ResultBlocks( ResultBlock( "\n".join( ("#$rpl %i$#" % (figid), "")),
                                            title = "/".join(path) ) )
        return blocks
class BoxPlot( Renderer, Plotter ):
    """Render box plots via R's ``boxplot`` function.

    This :class:`Renderer` requires two levels.

    labels[dict] / data[array]
    """
    options = Renderer.options + Plotter.options

    nlevels = 1

    def __init__(self, *args, **kwargs):
        Renderer.__init__(self, *args, **kwargs)
        Plotter.__init__(self, *args, **kwargs)

    def render(self, dataframe, path):
        # open a fresh R device, convert the dataframe, draw, and
        # collect the resulting placeholder block
        self.startPlot()
        R.boxplot(pandas.rpy.common.convert_to_r_dataframe(dataframe))
        return self.endPlot(dataframe, path)
class SmoothScatterPlot(Renderer, Plotter):
    """A smoothed scatter plot.

    See R.smoothScatter.

    This :class:`Renderer` requires one levels:
    coords[dict]
    """
    options = Renderer.options + Plotter.options +\
        ( ('bins', directives.unchanged), )

    nlevels = 2

    def __init__(self, *args, **kwargs):
        Renderer.__init__(self, *args, **kwargs )
        Plotter.__init__(self, *args, **kwargs )

        # BUG FIX: the directive option is registered as 'bins' above but
        # was previously read only under the key 'nbins', so user-supplied
        # values were silently ignored. Accept both, preferring the
        # declared 'bins' key (backward compatible).
        self.nbins = kwargs.get("bins", kwargs.get("nbins", "128"))
        if self.nbins:
            # "64,32" -> [64, 32] (per-axis bin counts); "128" -> 128
            if "," in self.nbins:
                self.nbins = list(map(int, self.nbins.split(",")))
            else:
                self.nbins = int(self.nbins)

    def render(self, dataframe, path ):
        '''draw one smoothScatter panel for every pair of columns.

        Raises ValueError when fewer than two columns are present.
        '''
        if len(dataframe.columns) < 2:
            raise ValueError( "requiring two coordinates, only got %s" % str(dataframe.columns))

        plts, legend = [], []
        blocks = ResultBlocks()

        for xcolumn, ycolumn in itertools.combinations( dataframe.columns, 2 ):
            # remove missing data points
            xvalues, yvalues = Stats.filterMissing( (dataframe[xcolumn], dataframe[ycolumn]) )

            # skip column pairs where everything was missing
            if len(xvalues) == 0 or len(yvalues) == 0:
                continue

            # apply log transformation on data not on plot
            if self.logscale:
                if "x" in self.logscale:
                    xvalues = R.log10(xvalues)
                if "y" in self.logscale:
                    yvalues = R.log10(yvalues)

            self.startPlot()
            # wrap in plain numpy arrays, as pandas series can not be
            # passed through rpy2.
            # BUG FIX: use builtin ``float`` instead of ``numpy.float``;
            # the alias (identical to float) was removed in NumPy 1.24.
            R.smoothScatter( numpy.array( xvalues, dtype=float),
                             numpy.array( yvalues, dtype=float),
                             xlab=xcolumn,
                             ylab=ycolumn,
                             nbin = self.nbins )
            blocks.extend( self.endPlot( dataframe, path ) )

        return blocks
class HeatmapPlot(NumpyMatrix, Plotter):
    """A heatmap plot.

    See R.heatmap.2 in the gplots package

    This :class:`Renderer` requires one levels:
    coords[dict]
    """
    options = NumpyMatrix.options + Plotter.options

    nlevels = 2

    def __init__(self, *args, **kwargs):
        NumpyMatrix.__init__(self, *args, **kwargs )
        Plotter.__init__(self, *args, **kwargs )

    def plot( self, matrix, row_headers, col_headers, path ):
        '''plot matrix.

        Large matrices are split into several plots.
        '''
        self.debug("HeatmapPlot started")
        self.startPlot()
        R.library( 'gplots' )
        # draw with a symmetric blue-white-red palette, no dendrograms,
        # and small axis fonts; mar widens the margins for long labels
        R["heatmap.2"]( matrix,
                        trace = 'none',
                        dendrogram = 'none',
                        col=R.bluered(75),
                        symbreaks = True,
                        symkey = True,
                        cexCol = 0.5,
                        cexRow = 0.5,
                        labRow = row_headers,
                        labCol = col_headers,
                        mar = ro.IntVector((10,10)),
                        keysize = 1 )
        self.debug("HeatmapPlot finished")
        return self.endPlot( None, path )

    def render(self, dataseries, path ):
        '''build a matrix from *dataseries* and delegate to :meth:`plot`.'''
        self.debug("building matrix started")
        matrix, rows, columns = self.buildMatrix( dataseries )
        self.debug("building matrix finished")
        return self.plot( matrix, rows, columns, path )
class GGPlot( Renderer, Plotter ):
    """Render a dataframe through R's ggplot2.

    The mandatory :term:`statement` option is appended to a ``ggplot``
    object built from the data (e.g. aesthetics plus geometries).

    This :class:`Renderer` requires two levels.

    labels[dict] / data[array]
    """
    options = (
        ('statement', directives.unchanged),
        ) + Renderer.options + Plotter.options

    nlevels = 1

    def __init__(self, *args, **kwargs):
        Renderer.__init__(self, *args, **kwargs )
        Plotter.__init__(self, *args, **kwargs )

        if "statement" not in kwargs:
            raise ValueError("r-ggplot renderer requires a statement option" )

        self.statement = kwargs.get( 'statement' )

    def render(self, dataframe, path ):
        '''build a ggplot object from *dataframe* and attach it,
        unrendered, to the returned result block.'''
        R.library( 'ggplot2' )

        rframe = pandas.rpy.common.convert_to_r_dataframe(dataframe)
        # sometimes the row/column mapping did not work
        # rframe.colnames = dataframe.columns

        # strip the AsIs wrapping applied by the converter so that
        # ggplot sees plain numeric / character vectors
        unAsIs = R('''function (x) {
                          if(typeof(x) %in% c("integer","double")) {
                              class(x) <- "numeric"
                              return (x)}
                          else if (typeof(x) == "character") {
                              class(x) <- "character"
                              return (x) }
                          else {
                              return(x) } }''')

        rframe = R["as.data.frame"](R.lapply(rframe,unAsIs))
        R.assign( "rframe", rframe )

        # start plot
        R('''gp = ggplot( rframe )''')

        # add aesthetics and geometries
        try:
            pp = R('''gp + %s ''' % self.statement )
        except ValueError as msg:
            # NOTE(review): rpy2 usually raises RRuntimeError for bad R
            # statements, not ValueError -- confirm this clause triggers.
            raise ValueError( "could not interprete R statement: gp + %s; msg=%s" % (self.statement, msg ))

        figname = re.sub( '/', '_', path2str(path) )
        # rendering is deferred: the ggplot object travels on the block
        r = ResultBlock( '#$ggplot %s$#' % figname,
                         title = path2str(path) )
        r.rggplot = pp
        r.figname = figname

        return ResultBlocks( r )
|
from typing import List, Optional
from cloudrail.knowledge.context.aws.ec2.security_group import SecurityGroup
from cloudrail.knowledge.context.aws.indirect_public_connection_data import IndirectPublicConnectionData
from cloudrail.knowledge.context.aws.kms.kms_key import KmsKey
from cloudrail.knowledge.context.aws.networking_config.inetwork_configuration import INetworkConfiguration
from cloudrail.knowledge.context.aws.service_name import AwsServiceName
from cloudrail.knowledge.context.aws.networking_config.network_configuration import NetworkConfiguration
from cloudrail.knowledge.context.aws.networking_config.network_entity import NetworkEntity
class RdsInstance(NetworkEntity, INetworkConfiguration):
    """
    Attributes:
        name: The name of the instance.
        arn: The ARN of the instance.
        port: The port the instance is listening on.
        publicly_accessible: True if the database is configured to have
            a public IP address.
        db_subnet_group_name: The name of the DB subnet group.
        security_group_ids: The IDs of the security groups in use
            with the instance.
        db_cluster_id: The cluster ID, if this instance is part of a cluster,
            or None otherwise.
        encrypted_at_rest: True if encryption at rest is enabled.
        performance_insights_enabled: True if performance insights is enabled.
        performance_insights_kms_key: The ARN of the KMS Key used to encrypt
            the performance insights, if any is used.
        performance_insights_kms_data: The actual KMS Key object, if a KMS key
            is used to encrypt performance insights.
        security_group_allowing_public_access: A security group that allows access from the internet.
            This value will be None when this resource is not accessible from the internet.
        indirect_public_connection_data: The data that describes that a publicly-accessible resource can access this resource by a security group of this resource.
        backup_retention_period: Number of days to retain backups.
        engine_type: The Database engine name to be used for this RDS instance.
        engine_version: The Database engine version to be used for this RDS instance.
        iam_database_authentication_enabled: An indication whether authentication to the RDS instance using IAM entities is enabled.
    """

    def __init__(self,
                 account: str,
                 region: str,
                 name: str,
                 arn: str,
                 port: int,
                 publicly_accessible: bool,
                 db_subnet_group_name: str,
                 security_group_ids: List[str],
                 db_cluster_id: Optional[str],
                 encrypted_at_rest: bool,
                 performance_insights_enabled: bool,
                 performance_insights_kms_key: Optional[str],
                 engine_type: str,
                 engine_version: str):
        super().__init__(name, account, region, AwsServiceName.AWS_RDS_CLUSTER_INSTANCE)
        self.arn: str = arn
        self.port: int = port
        self.db_subnet_group_name: str = db_subnet_group_name
        # no explicit subnet group -> treated as living in the default VPC
        self.is_in_default_vpc: bool = db_subnet_group_name is None
        self.network_configuration: NetworkConfiguration = NetworkConfiguration(publicly_accessible, security_group_ids, None)
        self.db_cluster_id: Optional[str] = db_cluster_id
        self.encrypted_at_rest: bool = encrypted_at_rest
        self.performance_insights_kms_key: Optional[str] = performance_insights_kms_key
        self.performance_insights_enabled: bool = performance_insights_enabled
        # the following fields start as None and are filled in elsewhere
        # (not by this constructor)
        self.performance_insights_kms_data: Optional[KmsKey] = None
        self.backup_retention_period: Optional[int] = None
        self.engine_type: str = engine_type
        self.engine_version: str = engine_version
        self.iam_database_authentication_enabled: Optional[bool] = None
        self.cloudwatch_logs_exports: Optional[list] = None
        self.indirect_public_connection_data: Optional[IndirectPublicConnectionData] = None
        self.security_group_allowing_public_access: Optional[SecurityGroup] = None

    def get_keys(self) -> List[str]:
        # the ARN uniquely identifies the instance
        return [self.arn]

    def get_name(self) -> str:
        return self.name

    def get_arn(self) -> str:
        return self.arn

    def get_extra_data(self) -> str:
        port = 'port: {}'.format(self.port) if self.port else ''
        db_subnet_group_name = 'db_subnet_group_name: {}'.format(self.db_subnet_group_name) if self.db_subnet_group_name else ''
        return ', '.join([port, db_subnet_group_name])

    def get_all_network_configurations(self) -> List[NetworkConfiguration]:
        return [self.network_configuration]

    def get_type(self, is_plural: bool = False) -> str:
        if not is_plural:
            return 'RDS Instance'
        else:
            return 'RDS Instances'

    def get_cloud_resource_url(self) -> Optional[str]:
        # NOTE(review): the console URL is built from db_cluster_id, which
        # is None for non-cluster instances (yielding 'database:id=None');
        # confirm whether the instance name should be used here instead.
        return '{0}rds/home?region={1}#database:id={2};is-cluster=false'\
            .format(self.AWS_CONSOLE_URL, self.region, self.db_cluster_id)

    @property
    def is_tagable(self) -> bool:
        return True
|
from pytelegrafhttp.util import check_online
# Global default install directory for all files. Note that each specific file / directory may override where it is
# located, but by default they are all grouped together
install_dir = '/etc/pytelegrafhttp'

###############
# ENVIRONMENT #
###############

# Directory for files for use with daemon communication
env_daemon_files_dir = install_dir + '/daemon'

# file to save cookies in
env_cookies_file = install_dir + '/cookies.pkl'

# file to save general program state in
env_state_file = install_dir + '/state.pkl'

# NOTE(review): the following comment is orphaned (no setting follows it);
# the save frequency is defined below as time_save_frequencey.
# How often the system should save state. Given in terms of 'ticks', where one tick is equal to the time of the
# time_collection_interval

########
# TIME #
########

# How often metrics are collected. Measured in seconds.
time_collection_interval = 60

# How often to save the state. Given in terms of time_collection_interval variable, so putting 10 here indicates that
# the state should be saved after every 10th data collection.
# NOTE(review): the name is misspelled ('frequencey') but is read elsewhere
# under this exact name, so it must not be renamed here.
time_save_frequencey = 10

###########
# LOGGING #
###########

# Location of log file directory (by default all logs are kept together; override individual log file paths to change
# this).
log_dir = install_dir + '/logs'

# Location of log file that contains all output
log_main_log_path = log_dir + '/main.log'

# Location of log file that contains only errors
log_error_log_path = log_dir + '/errors.log'

# Maximum size of a log file before it is rotated. Format is flexible, and accepts strings such as "24KB", "8g", or
# "5kbs"
log_file_max_size = "5 Mbs"

# Number of log files to keep. Once this number of rotated logs is reached, every rotation after that will cause the
# oldest one to be deleted.
log_file_keep_count = 4

# Whether entire HTTP request should be in logged output. If True, headers, body, and query string are included. If
# False, only the host and the URI endpoint are included in logged output. WARNING: If set to True, sensitive data
# could be included in logged output, such as passwords or session IDs.
log_full_http_requests = False

# Whether entire HTTP response should be in response. If True, headers and body are included. If False, only the HTTP
# status code is included in logged output.
log_full_http_responses = False

# Additional system log to use. Adding one of these values requires that the associated python module is installed on
# the host system separately from this application.
#
# Supported values are: 'systemd'
log_os_logs = []
# Uncomment to enable journalctl logging
# log_os_logs.append('systemd')

###########
# SCRAPER #
###########

scraper_host = 'e-fancomics.org'
# NOTE(review): placeholder credentials -- expected to be replaced in the
# deployed copy of this config.
scraper_username = 'username'
scraper_password = 'password'
scraper_use_ssl = True
# Login flow: fetch the page, extract the form, inject credentials, submit,
# follow the interstitial redirect, then verify a known page fragment.
scraper_login_steps = [
	('attempt', {'endpoint': '/fancomicsathome.php'}),
	('resp-extract', {'type': 'form-vars', 'inject': {'UserName': 'username', 'PassWord': 'password'}}),
	('submit-form', {}),
	('bounce-transfer', {'pattern': '<a href="([^"]+)">Or click here if you do not wish to wait</a>'}),
	('verify', {'pattern': 'F@H Miss% shows the percentage of requests'})
]
# Page fragments that signal a lost session / a bot ban, respectively.
scraper_logged_out_pattern = 'requires you to log on.</p>'
scraper_bot_kicked_pattern = 'banned for excessive pageloads which indicates'
# Telegraf listeners to push metrics to, keyed by destination identifier.
scraper_telegraf_destinations = {
	'hath-net': {
		'port': 10000,
		'global-tags': {}
	},
	'hath-client-net-stats': {
		'port': 10001,
		'global-tags': {}
	}
}
# Endpoints to scrape, each with the metrics to extract from the page.
# Every metric is a sequence of regexes matched in order; capture groups
# feed the named values ('CAPTURE-n') after applying 'conversion'.
scraper_endpoints = []
scraper_endpoints.append({
	'endpoint': '/fancomicsathome.php',
	'verify-pattern': 'F@H Miss% shows the percentage of requests',
	'metrics': [
		{
			'dest': 'hath-net',  # destination db / telegraf identifier
			'name': 'hath-net',  # metrics name
			'regex': [
				r'<td>North and South America</td>\s*',
				r'<td [^>]*>[^ ]+ Gbit/s</td>\s*',
				r'<td [^>]*>=</td>\s*',
				r'<td [^>]*>([^ ]+) MB/s</td>\s*',
				r'<td [^>]*>([^ ]+) %</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>',
			],
			'values': [
				{'name': 'load', 'conversion': int, 'type': 'CAPTURE-1'},
				{'name': 'miss-rate', 'conversion': float, 'type': 'CAPTURE-2'},
				{'name': 'coverage', 'conversion': float, 'type': 'CAPTURE-3'},
				{'name': 'hits-per-gb', 'conversion': float, 'type': 'CAPTURE-4'},
				{'name': 'quality', 'conversion': int, 'type': 'CAPTURE-5'}
			],
			'tags': {'region': 'americas'}
		},
		{
			'dest': 'hath-net',
			'name': 'hath-net',
			'regex': [
				# BUG FIX: this row previously matched 'Europe and South
				# America' (a copy-paste of the americas row) while its tag
				# is 'europe-africa'; the region row on the page is
				# 'Europe and Africa'.
				r'<td>Europe and Africa</td>\s*',
				r'<td [^>]*>[^ ]+ Gbit/s</td>\s*',
				r'<td [^>]*>=</td>\s*',
				r'<td [^>]*>([^ ]+) MB/s</td>\s*',
				r'<td [^>]*>([^ ]+) %</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>',
			],
			'values': [
				{'name': 'load', 'conversion': int, 'type': 'CAPTURE-1'},
				{'name': 'miss-rate', 'conversion': float, 'type': 'CAPTURE-2'},
				{'name': 'coverage', 'conversion': float, 'type': 'CAPTURE-3'},
				{'name': 'hits-per-gb', 'conversion': float, 'type': 'CAPTURE-4'},
				{'name': 'quality', 'conversion': int, 'type': 'CAPTURE-5'}
			],
			'tags': {'region': 'europe-africa'}
		},
		{
			'dest': 'hath-net',
			'name': 'hath-net',
			'regex': [
				r'<td>Asia and Oceania</td>\s*',
				r'<td [^>]*>[^ ]+ Gbit/s</td>\s*',
				r'<td [^>]*>=</td>\s*',
				r'<td [^>]*>([^ ]+) MB/s</td>\s*',
				r'<td [^>]*>([^ ]+) %</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>',
			],
			'values': [
				{'name': 'load', 'conversion': int, 'type': 'CAPTURE-1'},
				{'name': 'miss-rate', 'conversion': float, 'type': 'CAPTURE-2'},
				{'name': 'coverage', 'conversion': float, 'type': 'CAPTURE-3'},
				{'name': 'hits-per-gb', 'conversion': float, 'type': 'CAPTURE-4'},
				{'name': 'quality', 'conversion': int, 'type': 'CAPTURE-5'}
			],
			'tags': {'region': 'asia-oceania'}
		},
		{
			'dest': 'hath-net',
			'name': 'hath-net',
			'regex': [
				r'<td>Global</td>\s*',
				r'<td [^>]*>[^ ]+ Gbit/s</td>\s*',
				r'<td [^>]*>=</td>\s*',
				r'<td [^>]*>([^ ]+) MB/s</td>\s*',
				r'<td [^>]*>([^ ]+) %</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td [^>]*>([^<]+)</td>',
			],
			'values': [
				{'name': 'load', 'conversion': int, 'type': 'CAPTURE-1'},
				{'name': 'miss-rate', 'conversion': float, 'type': 'CAPTURE-2'},
				{'name': 'coverage', 'conversion': float, 'type': 'CAPTURE-3'},
				{'name': 'hits-per-gb', 'conversion': float, 'type': 'CAPTURE-4'},
				{'name': 'quality', 'conversion': int, 'type': 'CAPTURE-5'}
			],
			'tags': {'region': 'global'}
		},
		{
			# per-client health row for clients currently online
			'dest': 'hath-client-net-stats',
			'name': 'hath-health',
			'regex': [
				r'<tr>\s*',
				r'<td><a [^>]*>([^<]+)</a></td>\s*',
				r'<td>([^<]+)</td>\s*',
				r'<td [^>]*>Online</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td>([^<]*)</td>\s*',
				r'<td>([^<]+)</td>\s*',
				r'<td [^>]*>[^<]+</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td [^>]*>([^<]+)</td>\s*',
				r'<td>([^<]+)</td>\s*',
				r'<td>([^ ]+) / min</td>\s*',
				r'<td>([^ ]+) / day</td>\s*',
			],
			'values': [
				{'name': 'online', 'conversion': lambda last: check_online(last, max_minutes=5), 'type': 'CAPTURE-3'},
				{'name': 'files', 'conversion': lambda x: int(x.replace(',', '')), 'type': 'CAPTURE-4'},
				{'name': 'trust', 'conversion': int, 'type': 'CAPTURE-5'},
				{'name': 'quality', 'conversion': int, 'type': 'CAPTURE-6'},
				{'name': 'hitrate', 'conversion': float, 'type': 'CAPTURE-7'},
				{'name': 'hathrate', 'conversion': float, 'type': 'CAPTURE-8'}
			],
			'tags': {
				'host': 'CAPTURE-1',
				'client-id': 'CAPTURE-2',
			}
		},
		{
			# per-client health row for clients currently offline; most
			# columns are unavailable, so 'online' is a literal 0
			'dest': 'hath-client-net-stats',
			'name': 'hath-health',
			'regex': [
				r'<tr>\s*',
				r'<td><a [^>]*>([^<]+)</a></td>\s*',
				r'<td>([^<]+)</td>\s*',
				r'<td [^>]*>Offline</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td>[^<]*</td>\s*',
				r'<td>([^<]+)</td>\s*',
				r'<td [^>]*>Not available when offline</td>\s*'
			],
			'values': [
				{'name': 'online', 'conversion': 0, 'type': 'VALUE'},
				{'name': 'files', 'conversion': lambda x: int(x.replace(',', '')), 'type': 'CAPTURE-3'}
			],
			'tags': {
				'host': 'CAPTURE-1',
				'client-id': 'CAPTURE-2',
			}
		}
	]
})
|
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE.text, distributed with this software.
Created on Aug 2, 2017
@author: jrm
"""
import markdown
from .lxml_raw import RawComponent
from web.components.md import ProxyMarkdown
class MarkdownComponent(RawComponent, ProxyMarkdown):
    """ A block for rendering Markdown source. """

    def _refresh_source(self):
        """Re-render the declaration's markdown source to HTML and hand
        the result to the underlying raw component."""
        d = self.declaration
        #: Parse md and put in a root node
        source = markdown.markdown(
            d.source,
            tab_length=d.tab_length,
            safe_mode=d.safe_mode,
            output_format=d.output_format,
            extensions=d.extensions,
            extension_configs=d.extension_configs
        )
        #: Parse source to html
        super(MarkdownComponent, self).set_source(source)

    # Each setter below re-renders from scratch; the new value is read
    # back off self.declaration inside _refresh_source, so the argument
    # is intentionally unused.
    def set_source(self, source):
        self._refresh_source()

    def set_safe_mode(self, mode):
        self._refresh_source()

    def set_output_format(self, format):
        self._refresh_source()

    def set_tab_length(self, length):
        self._refresh_source()

    def set_extensions(self, extensions):
        self._refresh_source()

    def set_extension_configs(self, config):
        self._refresh_source()
|
# -*- coding: utf-8 -*-
import environ
# Scrapy settings for scrapy_app project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Initialise environment variables
# NOTE(review): read_env with no argument reads a .env file relative to the
# process -- confirm the deployment provides one or sets the vars directly.
env = environ.Env()
environ.Env.read_env()

BOT_NAME = 'scrapy_app'

SPIDER_MODULES = ['scrapy_app.spiders']
NEWSPIDER_MODULE = 'scrapy_app.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapy_app (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapy_app.middlewares.ScrapyAppSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapy_app.middlewares.ScrapyAppDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'scrapy_app.pipelines.ScrapyAppPipeline': 200,
}

# Let these error responses reach the spider instead of being dropped.
HTTPERROR_ALLOWED_CODES = [404, 403]

# MongoDB credentials
MONGODB_HOSTNAME = env('MONGODB_HOSTNAME')
MONGODB_PORT = env('MONGODB_PORT')
MONGODB_DATABASE = env('MONGODB_DATABASE')
MONGODB_URI = 'mongodb://' + MONGODB_HOSTNAME + ':' + MONGODB_PORT

# Elastic Search secrets
# NOTE(review): the password is embedded in the URI below; avoid logging
# ELASTIC_SEARCH_URI anywhere, as it would leak the credential.
ELASTIC_SEARCH_USERNAME = env('ELASTIC_SEARCH_USERNAME')
ELASTIC_SEARCH_PASSWORD = env('ELASTIC_SEARCH_PASSWORD')
ELASTIC_SEARCH_HOSTNAME = env('ELASTIC_SEARCH_HOSTNAME')
ELASTIC_SEARCH_PORT = env('ELASTIC_SEARCH_PORT')
ELASTIC_SEARCH_URI = 'http://' + ELASTIC_SEARCH_USERNAME + ':' + ELASTIC_SEARCH_PASSWORD + '@' \
                     + ELASTIC_SEARCH_HOSTNAME + ':' + ELASTIC_SEARCH_PORT
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
import pandas as pd
import numpy as np
from datetime import datetime
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import FOAF , XSD, DC, FOAF, SKOS, RDF, RDFS
import cleansing.contact as cls_contact
from helper.functions import add_literal, concept_uri, export_data, export_df, exists_contact_cont, get_cleansed_data
import helper.namespaces as ns
def main(file, mode):
    """Convert cleansed contact rows to an RDF graph and export it.

    For every row: creates a person node; when contact details exist, a
    site with contact points (email / phone numbers); a structured
    identifier when an Id is present; and either a role/position (free
    "Functietitel") or a formal mandate ("Decretale functie") attached
    to the organisation.
    """
    contact_cleansed = get_cleansed_data(file, 'contact')

    g = Graph()
    for _, row in contact_cleansed.iterrows():
        # person node, keyed on first + family name
        abb_id, abb_uuid = concept_uri(ns.lblod + 'persoon/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']))
        g.add((abb_id, RDF.type, ns.person.Person))
        add_literal(g, abb_id, ns.mu.uuid, abb_uuid, XSD.string)
        add_literal(g, abb_id, FOAF.familyName, str(row['Familienaam Contact Cleansed']), XSD.string)
        add_literal(g, abb_id, ns.persoon.gebruikteVoornaam, str(row['Voornaam Contact Cleansed']), XSD.string)

        if exists_contact_cont(row):
            # site + primary contact point for the person
            site_id, site_uuid = concept_uri(ns.lblod + 'vesting/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']))
            g.add((site_id, RDF.type, ns.org.Site))
            add_literal(g, site_id, ns.mu.uuid, site_uuid, XSD.string)
            contact_id, contact_uuid = concept_uri(ns.lblod + 'contactpunt/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']) + '1')
            g.add((contact_id, RDF.type, ns.schema.ContactPoint))
            add_literal(g, contact_id, ns.mu.uuid, contact_uuid, XSD.string)
            # NOTE(review): 'Titel Cleansed' looks like a title column, yet
            # it is written as schema:email -- confirm the intended source
            # column for the primary email address.
            add_literal(g, contact_id, ns.schema.email, str(row['Titel Cleansed']), XSD.string)
            add_literal(g, contact_id, ns.schema.email, str(row['Mail nr2 Cleansed']), XSD.string)
            add_literal(g, contact_id, ns.schema.telephone, str(row['Telefoonnr Contact 1']), XSD.string)
            g.add((site_id, ns.schema.siteAddress, contact_id))

            # optional second landline as its own contact point
            if str(row['Telefoonnr Contact 2']) != str(np.nan):
                contact_tel2_id, contact_tel2_uuid = concept_uri(ns.lblod + 'contactpunt/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']) + str(row['Telefoonnr Contact 2']))
                g.add((contact_tel2_id, RDF.type, ns.schema.ContactPoint))
                add_literal(g, contact_tel2_id, ns.mu.uuid, contact_tel2_uuid, XSD.string)
                add_literal(g, contact_tel2_id, ns.schema.telephone, str(row['Telefoonnr Contact 2']), XSD.string)
                g.add((site_id, ns.schema.siteAddress, contact_tel2_id))

            # optional mobile number as its own contact point
            if str(row['GSMnr Contact Cleansed']) != str(np.nan):
                contact_gsm_id, contact_gsm_uuid = concept_uri(ns.lblod + 'contactpunt/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']) + str(row['GSMnr Contact Cleansed']))
                g.add((contact_gsm_id, RDF.type, ns.schema.ContactPoint))
                add_literal(g, contact_gsm_id, ns.mu.uuid, contact_gsm_uuid, XSD.string)
                add_literal(g, contact_gsm_id, ns.schema.telephone, str(row['GSMnr Contact Cleansed']), XSD.string)
                g.add((site_id, ns.schema.siteAddress, contact_gsm_id))
            g.add((abb_id, ns.org.basedAt, site_id))

        # structured identifier, only when the source row carries an Id
        if str(row['Id']) != str(np.nan):
            attr_id, _ = concept_uri(ns.lblod + 'gestructureerdeIdentificator/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']))
            g.add((attr_id, RDF.type, ns.generiek.GestructureerdeIdentificator))
            add_literal(g, attr_id, ns.generiek.lokaleIdentificator, str(row['Id']), XSD.string)
            g.add((abb_id, ns.generiek.gestructureerdeIdentificator, attr_id))

        org_id, _ = concept_uri(ns.lblod + 'organisatie/', str(row['organisation_id']))
        # free-form job title only (no decretal function): model as a
        # role + position on the organisation
        if (str(row['Decretale functie Cleansed']) == str(np.nan)) and (str(row['Functietitel Cleansed']) != str(np.nan)):
            position_id, _ = concept_uri(ns.lblod + 'hoedanigheid/', str(row['organisation_id']) + str(row['Functietitel Cleansed']))
            g.add((position_id, RDF.type, ns.organisatie.Hoedanigheid))
            # TODO: Map Functietitel properly
            role_id, _ = concept_uri(ns.lblod + 'rol/', str(row['Functietitel Cleansed']))
            g.add((role_id, RDF.type, ns.org.Role))
            add_literal(g, role_id, RDFS.label, str(row['Functietitel Cleansed']), XSD.string)
            g.add((position_id, ns.org.role, role_id))
            g.add((position_id, ns.org.postIn, org_id))
            g.add((org_id, ns.org.hasPost, position_id))
            g.add((abb_id, ns.org.holds, position_id))
            g.add((position_id, ns.org.heldBy, abb_id))
        elif str(row['Decretale functie Cleansed']) != str(np.nan):
            # decretal function: model as a mandate on a (temporary)
            # administrative body
            # NOTE(review): the body URI embeds datetime.now(), so re-runs
            # mint a new URI each time -- confirm this is intended.
            bestuur_temporary, bestuur_uuid = concept_uri(ns.lblod + 'bestuursorgaan/', str(row['organisation_id']) + str(datetime.now()))
            g.add((bestuur_temporary, RDF.type, ns.besluit.Bestuursorgaan))
            add_literal(g, bestuur_temporary, ns.mu.uuid, bestuur_uuid, XSD.string)
            g.add((bestuur_temporary, ns.generiek.isTijdspecialisatieVan, org_id))
            ## Functionaris
            person_functionaris, _ = concept_uri(ns.lblod + 'functionaris/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']) + str(row['organisation_id']) + str(row['Decretale functie Cleansed'].lower().replace(" ", "")))
            g.add((person_functionaris, RDF.type, ns.lblodlg.Functionaris))
            g.add((person_functionaris, ns.mandaat.isBestuurlijkeAliasVan, abb_id))
            # start / end dates not yet mapped
            #start
            #einde
            #status ~ cf loket lokale besturen PoC https://poc-form-builder.relance.s.redpencil.io/codelijsten
            # https://data.vlaanderen.be/id/conceptscheme/MandatarisStatusCode
            g.add((person_functionaris, ns.mandaat.status, ns.functionaris_status[row['Functionaris status']]))
            g.add((abb_id, ns.mandaat.isAangesteldAls, person_functionaris))
            # https://data.vlaanderen.be/doc/conceptscheme/BestuursfunctieCode
            ## Bestuuursfunctie
            person_bestuursfunctie, person_bestuursfunctie_uuid = concept_uri(ns.lblod + 'bestuursfunctie/', str(row['Voornaam Contact Cleansed']) + str(row['Familienaam Contact Cleansed']) + str(row['organisation_id']))
            g.add((person_bestuursfunctie, RDF.type, ns.lblodlg.Bestuursfunctie))
            add_literal(g, person_bestuursfunctie, ns.mu.uuid, person_bestuursfunctie_uuid, XSD.string)
            g.add((person_bestuursfunctie, ns.org.role, ns.bestursfunctie_code[row['Decretale functie Cleansed']]))
            g.add((person_bestuursfunctie, ns.org.heldBy, person_functionaris))
            g.add((person_functionaris, ns.org.holds, person_bestuursfunctie))
            g.add((bestuur_temporary, ns.org.hasPost, person_bestuursfunctie))
            g.add((person_bestuursfunctie, ns.org.postIn, bestuur_temporary))

    export_data(g, f'contact-{mode}')
# Python MSI Generator
# (C) 2003 Martin v. Loewis
# See "FOO" in comments refers to MSDN sections with the title FOO.
import msilib, schema, sequence, os, glob, time, re, shutil, zipfile
from msilib import Feature, CAB, Directory, Dialog, Binary, add_data
import uisample
from win32com.client import constants
from distutils.spawn import find_executable
from uuids import product_codes
import tempfile
# Settings can be overridden in config.py below
# 0 for official python.org releases
# 1 for intermediate releases by anybody, with
# a new product code for every package.
snapshot = 1
# 1 means that file extension is px, not py,
# and binaries start with x
testpackage = 0
# Location of build tree
srcdir = os.path.abspath("../..")
# Text to be displayed as the version in dialogs etc.
# goes into file name and ProductCode. Defaults to
# current_version.day for Snapshot, current_version otherwise
full_current_version = None
# Is Tcl available at all?
have_tcl = True
# path to PCbuild directory
PCBUILD="PCbuild"
# msvcrt version
MSVCR = "90"
# Name of certificate in default store to sign MSI with
certname = None
# Make a zip file containing the PDB files for this build?
pdbzip = True
try:
    # Optional local overrides for any of the defaults above; a missing
    # config.py is not an error.
    from config import *
except ImportError:
    pass
# Extract current version from Include/patchlevel.h.
# Use a context manager so the file handle is closed deterministically
# instead of being leaked until garbage collection.
with open(srcdir + "/Include/patchlevel.h") as f:
    lines = f.readlines()
major = minor = micro = level = serial = None
# Map the PY_RELEASE_LEVEL_* macro names to their numeric values
# (see Include/patchlevel.h).
levels = {
    'PY_RELEASE_LEVEL_ALPHA':0xA,
    'PY_RELEASE_LEVEL_BETA': 0xB,
    'PY_RELEASE_LEVEL_GAMMA':0xC,
    'PY_RELEASE_LEVEL_FINAL':0xF
}
for l in lines:
    # Only "#define NAME VALUE" lines are interesting.
    if not l.startswith("#define"):
        continue
    l = l.split()
    if len(l) != 3:
        continue
    _, name, value = l
    # major/minor/micro/serial stay strings; level becomes an int.
    if name == 'PY_MAJOR_VERSION': major = value
    if name == 'PY_MINOR_VERSION': minor = value
    if name == 'PY_MICRO_VERSION': micro = value
    if name == 'PY_RELEASE_LEVEL': level = levels[value]
    if name == 'PY_RELEASE_SERIAL': serial = value
short_version = major+"."+minor
# See PC/make_versioninfo.c
FIELD3 = 1000*int(micro) + 10*level + int(serial)
current_version = "%s.%d" % (short_version, FIELD3)
# This should never change. The UpgradeCode of this package can be
# used in the Upgrade table of future packages to make the future
# package replace this one. See "UpgradeCode Property".
# upgrade_code gets set to upgrade_code_64 when we have determined
# that the target is Win64.
upgrade_code_snapshot='{92A24481-3ECB-40FC-8836-04B7966EC0D5}'
upgrade_code='{65E6DE48-A358-434D-AA4F-4AF72DB4718F}'
upgrade_code_64='{6A965A0C-6EE6-4E3A-9983-3263F56311EC}'
if snapshot:
    # Snapshot builds are stamped with the current day number so every
    # build gets a distinct version, and a fresh product code each time.
    current_version = "%s.%s.%s" % (major, minor, int(time.time()/3600/24))
    product_code = msilib.gen_uuid()
else:
    # Official releases use the fixed, pre-registered product code.
    product_code = product_codes[current_version]
if full_current_version is None:
    full_current_version = current_version
# Extension modules (.pyd files) shipped and installed alongside the
# interpreter core.
extensions = [
    'bz2.pyd',
    'pyexpat.pyd',
    'select.pyd',
    'unicodedata.pyd',
    'winsound.pyd',
    '_elementtree.pyd',
    '_socket.pyd',
    '_ssl.pyd',
    '_testcapi.pyd',
    '_tkinter.pyd',
    '_msi.pyd',
    '_ctypes.pyd',
    '_ctypes_test.pyd',
    '_sqlite3.pyd',
    '_hashlib.pyd',
    '_multiprocessing.pyd'
]
# Well-known component UUIDs
# These are needed for SharedDLLs reference counter; if
# a different UUID was used for each incarnation of, say,
# python24.dll, an upgrade would set the reference counter
# from 1 to 2 (due to what I consider a bug in MSI)
# Using the same UUID is fine since these files are versioned,
# so Installer will always keep the newest version.
# NOTE: All uuids are self generated.
# The dict literal is indexed immediately with the current
# major+minor string, so only the UUID for the version being
# built is kept.
pythondll_uuid = {
    "24":"{9B81E618-2301-4035-AC77-75D9ABEB7301}",
    "25":"{2e41b118-38bd-4c1b-a840-6977efd1b911}",
    "26":"{34ebecac-f046-4e1c-b0e3-9bac3cdaacfa}",
    "27":"{4fe21c76-1760-437b-a2f2-99909130a175}",
    "30":"{6953bc3b-6768-4291-8410-7914ce6e2ca8}",
    "31":"{4afcba0b-13e4-47c3-bebe-477428b46913}",
    "32":"{3ff95315-1096-4d31-bd86-601d5438ad5e}",
    } [major+minor]
# Compute the name that Sphinx gives to the docfile.
# Final releases have no suffix; pre-releases encode micro version,
# release level and serial into the file name.
docfile = micro if int(micro) else ""
if level < 0xf:
    # Release candidates render as "rcN"; alpha/beta builds use the
    # level's hex digit followed by the serial number.
    suffix = "rc%s" % (serial,) if level == 0xC else '%x%s' % (level, serial)
    docfile += suffix
docfile = 'python%s%s%s.chm' % (major, minor, docfile)
def build_mingw_lib(lib_file, def_file, dll_file, mingw_lib):
    """Build the mingw import library, libpythonXY.a.

    Generates *def_file* from the exports listed by 'nm' on *lib_file*,
    then runs 'dlltool' to produce *mingw_lib* for *dll_file*.
    Requires 'nm' and 'dlltool' executables on PATH.

    Returns True on success; prints a warning and returns False on any
    failure (missing tools, nm error, dlltool error).
    """
    warning = "WARNING: %s - libpythonXX.a not built"
    nm = find_executable('nm')
    dlltool = find_executable('dlltool')
    if not nm or not dlltool:
        print(warning % "nm and/or dlltool were not found")
        return False
    nm_command = '%s -Cs %s' % (nm, lib_file)
    dlltool_command = "%s --dllname %s --def %s --output-lib %s" % \
        (dlltool, dll_file, def_file, mingw_lib)
    # Lines like "_imp__<symbol> in pythonXY.dll" mark exported symbols.
    export_match = re.compile(r"^_imp__(.*) in python\d+\.dll").match
    # 'with' guarantees the .def file is closed even if reading the
    # nm pipe raises (the original leaked the handle on error).
    with open(def_file, 'w') as f:
        f.write("LIBRARY %s\n" % dll_file)
        f.write("EXPORTS\n")
        nm_pipe = os.popen(nm_command)
        for line in nm_pipe.readlines():
            m = export_match(line)
            if m:
                f.write(m.group(1)+"\n")
        # popen().close() returns None on success, the exit status
        # otherwise.  (Renamed from 'exit', which shadows the builtin.)
        status = nm_pipe.close()
    if status:
        print(warning % "nm did not run successfully")
        return False
    if os.system(dlltool_command) != 0:
        print(warning % "dlltool did not run successfully")
        return False
    return True
# Target files (.def and .a) go in PCBuild directory
lib_file = os.path.join(srcdir, PCBUILD, "python%s%s.lib" % (major, minor))
def_file = os.path.join(srcdir, PCBUILD, "python%s%s.def" % (major, minor))
dll_file = "python%s%s.dll" % (major, minor)
mingw_lib = os.path.join(srcdir, PCBUILD, "libpython%s%s.a" % (major, minor))
have_mingw = build_mingw_lib(lib_file, def_file, dll_file, mingw_lib)
# Determine the target architecture
dll_path = os.path.join(srcdir, PCBUILD, dll_file)
msilib.set_arch_from_file(dll_path)
# The helper DLL must match the architecture of the python DLL we package.
if msilib.pe_type(dll_path) != msilib.pe_type("msisupport.dll"):
    raise SystemError("msisupport.dll for incorrect architecture")
if msilib.Win64:
    upgrade_code = upgrade_code_64
    # Bump the last digit of the code by one, so that 32-bit and 64-bit
    # releases get separate product codes
    digit = hex((int(product_code[-2],16)+1)%16)[-1]
    product_code = product_code[:-2] + digit + '}'
if testpackage:
    # Test packages use the 'px' extension and an 'x' binary prefix.
    ext = 'px'
    testprefix = 'x'
else:
    ext = 'py'
    testprefix = ''
if msilib.Win64:
    SystemFolderName = "[System64Folder]"
    # 4 = registry key path attribute; 256 marks the component 64-bit
    # (see the MSI Component table attribute flags).
    registry_component = 4|256
else:
    SystemFolderName = "[SystemFolder]"
    registry_component = 4
msilib.reset()
# condition in which to install pythonxy.dll in system32:
# a) it is Windows 9x or
# b) it is NT, the user is privileged, and has chosen per-machine installation
sys32cond = "(Windows9x or (Privileged and ALLUSERS))"
def build_database():
    """Create an empty installer database holding only the schema and
    the summary information stream; return (db, msiname)."""
    uc = upgrade_code_snapshot if snapshot else upgrade_code
    productsuffix = " (64-bit)" if msilib.Win64 else ""
    # 'schema' models the installer 2.0 database schema; 'sequence'
    # supplies the standard sequences (ui/execute, admin/advt/install).
    msiname = "python-%s%s.msi" % (full_current_version, msilib.arch_ext)
    db = msilib.init_database(msiname,
        schema, ProductName="Python "+full_current_version+productsuffix,
        ProductCode=product_code,
        ProductVersion=current_version,
        Manufacturer=u"Python Software Foundation",
        request_uac = True)
    # By default RemoveExistingProducts is sequenced so late that files
    # just installed get removed again.  Moving it right after
    # InstallInitialize uninstalls the old product first while still
    # allowing rollback if the installation is interrupted.
    msilib.change_sequence(sequence.InstallExecuteSequence,
                           "RemoveExistingProducts", 1510)
    msilib.add_tables(db, sequence)
    # ALLUSERS must not live in the Property table: it could not be
    # reset for a per-user install.  The UI instead maintains WhichUsers
    # ("ALL" or "JUSTME") and sets ALLUSERS when execution starts.
    add_data(db, "Property", [("UpgradeCode", uc),
                              ("WhichUsers", "ALL"),
                              ("ProductLine", "Python%s%s" % (major, minor)),
                              ])
    db.Commit()
    return db, msiname
def remove_old_versions(db):
    """Populate the Upgrade table so earlier installations are removed."""
    start = "%s.%s.0" % (major, minor)
    # Forward the feature-selection state of the older installation into
    # this one; upgrading requires that old and new installations are
    # either both per-machine or both per-user.
    migrate_features = 1
    # See "Upgrade Table".  A snapshot removes all earlier snapshots of
    # the same major.minor; a release removes all snapshots plus all
    # earlier releases.
    if snapshot:
        entries = [(upgrade_code_snapshot, start,
                    current_version,
                    None,               # ignore language
                    migrate_features,
                    None,               # migrate ALL features
                    "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT"
    else:
        entries = [(upgrade_code, start, current_version,
                    None, migrate_features, None, "REMOVEOLDVERSION"),
                   (upgrade_code_snapshot, start,
                    "%s.%d.0" % (major, int(minor)+1),
                    None, migrate_features, None, "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT;REMOVEOLDVERSION"
    add_data(db, "Upgrade", entries)
    props += ";TARGETDIR;DLLDIR"
    # Installer collects the product codes of the earlier releases in
    # these properties; declaring them secure allows their modification.
    # See "SecureCustomProperties Property".
    add_data(db, "Property", [("SecureCustomProperties", props)])
class PyDialog(Dialog):
    """Dialog with a fixed layout: controls at the top, a horizontal
    ruler, and a bottom row of Back/Next/Cancel buttons; optionally a
    bitmap along the left edge."""

    def __init__(self, *args, **kw):
        """PyDialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler = self.h - 36
        bmwidth = 152*ruler/328
        if kw.get("bitmap", True):
            self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
        self.line("BottomLine", 0, ruler, self.w, 0)

    def title(self, title):
        "Place the dialog's heading text at the top."
        # flags 0x30003 = Visible|Enabled|Transparent|NoPrefix;
        # rendered in VerdanaBold10.
        self.text("Title", 135, 10, 220, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)

    def back(self, title, next, name = "Back", active = 1):
        """Add the Back button (tab-next control *next*); an inactive
        button is Visible but not Enabled.  Returns the button so events
        can be attached."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 180, self.h-27, 56, 17, flags, title, next)

    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add the Cancel button (tab-next control *next*); an inactive
        button is Visible but not Enabled.  Returns the button so events
        can be attached."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)

    def next(self, title, next, name = "Next", active = 1):
        """Add the Next button (tab-next control *next*); an inactive
        button is Visible but not Enabled.  Returns the button so events
        can be attached."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)

    def xbutton(self, name, title, next, xpos):
        """Add a button at fractional x position *xpos*, vertically
        aligned with the other bottom-row buttons.  Returns the button
        so events can be attached."""
        return self.pushbutton(name, int(self.w*xpos - 28), self.h-27,
                               56, 17, 3, title, next)
def add_ui(db):
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
add_data(db, 'ActionText', uisample.ActionText)
add_data(db, 'UIText', uisample.UIText)
# Bitmaps
if not os.path.exists(srcdir+r"\PC\python_icon.exe"):
raise RuntimeError("Run icons.mak in PC directory")
add_data(db, "Binary",
[("PythonWin", msilib.Binary(r"%s\PCbuild\installer.bmp" % srcdir)), # 152x328 pixels
("py.ico",msilib.Binary(srcdir+r"\PC\py.ico")),
])
add_data(db, "Icon",
[("python_icon.exe", msilib.Binary(srcdir+r"\PC\python_icon.exe"))])
# Scripts
# CheckDir sets TargetExists if TARGETDIR exists.
# UpdateEditIDLE sets the REGISTRY.tcl component into
# the installed/uninstalled state according to both the
# Extensions and TclTk features.
if os.system("nmake /nologo /c /f msisupport.mak") != 0:
raise RuntimeError("'nmake /f msisupport.mak' failed")
add_data(db, "Binary", [("Script", msilib.Binary("msisupport.dll"))])
# See "Custom Action Type 1"
if msilib.Win64:
CheckDir = "CheckDir"
UpdateEditIDLE = "UpdateEditIDLE"
else:
CheckDir = "_CheckDir@4"
UpdateEditIDLE = "_UpdateEditIDLE@4"
add_data(db, "CustomAction",
[("CheckDir", 1, "Script", CheckDir)])
if have_tcl:
add_data(db, "CustomAction",
[("UpdateEditIDLE", 1, "Script", UpdateEditIDLE)])
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair")])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
compileargs = r'-Wi "[TARGETDIR]Lib\compileall.py" -f -x "bad_coding|badsyntax|site-packages|py2_|lib2to3\\tests" "[TARGETDIR]Lib"'
lib2to3args = r'-c "import lib2to3.pygram, lib2to3.patcomp;lib2to3.patcomp.PatternCompiler()"'
# See "CustomAction Table"
add_data(db, "CustomAction", [
# msidbCustomActionTypeFirstSequence + msidbCustomActionTypeTextData + msidbCustomActionTypeProperty
# See "Custom Action Type 51",
# "Custom Action Execution Scheduling Options"
("InitialTargetDir", 307, "TARGETDIR",
"[WindowsVolume]Python%s%s" % (major, minor)),
("SetDLLDirToTarget", 307, "DLLDIR", "[TARGETDIR]"),
("SetDLLDirToSystem32", 307, "DLLDIR", SystemFolderName),
# msidbCustomActionTypeExe + msidbCustomActionTypeSourceFile
# See "Custom Action Type 18"
("CompilePyc", 18, "python.exe", compileargs),
("CompilePyo", 18, "python.exe", "-O "+compileargs),
("CompileGrammar", 18, "python.exe", lib2to3args),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
("InitialTargetDir", 'TARGETDIR=""', 750),
# In the user interface, assume all-users installation if privileged.
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, "AdminUISequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
])
# Execute Sequences
add_data(db, "InstallExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("UpdateEditIDLE", None, 1050),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
add_data(db, "AdminExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
# See "ControlEvent Table". Parameters are the event, the parameter
# to the action, and optionally the condition for the event, and the order
# of events.
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Acknowledgements", 135, 95, 220, 120, 0x30003,
"Special Windows thanks to:\n"
" Mark Hammond, without whose years of freely \n"
" shared Windows expertise, Python for Windows \n"
" would still be Python for DOS.")
c = exit_dialog.text("warning", 135, 200, 220, 40, 0x30003,
"{\\VerdanaRed9}Warning: Python 2.5.x is the last "
"Python release for Windows 9x.")
c.condition("Hide", "NOT Version9X")
exit_dialog.text("Description", 135, 235, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
costing.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 135, 70, 220, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 135, 110, 220, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 135, 135, 220, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
c = seldlg.text("Existing", 135, 25, 235, 30, 0x30003,
"{\VerdanaRed9}This update will replace your existing [ProductLine] installation.")
c.condition("Hide", 'REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""')
seldlg.text("Description", 135, 50, 220, 40, 0x30003,
"Please select a directory for the [ProductName] files.")
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("DoAction", "CheckDir", "TargetExistsOk<>1", order=1)
# If the target exists, but we found that we are going to remove old versions, don't bother
# confirming that the target directory exists. Strictly speaking, we should determine that
# the target directory is indeed the target of the product that we are going to remove, but
# I don't know how to do that.
c.event("SpawnDialog", "ExistingDirectoryDlg", 'TargetExists=1 and REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""', 2)
c.event("SetTargetPath", "TARGETDIR", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 3)
c.event("SpawnWaitDialog", "WaitForCostingDlg", "CostingComplete=1", 4)
c.event("NewDialog", "SelectFeaturesDlg", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 5)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 135, 70, 172, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 135, 90, 208, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 135, 230, 206, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# SelectFeaturesDlg
features = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal|track_disk_space,
title, "Tree", "Next", "Cancel")
features.title("Customize [ProductName]")
features.text("Description", 135, 35, 220, 15, 0x30003,
"Select the way you want features to be installed.")
features.text("Text", 135,45,220,30, 3,
"Click on the icons in the tree below to change the way features will be installed.")
c=features.back("< Back", "Next")
c.event("NewDialog", "SelectDirectoryDlg")
c=features.next("Next >", "Cancel")
c.mapping("SelectionNoItems", "Enabled")
c.event("SpawnDialog", "DiskCostDlg", "OutOfDiskSpace=1", order=1)
c.event("EndDialog", "Return", "OutOfDiskSpace<>1", order=2)
c=features.cancel("Cancel", "Tree")
c.event("SpawnDialog", "CancelDlg")
# The browse property is not used, since we have only a single target path (selected already)
features.control("Tree", "SelectionTree", 135, 75, 220, 95, 7, "_BrowseProperty",
"Tree of selections", "Back", None)
#c=features.pushbutton("Reset", 42, 243, 56, 17, 3, "Reset", "DiskCost")
#c.mapping("SelectionNoItems", "Enabled")
#c.event("Reset", "0")
features.control("Box", "GroupBox", 135, 170, 225, 90, 1, None, None, None, None)
c=features.xbutton("DiskCost", "Disk &Usage", None, 0.10)
c.mapping("SelectionNoItems","Enabled")
c.event("SpawnDialog", "DiskCostDlg")
c=features.xbutton("Advanced", "Advanced", None, 0.30)
c.event("SpawnDialog", "AdvancedDlg")
c=features.text("ItemDescription", 140, 180, 210, 30, 3,
"Multiline description of the currently selected item.")
c.mapping("SelectionDescription","Text")
c=features.text("ItemSize", 140, 210, 210, 45, 3,
"The size of the currently selected item.")
c.mapping("SelectionSize", "Text")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 135, 60, 235, 80, 3,
"WhichUsers", "", "Next")
g.condition("Disable", "VersionNT=600") # Not available on Vista and Windows 2008
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 235, 20, "Install just for me (not available on Windows Vista)")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", order = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Advanced Dialog.
advanced = PyDialog(db, "AdvancedDlg", x, y, w, h, modal, title,
"CompilePyc", "Ok", "Ok")
advanced.title("Advanced Options for [ProductName]")
# A radio group with two options: allusers, justme
advanced.checkbox("CompilePyc", 135, 60, 230, 50, 3,
"COMPILEALL", "Compile .py files to byte code after installation", "Ok")
c = advanced.cancel("Ok", "CompilePyc", name="Ok") # Button just has location of cancel button.
c.event("EndDialog", "Return")
#####################################################################
# Existing Directory dialog
dlg = Dialog(db, "ExistingDirectoryDlg", 50, 30, 200, 80, modal, title,
"No", "No", "No")
dlg.text("Title", 10, 20, 180, 40, 3,
"[TARGETDIR] exists. Are you sure you want to overwrite existing files?")
c=dlg.pushbutton("Yes", 30, 60, 55, 17, 3, "Yes", "No")
c.event("[TargetExists]", "0", order=1)
c.event("[TargetExistsOk]", "1", order=2)
c.event("EndDialog", "Return", order=3)
c=dlg.pushbutton("No", 115, 60, 55, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 135, 63, 230, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 135, 108, 230, 60, 3,
"MaintenanceForm_Action", "", "Next")
g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
# See "Feature Table". The feature level is 1 for all features,
# and the feature attributes are 0 for the DefaultFeature, and
# FollowParent for all other features. The numbers are the Display
# column.
def add_features(db):
    """Populate the MSI Feature table.

    Creates the default feature (interpreter + stdlib), the shared/private
    C run-time features (enabled via the Condition table), and the optional
    Extensions, Tcl/Tk, Documentation, Tools and Testsuite features.
    Stores the created Feature objects in module-level globals for use by
    add_files()/add_registry().
    """
    # feature attributes:
    # msidbFeatureAttributesFollowParent == 2
    # msidbFeatureAttributesDisallowAdvertise == 8
    # Features that need to be installed with together with the main feature
    # (i.e. additional Python libraries) need to follow the parent feature.
    # Features that have no advertisement trigger (e.g. the test suite)
    # must not support advertisement
    global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt
    default_feature = Feature(db, "DefaultFeature", "Python",
                              "Python Interpreter and Libraries",
                              1, directory = "TARGETDIR")
    # Both CRT features start at level 0 (disabled); the Condition table
    # rows below raise exactly one of them to level 1 depending on whether
    # the CRT is already present system-wide (sys32cond).
    shared_crt = Feature(db, "SharedCRT", "MSVCRT", "C Run-Time (system-wide)", 0,
                         level=0)
    private_crt = Feature(db, "PrivateCRT", "MSVCRT", "C Run-Time (private)", 0,
                          level=0)
    add_data(db, "Condition", [("SharedCRT", 1, sys32cond),
                               ("PrivateCRT", 1, "not "+sys32cond)])
    # We don't support advertisement of extensions
    ext_feature = Feature(db, "Extensions", "Register Extensions",
                          "Make this Python installation the default Python installation", 3,
                          parent = default_feature, attributes=2|8)
    if have_tcl:
        tcltk = Feature(db, "TclTk", "Tcl/Tk", "Tkinter, IDLE, pydoc", 5,
                        parent = default_feature, attributes=2)
    htmlfiles = Feature(db, "Documentation", "Documentation",
                        "Python HTMLHelp File", 7, parent = default_feature)
    tools = Feature(db, "Tools", "Utility Scripts",
                    "Python utility scripts (Tools/", 9,
                    parent = default_feature, attributes=2)
    testsuite = Feature(db, "Testsuite", "Test suite",
                        "Python test suite (Lib/test/)", 11,
                        parent = default_feature, attributes=2|8)
def extract_msvcr90():
    """Locate the MSVC 9.0 redistributable CRT files for the build arch.

    Returns a list of (filename, keyword-dict) pairs suitable for passing
    to Directory.add_file(); DLLs additionally carry version/language
    metadata read via the installer COM object.
    """
    # Pick the redist directory matching the target architecture.
    arch = "amd64" if msilib.Win64 else "x86"
    redist_dir = os.path.join(os.environ['VS90COMNTOOLS'],
                              r"..\..\VC\redist\%s\Microsoft.VC90.CRT" % arch)
    installer = msilib.MakeInstaller()
    # omit msvcm90 and msvcp90, as they aren't really needed
    entries = []
    for name in ("Microsoft.VC90.CRT.manifest", "msvcr90.dll"):
        path = os.path.join(redist_dir, name)
        kw = {'src': path}
        if name.endswith('.dll'):
            kw['version'] = installer.FileVersion(path, 0)
            kw['language'] = installer.FileVersion(path, 1)
        entries.append((name, kw))
    return entries
def generate_license():
    """Assemble LICENSE.txt for the installer.

    Concatenates the Python license from the source tree, the CRT license
    notice, and the license of every bundled third-party component found
    next to the checkout.
    """
    import shutil, glob
    out = open("LICENSE.txt", "w")
    shutil.copyfileobj(open(os.path.join(srcdir, "LICENSE")), out)
    shutil.copyfileobj(open("crtlicense.txt"), out)
    for name, pat, file in (("bzip2","bzip2-*", "LICENSE"),
                            ("openssl", "openssl-*", "LICENSE"),
                            ("Tcl", "tcl8*", "license.terms"),
                            ("Tk", "tk8*", "license.terms"),
                            ("Tix", "tix-*", "license.terms")):
        out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name)
        # Third-party sources are checked out as siblings of the Python tree.
        dirs = glob.glob(srcdir+"/../"+pat)
        if not dirs:
            raise ValueError, "Could not find "+srcdir+"/../"+pat
        if len(dirs) > 2:
            raise ValueError, "Multiple copies of "+pat
        dir = dirs[0]
        shutil.copyfileobj(open(os.path.join(dir, file)), out)
    out.close()
class PyDirectory(Directory):
    """By default, all components in the Python installer
    can run from source."""

    def __init__(self, *args, **kw):
        # msidbComponentAttributesOptional (2): component may run from source.
        if "componentflags" not in kw:
            kw['componentflags'] = 2 #msidbComponentAttributesOptional
        Directory.__init__(self, *args, **kw)

    def check_unpackaged(self):
        """Warn about files present on disk but not added to the installer."""
        # Bytecode caches and VCS metadata are expected leftovers; ignore them.
        self.unpackaged_files.discard('__pycache__')
        self.unpackaged_files.discard('.svn')
        if self.unpackaged_files:
            print "Warning: Unpackaged files in %s" % self.absolute
            print self.unpackaged_files
# See "File Table", "Component Table", "Directory Table",
# "FeatureComponents Table"
def add_files(db):
    """Populate the File/Component/Directory tables.

    Packages the executables, the Python DLL, the private CRT, the whole
    standard library tree (feature-tagged per directory), extension
    modules, Tcl/Tk, headers, import libraries, tools and documentation
    into one CAB stream attached to the database.
    """
    cab = CAB("python")
    tmpfiles = []
    # Add all executables, icons, text files into the TARGETDIR component
    root = PyDirectory(db, cab, None, srcdir, "TARGETDIR", "SourceDir")
    default_feature.set_current()
    if not msilib.Win64:
        root.add_file("%s/w9xpopen.exe" % PCBUILD)
    root.add_file("README.txt", src="README")
    root.add_file("NEWS.txt", src="Misc/NEWS")
    generate_license()
    root.add_file("LICENSE.txt", src=os.path.abspath("LICENSE.txt"))
    root.start_component("python.exe", keyfile="python.exe")
    root.add_file("%s/python.exe" % PCBUILD)
    root.start_component("pythonw.exe", keyfile="pythonw.exe")
    root.add_file("%s/pythonw.exe" % PCBUILD)

    # msidbComponentAttributesSharedDllRefCount = 8, see "Component Table"
    dlldir = PyDirectory(db, cab, root, srcdir, "DLLDIR", ".")
    pydll = "python%s%s.dll" % (major, minor)
    pydllsrc = os.path.join(srcdir, PCBUILD, pydll)
    dlldir.start_component("DLLDIR", flags = 8, keyfile = pydll, uuid = pythondll_uuid)
    installer = msilib.MakeInstaller()
    pyversion = installer.FileVersion(pydllsrc, 0)
    if not snapshot:
        # For releases, the Python DLL has the same version as the
        # installer package.
        assert pyversion.split(".")[:3] == current_version.split(".")
    dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor),
                    version=pyversion,
                    language=installer.FileVersion(pydllsrc, 1))
    DLLs = PyDirectory(db, cab, root, srcdir + "/" + PCBUILD, "DLLs", "DLLS|DLLs")

    # msvcr90.dll: Need to place the DLL and the manifest into the root directory,
    # plus another copy of the manifest in the DLLs directory, with the manifest
    # pointing to the root directory
    root.start_component("msvcr90", feature=private_crt)
    # Results are ID,keyword pairs
    manifest, crtdll = extract_msvcr90()
    root.add_file(manifest[0], **manifest[1])
    root.add_file(crtdll[0], **crtdll[1])
    # Copy the manifest
    # Actually, don't do that anymore - no DLL in DLLs should have a manifest
    # dependency on msvcr90.dll anymore, so this should not be necessary
    #manifest_dlls = manifest[0]+".root"
    #open(manifest_dlls, "w").write(open(manifest[1]['src']).read().replace("msvcr","../msvcr"))
    #DLLs.start_component("msvcr90_dlls", feature=private_crt)
    #DLLs.add_file(manifest[0], src=os.path.abspath(manifest_dlls))

    # Now start the main component for the DLLs directory;
    # no regular files have been added to the directory yet.
    DLLs.start_component()

    # Check if _ctypes.pyd exists
    have_ctypes = os.path.exists(srcdir+"/%s/_ctypes.pyd" % PCBUILD)
    if not have_ctypes:
        print("WARNING: _ctypes.pyd not found, ctypes will not be included")
        extensions.remove("_ctypes.pyd")

    # Add all .py files in Lib, except tkinter, test
    dirs = []
    pydirs = [(root,"Lib")]
    # Breadth-less stack walk over the Lib tree; each directory is tagged
    # with the feature that owns it before its files are added.
    while pydirs:
        # Commit every now and then, or else installer will complain
        db.Commit()
        parent, dir = pydirs.pop()
        if dir == ".svn" or dir == '__pycache__' or dir.startswith("plat-"):
            continue
        elif dir in ["tkinter", "idlelib", "Icons"]:
            if not have_tcl:
                continue
            tcltk.set_current()
        elif dir in ['test', 'tests', 'data', 'output']:
            # test: Lib, Lib/email, Lib/ctypes, Lib/sqlite3
            # tests: Lib/distutils
            # data: Lib/email/test
            # output: Lib/test
            testsuite.set_current()
        elif not have_ctypes and dir == "ctypes":
            continue
        else:
            default_feature.set_current()
        lib = PyDirectory(db, cab, parent, dir, dir, "%s|%s" % (parent.make_short(dir), dir))
        # Add additional files
        dirs.append(lib)
        lib.glob("*.txt")
        if dir=='site-packages':
            lib.add_file("README.txt", src="README")
            continue
        files = lib.glob("*.py")
        files += lib.glob("*.pyw")
        if files:
            # Add an entry to the RemoveFile table to remove bytecode files.
            lib.remove_pyc()
        # package READMEs if present
        lib.glob("README")
        if dir=='Lib':
            lib.add_file('wsgiref.egg-info')
        # Per-directory extra data files needed by the test suite and tools.
        if dir=='test' and parent.physical=='Lib':
            lib.add_file("185test.db")
            lib.add_file("audiotest.au")
            lib.add_file("sgml_input.html")
            lib.add_file("testtar.tar")
            lib.add_file("test_difflib_expect.html")
            lib.add_file("check_soundcard.vbs")
            lib.add_file("empty.vbs")
            lib.add_file("Sine-1000Hz-300ms.aif")
            lib.glob("*.uue")
            lib.glob("*.pem")
            lib.glob("*.pck")
            lib.glob("cfgparser.*")
            lib.add_file("zip_cp437_header.zip")
            lib.add_file("zipdir.zip")
        if dir=='capath':
            lib.glob("*.0")
        if dir=='tests' and parent.physical=='distutils':
            lib.add_file("Setup.sample")
        if dir=='decimaltestdata':
            lib.glob("*.decTest")
        if dir=='xmltestdata':
            lib.glob("*.xml")
            lib.add_file("test.xml.out")
        if dir=='output':
            lib.glob("test_*")
        if dir=='sndhdrdata':
            lib.glob("sndhdr.*")
        if dir=='idlelib':
            lib.glob("*.def")
            lib.add_file("idle.bat")
            lib.add_file("ChangeLog")
        if dir=="Icons":
            lib.glob("*.gif")
            lib.add_file("idle.icns")
        if dir=="command" and parent.physical=="distutils":
            lib.glob("wininst*.exe")
            lib.add_file("command_template")
        if dir=="lib2to3":
            lib.removefile("pickle", "*.pickle")
        if dir=="macholib":
            lib.add_file("README.ctypes")
            lib.glob("fetch_macholib*")
        if dir=='turtledemo':
            lib.add_file("turtle.cfg")
        if dir=="pydoc_data":
            lib.add_file("_pydoc.css")
        if dir=="data" and parent.physical=="test" and parent.basedir.physical=="email":
            # This should contain all non-.svn files listed in subversion
            for f in os.listdir(lib.absolute):
                if f.endswith(".txt") or f==".svn":continue
                if f.endswith(".au") or f.endswith(".gif"):
                    lib.add_file(f)
                else:
                    print("WARNING: New file %s in email/test/data" % f)
        # Queue subdirectories for the next iterations.
        for f in os.listdir(lib.absolute):
            if os.path.isdir(os.path.join(lib.absolute, f)):
                pydirs.append((lib, f))
    for d in dirs:
        d.check_unpackaged()

    # Add DLLs
    default_feature.set_current()
    lib = DLLs
    lib.add_file("py.ico", src=srcdir+"/PC/py.ico")
    lib.add_file("pyc.ico", src=srcdir+"/PC/pyc.ico")
    dlls = []
    tclfiles = []
    for f in extensions:
        if f=="_tkinter.pyd":
            continue
        if not os.path.exists(srcdir + "/" + PCBUILD + "/" + f):
            print("WARNING: Missing extension", f)
            continue
        dlls.append(f)
        lib.add_file(f)
    lib.add_file('python3.dll')
    # Add sqlite
    if msilib.msi_type=="Intel64;1033":
        sqlite_arch = "/ia64"
    elif msilib.msi_type=="x64;1033":
        sqlite_arch = "/amd64"
        tclsuffix = "64"
    else:
        sqlite_arch = ""
        tclsuffix = ""
    lib.add_file("sqlite3.dll")
    if have_tcl:
        if not os.path.exists("%s/%s/_tkinter.pyd" % (srcdir, PCBUILD)):
            print("WARNING: Missing _tkinter.pyd")
        else:
            lib.start_component("TkDLLs", tcltk)
            lib.add_file("_tkinter.pyd")
            dlls.append("_tkinter.pyd")
            tcldir = os.path.normpath(srcdir+("/../tcltk%s/bin" % tclsuffix))
            for f in glob.glob1(tcldir, "*.dll"):
                lib.add_file(f, src=os.path.join(tcldir, f))
    # check whether there are any unknown extensions
    for f in glob.glob1(srcdir+"/"+PCBUILD, "*.pyd"):
        if f.endswith("_d.pyd"): continue # debug version
        if f in dlls: continue
        print("WARNING: Unknown extension", f)

    # Add headers
    default_feature.set_current()
    lib = PyDirectory(db, cab, root, "include", "include", "INCLUDE|include")
    lib.glob("*.h")
    lib.add_file("pyconfig.h", src="../PC/pyconfig.h")
    # Add import libraries
    lib = PyDirectory(db, cab, root, PCBUILD, "libs", "LIBS|libs")
    for f in dlls:
        lib.add_file(f.replace('pyd','lib'))
    lib.add_file('python%s%s.lib' % (major, minor))
    lib.add_file('python3.lib')
    # Add the mingw-format library
    if have_mingw:
        lib.add_file('libpython%s%s.a' % (major, minor))
    if have_tcl:
        # Add Tcl/Tk
        tcldirs = [(root, '../tcltk%s/lib' % tclsuffix, 'tcl')]
        tcltk.set_current()
        while tcldirs:
            parent, phys, dir = tcldirs.pop()
            lib = PyDirectory(db, cab, parent, phys, dir, "%s|%s" % (parent.make_short(dir), dir))
            if not os.path.exists(lib.absolute):
                continue
            for f in os.listdir(lib.absolute):
                if os.path.isdir(os.path.join(lib.absolute, f)):
                    tcldirs.append((lib, f, f))
                else:
                    lib.add_file(f)
    # Add tools
    tools.set_current()
    tooldir = PyDirectory(db, cab, root, "Tools", "Tools", "TOOLS|Tools")
    for f in ['i18n', 'pynche', 'Scripts']:
        lib = PyDirectory(db, cab, tooldir, f, f, "%s|%s" % (tooldir.make_short(f), f))
        lib.glob("*.py")
        lib.glob("*.pyw", exclude=['pydocgui.pyw'])
        lib.remove_pyc()
        lib.glob("*.txt")
        if f == "pynche":
            x = PyDirectory(db, cab, lib, "X", "X", "X|X")
            x.glob("*.txt")
        if os.path.exists(os.path.join(lib.absolute, "README")):
            lib.add_file("README.txt", src="README")
        if f == 'Scripts':
            lib.add_file("2to3.py", src="2to3")
            if have_tcl:
                lib.start_component("pydocgui.pyw", tcltk, keyfile="pydocgui.pyw")
                lib.add_file("pydocgui.pyw")
    # Add documentation
    htmlfiles.set_current()
    lib = PyDirectory(db, cab, root, "Doc", "Doc", "DOC|Doc")
    lib.start_component("documentation", keyfile=docfile)
    lib.add_file(docfile, src="build/htmlhelp/"+docfile)

    cab.commit(db)

    for f in tmpfiles:
        os.unlink(f)
# See "Registry Table", "Component Table"
def add_registry(db):
    """Populate the Registry, Component, FeatureComponents, Directory,
    RemoveFile and Shortcut tables: file extensions, verbs, icons,
    PythonCore registry keys and Start-menu shortcuts.
    """
    # File extensions, associated with the REGISTRY.def component
    # IDLE verbs depend on the tcltk feature.
    # msidbComponentAttributesRegistryKeyPath = 4
    # -1 for Root specifies "dependent on ALLUSERS property"
    tcldata = []
    if have_tcl:
        tcldata = [
            ("REGISTRY.tcl", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
             "py.IDLE")]
    add_data(db, "Component",
             # msidbComponentAttributesRegistryKeyPath = 4
             [("REGISTRY", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
               "InstallPath"),
              ("REGISTRY.doc", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
               "Documentation"),
              ("REGISTRY.def", msilib.gen_uuid(), "TARGETDIR", registry_component,
               None, None)] + tcldata)
    # See "FeatureComponents Table".
    # The association between TclTk and pythonw.exe is necessary to make ICE59
    # happy, because the installer otherwise believes that the IDLE and PyDoc
    # shortcuts might get installed without pythonw.exe being install. This
    # is not true, since installing TclTk will install the default feature, which
    # will cause pythonw.exe to be installed.
    # REGISTRY.tcl is not associated with any feature, as it will be requested
    # through a custom action
    tcldata = []
    if have_tcl:
        tcldata = [(tcltk.id, "pythonw.exe")]
    add_data(db, "FeatureComponents",
             [(default_feature.id, "REGISTRY"),
              (htmlfiles.id, "REGISTRY.doc"),
              (ext_feature.id, "REGISTRY.def")] +
             tcldata
             )
    # Extensions are not advertised. For advertised extensions,
    # we would need separate binaries that install along with the
    # extension.
    pat = r"Software\Classes\%sPython.%sFile\shell\%s\command"
    ewi = "Edit with IDLE"
    pat2 = r"Software\Classes\%sPython.%sFile\DefaultIcon"
    pat3 = r"Software\Classes\%sPython.%sFile"
    pat4 = r"Software\Classes\%sPython.%sFile\shellex\DropHandler"
    tcl_verbs = []
    if have_tcl:
        tcl_verbs=[
            ("py.IDLE", -1, pat % (testprefix, "", ewi), "",
             r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -e "%1"',
             "REGISTRY.tcl"),
            ("pyw.IDLE", -1, pat % (testprefix, "NoCon", ewi), "",
             r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -e "%1"',
             "REGISTRY.tcl"),
        ]
    add_data(db, "Registry",
             [# Extensions
              ("py.ext", -1, r"Software\Classes\."+ext, "",
               "Python.File", "REGISTRY.def"),
              ("pyw.ext", -1, r"Software\Classes\."+ext+'w', "",
               "Python.NoConFile", "REGISTRY.def"),
              ("pyc.ext", -1, r"Software\Classes\."+ext+'c', "",
               "Python.CompiledFile", "REGISTRY.def"),
              ("pyo.ext", -1, r"Software\Classes\."+ext+'o', "",
               "Python.CompiledFile", "REGISTRY.def"),
              # MIME types
              ("py.mime", -1, r"Software\Classes\."+ext, "Content Type",
               "text/plain", "REGISTRY.def"),
              ("pyw.mime", -1, r"Software\Classes\."+ext+'w', "Content Type",
               "text/plain", "REGISTRY.def"),
              #Verbs
              ("py.open", -1, pat % (testprefix, "", "open"), "",
               r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
              ("pyw.open", -1, pat % (testprefix, "NoCon", "open"), "",
               r'"[TARGETDIR]pythonw.exe" "%1" %*', "REGISTRY.def"),
              ("pyc.open", -1, pat % (testprefix, "Compiled", "open"), "",
               r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
              ] + tcl_verbs + [
              #Icons
              ("py.icon", -1, pat2 % (testprefix, ""), "",
               r'[DLLs]py.ico', "REGISTRY.def"),
              ("pyw.icon", -1, pat2 % (testprefix, "NoCon"), "",
               r'[DLLs]py.ico', "REGISTRY.def"),
              ("pyc.icon", -1, pat2 % (testprefix, "Compiled"), "",
               r'[DLLs]pyc.ico', "REGISTRY.def"),
              # Descriptions
              ("py.txt", -1, pat3 % (testprefix, ""), "",
               "Python File", "REGISTRY.def"),
              ("pyw.txt", -1, pat3 % (testprefix, "NoCon"), "",
               "Python File (no console)", "REGISTRY.def"),
              ("pyc.txt", -1, pat3 % (testprefix, "Compiled"), "",
               "Compiled Python File", "REGISTRY.def"),
              # Drop Handler
              ("py.drop", -1, pat4 % (testprefix, ""), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ("pyw.drop", -1, pat4 % (testprefix, "NoCon"), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ("pyc.drop", -1, pat4 % (testprefix, "Compiled"), "",
               "{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
              ])

    # Registry keys
    prefix = r"Software\%sPython\PythonCore\%s" % (testprefix, short_version)
    add_data(db, "Registry",
             [("InstallPath", -1, prefix+r"\InstallPath", "", "[TARGETDIR]", "REGISTRY"),
              ("InstallGroup", -1, prefix+r"\InstallPath\InstallGroup", "",
               "Python %s" % short_version, "REGISTRY"),
              ("PythonPath", -1, prefix+r"\PythonPath", "",
               r"[TARGETDIR]Lib;[TARGETDIR]DLLs", "REGISTRY"),
              ("Documentation", -1, prefix+r"\Help\Main Python Documentation", "",
               "[TARGETDIR]Doc\\"+docfile , "REGISTRY.doc"),
              ("Modules", -1, prefix+r"\Modules", "+", None, "REGISTRY"),
              ("AppPaths", -1, r"Software\Microsoft\Windows\CurrentVersion\App Paths\Python.exe",
               "", r"[TARGETDIR]Python.exe", "REGISTRY.def"),
              ("DisplayIcon", -1,
               r"Software\Microsoft\Windows\CurrentVersion\Uninstall\%s" % product_code,
               "DisplayIcon", "[TARGETDIR]python.exe", "REGISTRY")
              ])
    # Shortcuts, see "Shortcut Table"
    add_data(db, "Directory",
             [("ProgramMenuFolder", "TARGETDIR", "."),
              ("MenuDir", "ProgramMenuFolder", "PY%s%s|%sPython %s.%s" % (major,minor,testprefix,major,minor))])
    add_data(db, "RemoveFile",
             [("MenuDir", "TARGETDIR", None, "MenuDir", 2)])
    tcltkshortcuts = []
    if have_tcl:
        tcltkshortcuts = [
            ("IDLE", "MenuDir", "IDLE|IDLE (Python GUI)", "pythonw.exe",
             tcltk.id, r'"[TARGETDIR]Lib\idlelib\idle.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
            ("PyDoc", "MenuDir", "MODDOCS|Module Docs", "pythonw.exe",
             tcltk.id, r'"[TARGETDIR]Tools\scripts\pydocgui.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
            ]
    add_data(db, "Shortcut",
             tcltkshortcuts +
             [# Advertised shortcuts: targets are features, not files
              ("Python", "MenuDir", "PYTHON|Python (command line)", "python.exe",
               default_feature.id, None, None, None, "python_icon.exe", 2, None, "TARGETDIR"),
              # Advertising the Manual breaks on (some?) Win98, and the shortcut lacks an
              # icon first.
              #("Manual", "MenuDir", "MANUAL|Python Manuals", "documentation",
              # htmlfiles.id, None, None, None, None, None, None, None),
              ## Non-advertised shortcuts: must be associated with a registry component
              ("Manual", "MenuDir", "MANUAL|Python Manuals", "REGISTRY.doc",
               "[#%s]" % docfile, None,
               None, None, None, None, None, None),
              ("Uninstall", "MenuDir", "UNINST|Uninstall Python", "REGISTRY",
               SystemFolderName+"msiexec",  "/x%s" % product_code,
               None, None, None, None, None, None),
              ])
    db.Commit()
def build_pdbzip():
    """Zip the debug-symbol (.pdb) files from the build directory into
    python-<version>-pdb.zip next to the installer."""
    # Symbols for helper build tools are not shipped; neither are
    # debug-build (_d) symbols.
    pdbexclude = ['kill_python.pdb', 'make_buildinfo.pdb',
                  'make_versioninfo.pdb']
    path = "python-%s%s-pdb.zip" % (full_current_version, msilib.arch_ext)
    pdbzip = zipfile.ZipFile(path, 'w')
    for f in glob.glob1(os.path.join(srcdir, PCBUILD), "*.pdb"):
        if f not in pdbexclude and not f.endswith('_d.pdb'):
            pdbzip.write(os.path.join(srcdir, PCBUILD, f), f)
    pdbzip.close()
# Build the installer database, fill all tables, then close it so the
# CRT merge modules can be merged in.
db,msiname = build_database()
try:
    add_features(db)
    add_ui(db)
    add_files(db)
    add_registry(db)
    remove_old_versions(db)
    db.Commit()
finally:
    del db

# Merge CRT into MSI file. This requires the database to be closed.
mod_dir = os.path.join(os.environ["ProgramFiles"], "Common Files", "Merge Modules")
if msilib.Win64:
    modules = ["Microsoft_VC90_CRT_x86_x64.msm", "policy_9_0_Microsoft_VC90_CRT_x86_x64.msm"]
else:
    modules = ["Microsoft_VC90_CRT_x86.msm","policy_9_0_Microsoft_VC90_CRT_x86.msm"]

for i, n in enumerate(modules):
    modules[i] = os.path.join(mod_dir, n)

def merge(msi, feature, rootdir, modules):
    """Merge the given .msm merge modules into *msi* under *feature*,
    then append their extracted CAB streams to the Media table."""
    cab_and_filecount = []
    # Step 1: Merge databases, extract cabfiles
    m = msilib.MakeMerge2()
    m.OpenLog("merge.log")
    m.OpenDatabase(msi)
    for module in modules:
        print module
        m.OpenModule(module,0)
        m.Merge(feature, rootdir)
        print "Errors:"
        for e in m.Errors:
            print e.Type, e.ModuleTable, e.DatabaseTable
            print " Modkeys:",
            for s in e.ModuleKeys: print s,
            print
            print " DBKeys:",
            for s in e.DatabaseKeys: print s,
            print
        cabname = tempfile.mktemp(suffix=".cab")
        m.ExtractCAB(cabname)
        cab_and_filecount.append((cabname, len(m.ModuleFiles)))
        m.CloseModule()
    m.CloseDatabase(True)
    m.CloseLog()

    # Step 2: Add CAB files
    i = msilib.MakeInstaller()
    db = i.OpenDatabase(msi, constants.msiOpenDatabaseModeTransact)

    # Find the last sequence number already used in the Media table.
    v = db.OpenView("SELECT LastSequence FROM Media")
    v.Execute(None)
    maxmedia = -1
    while 1:
        r = v.Fetch()
        if not r: break
        seq = r.IntegerData(1)
        if seq > maxmedia:
            maxmedia = seq
    print "Start of Media", maxmedia

    for cabname, count in cab_and_filecount:
        stream = "merged%d" % maxmedia
        msilib.add_data(db, "Media",
                        [(maxmedia+1, maxmedia+count, None, "#"+stream, None, None)])
        msilib.add_stream(db, stream, cabname)
        os.unlink(cabname)
        maxmedia += count
    # The merge module sets ALLUSERS to 1 in the property table.
    # This is undesired; delete that
    v = db.OpenView("DELETE FROM Property WHERE Property='ALLUSERS'")
    v.Execute(None)
    v.Close()
    db.Commit()

merge(msiname, "SharedCRT", "TARGETDIR", modules)

# certname (from config.py) should be (a substring of)
# the certificate subject, e.g. "Python Software Foundation"
if certname:
    os.system('signtool sign /n "%s" /t http://timestamp.verisign.com/scripts/timestamp.dll %s' % (certname, msiname))

if pdbzip:
    build_pdbzip()
|
import torch
import torch.functional as F
from torch import nn
__all__ = ["DropBlock2D", "DropBlock3D", "DropBlockScheduled"]
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, "Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.0:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask and place on input device
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
# compute block mask
block_mask, keeped = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * (block_mask.numel() / keeped).to(out)
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool2d(
input=mask[:, None, :, :],
kernel_size=(self.block_size, self.block_size),
stride=(1, 1),
padding=self.block_size // 2,
)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1]
keeped = block_mask.numel() - block_mask.sum().to(torch.float32) # prevent overflow in float16
block_mask = 1 - block_mask.squeeze(1)
return block_mask, keeped
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size ** 2)
class DropBlock3D(DropBlock2D):
    r"""Randomly zeroes 3D spatial blocks of the input tensor.

    An extension to the concept described in the paper
    `DropBlock: A regularization method for convolutional networks`_ ,
    dropping whole blocks of feature map allows to remove semantic
    information as compared to regular dropout.

    Args:
        drop_prob (float): probability of an element to be dropped.
        block_size (int): size of the block to drop

    Shape:
        - Input: `(N, C, D, H, W)`
        - Output: `(N, C, D, H, W)`

    .. _DropBlock: A regularization method for convolutional networks:
       https://arxiv.org/abs/1810.12890
    """

    def __init__(self, drop_prob, block_size):
        super(DropBlock3D, self).__init__(drop_prob, block_size)

    def forward(self, x):
        # shape: (bsize, channels, depth, height, width)
        assert x.dim() == 5, "Expected input with 5 dimensions (bsize, channels, depth, height, width)"
        if not self.training or self.drop_prob == 0.0:
            return x
        else:
            # get gamma value (per-voxel seed probability)
            gamma = self._compute_gamma(x)
            # sample seed mask, shared across channels, on the input device/dtype
            mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
            # expand seeds to cubic blocks
            block_mask = self._compute_block_mask(mask)
            # apply block mask (broadcast over channels)
            out = x * block_mask[:, None, :, :, :]
            # rescale to preserve expected activation magnitude
            out = out * block_mask.numel() / block_mask.sum()
            return out

    def _compute_block_mask(self, mask):
        """Dilate seed points into block_size^3 dropped regions."""
        # BUG FIX: max_pool3d lives in torch.nn.functional; the module-level
        # `import torch.functional as F` does not provide it, so the original
        # F.max_pool3d call raised AttributeError. Call nn.functional directly.
        block_mask = nn.functional.max_pool3d(
            input=mask[:, None, :, :, :],
            kernel_size=(self.block_size, self.block_size, self.block_size),
            stride=(1, 1, 1),
            padding=self.block_size // 2,
        )
        if self.block_size % 2 == 0:
            # Even kernels with this padding produce one extra plane/row/col; trim.
            block_mask = block_mask[:, :, :-1, :-1, :-1]
        block_mask = 1 - block_mask.squeeze(1)
        return block_mask

    def _compute_gamma(self, x):
        # Cubic block volume replaces the squared one of the 2D variant.
        return self.drop_prob / (self.block_size ** 3)
class DropBlockScheduled(nn.Module):
    """Wrapper that linearly anneals the wrapped DropBlock's ``drop_prob``.

    Starting after ``start_step`` training forward passes, ``drop_prob`` is
    increased by ``(stop_value - start_value) / nr_steps`` on each call until
    ``nr_steps`` increments have elapsed. The step counter is kept in a
    buffer so it is saved/restored with the module state.
    """

    def __init__(self, dropblock, start_value, stop_value, nr_steps, start_step=0):
        super(DropBlockScheduled, self).__init__()
        self.dropblock = dropblock
        # Persistent counter of training forward passes.
        self.register_buffer("i", torch.zeros(1, dtype=torch.int64))
        self.start_step = start_step
        self.nr_steps = nr_steps
        self.step_size = (stop_value - start_value) / nr_steps

    def forward(self, x):
        # Advance the schedule only while training; eval passes leave it fixed.
        if self.training:
            self.step()
        return self.dropblock(x)

    def step(self):
        """Advance the counter, bumping drop_prob while inside the schedule window."""
        current = self.i.item()
        if self.start_step < current < self.start_step + self.nr_steps:
            self.dropblock.drop_prob += self.step_size
        self.i += 1
|
from file_manager import *
from centralauth import *
from cohorts import *
from tags import *
from replication_lag import *
from reports import *
from batch import *
# ignore flake8 because of F403 violation
# flake8: noqa
|
from enum import Enum
class ReturnCodes(Enum):
    """Process exit codes reported by the tooling."""

    OK = 0             # success
    ERROR = 1          # generic / recoverable failure
    FATAL = 2          # unrecoverable failure
    DOCKER_ERROR = 125 # exit code used for docker-level errors
|
from .q_learner import QLearner
from components.episode_buffer import EpisodeBatch
import torch as th
class DCGLearner(QLearner):
    """ QLearner for a Deep Coordination Graph (DCG, Boehmer et al., 2020). """

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        """One double-Q training step on a batch of episodes; overrides QLearner.train."""
        # Drop the final transition: it has no successor state to bootstrap from.
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        # Steps after an episode terminated must not contribute to the loss.
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])

        # --- Bootstrapped targets (double Q-learning) ---
        target_vals = []
        self.target_mac.init_hidden(batch.batch_size)
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            # Actions are selected greedily w.r.t. the online network...
            best_actions = self.mac.forward(batch, t=t, policy_mode=False)
            # ...and evaluated by the target network.
            target_vals.append(self.target_mac.forward(batch, t=t, actions=best_actions, policy_mode=False))
        # Keep steps 1..T-1: each transition bootstraps from the NEXT state's value.
        target_vals = th.stack(target_vals[1:], dim=1).unsqueeze(dim=-1)
        targets = rewards + self.args.gamma * (1 - terminated) * target_vals

        # --- Q-values of the actions actually taken (steps 0..T-2) ---
        self.mac.init_hidden(batch.batch_size)
        chosen_qs = [self.mac.forward(batch, t=t, actions=actions[:, t], policy_mode=False, compute_grads=True)
                     for t in range(batch.max_seq_length - 1)]
        chosen_qs = th.stack(chosen_qs, dim=1).unsqueeze(dim=-1)

        # --- Masked one-step TD loss ---
        td_error = chosen_qs - targets.detach()
        mask = mask.expand_as(td_error)
        td_error = td_error * mask
        loss = (td_error ** 2).sum() / mask.sum()

        # --- Gradient step with clipping ---
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()

        # Periodically refresh the target network.
        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        # Periodic logging of training statistics.
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("td_error_abs", (td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("q_taken_mean", (chosen_qs * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
|
import torch
from torch.utils.data.dataset import Dataset
import torchvision
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import random_split
import os, os.path
import json
class ChartLoader(Dataset):
    """Dataset of (price-series tensor, up/down label tensor) pairs.

    Samples are loaded eagerly from every JSON file found in *dir*; each
    file holds a list of records with "Prices" and a boolean-ish "Up" flag.
    """

    def __init__(self, dir):
        self.dir = dir
        self.data = []   # list of (prices tensor, label tensor) tuples
        self.LoadData()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Entries are already stored as (prices, label) tuples.
        prices, label = self.data[index]
        return (prices, label)

    def LoadData(self):
        """Read every JSON file in self.dir and collect the samples."""
        for filename in os.listdir(self.dir):
            with open(os.path.join(self.dir, filename)) as f:
                records = json.load(f)
            for item in records:
                label = int(item["Up"])
                # NOTE(review): "Prediction" is read but never stored —
                # kept so malformed records still raise, as before.
                _ = item["Prediction"]
                self.data.append((torch.tensor(item['Prices']), torch.tensor(label)))
# proxy module
from __future__ import absolute_import
from apptools.naming.adapter.api import *
|
from sys import platform
import time
from datetime import datetime as dt

# Blocked sites are redirected to localhost via hosts-file entries.
redirect="127.0.0.1"
# NOTE(review): writes below target this local "hosts" copy, not host_path —
# presumably for safe testing; confirm before pointing at the real hosts file.
temp_host="hosts"
web_block_list=["Youtube.com","Facebook.com","Twitter.com"]

# Resolve the platform-specific location of the system hosts file.
if platform == "linux" or platform == "linux2":
    host_path=r"/etc/hosts"
    #print("Im Here Linux")
elif platform == "darwin":
    host_path=r"/private/etc/hosts"
    #print("Im Here Mac")
elif platform == "win32":
    host_path=r"c:\windows\system32\drivers\etc\hosts"
    #print('Im Here Windows')
while True:
if dt(dt.now().year,dt.now().month,dt.now().day,10) < dt.now() < dt(dt.now().year,dt.now().month,dt.now().day,16):
print("Working hours...")
#time.sleep(5)
file=open(temp_host,'r+')
content=file.read()
#print(content)
for websit in web_block_list:
if websit in content:
pass
else:
file.write(redirect + " " + websit + "\n")
else:
file=open(temp_host,'r+')
content=file.readlines()
file.seek(0)
for line in content:
if not any(website in line for website in web_block_list):
file.write(line)
file.truncate()
print("Njoyy Time")
time.sleep(5)
|
import paho.mqtt.client as mqtt
import logging
# Broker connection settings.
# When in production change this to a non-public broker.
host = 'broker.emqx.io'
port = 1883
keepalive = 60
topic = 'world'
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: subscribe, publish a greeting, then read lines
    from stdin and publish each one to the topic, forever.

    NOTE(review): looping forever inside on_connect blocks paho's network
    loop (loop_forever dispatches callbacks serially) — confirm incoming
    messages are still delivered while input() is waiting.
    """
    print("Connected with result code "+str(rc))
    print(f"Connected ? {client.is_connected()}")
    subscribe(topic)
    publish_message(topic, "Hello")
    while True:
        message = input('msg: ')
        client.publish(topic, message)
        # publish_message(topic, msg)
        print(f'Prompt: {message}')
def on_message(client, userdata, msg):
    """Print each received message as '<topic> <payload>'."""
    payload_text = str(msg.payload)
    print("{} {}".format(msg.topic, payload_text))
def subscribe(topic):
    """Announce and subscribe the module-level client to *topic*.

    Returns whatever paho's Client.subscribe returns (a (result, mid) tuple).
    """
    print(f'Subscribed to: {topic}')
    return client.subscribe(topic)
def publish_message( topic, message):
    """Publish *message* on *topic* via the module-level client and log it."""
    client.publish(topic, message)
    print(f'Published msg: {message}, on: {topic}')
# Wire up callbacks, connect, and hand control to paho's network loop
# (loop_forever blocks and never returns).
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(host, port, keepalive)
# client.publish(topic, 'Help')
client.loop_forever()
|
import json
from periodic_table import ELEMENTS, lookupEle
def _load_mapped_formulas(path):
    """Load one response file and translate each record's formula keys
    through lookupEle, keeping the counts as values."""
    with open(path, "r") as f:
        response = json.load(f)["response"]
    return [{lookupEle(element): number for element, number in each["formula"].items()}
            for each in response]


# Accumulate the translated formulas from both input files (the original
# duplicated this load block verbatim), echoing the running count after
# each file, then dump everything to string.json.
string = []
string += _load_mapped_formulas("string_2.json")
print(len(string))
string += _load_mapped_formulas("string_3.json")
print(len(string))
with open("string.json", "w") as f:
    json.dump({"names": string}, f)
|
import click
import os
import shutil
import tempfile
import zedenv.plugins.configuration as plugin_config
from typing import Tuple
from zedenv.lib.logger import ZELogger
class SystemdBoot(plugin_config.Plugin):
    """zedenv bootloader plugin for systemd-boot.

    esp layout: kernels under '<esp>/env/zedenv-<be>/', loader entries at
    '<esp>/loader/entries/zedenv-<be>.conf'.  All changes are staged in a
    temporary directory and only moved onto the real esp at the end of
    activation.
    """

    systems_allowed = ["linux"]
    bootloader = "systemdboot"

    allowed_properties: Tuple[dict] = (
        {
            "property": "esp",
            "description": "Set location for esp.",
            "default": "/mnt/efi"
        },
    )

    def __init__(self, zedenv_data: dict):
        super().__init__(zedenv_data)

        self.env_dir = "env"
        self.boot_mountpoint = "/boot"
        self.entry_prefix = "zedenv"

        # Loader-entry names for the currently active and target BEs.
        self.old_entry = f"{self.entry_prefix}-{self.old_boot_environment}"
        self.new_entry = f"{self.entry_prefix}-{self.boot_environment}"

        # Set defaults
        for pr in self.allowed_properties:
            self.zedenv_properties[pr["property"]] = pr["default"]

        self.check_zedenv_properties()

        ZELogger.verbose_log({
            "level": "INFO",
            "message": f"esp set to {self.zedenv_properties['esp']}\n"
        }, self.verbose)

        if not os.path.isdir(self.zedenv_properties["esp"]):
            self.plugin_property_error(self.zedenv_properties)

    def edit_bootloader_entry(self, temp_esp: str):
        """Write the loader entry for the target BE into *temp_esp*.

        Uses the old BE's entry as a template when one exists, otherwise
        falls back to a best-guess Arch-style entry.  Never modifies an
        already-existing configuration for the target BE.
        """
        temp_entries_dir = os.path.join(temp_esp, "loader/entries")
        temp_bootloader_file = os.path.join(temp_entries_dir,
                                            f"{self.new_entry}.conf")
        real_entries_dir = os.path.join(self.zedenv_properties["esp"], "loader/entries")
        real_bootloader_file = os.path.join(
            real_entries_dir, f"{self.old_entry}.conf")

        try:
            os.makedirs(temp_entries_dir)
        except PermissionError as e:
            ZELogger.log({
                "level": "EXCEPTION",
                "message": f"Require Privileges to write to {temp_entries_dir}\n{e}"
            }, exit_on_error=True)
        except OSError as os_err:
            ZELogger.log({
                "level": "EXCEPTION",
                "message": os_err
            }, exit_on_error=True)

        config_entries = os.listdir(real_entries_dir)
        # Fallback entry used when no template configuration exists.
        entry_guess_list = [
            f"title Boot Environment [{self.boot_environment}]\n",
            f"linux /env/{self.new_entry}/vmlinuz-linux\n",
            f"initrd /env/{self.new_entry}/initramfs-linux.img\n",
            f"options zfs={self.be_root}/{self.boot_environment}\n"
        ]

        # BUGFIX: previously compared against `(self.old_entry or
        # self.new_entry)`, which always evaluates to old_entry, so an
        # existing config for the new BE was never detected.
        config_matches = [en.split(".conf")[0] for en in config_entries
                          if en.split(".conf")[0] in (self.old_entry, self.new_entry)]

        old_conf = self.old_entry in config_matches
        new_conf = self.new_entry in config_matches

        if old_conf and (self.old_boot_environment == self.boot_environment):
            ZELogger.log({
                "level": "INFO",
                "message": (f"Attempting to activate same boot environment while config "
                            f"'{self.old_entry}.conf' "
                            "already exists. Will not modify old configuration.\n")
            })
        elif new_conf:
            ZELogger.log({
                "level": "INFO",
                "message": (f"Attempting to activate boot environment while config for "
                            f"'{self.new_entry}.conf' already exists. "
                            "Will not modify old configuration.\n")
            })
        else:
            if old_conf:
                ZELogger.log({
                    "level": "INFO",
                    "message": (f"Using existing entry {self.old_entry} as template "
                                f"taking best guess at creating one at "
                                f"{self.new_entry}.conf\n")
                })
                # Renamed handle: the original shadowed the `old_conf` flag here.
                with open(real_bootloader_file, "r") as old_conf_file:
                    old_conf_list = old_conf_file.readlines()
                new_entry_list = [l.replace(self.old_boot_environment, self.boot_environment)
                                  for l in old_conf_list]
            else:
                entry_guess_full = '\n'.join(entry_guess_list)
                ZELogger.log({
                    "level": "INFO",
                    "message": (f"You have no matching bootloader entries in {real_entries_dir}, "
                                f"taking best guess at creating one at {real_bootloader_file}:\n"
                                f"{entry_guess_full}.\n")
                })
                new_entry_list = entry_guess_list

            if not self.noop:
                with open(temp_bootloader_file, "w") as boot_conf:
                    boot_conf.writelines(new_entry_list)
                if not self.noconfirm:
                    if click.confirm(
                            "Would you like to edit the generated bootloader config?",
                            default=True):
                        click.edit(filename=temp_bootloader_file)

    def modify_bootloader(self, temp_esp: str, ):
        """Stage the target BE's kernel directory under *temp_esp*.

        Copies the old BE's kernels when present, otherwise creates an
        empty directory the user must populate.
        """
        real_kernel_dir = os.path.join(self.zedenv_properties["esp"], self.env_dir)
        temp_kernel_dir = os.path.join(temp_esp, self.env_dir)

        real_old_dataset_kernel = os.path.join(real_kernel_dir, self.old_entry)
        temp_new_dataset_kernel = os.path.join(temp_kernel_dir, self.new_entry)

        if not os.path.isdir(real_old_dataset_kernel):
            ZELogger.log({
                "level": "INFO",
                "message": (f"No directory for Boot environments kernels found at "
                            f"'{real_old_dataset_kernel}', creating empty directory."
                            f"Don't forget to add your kernel to "
                            f"{real_kernel_dir}/{self.boot_environment}.")
            })
            if not self.noop:
                try:
                    os.makedirs(temp_new_dataset_kernel)
                except PermissionError as e:
                    ZELogger.log({
                        "level": "EXCEPTION",
                        "message": f"Require Privileges to write to {temp_new_dataset_kernel}\n{e}"
                    }, exit_on_error=True)
                except OSError as os_err:
                    ZELogger.log({
                        "level": "EXCEPTION",
                        "message": os_err
                    }, exit_on_error=True)
        else:
            if not self.noop:
                try:
                    shutil.copytree(real_old_dataset_kernel, temp_new_dataset_kernel)
                except PermissionError as e:
                    ZELogger.log({
                        "level": "EXCEPTION",
                        "message": f"Require Privileges to write to {temp_new_dataset_kernel}\n{e}"
                    }, exit_on_error=True)
                except IOError as e:
                    ZELogger.log({
                        "level": "EXCEPTION",
                        "message": f"IOError writing to {temp_new_dataset_kernel}\n{e}"
                    }, exit_on_error=True)

    def edit_bootloader_default(self, temp_esp: str, overwrite: bool):
        """Point loader.conf's 'default' entry at the target BE.

        Copies the existing loader.conf into *temp_esp*, rewrites its
        'default' line, backs the original up to 'loader.conf.bak', and
        writes the updated copy back.
        NOTE(review): the *overwrite* flag is currently unused — confirm
        whether it was meant to gate the backup/replace step.
        """
        real_loader_dir_path = os.path.join(self.zedenv_properties["esp"], "loader")
        temp_loader_dir_path = os.path.join(temp_esp, "loader")

        real_loader_conf_path = os.path.join(real_loader_dir_path, "loader.conf")
        temp_loader_conf_path = os.path.join(temp_loader_dir_path, "loader.conf")

        ZELogger.verbose_log({
            "level": "INFO",
            "message": f"Updating {real_loader_conf_path}\n"
        }, self.verbose)

        if not os.path.isdir(temp_loader_dir_path):
            try:
                os.makedirs(temp_loader_dir_path)
            except PermissionError as e:
                ZELogger.log({
                    "level": "EXCEPTION",
                    "message": f"Require Privileges to write to {temp_loader_dir_path}\n{e}"
                }, exit_on_error=True)
            except OSError as os_err:
                ZELogger.log({
                    "level": "EXCEPTION",
                    "message": os_err
                }, exit_on_error=True)

        if not os.path.isfile(real_loader_conf_path):
            ZELogger.log({
                "level": "EXCEPTION",
                "message": f"Missing file: {real_loader_conf_path}\n"
            }, exit_on_error=True)

        try:
            shutil.copy(real_loader_conf_path, temp_loader_conf_path)
        except PermissionError as e:
            ZELogger.log({
                "level": "EXCEPTION",
                "message": f"Require Privileges to write to '{temp_loader_conf_path}'\n{e}"
            }, exit_on_error=True)
        except IOError as e:
            ZELogger.log({
                "level": "EXCEPTION",
                "message": f"IOError writing to '{temp_loader_conf_path}'\n{e}"
            }, exit_on_error=True)

        with open(temp_loader_conf_path, "r") as loader_conf:
            conf_list = loader_conf.readlines()

        line_num = next((l for l, val in enumerate(conf_list)
                         if val.split(' ', 1)[0] == "default"), None)
        # BUGFIX: `if line_num:` skipped the rewrite when the 'default'
        # line was the first line of the file (index 0 is falsy).  When no
        # 'default' line exists the file is left unchanged, as before.
        if line_num is not None:
            conf_list[line_num] = f"default {self.new_entry}\n"

        if not self.noop:
            if os.path.isfile(real_loader_conf_path):
                ZELogger.verbose_log({
                    "level": "INFO",
                    "message": (f"File {real_loader_conf_path} already exists, backed up to "
                                f"'{real_loader_conf_path}.bak' and replaced.\n")
                }, self.verbose)
                if os.path.isfile(f"{real_loader_conf_path}.bak"):
                    try:
                        os.remove(f"{real_loader_conf_path}.bak")
                    except PermissionError:
                        ZELogger.log({
                            "level": "EXCEPTION",
                            "message": (f"Require Privileges to remove "
                                        f"'{real_loader_conf_path}.bak'\n")
                        }, exit_on_error=True)
                try:
                    shutil.move(real_loader_conf_path, f"{real_loader_conf_path}.bak")
                except PermissionError:
                    ZELogger.log({
                        "level": "EXCEPTION",
                        "message": (f"Require Privileges to write to "
                                    f"'{real_loader_conf_path}.bak'\n")
                    }, exit_on_error=True)
            with open(real_loader_conf_path, "w") as loader_conf:
                loader_conf.writelines(conf_list)
            if not self.noconfirm:
                if click.confirm(
                        "Would you like to edit the generated 'loader.conf'?", default=True):
                    click.edit(filename=real_loader_conf_path)

    def post_activate(self):
        """Stage all esp changes in a temp directory, then move them live."""
        ZELogger.verbose_log({
            "level": "INFO",
            "message": (f"Creating Temporary working directory. "
                        "No changes will be made until the end of "
                        "the systemd-boot configuration.\n")
        }, self.verbose)
        with tempfile.TemporaryDirectory(prefix="zedenv", suffix=self.bootloader) as t_esp:
            ZELogger.verbose_log({
                "level": "INFO",
                "message": f"Created {t_esp}.\n"
            }, self.verbose)
            self.modify_bootloader(t_esp)
            self.edit_bootloader_entry(t_esp)
            self.recurse_move(t_esp, self.zedenv_properties["esp"])
            self.edit_bootloader_default(t_esp, overwrite=True)
            # TODO: self.cleanup_entries()

    def mid_activate(self, be_mountpoint: str):
        """Rewrite the BE's fstab so /boot points at the new kernel dir."""
        ZELogger.verbose_log({
            "level": "INFO",
            "message": f"Running {self.bootloader} mid activate.\n"
        }, self.verbose)
        replace_pattern = r'(^{esp}/{env}/?)(.*)(\s.*{boot}\s.*$)'.format(
            esp=self.zedenv_properties["esp"], env=self.env_dir, boot=self.boot_mountpoint)
        self.modify_fstab(be_mountpoint, replace_pattern, self.new_entry)

    def post_destroy(self, target):
        """Remove *target*'s kernel directory and loader entry from the esp."""
        real_kernel_dir = os.path.join(self.zedenv_properties["esp"], self.env_dir)
        dataset_kernels = os.path.join(real_kernel_dir, f"{self.entry_prefix}-{target}")
        # if not self.noop:
        if os.path.exists(dataset_kernels):
            shutil.rmtree(dataset_kernels)
            ZELogger.verbose_log({
                "level": "INFO",
                "message": f"Removed {dataset_kernels}.\n"
            }, self.verbose)
        real_entries_dir = os.path.join(self.zedenv_properties["esp"], "loader/entries")
        real_bootloader_file = os.path.join(real_entries_dir, f"zedenv-{target}.conf")
        if os.path.isfile(real_bootloader_file):
            try:
                os.remove(real_bootloader_file)
            except PermissionError:
                ZELogger.log({
                    "level": "EXCEPTION",
                    "message": (f"Require Privileges to remove "
                                f"'{real_bootloader_file}'\n")
                }, exit_on_error=True)
            ZELogger.verbose_log({
                "level": "INFO",
                "message": f"Removed {real_bootloader_file}.\n"
            }, self.verbose)
|
import sys
import termcolor
def match(target, patterns):
    """
    Locate each pattern in *target*, in order, with every search starting
    where the previous match ended.

    :param patterns: the patterns to search for within the target string
    :type patterns: list
    :param target: the string to search within
    :type target: str
    :return: a list of ranges, one per pattern, or None if any pattern
        could not be found in sequence
    """
    spans = []
    search_from = 0
    for pattern in patterns:
        found_at = target.find(pattern, search_from)
        if found_at < 0:
            return None
        search_from = found_at + len(pattern)
        spans.append(range(found_at, search_from))
    return spans
def main(target, patterns):
    """
    This method returns the result of the complete process. It can be
    useful if used from the python console or in a library.

    :param target: The string to search inside
    :type target: str
    :param patterns: The substring to search for
    :type patterns: list
    :return: The string with the matched substrings highlighted in bold red
    """
    spans = match(target, patterns)
    if spans is None:
        print('No complete matches found', file=sys.stderr)
        return

    def highlight(text):
        """
        wrapper around termcolor.colored, with program default colors and attributes

        :param text: the string to color
        :type text: str
        :rtype: str
        """
        return termcolor.colored(text, color='red', attrs=['bold'])

    pieces = []
    cursor = 0
    for span in spans:
        # Emit the unmatched gap (if any) before each highlighted match.
        if span.start not in (0, cursor):
            pieces.append(target[cursor:span.start])
        pieces.append(highlight(target[span.start:span.stop]))
        cursor = span.stop
    if cursor != len(target):
        pieces.append(target[cursor:])
    return ''.join(pieces)
if __name__ == '__main__':
    # Each CLI argument is one pattern; a single argument is split into
    # its individual characters.
    queries = sys.argv[1:]
    if len(queries) == 1:
        queries = [char for char in queries[0]]
    # Strip off '_' placeholder characters, one per loop iteration.
    while True:
        try:
            queries.remove('_')
        except ValueError as _:
            break
    # The target text is read from stdin.
    result = main(str(sys.stdin.read()), queries)
    print(result)
|
import requests
from datetime import datetime
def weatherAPI(apikey):
    """Fetch the CWB 36-hour forecast for Taoyuan City.

    :param apikey: path to a file whose contents are the CWB authorization key
    :return: the decoded JSON response dict, or None when the request fails
        (the error is printed, not raised)
    """
    with open(apikey) as f:
        userkey = f.read()
    datacode = 'F-C0032-001'  # general forecast: 36-hour weather for today/tomorrow
    mylocation = '桃園市'
    cwbpage = 'https://opendata.cwb.gov.tw/api/v1/rest/datastore/%s?Authorization=%s&locationName=%s' % (datacode, userkey, mylocation)
    try:
        response = requests.get(cwbpage).json()
    except Exception as e:
        print('error: ', e)
    else:
        return response
def timeDelta(dtstr):
    """Translate a 'YYYY-mm-dd HH:MM:SS' forecast boundary into a short
    Chinese label relative to the module-level *getTimenow*.

    :param dtstr: timestamp string from the CWB response
    :return: e.g. '今天早上 6 點' or '明天凌晨 0 時'
    """
    global getTimenow
    transformtime = datetime.strptime(dtstr, '%Y-%m-%d %H:%M:%S')
    daysdelta = transformtime.day - getTimenow.day
    if daysdelta == 0:
        respstr = '今天'
    elif daysdelta == 1:
        respstr = '明天'
    else:
        # BUGFIX: the 36-hour window can spill past tomorrow, and the
        # naive `.day` subtraction goes negative across month boundaries;
        # the original left `respstr` unbound (UnboundLocalError) here.
        # Fall back to an explicit month/day label.
        respstr = '%d/%d ' % (transformtime.month, transformtime.day)
    # CWB boundaries fall on 00:00, 06:00 or 18:00; other hours add no suffix.
    if transformtime.hour == 0:
        respstr += '凌晨 0 時'
    elif transformtime.hour == 6:
        respstr += '早上 6 點'
    elif transformtime.hour == 18:
        respstr += '晚上 6 點'
    return respstr
# Map CWB element codes to human-readable (Chinese) factor names.
factortranslation = {
    'Wx': '天氣現象',
    'PoP': '降雨機率',
    'MinT': '最低溫度',
    'CI': '舒適度',
    'MaxT': '最高溫度'
}
getTimenow = datetime.now()
getWeather = weatherAPI('D:/__開發中/weatherCWBuserkey')
makeOutput = {}
# NOTE(review): weatherAPI returns None on request failure, which would
# raise TypeError on the subscript below — confirm intended.
if getWeather['success'] == 'true':
    # Group forecast factors by their time window ("<start> 到 <end>").
    for i in getWeather['records']['location'][0]['weatherElement']:
        getFactor = factortranslation[i['elementName']]
        for j in i['time']:
            if getFactor in ('天氣現象', '舒適度'):
                # Unit-less factors: print only the value.
                makeKey = '%s 到 %s' % (timeDelta(j['startTime']), timeDelta(j['endTime']))
                if makeKey not in makeOutput:
                    makeOutput[makeKey] = []
                makeOutput[makeKey].append('%s: %s' % (getFactor, j['parameter']['parameterName']))
            else:
                # Numeric factors: include the unit.
                makeKey = '%s 到 %s' % (timeDelta(j['startTime']), timeDelta(j['endTime']))
                if makeKey not in makeOutput:
                    makeOutput[makeKey] = []
                makeOutput[makeKey].append('%s: %s %s' % (getFactor, j['parameter']['parameterName'], j['parameter']['parameterUnit']))
for i, j in makeOutput.items():
    print(i, ':')
    for k in j:
        print(' '*4, k)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib
from auxFunctions import *
from function_for_si_figures import *
def getContacts_rand(nt,na,tstart,tend):
    """Concatenate the 'contOpp' time series (samples tstart..tend) from
    all 20 random simulations for noise type *nt* and amplitude *na*.

    Values are divided by 5.12 — presumably a system-size normalisation
    for the 16x16 lattice; TODO confirm.
    """
    contacts=[]
    for sim in range(20):
        fileN = "data/random/Cont_16x16_"+nt+"_n"+str(na)+"_s"+str(sim)+".txt"
        filX=pd.read_csv(fileN).dropna()
        contacts+=list(filX['contOpp'].values[tstart:tend]/5.12)
    return contacts
def getInd(start,stop,by,typeN,binsX,scale=1.):
    """Build a 2-D histogram surface of contact counts.

    For each amplitude in range(start, stop, by): histogram the simulated
    contact values (samples 10000..100000) into *binsX*, normalise each
    column to probabilities, and substitute 1.0 for empty bins so later
    log-scaling never sees zero.

    Returns three parallel lists of lists (x, y, z): amplitude (times
    *scale*), bin edge, and probability.
    """
    xval, yval, zval = [], [], []
    for i in range(start, stop, by):
        histX = []
        results = getContacts_rand(typeN, i, 10000, 100000)
        # Per-bin counts; empty bins become 1 (see docstring).
        for j in range(len(binsX) - 1):
            tmp = np.sum((results >= binsX[j]) * (results < binsX[j + 1]))
            histX += [1.] if tmp == 0 else [tmp]
        # Overflow bin for values at/above the last edge (computed once,
        # not twice as in the original).
        overflow = np.sum(results >= binsX[-1])
        histX += [1.] if overflow == 0 else [overflow]
        tmp1, tmp2, tmp3 = [], [], []
        total = np.sum(histX)  # hoisted out of the loop below
        for j in range(len(histX)):
            tmp1 += [i * scale]
            tmp2 += [binsX[j]]
            tmpV = histX[j] * 1. / total
            # Guard kept from the original; with the empty-bin fill above
            # tmpV should never actually be 0.
            tmp3 += [tmpV] if tmpV != 0 else [1.]
        xval += [tmp1]
        yval += [tmp2]
        zval += [tmp3]
        # BUGFIX: `print i` is Python-2-only syntax; the call form works
        # under both Python 2 and 3.
        print(i)
    return xval, yval, zval
'''
binsS = np.arange(50,101,1)
xs,ys,zs= getInd(0,21,1,'shot',binsS)
filen = "dataS12_shot_"
for i in range(len(xs)):
fileo = open(filen+str(i)+".txt","w")
fileo.write("x,y,z\n")
for j in range(len(xs[i])):
fileo.write("%s,%s,%s\n" %(xs[i][j],ys[i][j],zs[i][j]))
fileo.close()
'''
# Generate the white-noise surface (amplitudes 0..200 step 10) and write
# one x,y,z CSV file per amplitude slice.
binsW = np.arange(50,101,1)
xw,yw,zw= getInd(0,210,10,'white',binsW,scale=10)
filen = "dataS12_white_"
for i in range(len(xw)):
    fileo = open(filen+str(i)+".txt","w")
    fileo.write("x,y,z\n")
    for j in range(len(xw[i])):
        fileo.write("%s,%s,%s\n" %(xw[i][j],yw[i][j],zw[i][j]))
    fileo.close()
|
from scrapy.exceptions import DropItem
from scrapy.exporters import CsvItemExporter
class ValidateItemPipeline(object):
    """Drop any scraped item that has one or more empty field values."""

    def process_item(self, item, spider):
        # Pass the item through only when every field is truthy.
        if all(item.values()):
            return item
        raise DropItem('Missing Values!')
class WriteItemPipeline(object):
    """Scrapy pipeline that streams every item into a CSV file."""

    def __init__(self):
        # Output file name; opened lazily in open_spider.
        self.filename = 'webmd.csv'

    def open_spider(self, spider):
        # Binary mode: CsvItemExporter encodes and writes bytes itself.
        self.csvfile = open(self.filename, 'wb')
        self.exporter = CsvItemExporter(self.csvfile)
        self.exporter.start_exporting()

    def close_spider(self, spider):
        self.exporter.finish_exporting()
        self.csvfile.close()

    def process_item(self, item, spider):
        # Export and pass the item on unchanged to later pipelines.
        self.exporter.export_item(item)
        return item
|
"""
solution AdventOfCode 2019 day 9 part 1.
https://adventofcode.com/2019/day/9
author: pca
"""
from general.general import read_file
from general.general import get_location_input_files
from app.int_machine import IntMachine
def main(args=None):
    """Run the day-9 Intcode program with input value 1 and print its output."""
    source = read_file(get_location_input_files(), 'input_day9.txt')[0]
    machine = IntMachine(source, [1])
    machine.run()
    print(f"Finished, output is {machine.output}")


if __name__ == "__main__":
    main()
|
import os
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
import cv2
sys.path.append("../../")
from topview.common import get_big_data_path
from topview.datasets.common import get_cloud_itop, get_joints_itop, get_cloud_panoptic, get_joints_panoptic
# Dataset identifiers.
ITOP = "ITOP"
PANOPTIC = "PANOPTIC"
BOTH = "BOTH"
# Modality keys for the top-view camera...
CLOUD = "cloud"
JOINTS = "joints"
DEPTH_IMAGES = "depth_images"
JOINTS2D = "2Djoints"
JOINTS3DAND2D = "joints3Dand2D"
# ...and the corresponding side-view variants.
SIDE_CLOUD = "side_cloud"
SIDE_JOINTS = "side_joints"
SIDE_DEPTH_IMAGES = "side_depth_images"
SIDE_JOINTS2D = "side_2Djoints"
SIDE_JOINTS3DAND2D = "side_joints3Dand2D"
class DBDataset():
    """Loader for the ITOP / PANOPTIC pose datasets.

    __init__ builds matched lists of input (X) and label (Y) file paths
    for one dataset fraction; get_dataset then reads them into memory.
    """

    def __init__(self, name, fraction, x_type = CLOUD, y_type = JOINTS):
        # NOTE(review): `assert` is stripped under `python -O`; explicit
        # raises would be safer for input validation.
        assert(name in [ITOP, PANOPTIC, BOTH])
        assert(fraction in ["train", "validation", "test"])
        self.x_type = x_type
        self.y_type = y_type
        # One directory per dataset/modality, rooted at the big-data path.
        file_directory = {
            ITOP: {
                CLOUD: f"{get_big_data_path()}/ITOP/clouds_depth",
                JOINTS: f"{get_big_data_path()}/ITOP/joints",
                DEPTH_IMAGES: f"{get_big_data_path()}/ITOP/for_capsules/top/depths",
                JOINTS2D: f"{get_big_data_path()}/ITOP/for_capsules/top/joints_2d",
                JOINTS3DAND2D: f"{get_big_data_path()}/ITOP/for_capsules/top/joints_3dand2d",
                SIDE_CLOUD: f"{get_big_data_path()}/ITOP/clouds_depth_side",
                SIDE_JOINTS: f"{get_big_data_path()}/ITOP/joints_side",
                SIDE_DEPTH_IMAGES: f"{get_big_data_path()}/ITOP/for_capsules/side/depths",
                SIDE_JOINTS2D: f"{get_big_data_path()}/ITOP/for_capsules/side/joints_2d",
                SIDE_JOINTS3DAND2D: f"{get_big_data_path()}/ITOP/for_capsules/side/joints_3dand2d"
            },
            PANOPTIC: {
                # CLOUD: f"{get_big_data_path()}/PANOPTIC/clouds_top",
                # JOINTS: f"{get_big_data_path()}/PANOPTIC/joints"
            }
        }
        # List the files available for every dataset/modality pair.
        file_names = {k_outer: {k_inner: os.listdir(v_inner) for k_inner, v_inner in dict_outer.items()} \
            for k_outer, dict_outer in file_directory.items()}
        # Debug dump: first three file names of each listing.
        for k, v in file_names.items():
            print(k)
            for k2, v2 in v.items():
                print(k2, v2[:3])

        def select(cloud_file_names, joint_file_names, dataset_name):
            # Keep only frames whose 11-character prefix (user id + frame
            # id, per the int(file_prefix[:2]) parse below) exists in both
            # the X and Y listings, then restrict to this fraction's users.
            pref_cloud = list(map(lambda x: x[:11], cloud_file_names))
            pref_joint = list(map(lambda x: x[:11], joint_file_names))
            suff_cloud = cloud_file_names[0][11:]
            suff_joint = joint_file_names[0][11:]
            # from pdb import set_trace as bp
            # bp()
            common_pref = list(set(pref_cloud) & set(pref_joint))

            def get_correct(prefixes):
                # Frames kept per user and inclusive user-id ranges per
                # fraction.  NOTE(review): ITOP "validation" and "test"
                # share the same user range — confirm this is intended.
                num_frames_per_user = {
                    ITOP: 420,
                    PANOPTIC: 350
                }
                fraction_boundaries = {
                    ITOP: {
                        "train": (4, 19),
                        "validation": (0, 3),
                        "test": (0, 3)
                    },
                    PANOPTIC: {
                        "train": (0, 17),
                        "validation": (19, 20),
                        "test": (21, 23)
                    }
                }
                # Bucket frame prefixes by user id (first two characters).
                user_to_fnames = {}
                for file_prefix in prefixes:
                    user = int(file_prefix[:2])
                    if user not in user_to_fnames:
                        user_to_fnames[user] = [file_prefix]
                    else:
                        user_to_fnames[user].append(file_prefix)
                if dataset_name == ITOP:
                    # Unseeded random subsample — differs between runs.
                    user_to_fnames = {k: random.sample(v, num_frames_per_user[ITOP]) \
                        for k, v in user_to_fnames.items()}
                elif dataset_name == PANOPTIC:
                    user_to_fnames = {k: v[:330] for k, v in user_to_fnames.items()}
                users_boundaries = fraction_boundaries[dataset_name][fraction]
                ret_prefixes = []
                for i in range(users_boundaries[0], users_boundaries[1] + 1):
                    ret_prefixes += user_to_fnames[i]
                return ret_prefixes
            common_pref = get_correct(common_pref)
            # Re-attach the modality-specific suffixes.
            return [pref + suff_cloud for pref in common_pref], [pref + suff_joint for pref in common_pref]

        if name in [ITOP, BOTH]:
            file_names[ITOP][x_type], file_names[ITOP][y_type] = \
                select(file_names[ITOP][x_type], file_names[ITOP][y_type], ITOP)
        if name in [PANOPTIC, BOTH]:
            file_names[PANOPTIC][x_type], file_names[PANOPTIC][y_type] = \
                select(file_names[PANOPTIC][x_type], file_names[PANOPTIC][y_type], PANOPTIC)
        # Join directory + file name into full paths for every modality.
        file_paths = {dataset_key: \
            {dataset_type: [f"{file_directory[dataset_key][dataset_type]}/{fname}" for fname in file_names[dataset_key][dataset_type]] \
            for dataset_type in file_directory[dataset_key].keys()} for dataset_key in [ITOP, PANOPTIC]}
        if name == BOTH:
            self.X_paths = file_paths[ITOP][x_type] + file_paths[PANOPTIC][x_type]
            self.Y_paths = file_paths[ITOP][y_type] + file_paths[PANOPTIC][y_type]
        else:
            self.X_paths = file_paths[name][x_type]
            self.Y_paths = file_paths[name][y_type]

    def get_dataset(self):
        """Read every (X, Y) file pair into memory.

        Returns (X_data, Y_data, X_paths, Y_paths).
        """
        X_data = []
        Y_data = []
        for X_path, Y_path in zip(self.X_paths, self.Y_paths):
            if ITOP in X_path:
                # from pdb import set_trace as bp
                # bp()
                if self.x_type == CLOUD or self.x_type == SIDE_CLOUD:
                    X_element = get_cloud_itop(X_path)
                elif self.x_type == DEPTH_IMAGES or self.x_type == SIDE_DEPTH_IMAGES:
                    X_element = plt.imread(X_path)
                Y_element = get_joints_itop(Y_path)
            elif PANOPTIC in X_path:
                if self.x_type == CLOUD or self.x_type == SIDE_CLOUD:
                    X_element = get_cloud_panoptic(X_path)
                elif self.x_type == DEPTH_IMAGES or self.x_type == SIDE_DEPTH_IMAGES:
                    X_element = plt.imread(X_path)
                Y_element = get_joints_panoptic(Y_path)
            X_data.append(X_element)
            Y_data.append(Y_element)
        return X_data, Y_data, self.X_paths, self.Y_paths
# test the scipt
'''
import numpy as np
PANOPTIC = DBDataset("PANOPTIC", "train")
cl, jts, _, _ = PANOPTIC.get_dataset()
np.save("tmp/pan_cl", cl[0])
np.save("tmp/pan_jt", jts[0])
ITOP_set = DBDataset("ITOP", "train", "depth_images", "2Djoints")
cl, jts, _, _ = ITOP_set.get_dataset()
print(len(cl), len(jts))
'''
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack
from spack.main import SpackCommand
python = SpackCommand('python')  # wrapper that invokes the `spack python` CLI command
def test_python():
    """`spack python -c ...` must report the library's own version string."""
    out = python('-c', 'import spack; print(spack.spack_version)')
    assert out.strip() == spack.spack_version
|
from pprint import pprint
def Loop(row,col):
    """Print (and return) the code at (row, col) of the AoC 2015 day-25 grid.

    The grid is filled along anti-diagonals starting from 20151125 at
    (1, 1), each next value being prev * 252533 % 33554393.  Instead of
    stepping through every cell (the original walked ~18 million cells
    for the real input), compute the cell's 1-based position in the
    diagonal walk and use modular exponentiation — O(log n).
    """
    # Cell (row, col) is the n-th value generated along the diagonals.
    n = (row + col - 2) * (row + col - 1) // 2 + col
    prevVal = 20151125 * pow(252533, n - 1, 33554393) % 33554393
    print("Row: {} Column: {} => {}".format(row, col, prevVal))
    # Returning the value (the original returned None, which every caller
    # ignored) makes the function usable programmatically.
    return prevVal
Loop(3010,3019) |
from pathlib import Path
from typing import Optional
from logger import Logger
class ConversionJob(object):
    """Value object describing a single file-conversion task."""

    __slots__ = ["output_format", "verbose", "output_path", "file_path", "logger"]

    def __init__(
        self,
        output_format: str,
        verbose: bool,
        output_path: Path,
        file_path: Path,
        logger: Optional[Logger] = None
    ):
        self.output_format = output_format
        self.verbose = verbose
        self.output_path = output_path
        self.file_path = file_path
        # Only construct a fresh Logger when the caller supplied none.
        if logger is None:
            logger = Logger()
        self.logger = logger
from flask import Flask, redirect, url_for, render_template, request
import pymongo
import os
from dotenv import load_dotenv
load_dotenv()
client = pymongo.MongoClient(os.getenv('MONGO_CLIENT')) # MongoDB connection string (with credentials) read from the .env file, e.g. "mongodb+srv://ojas:<PASSWORD>@cluster0.kfpcm.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
db = client.get_database('UserData')
Information_Collection = db.get_collection("Information")
# Item codes stored in the DB are 1-based indexes into this list; 8 ("All Items") acts as the wildcard.
Items = [ "Oxygen Cylinder", "Hospital Bed", "Plasma", "Remedisvir", "Fabiflu", "Tocilizumbad", "Oxygen Refill","All Items"]
application = Flask(__name__)
@application.route("/", methods = ["POST","GET"]) #Landing page
def home():
    """Serve the landing page."""
    return render_template("index.html")
@application.route("/find",methods = ["POST","GET"]) # when the submit button is pressed
def find():
    """Handle a search: read item/location from the form, query MongoDB,
    and render the results page (or the 'nothing found' page).

    The original duplicated the fetch/filter/render sequence four times
    (one per item/location wildcard combination); this builds a single
    Mongo filter instead — {"Item": ..., "Location": ...} is equivalent
    to the previous explicit "$and" form.
    """
    donations = request.form["Donation"]
    location = request.form["Location"]

    # Build the query; "All Items"/"All Locations" mean no filter on that
    # field.  "All Items" is rendered with the sentinel code 8 (which is
    # also Items.index("All Items") + 1).
    query = {}
    if donations == "All Items":
        donation = 8
    else:
        donation = Items.index(donations) + 1  # 1-based item code stored in the DB
        query["Item"] = {"$eq": donation}
    if location == "All Locations":
        display_location = "All Locations"
    else:
        display_location = location
        query["Location"] = {"$eq": location}

    # Only complete records (exactly 5 fields) are shown.
    recipients = [doc for doc in Information_Collection.find(query) if len(doc) == 5]
    if not recipients:  # no match found
        return render_template("nil.html")
    return render_template("results.html", recipients = recipients, donation = donation,
                           location = display_location, items = Items)
if __name__ == "__main__":
    # Local development entry point; use a WSGI server in production.
    application.run(debug=True)
|
# coding: utf-8
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
# pylint: disable=R0201
from collections import OrderedDict
from pyccel.codegen.printing.ccode import CCodePrinter
from pyccel.ast.literals import LiteralTrue, LiteralInteger, LiteralString
from pyccel.ast.literals import Nil
from pyccel.ast.core import Assign, AliasAssign, FunctionDef, FunctionAddress
from pyccel.ast.core import If, IfSection, Return, FunctionCall, Deallocate
from pyccel.ast.core import create_incremented_string, SeparatorComment
from pyccel.ast.core import Import
from pyccel.ast.core import AugAssign
from pyccel.ast.operators import PyccelEq, PyccelNot, PyccelOr, PyccelAssociativeParenthesis, IfTernaryOperator, PyccelIsNot
from pyccel.ast.datatypes import NativeInteger, NativeBool, NativeReal, str_dtype
from pyccel.ast.cwrapper import PyArg_ParseTupleNode, PyBuildValueNode
from pyccel.ast.cwrapper import PyArgKeywords
from pyccel.ast.cwrapper import Py_None, Py_DECREF
from pyccel.ast.cwrapper import generate_datatype_error, PyErr_SetString
from pyccel.ast.cwrapper import scalar_object_check, flags_registry
from pyccel.ast.cwrapper import PyccelPyArrayObject, PyccelPyObject
from pyccel.ast.cwrapper import C_to_Python, Python_to_C
from pyccel.ast.numpy_wrapper import array_checker, array_type_check
from pyccel.ast.numpy_wrapper import pyarray_to_c_ndarray
from pyccel.ast.numpy_wrapper import numpy_get_data, numpy_get_dim
from pyccel.ast.bind_c import as_static_function_call
from pyccel.ast.variable import VariableAddress, Variable, ValuedVariable
__all__ = ["CWrapperCodePrinter", "cwrappercode"]
dtype_registry = {('pyobject' , 0) : 'PyObject',
('pyarrayobject', 0) : 'PyArrayObject'}
class CWrapperCodePrinter(CCodePrinter):
"""A printer to convert a python module to strings of c code creating
an interface between python and an implementation of the module in c"""
def __init__(self, parser, target_language, extra_includes=(), **settings):
CCodePrinter.__init__(self, parser, **settings)
self._target_language = target_language
self._cast_functions_dict = OrderedDict()
self._to_free_PyObject_list = []
self._function_wrapper_names = dict()
self._global_names = set()
self._module_name = None
self._extra_includes = extra_includes
# --------------------------------------------------------------------
# Helper functions
# --------------------------------------------------------------------
def stored_in_c_pointer(self, a):
"""
Return True if variable is pointer or stored in a pointer
Parameters
-----------
a : Variable
Variable holding information needed (is_pointer, is_optional)
Returns
-------
stored_in_c : Boolean
"""
stored_in_c = CCodePrinter.stored_in_c_pointer(self, a)
if self._target_language == 'fortran':
return stored_in_c or (isinstance(a, Variable) and a.rank>0)
else:
return stored_in_c
def get_new_name(self, used_names, requested_name):
"""
Generate a new name, return the requested_name if it's not in
used_names set or generate new one based on the requested_name.
The generated name is appended to the used_names set
Parameters
----------
used_names : set of strings
Set of variable and function names to avoid name collisions
requested_name : String
The desired name
Returns
----------
name : String
"""
if requested_name not in used_names:
used_names.add(requested_name)
return requested_name
else:
incremented_name, _ = create_incremented_string(used_names, prefix=requested_name)
return incremented_name
def function_signature(self, expr):
args = list(expr.arguments)
if any([isinstance(a, FunctionAddress) for a in args]):
# Functions with function addresses as arguments cannot be
# exposed to python so there is no need to print their signature
return ''
else:
return CCodePrinter.function_signature(self, expr)
def get_declare_type(self, expr):
"""
Get the declaration type of a variable
Parameters
-----------
variable : Variable
Variable holding information needed to choose the declaration type
Returns
-------
type_declaration : String
"""
dtype = self._print(expr.dtype)
prec = expr.precision
if self._target_language == 'c' and dtype != "pyarrayobject":
return CCodePrinter.get_declare_type(self, expr)
else :
dtype = self.find_in_dtype_registry(dtype, prec)
if self.stored_in_c_pointer(expr):
return '{0} *'.format(dtype)
else:
return '{0} '.format(dtype)
def get_new_PyObject(self, name, used_names):
"""
Create new PyccelPyObject Variable with the desired name
Parameters
-----------
name : String
The desired name
used_names : Set of strings
Set of variable and function names to avoid name collisions
Returns: Variable
-------
"""
return Variable(dtype=PyccelPyObject(),
name=self.get_new_name(used_names, name),
is_pointer=True)
def find_in_dtype_registry(self, dtype, prec):
    """
    Find the corresponding C dtype in the wrapper's dtype_registry,
    falling back to the parent printer's registry when absent.

    Parameters
    ----------
    dtype : String
        expression data type
    prec : Integer
        expression precision

    Returns
    -------
    dtype : String
    """
    key = (dtype, prec)
    if key in dtype_registry:
        return dtype_registry[key]
    # Not a wrapper-specific type: defer to the base C printer
    return CCodePrinter.find_in_dtype_registry(self, dtype, prec)
def get_default_assign(self, arg, func_arg):
    """
    Build the statement which initialises *arg* with the default value of
    the translated function argument *func_arg*.
    """
    # Order matters: arrays first, then optionals, then plain scalars
    if arg.rank > 0:
        return AliasAssign(arg, Nil())
    if func_arg.is_optional:
        return AliasAssign(arg, Py_None)
    dtype = arg.dtype
    if isinstance(dtype, (NativeReal, NativeInteger, NativeBool)):
        return Assign(arg, func_arg.value)
    if isinstance(dtype, PyccelPyObject):
        return AliasAssign(arg, Py_None)
    raise NotImplementedError('Default values are not implemented for this datatype : {}'.format(func_arg.dtype))
def _get_static_function(self, used_names, function, collect_dict):
    """
    Create arguments and functioncall for arguments rank > 0 in fortran.
    Format : a is numpy array
    func(a) ==> static_func(a.DIM , a.DATA)
    where a.DATA = buffer holding data
          a.DIM = size of array

    For a C target the function and its arguments are returned unchanged.

    Parameters
    ----------
    used_names : set of strings
        Names already taken (used to generate fresh dim-variable names)
    function : FunctionDef
        The translated function being wrapped
    collect_dict : dict
        Maps each array argument to the variable collected from Python

    Returns
    -------
    static_function, static_args, additional_body
        additional_body holds the assignments filling the dim variables.
    """
    additional_body = []
    if self._target_language == 'fortran':
        static_args = []
        for a in function.arguments:
            if isinstance(a, Variable) and a.rank>0:
                # Add shape arguments for static function
                for i in range(collect_dict[a].rank):
                    var = Variable(dtype=NativeInteger() ,name = self.get_new_name(used_names, a.name + "_dim"))
                    body = FunctionCall(numpy_get_dim, [collect_dict[a], i])
                    if a.is_optional:
                        # Optional array may be absent: use dim 0 in that case
                        body = IfTernaryOperator(PyccelIsNot(VariableAddress(collect_dict[a]),Nil()), body , LiteralInteger(0))
                    body = Assign(var, body)
                    additional_body.append(body)
                    static_args.append(var)
            static_args.append(a)
        static_function = as_static_function_call(function, self._module_name)
    else:
        static_function = function
        static_args = function.arguments
    return static_function, static_args, additional_body
def _get_check_type_statement(self, variable, collect_var):
    """
    Get the code which checks if the variable collected from python
    has the expected type.

    Parameters
    ----------
    variable : Variable
        The variable containing the PythonObject
    collect_var : Variable
        The variable in which the result will be saved,
        used to provide information about the expected type

    Returns
    -------
    check : str
        Code which determines whether 'variable' contains an object
        which can be saved in 'collect_var'
    """
    is_array = variable.rank > 0
    if is_array:
        check = array_type_check(collect_var, variable)
    else:
        check = scalar_object_check(collect_var, variable)
    if isinstance(variable, ValuedVariable):
        # A valued argument may also legitimately carry its default marker
        # (NULL pointer for arrays, Py_None for scalars)
        if is_array:
            default = PyccelNot(VariableAddress(collect_var))
        else:
            default = PyccelEq(VariableAddress(collect_var), VariableAddress(Py_None))
        check = PyccelAssociativeParenthesis(PyccelOr(default, check))
    return check
def _get_wrapper_name(self, used_names, func):
    """
    Create the wrapper function name and register it.

    Parameters
    ----------
    used_names : set of strings
        Names to avoid colliding with
    func : FunctionDef or Interface

    Returns
    -------
    wrapper_name : string
    """
    reserved = used_names.union(self._global_names)
    wrapper_name = self.get_new_name(reserved, func.name + "_wrapper")
    # Remember the wrapper name so the module printer can reference it,
    # and reserve it globally
    self._function_wrapper_names[func.name] = wrapper_name
    self._global_names.add(wrapper_name)
    return wrapper_name
# -------------------------------------------------------------------
# Functions managing the creation of wrapper body
# -------------------------------------------------------------------
def _valued_variable_management(self, variable, collect_var, tmp_variable):
    """
    Responsible for creating the body collecting the default value of a
    ValuedVariable and the check needed.
    If the ValuedVariable is optional, create body to collect the new value.

    Parameters
    ----------
    variable : Variable
        The valued (possibly optional) variable
    collect_var : Variable
        Variable which holds the value collected with PyArg_ParseTuple
    tmp_variable : Variable
        The temporary variable to hold the result (used for optionals)

    Returns
    -------
    section : IfSection
        Branch taken when the Python argument was left at its default
    collect_body : list
        Lines necessary to collect the new optional variable value
    """
    # Default detected when the collected object is still Py_None
    valued_var_check = PyccelEq(VariableAddress(collect_var), VariableAddress(Py_None))
    collect_body = []
    if variable.is_optional:
        # Optional: point the argument at the temporary holding the value,
        # or at Nil when no value was passed
        collect_body = [AliasAssign(variable, tmp_variable)]
        section = IfSection(valued_var_check, [AliasAssign(variable, Nil())])
    else:
        # Non-optional valued argument: fall back to its declared default
        section = IfSection(valued_var_check, [Assign(variable, variable.value)])
    return section, collect_body
def _body_scalar(self, variable, collect_var, error_check = False, tmp_variable = None):
    """
    Responsible for collecting a scalar value, managing errors, and
    creating the body of the argument conversion.

    Parameters
    ----------
    variable : Variable
        the variable needed to collect
    collect_var : Variable
        variable which holds the value collected with PyArg_ParseTuple
    error_check : boolean
        True if checking the data type and raising error is needed
    tmp_variable : Variable
        temporary variable to hold value, default None

    Returns
    -------
    body : If block
    """
    # When a temporary exists (optional args) the cast targets it instead
    var = tmp_variable if tmp_variable else variable
    sections = []
    collect_value = [Assign(var, FunctionCall(Python_to_C(var), [collect_var]))]
    if isinstance(variable, ValuedVariable):
        # Handle the "argument left at default" branch first
        section, optional_collect = self._valued_variable_management(variable, collect_var, tmp_variable)
        sections.append(section)
        collect_value += optional_collect
    if error_check:
        check_type = scalar_object_check(collect_var, var)
        sections.append(IfSection(check_type, collect_value))
        # Fallthrough branch: wrong type -> set Python error and bail out
        error = generate_datatype_error(var)
        sections.append(IfSection(LiteralTrue(), [error, Return([Nil()])]))
    else:
        sections.append(IfSection(LiteralTrue(), collect_value))
    return If(*sections)
def _body_array(self, variable, collect_var, check_type = False) :
    """
    Responsible for collecting the value, managing errors, and creating
    the body of arguments with rank greater than 0.

    Parameters
    ----------
    variable : Variable
        The array (possibly optional) variable
    collect_var : Variable
        the pyobject type variable holding the value
    check_type : Boolean
        True if a type check is needed

    Returns
    -------
    body : list
        A list of statements
    """
    body = []
    # check optional: a NULL collected pointer means the argument was omitted
    if variable.is_optional :
        check = PyccelNot(VariableAddress(collect_var))
        body += [IfSection(check, [Assign(VariableAddress(variable), Nil())])]
    # array_checker returns on failure (the Python error is set inside it)
    check = array_checker(collect_var, variable, check_type, self._target_language)
    body += [IfSection(check, [Return([Nil()])])]
    # Fortran receives the raw data pointer; C gets a converted ndarray struct
    if self._target_language == 'fortran':
        collect_func = FunctionCall(numpy_get_data, [collect_var])
    else:
        collect_func = FunctionCall(pyarray_to_c_ndarray, [collect_var])
    body += [IfSection(LiteralTrue(), [Assign(VariableAddress(variable),
                                              collect_func)])]
    body = [If(*body)]
    return body
def _body_management(self, used_names, variable, collect_var, check_type = False):
    """
    Dispatch body creation to the array or scalar helper.

    Parameters
    ----------
    used_names : set of strings
        Names to avoid colliding with
    variable : Variable
        The argument variable
    collect_var : Variable
        the pyobject type variable holding the value
    check_type : Boolean
        True if a type check is needed

    Returns
    -------
    body : list
        A list of statements
    tmp_variable : Variable
        temporary variable holding the value (None unless needed)
    """
    if variable.rank > 0:
        return self._body_array(variable, collect_var, check_type), None
    # Scalars: optionals need an intermediate variable for the cast result
    tmp_variable = None
    if variable.is_optional:
        tmp_variable = Variable(dtype=variable.dtype, precision=variable.precision,
                                name=self.get_new_name(used_names, variable.name + "_tmp"))
    body = [self._body_scalar(variable, collect_var, check_type, tmp_variable)]
    return body, tmp_variable
# -------------------------------------------------------------------
# Parsing arguments and building values Types functions
# -------------------------------------------------------------------
def get_PyArgParseType(self, used_names, variable):
    """
    Create the intermediate variable used to collect the result of
    PyArg_ParseTuple for one argument.

    Parameters
    ----------
    used_names : set of strings
        Names to avoid colliding with
    variable : Variable
        The variable which will be passed to the translated function

    Returns
    -------
    collect_var : Variable
        The variable which will be used to collect the argument
    """
    tmp_name = self.get_new_name(used_names, variable.name + "_tmp")
    if variable.rank > 0:
        # Arrays are received as numpy array objects
        return Variable(dtype=PyccelPyArrayObject(), is_pointer=True,
                        rank=variable.rank, order=variable.order,
                        name=tmp_name)
    # Scalars are received as generic PyObject pointers
    return Variable(dtype=PyccelPyObject(), is_pointer=True, name=tmp_name)
def get_PyBuildValue(self, used_names, variable):
    """
    Collect the variable required to build the result and the necessary
    cast function.

    Parameters
    ----------
    used_names : set of strings
        Names to avoid colliding with
    variable : Variable
        The variable returned by the translated function

    Returns
    -------
    collect_var : Variable
        The variable which will be provided to Py_BuildValue
    cast_function : FunctionCall
        call converting the C result into a Python object
    """
    cast_function = FunctionCall(C_to_Python(variable), [VariableAddress(variable)])
    collect_name = self.get_new_name(used_names, variable.name + "_tmp")
    collect_var = Variable(dtype=PyccelPyObject(), is_pointer=True,
                           name=collect_name)
    # Track the object so it can be Py_DECREF'ed after Py_BuildValue
    self._to_free_PyObject_list.append(collect_var) #TODO remove in next PR
    return collect_var, cast_function
#--------------------------------------------------------------------
# _print_ClassName functions
#--------------------------------------------------------------------
def _print_Interface(self, expr):
    """
    Print the C wrapper for an Interface (a set of functions sharing one
    Python-visible name). One "mini wrapper" is generated per concrete
    function, plus a type-check function which selects which mini wrapper
    to call from the runtime types of the arguments.
    """
    # Collecting all functions
    funcs = expr.functions
    # Save all used names
    used_names = set(n.name for n in funcs)
    # Find a name for the wrapper function
    wrapper_name = self._get_wrapper_name(used_names, expr)
    # Collect local variables
    python_func_args = self.get_new_PyObject("args" , used_names)
    python_func_kwargs = self.get_new_PyObject("kwargs", used_names)
    python_func_selfarg = self.get_new_PyObject("self" , used_names)
    # Collect wrapper arguments and results
    wrapper_args = [python_func_selfarg, python_func_args, python_func_kwargs]
    wrapper_results = [self.get_new_PyObject("result", used_names)]
    # Collect parser arguments
    wrapper_vars = {}
    # Collect argument names for PyArgParse (all funcs share a signature shape)
    arg_names = [a.name for a in funcs[0].arguments]
    keyword_list_name = self.get_new_name(used_names,'kwlist')
    keyword_list = PyArgKeywords(keyword_list_name, arg_names)
    wrapper_body = [keyword_list]
    wrapper_body_translations = []
    body_tmp = []
    # To store the mini functions responsible for collecting values, calling
    # the interface functions and returning the built value
    funcs_def = []
    default_value = {} # dict to collect all initialisation needed in the wrapper
    # NOTE(review): default_value is never filled before being used below — confirm intended
    check_var = Variable(dtype = NativeInteger(), name = self.get_new_name(used_names , "check"))
    wrapper_vars[check_var.name] = check_var
    types_dict = OrderedDict((a, set()) for a in funcs[0].arguments) #dict to collect each variable possible type and the corresponding flags
    # collect parse arg
    parse_args = [self.get_PyArgParseType(used_names,a) for a in funcs[0].arguments]
    # Managing the body of wrapper
    for func in funcs :
        mini_wrapper_func_body = []
        res_args = []
        mini_wrapper_func_vars = {a.name : a for a in func.arguments}
        # update ndarray local variables properties
        local_arg_vars = [a.clone(a.name, is_pointer=True, allocatable=False)
                          if isinstance(a, Variable) and a.rank > 0 else a for a in func.arguments]
        # update optional variable properties
        local_arg_vars = [a.clone(a.name, is_pointer=True) if a.is_optional else a for a in local_arg_vars]
        flags = 0
        collect_vars = {}
        # Loop over all args of this function, creating condition and body
        for p_arg, f_arg in zip(parse_args, local_arg_vars):
            collect_vars[f_arg] = p_arg
            body, tmp_variable = self._body_management(used_names, f_arg, p_arg)
            if tmp_variable :
                mini_wrapper_func_vars[tmp_variable.name] = tmp_variable
            # get check type function
            check = self._get_check_type_statement(f_arg, p_arg)
            # If the variable cannot be collected from PyArgParse directly
            wrapper_vars[p_arg.name] = p_arg
            # Save the body
            wrapper_body_translations.extend(body)
            # Write default values
            if isinstance(f_arg, ValuedVariable):
                # NOTE(review): parse_args[-1] is always the LAST parse arg;
                # presumably p_arg was intended — TODO confirm
                wrapper_body.append(self.get_default_assign(parse_args[-1], f_arg))
            flag_value = flags_registry[(f_arg.dtype, f_arg.precision)]
            flags = (flags << 4) + flag_value # shift by 4 to the left
            types_dict[f_arg].add((f_arg, check, flag_value)) # collect variable type for each arguments
            mini_wrapper_func_body += body
        # create the corresponding function call
        static_function, static_args, additional_body = self._get_static_function(used_names, func, collect_vars)
        mini_wrapper_func_body.extend(additional_body)
        for var in static_args:
            mini_wrapper_func_vars[var.name] = var
        if len(func.results)==0:
            func_call = FunctionCall(static_function, static_args)
        else:
            results = func.results if len(func.results)>1 else func.results[0]
            func_call = Assign(results,FunctionCall(static_function, static_args))
        mini_wrapper_func_body.append(func_call)
        # Loop over all results, creating the corresponding body and cast
        for r in func.results :
            collect_var, cast_func = self.get_PyBuildValue(used_names, r)
            mini_wrapper_func_vars[collect_var.name] = collect_var
            if cast_func is not None:
                mini_wrapper_func_vars[r.name] = r
                mini_wrapper_func_body.append(AliasAssign(collect_var, cast_func))
            res_args.append(VariableAddress(collect_var) if collect_var.is_pointer else collect_var)
        # Building Py_BuildValue and freeing the allocated variables after.
        mini_wrapper_func_body.append(AliasAssign(wrapper_results[0],PyBuildValueNode(res_args)))
        mini_wrapper_func_body += [FunctionCall(Py_DECREF, [i]) for i in self._to_free_PyObject_list]
        # Call free function for C type
        if self._target_language == 'c':
            mini_wrapper_func_body += [Deallocate(i) for i in local_arg_vars if i.rank > 0]
        mini_wrapper_func_body.append(Return(wrapper_results))
        self._to_free_PyObject_list.clear()
        # Building mini wrapper function
        mini_wrapper_func_name = self.get_new_name(used_names.union(self._global_names), func.name + '_mini_wrapper')
        self._global_names.add(mini_wrapper_func_name)
        mini_wrapper_func_def = FunctionDef(name = mini_wrapper_func_name,
                                            arguments = parse_args,
                                            results = wrapper_results,
                                            body = mini_wrapper_func_body,
                                            local_vars = mini_wrapper_func_vars.values())
        funcs_def.append(mini_wrapper_func_def)
        # append check condition to the function call
        body_tmp.append(IfSection(PyccelEq(check_var, LiteralInteger(flags)), [AliasAssign(wrapper_results[0],
                        FunctionCall(mini_wrapper_func_def, parse_args))]))
    # Errors / Types management
    # Creating check_type function
    check_func_def = self._create_wrapper_check(check_var, parse_args, types_dict, used_names, funcs[0].name)
    funcs_def.append(check_func_def)
    # Create the wrapper body with collected informations
    body_tmp = [IfSection(PyccelNot(check_var), [Return([Nil()])])] + body_tmp
    body_tmp.append(IfSection(LiteralTrue(),
                    [PyErr_SetString('PyExc_TypeError', '"Arguments combinations don\'t exist"'),
                     Return([Nil()])]))
    wrapper_body_translations = [If(*body_tmp)]
    # Parsing Arguments
    parse_node = PyArg_ParseTupleNode(python_func_args, python_func_kwargs, funcs[0].arguments, parse_args, keyword_list)
    wrapper_body += list(default_value.values())
    wrapper_body.append(If(IfSection(PyccelNot(parse_node), [Return([Nil()])])))
    # finishing the wrapper body
    wrapper_body.append(Assign(check_var, FunctionCall(check_func_def, parse_args)))
    wrapper_body.extend(wrapper_body_translations)
    wrapper_body.append(Return(wrapper_results)) # Return
    # Create FunctionDef
    funcs_def.append(FunctionDef(name = wrapper_name,
                                 arguments = wrapper_args,
                                 results = wrapper_results,
                                 body = wrapper_body,
                                 local_vars = wrapper_vars.values()))
    sep = self._print(SeparatorComment(40))
    return sep + '\n'.join(CCodePrinter._print_FunctionDef(self, f) for f in funcs_def)
def _create_wrapper_check(self, check_var, parse_args, types_dict, used_names, func_name):
    """
    Build the type_check FunctionDef for an interface wrapper.

    The generated function inspects each parsed argument, accumulates a
    per-argument type flag into check_var (4 bits per argument, most
    significant nibble = first argument), and returns 0 with a TypeError
    set when an argument matches none of the accepted types.

    NOTE(review): func_name is currently unused — confirm whether it was
    meant to appear in the error messages.
    """
    check_func_body = []
    # Highest nibble belongs to the first argument
    flags = (len(types_dict) - 1) * 4
    for arg in types_dict:
        var_name = ""
        body = []
        types = []
        arg_type_check_list = list(types_dict[arg])
        # Deterministic ordering: check smaller precisions first
        arg_type_check_list.sort(key= lambda x : x[0].precision)
        for elem in arg_type_check_list:
            var_name = elem[0].name
            value = elem[2] << flags
            body.append(IfSection(elem[1], [AugAssign(check_var, '+' ,value)]))
            types.append(elem[0])
        flags -= 4
        # Human-readable list of accepted types for the error message
        error = ' or '.join(['{} bit {}'.format(v.precision * 8 , str_dtype(v.dtype)) if not isinstance(v.dtype, NativeBool)
                             else str_dtype(v.dtype) for v in types])
        body.append(IfSection(LiteralTrue(), [PyErr_SetString('PyExc_TypeError', '"{} must be {}"'.format(var_name, error)), Return([LiteralInteger(0)])]))
        check_func_body += [If(*body)]
    check_func_body = [Assign(check_var, LiteralInteger(0))] + check_func_body
    check_func_body.append(Return([check_var]))
    # Creating check function definition
    check_func_name = self.get_new_name(used_names.union(self._global_names), 'type_check')
    self._global_names.add(check_func_name)
    check_func_def = FunctionDef(name = check_func_name,
                                 arguments = parse_args,
                                 results = [check_var],
                                 body = check_func_body,
                                 local_vars = [])
    return check_func_def
def _print_IndexedElement(self, expr):
    """Print a single-index access; only rank-1 indexing is supported here."""
    assert(len(expr.indices)==1)
    base_code = self._print(expr.base)
    index_code = self._print(expr.indices[0])
    return '{}[{}]'.format(base_code, index_code)
def _print_PyccelPyObject(self, expr):
return 'pyobject'
def _print_PyccelPyArrayObject(self, expr):
return 'pyarrayobject'
def _print_PyArg_ParseTupleNode(self, expr):
    """
    Print the call to PyArg_ParseTupleAndKeywords which unpacks the
    wrapper's (args, kwargs) into the collect variables.
    """
    name = 'PyArg_ParseTupleAndKeywords'
    pyarg = expr.pyarg
    pykwarg = expr.pykwarg
    flags = expr.flags
    # All args are modified so even pointers are passed by address
    args = ', '.join(['&{}'.format(a.name) for a in expr.args])
    if expr.args:
        code = '{name}({pyarg}, {pykwarg}, "{flags}", {kwlist}, {args})'.format(
                name=name,
                pyarg=pyarg,
                pykwarg=pykwarg,
                flags = flags,
                kwlist = expr.arg_names.name,
                args = args)
    else :
        # No arguments: empty format string, no output pointers
        code ='{name}({pyarg}, {pykwarg}, "", {kwlist})'.format(
                name=name,
                pyarg=pyarg,
                pykwarg=pykwarg,
                kwlist = expr.arg_names.name)
    return code
def _print_PyBuildValueNode(self, expr):
    """Print the Py_BuildValue call assembling the wrapper's return value."""
    #to change for args rank 1 +
    if not expr.args:
        # No results: build and return Py_None via an empty format string
        return 'Py_BuildValue("")'
    printed_args = ', '.join(self._print(a) for a in expr.args)
    return 'Py_BuildValue("{flags}", {args})'.format(flags=expr.flags, args=printed_args)
def _print_PyArgKeywords(self, expr):
    """Print the static char* keyword list (NULL-terminated) for PyArg parsing."""
    entries = ['"{}"'.format(a) for a in expr.arg_names]
    entries.append(self._print(Nil()))  # terminating NULL entry
    return ('static char *{name}[] = {{\n'
            '{arg_names}\n'
            '}};\n'.format(name=expr.name, arg_names=',\n'.join(entries)))
def _print_FunctionDef(self, expr):
    """
    Print the C wrapper for a single translated function: parse the
    Python arguments, cast them, call the (possibly static) translated
    function, then build and return the Python result.
    """
    # Save all used names
    used_names = set([a.name for a in expr.arguments] + [r.name for r in expr.results] + [expr.name])
    # update ndarray local variables properties
    local_arg_vars = [a.clone(a.name, is_pointer=True, allocatable=False)
                      if isinstance(a, Variable) and a.rank > 0 else a for a in expr.arguments]
    # update optional variable properties
    local_arg_vars = [a.clone(a.name, is_pointer=True) if a.is_optional else a for a in local_arg_vars]
    # Find a name for the wrapper function
    wrapper_name = self._get_wrapper_name(used_names, expr)
    used_names.add(wrapper_name)
    # Collect local variables
    wrapper_vars = {a.name : a for a in local_arg_vars}
    wrapper_vars.update({r.name : r for r in expr.results})
    python_func_args = self.get_new_PyObject("args" , used_names)
    python_func_kwargs = self.get_new_PyObject("kwargs", used_names)
    python_func_selfarg = self.get_new_PyObject("self" , used_names)
    # Collect arguments and results
    wrapper_args = [python_func_selfarg, python_func_args, python_func_kwargs]
    wrapper_results = [self.get_new_PyObject("result", used_names)]
    # Private functions are exposed as stubs which raise NotImplementedError
    if expr.is_private:
        wrapper_func = FunctionDef(name = wrapper_name,
                arguments = wrapper_args,
                results = wrapper_results,
                body = [PyErr_SetString('PyExc_NotImplementedError', '"Private functions are not accessible from python"'),
                        AliasAssign(wrapper_results[0], Nil()),
                        Return(wrapper_results)])
        return CCodePrinter._print_FunctionDef(self, wrapper_func)
    # Function-address arguments cannot be passed from Python: stub as well
    if any(isinstance(arg, FunctionAddress) for arg in local_arg_vars):
        wrapper_func = FunctionDef(name = wrapper_name,
                arguments = wrapper_args,
                results = wrapper_results,
                body = [PyErr_SetString('PyExc_NotImplementedError', '"Cannot pass a function as an argument"'),
                        AliasAssign(wrapper_results[0], Nil()),
                        Return(wrapper_results)])
        return CCodePrinter._print_FunctionDef(self, wrapper_func)
    # Collect argument names for PyArgParse
    arg_names = [a.name for a in local_arg_vars]
    keyword_list_name = self.get_new_name(used_names,'kwlist')
    keyword_list = PyArgKeywords(keyword_list_name, arg_names)
    wrapper_body = [keyword_list]
    wrapper_body_translations = []
    parse_args = []
    collect_vars = {}
    for arg in local_arg_vars:
        collect_var = self.get_PyArgParseType(used_names, arg)
        collect_vars[arg] = collect_var
        body, tmp_variable = self._body_management(used_names, arg, collect_var, True)
        if tmp_variable :
            wrapper_vars[tmp_variable.name] = tmp_variable
        # If the variable cannot be collected from PyArgParse directly
        wrapper_vars[collect_var.name] = collect_var
        # Save cast to argument variable
        wrapper_body_translations.extend(body)
        parse_args.append(collect_var)
        # Write default values
        if isinstance(arg, ValuedVariable):
            wrapper_body.append(self.get_default_assign(parse_args[-1], arg))
    # Parse arguments
    parse_node = PyArg_ParseTupleNode(python_func_args, python_func_kwargs, local_arg_vars, parse_args, keyword_list)
    wrapper_body.append(If(IfSection(PyccelNot(parse_node), [Return([Nil()])])))
    wrapper_body.extend(wrapper_body_translations)
    # Call function
    static_function, static_args, additional_body = self._get_static_function(used_names, expr, collect_vars)
    wrapper_body.extend(additional_body)
    for var in static_args :
        wrapper_vars[var.name] = var
    if len(expr.results)==0:
        func_call = FunctionCall(static_function, static_args)
    else:
        results = expr.results if len(expr.results)>1 else expr.results[0]
        func_call = Assign(results,FunctionCall(static_function, static_args))
    wrapper_body.append(func_call)
    # Loop over results to carry out necessary casts and collect Py_BuildValue type string
    res_args = []
    for a in expr.results :
        collect_var, cast_func = self.get_PyBuildValue(used_names, a)
        if cast_func is not None:
            wrapper_vars[collect_var.name] = collect_var
            wrapper_body.append(AliasAssign(collect_var, cast_func))
        res_args.append(VariableAddress(collect_var) if collect_var.is_pointer else collect_var)
    # Call PyBuildNode
    wrapper_body.append(AliasAssign(wrapper_results[0],PyBuildValueNode(res_args)))
    # Call free function for python type
    wrapper_body += [FunctionCall(Py_DECREF, [i]) for i in self._to_free_PyObject_list]
    # Call free function for C type
    if self._target_language == 'c':
        wrapper_body += [Deallocate(i) for i in local_arg_vars if i.rank > 0]
    self._to_free_PyObject_list.clear()
    #Return
    wrapper_body.append(Return(wrapper_results))
    # Create FunctionDef and write using classic method
    wrapper_func = FunctionDef(name = wrapper_name,
            arguments = wrapper_args,
            results = wrapper_results,
            body = wrapper_body,
            local_vars = tuple(wrapper_vars.values()))
    return CCodePrinter._print_FunctionDef(self, wrapper_func)
def _print_Module(self, expr):
    """
    Print the full CPython extension module: forward signatures, cast
    helpers, wrapper functions, the PyMethodDef table, the PyModuleDef
    struct and the PyInit entry point.
    """
    self._global_names = set(f.name for f in expr.funcs)
    self._module_name = expr.name
    sep = self._print(SeparatorComment(40))
    # Fortran targets call through bind(c) static shims
    if self._target_language == 'fortran':
        static_funcs = [as_static_function_call(f, expr.name) for f in expr.funcs]
    else:
        static_funcs = expr.funcs
    function_signatures = ''.join('{};\n'.format(self.function_signature(f)) for f in static_funcs)
    # Functions belonging to an interface are printed via the interface only
    interface_funcs = [f.name for i in expr.interfaces for f in i.functions]
    funcs = [*expr.interfaces, *(f for f in expr.funcs if f.name not in interface_funcs)]
    function_defs = '\n'.join(self._print(f) for f in funcs)
    cast_functions = '\n'.join(CCodePrinter._print_FunctionDef(self, f)
                               for f in self._cast_functions_dict.values())
    method_def_func = ',\n'.join(('{{\n'
                                  '"{name}",\n'
                                  '(PyCFunction){wrapper_name},\n'
                                  'METH_VARARGS | METH_KEYWORDS,\n'
                                  '{doc_string}\n'
                                  '}}').format(
                                        name = f.name,
                                        wrapper_name = self._function_wrapper_names[f.name],
                                        doc_string = self._print(LiteralString('\n'.join(f.doc_string.comments))) \
                                                if f.doc_string else '""')
                                  for f in funcs)
    method_def_name = self.get_new_name(self._global_names, '{}_methods'.format(expr.name))
    method_def = ('static PyMethodDef {method_def_name}[] = {{\n'
                  '{method_def_func},\n'
                  '{{ NULL, NULL, 0, NULL}}\n'
                  '}};\n'.format(method_def_name = method_def_name ,method_def_func = method_def_func))
    module_def_name = self.get_new_name(self._global_names, '{}_module'.format(expr.name))
    module_def = ('static struct PyModuleDef {module_def_name} = {{\n'
                  'PyModuleDef_HEAD_INIT,\n'
                  '/* name of module */\n'
                  '\"{mod_name}\",\n'
                  '/* module documentation, may be NULL */\n'
                  'NULL,\n' #TODO: Add documentation
                  '/* size of per-interpreter state of the module, or -1 if the module keeps state in global variables. */\n'
                  '-1,\n'
                  '{method_def_name}\n'
                  '}};\n'.format(module_def_name = module_def_name, mod_name = expr.name, method_def_name = method_def_name))
    init_func = ('PyMODINIT_FUNC PyInit_{mod_name}(void)\n{{\n'
                 'PyObject *m;\n'
                 'import_array();\n'
                 'm = PyModule_Create(&{module_def_name});\n'
                 'if (m == NULL) return NULL;\n'
                 'return m;\n}}\n'.format(mod_name=expr.name, module_def_name = module_def_name))
    # Print imports last to be sure that all additional_imports have been collected
    imports = [Import(s) for s in self._additional_imports]
    imports += [Import('numpy_version')]
    imports += [Import('numpy/arrayobject')]
    imports += [Import(i) for i in self._extra_includes]
    imports = ''.join(self._print(i) for i in imports)
    return ('#define PY_ARRAY_UNIQUE_SYMBOL CWRAPPER_ARRAY_API\n'
            '{imports}\n'
            '{function_signatures}\n'
            '{sep}\n'
            '{cast_functions}\n'
            '{sep}\n'
            '{function_defs}\n'
            '{method_def}\n'
            '{sep}\n'
            '{module_def}\n'
            '{sep}\n'
            '{init_func}'.format(
                imports = imports,
                function_signatures = function_signatures,
                sep = sep,
                cast_functions = cast_functions,
                function_defs = function_defs,
                method_def = method_def,
                module_def = module_def,
                init_func = init_func))
def cwrappercode(expr, parser, target_language, assign_to=None, **settings):
    """Converts an expr to a string of c wrapper code

    Parameters
    ----------
    expr : Expr
        A pyccel expression to be converted.
    parser : Parser
        The parser used to collect the expression
    target_language : str
        Language of the translated code being wrapped ('c' or 'fortran')
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned. Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
        line-wrapping, or for expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=15].
    user_functions : dict, optional
        A dictionary where keys are ``FunctionClass`` instances and values are
        their string representations. Alternatively, the dictionary value can
        be a list of tuples i.e. [(argument_test, cfunction_string)]. See below
        for examples.
    dereference : iterable, optional
        An iterable of symbols that should be dereferenced in the printed code
        expression. These would be values passed by address to the function.
        For example, if ``dereference=[a]``, the resulting code would print
        ``(*a)`` instead of ``a``.

    Returns
    -------
    str
        The generated C wrapper source.
    """
    return CWrapperCodePrinter(parser, target_language, **settings).doprint(expr, assign_to)
|
from mergers import *
def merge_data():
    """Merge the per-season gameweek data and export to a new file.

    Reads the merged gameweek CSV of every season (older seasons are
    latin-1 encoded, 2019-20 onwards utf-8), tags each row with its
    season, cleans player names, keeps only players present in the
    latest season, resolves opponent team names, and exports the result.
    """
    season_latin = ['2016-17', '2017-18', '2018-19', '2019-20', '2020-21', '2021-22']
    encoding_latin = ['latin-1', 'latin-1', 'latin-1', 'utf-8', 'utf-8', 'utf-8']
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0:
    # collect the frames in a list and concatenate once instead
    frames = []
    for season, encoding in zip(season_latin, encoding_latin):
        data = pd.read_csv(import_merged_gw(season=season), encoding=encoding)
        data['season'] = season
        frames.append(data)
    df = pd.concat(frames, ignore_index=True, sort=False)
    df = df[['season','name', 'position', 'team', 'assists','bonus','bps','clean_sheets','creativity','element','fixture','goals_conceded','goals_scored','ict_index','influence','kickoff_time','minutes','opponent_team','own_goals','penalties_missed','penalties_saved','red_cards','round','saves','selected','team_a_score','team_h_score','threat','total_points','transfers_balance','transfers_in','transfers_out','value','was_home','yellow_cards','GW']]
    df = clean_players_name_string(df, col='name')
    df = filter_players_exist_latest(df, col='position')
    # get_opponent_team_name merges, producing the *_x suffixed columns
    df = get_opponent_team_name(df)
    df = df[['season_x', 'name', 'position', 'team_x', 'assists', 'bonus', 'bps',
             'clean_sheets', 'creativity', 'element', 'fixture', 'goals_conceded',
             'goals_scored', 'ict_index', 'influence', 'kickoff_time', 'minutes',
             'opponent_team', 'opp_team_name', 'own_goals', 'penalties_missed', 'penalties_saved',
             'red_cards', 'round', 'saves', 'selected', 'team_a_score',
             'team_h_score', 'threat', 'total_points', 'transfers_balance',
             'transfers_in', 'transfers_out', 'value', 'was_home', 'yellow_cards',
             'GW']]
    export_cleaned_data(df)
def main():
    """Script entry point: build and export the merged dataset."""
    merge_data()


if __name__ == "__main__":
    main()
# coding=utf-8
import os
import re
import string
from string import Template
def fun_replace(str):
    """Remove non-word, non-whitespace characters and trim surrounding whitespace."""
    # NOTE: parameter name shadows the builtin `str`; kept for interface compatibility
    cleaned = re.sub(r'[^\w\s]', '', str)
    return cleaned.strip()
def not_empty(s):
    """Filter predicate: falsy for None/empty/whitespace-only strings,
    otherwise returns the stripped string (truthy)."""
    return s.strip() if s else s
# def fun_get_attri(line):
# line_list = line.split('//')
# comment = ''
# if len(line_list) > 1:
# comment = '//' + line_list[1]
# line_list = line_list[0].split(':')
# line_list = list(map(fun_replace, line_list))
# attri_name = line_list[0]
# attri_up_name = attri_name[0].upper() + attri_name[1:]
# attri_type = 'QString'
# if len(line_list) > 1:
# if line_list[1] == 'int':
# attri_type = 'int'
# elif line_list[1] == 'long' or line_list[1] == 'int64':
# attri_type = 'qint64'
# return attri_name, attri_up_name, attri_type, comment
def fun_get_attri(line):
    """Parse one 'name | type | comment' line into class-attribute parts.

    Returns a tuple (attri_name, attri_up_name, attri_type, comment):
    attri_up_name is the name with its first letter capitalised;
    attri_type maps 'int' -> 'int', 'long'/'int64' -> 'qint64',
    anything else (or a missing type field) -> 'QString';
    comment is '//<text>' when a third field is present, else ''.
    """
    line_list = line.split('|')
    line_list = list(map(fun_replace, line_list))
    line_list = list(filter(not_empty, line_list))
    comment = ''
    if len(line_list) > 2:
        comment = '//' + line_list[2]
    # debug print of line_list removed: it polluted the generator's stdout
    attri_name = line_list[0]
    attri_up_name = attri_name[0].upper() + attri_name[1:]
    attri_type = 'QString'
    if len(line_list) > 1:
        if line_list[1] == 'int':
            attri_type = 'int'
        elif line_list[1] in ('long', 'int64'):
            attri_type = 'qint64'
    return attri_name, attri_up_name, attri_type, comment
if __name__ == '__main__':
    # Generate Qt model .h/.cpp files from className.xml using the templates
    # in ./template; output goes to ./generator.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    tmpl_dir = os.path.join(root_dir, 'template')
    output_dir = os.path.join(root_dir, 'generator')
    # First line of the xml file is the class name, second line is blank,
    # remaining lines are 'name | type | comment' attribute definitions
    input_file = open(os.path.join(root_dir, 'className.xml'),
                      'r', encoding='utf-8', errors='ignore')
    template_cpp_file = open(os.path.join(tmpl_dir, 'model_cpp.tmpl'), 'r')
    cpp_tmpl = Template(template_cpp_file.read())
    template_h_file = open(os.path.join(tmpl_dir, 'model_head.tmpl'), 'r')
    h_tmpl = Template(template_h_file.read())
    # Per-attribute code snippets substituted into the class templates
    attri_tmpl = Template(
        'JY_PROPERTY_READWRITE($type_name, $var_up_name) \t\t')
    model_replace_tmpl = Template('map.insert("$var_up_name","$var_name");\n')
    constructor_tmpl = Template(
        'this->_$var_up_name\t\t\t= obj._$var_up_name;\n')
    equal_tmpl = Template(' (obj._$var_up_name == this->_$var_up_name) &&')
    zero_tmpl = Template('this->_$var_up_name = 0;\n')
    class_name = input_file.readline().strip()
    h_class_file = open(os.path.join(output_dir, r'%s.h' %
                                     class_name), 'w', encoding='utf-8')
    cpp_class_file = open(os.path.join(
        output_dir, r'%s.cpp' % class_name), 'w')
    input_file.readline()  # skip the blank line after the class name
    model_attri = ''
    model_replace = ''
    model_constructor = ''
    model_zero = ''
    # NOTE(review): `table` and `line_count` are never used afterwards — confirm
    table = str.maketrans({key: None for key in string.punctuation})
    line_count = 0
    while 1:
        line = input_file.readline()
        line = line.strip()
        if not line:
            # stop at the first empty line / end of file
            break
        line_count += 1
        attri_name, attri_up_name, attri_type, comment = fun_get_attri(line)
        attri_item = attri_tmpl.safe_substitute(
            type_name=attri_type, var_up_name=attri_up_name)
        model_attri += ' ' + attri_item + comment + '\n'
        replace_item = model_replace_tmpl.safe_substitute(
            var_name=attri_name, var_up_name=attri_up_name)
        model_replace += ' ' + replace_item
        constructor_item = constructor_tmpl.safe_substitute(
            var_up_name=attri_up_name)
        model_constructor += ' ' + constructor_item
        if attri_type == 'int' or attri_type == 'qint64':
            # numeric members are zero-initialised in the generated class
            zero_item = zero_tmpl.safe_substitute(var_up_name=attri_up_name)
            model_zero += ' ' + zero_item
    str_h_out = h_tmpl.safe_substitute(
        str_class_name=class_name, str_class_define=class_name.upper(), str_model_atti=model_attri)
    str_cpp_out = cpp_tmpl.safe_substitute(str_class_name=class_name, str_model_replace=model_replace, str_model_constructor=model_constructor,
                                           str_model_zero=model_zero)
    h_class_file.write(str_h_out)
    cpp_class_file.write(str_cpp_out)
    input_file.close()
    h_class_file.close()
    cpp_class_file.close()
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
from scipy import interpolate
import util as UT
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
def test_median():
    '''Check that UT.median with uniform weights agrees with np.median.
    '''
    values = np.arange(100)
    uniform_w = np.repeat(1., 100)
    unweighted = np.median(values)
    weighted = UT.median(values, weights=uniform_w)
    print(unweighted)
    print(weighted)
    if unweighted != weighted:
        raise ValueError
    return None
def test_fit_zoft():
z_table, t_table = UT.zt_table()
cosmo = FlatLambdaCDM(H0=70, Om0=0.274)
prettyplot()
fig = plt.figure()
sub = fig.add_subplot(111)
for deg in range(2,10):
coeff = UT.fit_zoft(deg)
if deg > 5:
print 'deg = ', deg, coeff
zoft = np.poly1d(coeff)
z_arr = np.arange(0., 2., 0.1)
t_arr = cosmo.age(z_arr).value
sub.plot(t_arr, (zoft(t_arr) - z_arr)/z_arr, label='Degree='+str(deg))
z_of_t = interpolate.interp1d(t_arr, z_arr, kind='cubic')
zint = z_of_t(t_table[1:20])#np.interp(t_table[:20], t_arr, z_arr)
sub.scatter(t_table[1:20], (z_table[1:20] - zint)/zint, c='k', s=30)
sub.plot(np.arange(0., 14., 0.1), np.repeat(-0.025, len(np.arange(0., 14., 0.1))), c='k', ls='--', lw=3)
sub.plot(np.arange(0., 14., 0.1), np.repeat(0.025, len(np.arange(0., 14., 0.1))), c='k', ls='--', lw=3)
sub.set_ylim([-0.05, 0.05])
sub.set_xlim([3, 13.8])
sub.legend(loc='upper left')
plt.show()
def test_fit_tofz():
z_table, t_table = UT.zt_table()
cosmo = FlatLambdaCDM(H0=70, Om0=0.274)
prettyplot()
fig = plt.figure()
sub = fig.add_subplot(111)
for deg in range(2,10):
coeff = UT.fit_tofz(deg)
if deg > 5:
print 'deg = ', deg, coeff
tofz = np.poly1d(coeff)
z_arr = np.arange(0., 2., 0.1)
t_arr = cosmo.age(z_arr).value
sub.plot(z_arr, (tofz(z_arr) - t_arr)/t_arr, label='Degree='+str(deg))
t_of_z = interpolate.interp1d(z_arr, t_arr, kind='cubic')
tint = t_of_z(z_table[1:20])#np.interp(t_table[:20], t_arr, z_arr)
sub.scatter(z_table[1:20], (t_table[1:20] - tint)/tint, c='k', s=30)
sub.plot(np.arange(0., 2., 0.1), np.repeat(-0.025, len(np.arange(0., 2., 0.1))), c='k', ls='--', lw=3)
sub.plot(np.arange(0., 2., 0.1), np.repeat(0.025, len(np.arange(0., 2., 0.1))), c='k', ls='--', lw=3)
sub.set_ylim([-0.05, 0.05])
sub.set_xlim([0., 2.])
sub.legend(loc='upper left')
plt.show()
if __name__=='__main__':
    # Only the weighted-median check runs by default; the fit diagnostics
    # open matplotlib windows and are enabled by hand when needed.
    #test_fit_zoft()
    #test_fit_tofz()
    test_median()
|
from _mycmath import add # NOQA
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Expose the installed distribution's version string as __version__.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # Package is not installed (e.g. running from a source checkout);
    # leave __version__ unset.
    # fix: removed a stray trailing '|' extraction artifact that was fused
    # onto this line and broke the syntax.
    pass
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: python.proto
import sys
# On Python 2 str literals are already bytes; on Python 3 they must be
# encoded as latin1 so every code point maps 1:1 back to the byte values
# protoc serialized.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Process-wide symbol database; all descriptors below register into it so
# messages can be resolved by full name at runtime.
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='python.proto',
package='python',
syntax='proto3',
serialized_pb=_b('\n\x0cpython.proto\x12\x06python\x1a\x1egoogle/protobuf/wrappers.proto\"\x87\x42\n\x08InfoDict\x12(\n\x07\x66ormats\x18\x01 \x03(\x0b\x32\x17.python.InfoDict.Format\x12)\n\x03url\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03\x65xt\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x66ormat\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nplayer_url\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\talt_title\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ndisplay_id\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\nthumbnails\x18\x08 \x03(\x0b\x32\x1a.python.InfoDict.Thumbnail\x12/\n\tthumbnail\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x64\x65scription\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08uploader\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07license\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x63reator\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0crelease_date\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\ttimestamp\x18\x0f \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x31\n\x0bupload_date\x18\x10 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0buploader_id\x18\x11 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cuploader_url\x18\x12 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08location\x18\x13 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\tsubtitles\x18\x14 \x03(\x0b\x32\x1a.python.InfoDict.Subtitles\x12\x36\n\x12\x61utomatic_captions\x18\x15 \x03(\x0b\x32\x1a.python.InfoDict.Subtitles\x12.\n\x08\x64uration\x18\x16 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x30\n\nview_count\x18% \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\nlike_count\x18\x18 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x33\n\rdislike_count\x18\x19 
\x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x32\n\x0crepost_count\x18\x1a \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x34\n\x0e\x61verage_rating\x18\x1b \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x33\n\rcomment_count\x18\x1c \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12*\n\x08\x63omments\x18\x1d \x03(\x0b\x32\x18.python.InfoDict.Comment\x12.\n\tage_limit\x18\x1e \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x31\n\x0bwebpage_url\x18\x1f \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ncategories\x18 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04tags\x18! \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x07is_live\x18\" \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\nstart_time\x18# \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x65nd_time\x18$ \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12*\n\x08\x63hapters\x18& \x03(\x0b\x32\x18.python.InfoDict.Chapter\x12\x32\n\x0c\x63hapter_info\x18\' \x01(\x0b\x32\x1c.python.InfoDict.ChapterInfo\x12\x30\n\x0bseries_info\x18( \x01(\x0b\x32\x1b.python.InfoDict.SeriesInfo\x12.\n\nalbum_info\x18) \x01(\x0b\x32\x1a.python.InfoDict.AlbumInfo\x12\"\n\x08\x63hildren\x18* \x03(\x0b\x32\x10.python.InfoDict\x12\x30\n\nchild_urls\x18+ \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0e\x65xtractor_name\x18, \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\textractor\x18Y \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05title\x18- \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x02id\x18. 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nresolution\x18/ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06ie_key\x18\x30 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x0chttp_headers\x18\x31 \x03(\x0b\x32!.python.InfoDict.HttpHeadersEntry\x12,\n\x06height\x18\x32 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12+\n\x05width\x18\x33 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06vcodec\x18\x34 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08\x66ilesize\x18\x35 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12:\n\x14__x_forwarded_for_ip\x18: \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08page_url\x18; \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x06\x64irect\x18< \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x0f\x66ilesize_approx\x18\x41 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\tvideo_url\x18G \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0bplaylist_id\x18H \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0eplaylist_title\x18I \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rextractor_key\x18J \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x08protocol\x18K \x01(\x0e\x32 .python.InfoDict.Format.Protocol\x12)\n\x03\x61\x62r\x18L \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06\x61\x63odec\x18M \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x61uthor\x18N \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03tbr\x18O \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\tformat_id\x18P \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0csession_code\x18Q \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10session_speakers\x18R \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0csession_room\x18S \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x61uthors\x18T \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05likes\x18U \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x35\n\x0fstretched_ratio\x18V 
\x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12*\n\x04show\x18W \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08\x63\x61tegory\x18X \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x14webpage_url_basename\x18Z \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xf3\x12\n\x06\x46ormat\x12)\n\x03url\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cmanifest_url\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03\x65xt\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x66ormat\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tformat_id\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x66ormat_note\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05width\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12,\n\x06height\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\nresolution\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03tbr\x18\n \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12)\n\x03\x61\x62r\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06\x61\x63odec\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03\x61sr\x18\r \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12)\n\x03vbr\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12)\n\x03\x66ps\x18\x0f \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06vcodec\x18\x10 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tcontainer\x18\x11 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08\x66ilesize\x18\x12 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x35\n\x0f\x66ilesize_approx\x18\x13 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x30\n\nplayer_url\x18\x14 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x08protocol\x18\x15 \x01(\x0e\x32 .python.InfoDict.Format.Protocol\x12\x37\n\x11\x66ragment_base_url\x18\x16 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\tfragments\x18\x17 \x03(\x0b\x32 
.python.InfoDict.Format.Fragment\x12/\n\npreference\x18\x18 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12.\n\x08language\x18\x19 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x13language_preference\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12,\n\x07quality\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x36\n\x11source_preference\x18\x1c \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x0chttp_headers\x18\x1d \x03(\x0b\x32(.python.InfoDict.Format.HttpHeadersEntry\x12\x35\n\x0fstretched_ratio\x18\x1e \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12-\n\tno_resume\x18\x1f \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\tplay_path\x18\x36 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03\x61pp\x18\x37 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08page_url\x18\x38 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0ertmp_real_time\x18\x39 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\x0b\x66ormat_name\x18= \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12@\n\x1a\x65xtra_param_to_segment_url\x18\x42 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rflash_version\x18\x43 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\trtmp_conn\x18\x44 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\trtmp_live\x18\x45 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12(\n\x02id\x18\x46 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xc1\x01\n\x08\x46ragment\x12)\n\x03url\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04path\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08\x64uration\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x66ilesize\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x1a\x32\n\x10HttpHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\x8d\x01\n\x08Protocol\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04HTTP\x10\x01\x12\t\n\x05HTTPS\x10\x02\x12\x08\n\x04RTSP\x10\x03\x12\x08\n\x04RTMP\x10\x04\x12\t\n\x05RTMPE\x10\x05\x12\x08\n\x04M3U8\x10\x06\x12\x0f\n\x0bM3U8_NATIVE\x10\x07\x12\x16\n\x12HTTP_DASH_SEGMENTS\x10\x08\x12\x07\n\x03ISM\x10\t\x12\x07\n\x03\x46\x34M\x10\n\x1a\xb0\x03\n\tThumbnail\x12(\n\x02id\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03url\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\npreference\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12*\n\x05width\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12+\n\x06height\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12/\n\nresolution\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12.\n\x08\x66ilesize\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x32\n\x0c\x61spect_ratio\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\tformat_id\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x80\x02\n\tSubtitles\x12)\n\x03tag\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\nsubformats\x18\x02 \x03(\x0b\x32$.python.InfoDict.Subtitles.Subformat\x1a\x8d\x01\n\tSubformat\x12)\n\x03\x65xt\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03url\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xfb\x02\n\x07\x43omment\x12,\n\x06\x61uthor\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tauthor_id\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ncomment_id\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04html\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04text\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\ttimestamp\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06parent\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12(\n\x02id\x18\x08 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x98\x01\n\x07\x43hapter\x12\x30\n\nstart_time\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12+\n\x05title\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xa3\x01\n\x0b\x43hapterInfo\x12-\n\x07\x63hapter\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\x0e\x63hapter_number\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nchapter_id\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xe3\x02\n\nSeriesInfo\x12,\n\x06series\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06season\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\rseason_number\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12/\n\tseason_id\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x65pisode\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\x0e\x65pisode_number\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nepisode_id\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\xee\x03\n\tAlbumInfo\x12+\n\x05track\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0ctrack_number\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12.\n\x08track_id\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x61rtist\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05genre\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05\x61lbum\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nalbum_type\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x61lbum_artist\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x64isc_number\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x31\n\x0crelease_year\x18\n \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x1a\x32\n\x10HttpHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\xeb\x03\n\x07Request\x12+\n\x06job_id\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12@\n\x13url_resolve_request\x18\x02 \x01(\x0b\x32!.python.Request.URLResolveRequestH\x00\x12K\n\x19url_is_resolvable_request\x18\x03 \x01(\x0b\x32&.python.Request.URLIsResolvableRequestH\x00\x1a\xd3\x01\n\x11URLResolveRequest\x12)\n\x03url\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08username\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08password\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rresolver_name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x43\n\x16URLIsResolvableRequest\x12)\n\x03url\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValueB\t\n\x07request\"\xd8\x05\n\x08Response\x12+\n\x06job_id\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x43\n\x14url_resolve_response\x18\x02 \x01(\x0b\x32#.python.Response.URLResolveResponseH\x00\x12N\n\x1aurl_is_resolvable_response\x18\x03 \x01(\x0b\x32(.python.Response.URLIsResolvableResponseH\x00\x1a\xf8\x02\n\x12URLResolveResponse\x12#\n\tinfo_dict\x18\x01 \x03(\x0b\x32\x10.python.InfoDict\x12+\n\x07success\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x11password_required\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0egeo_restricted\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12>\n\x08redirect\x18\x05 \x01(\x0b\x32,.python.Response.URLResolveResponse.Redirect\x1a\x65\n\x08Redirect\x12)\n\x03url\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08resolver\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x82\x01\n\x17URLIsResolvableResponse\x12\x31\n\ris_resolvable\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x0eresolver_names\x18\x02 \x03(\x0b\x32\x1c.google.protobuf.StringValueB\n\n\x08responseB+\n\x19\x63heap.hella.gobi.pybridgeB\x0ePyBridgeProtosb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
# NOTE(review): machine-generated protobuf code ("DO NOT EDIT") -- the
# annotations here are comments only; the code is byte-identical.
# EnumDescriptor for python.InfoDict.Format.Protocol: the delivery
# protocol associated with a media format.
_INFODICT_FORMAT_PROTOCOL = _descriptor.EnumDescriptor(
  name='Protocol',
  full_name='python.InfoDict.Format.Protocol',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NONE', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HTTP', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HTTPS', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RTSP', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RTMP', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RTMPE', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='M3U8', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='M3U8_NATIVE', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HTTP_DASH_SEGMENTS', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ISM', index=9, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='F4M', index=10, number=10,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets of this enum inside DESCRIPTOR.serialized_pb.
  serialized_start=6067,
  serialized_end=6208,
)
# Register so reflection can resolve the enum by its full name.
_sym_db.RegisterEnumDescriptor(_INFODICT_FORMAT_PROTOCOL)
# Descriptor for python.InfoDict.Format.Fragment -- one piece of a
# fragmented/segmented download.  Every field is a protobuf wrapper
# message (type=11), so "unset" stays distinguishable from zero/empty.
_INFODICT_FORMAT_FRAGMENT = _descriptor.Descriptor(
  name='Fragment',
  full_name='python.InfoDict.Format.Fragment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='python.InfoDict.Format.Fragment.url', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='path', full_name='python.InfoDict.Format.Fragment.path', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='duration', full_name='python.InfoDict.Format.Fragment.duration', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='filesize', full_name='python.InfoDict.Format.Fragment.filesize', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside DESCRIPTOR.serialized_pb.
  serialized_start=5819,
  serialized_end=6012,
)
# Descriptor for the synthetic entry message backing the
# Format.http_headers map field (key/value both plain strings, type=9).
# The serialized MessageOptions blob below presumably encodes
# map_entry=true, as protoc emits for *Entry messages -- confirm against
# the .proto if it matters.
_INFODICT_FORMAT_HTTPHEADERSENTRY = _descriptor.Descriptor(
  name='HttpHeadersEntry',
  full_name='python.InfoDict.Format.HttpHeadersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='python.InfoDict.Format.HttpHeadersEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='python.InfoDict.Format.HttpHeadersEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside DESCRIPTOR.serialized_pb.
  serialized_start=6014,
  serialized_end=6064,
)
_INFODICT_FORMAT = _descriptor.Descriptor(
name='Format',
full_name='python.InfoDict.Format',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='python.InfoDict.Format.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='manifest_url', full_name='python.InfoDict.Format.manifest_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ext', full_name='python.InfoDict.Format.ext', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format', full_name='python.InfoDict.Format.format', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format_id', full_name='python.InfoDict.Format.format_id', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format_note', full_name='python.InfoDict.Format.format_note', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='python.InfoDict.Format.width', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='python.InfoDict.Format.height', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='resolution', full_name='python.InfoDict.Format.resolution', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tbr', full_name='python.InfoDict.Format.tbr', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='abr', full_name='python.InfoDict.Format.abr', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acodec', full_name='python.InfoDict.Format.acodec', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asr', full_name='python.InfoDict.Format.asr', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vbr', full_name='python.InfoDict.Format.vbr', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fps', full_name='python.InfoDict.Format.fps', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vcodec', full_name='python.InfoDict.Format.vcodec', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='container', full_name='python.InfoDict.Format.container', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filesize', full_name='python.InfoDict.Format.filesize', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filesize_approx', full_name='python.InfoDict.Format.filesize_approx', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_url', full_name='python.InfoDict.Format.player_url', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol', full_name='python.InfoDict.Format.protocol', index=20,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fragment_base_url', full_name='python.InfoDict.Format.fragment_base_url', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fragments', full_name='python.InfoDict.Format.fragments', index=22,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='preference', full_name='python.InfoDict.Format.preference', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language', full_name='python.InfoDict.Format.language', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language_preference', full_name='python.InfoDict.Format.language_preference', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quality', full_name='python.InfoDict.Format.quality', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_preference', full_name='python.InfoDict.Format.source_preference', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='http_headers', full_name='python.InfoDict.Format.http_headers', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stretched_ratio', full_name='python.InfoDict.Format.stretched_ratio', index=29,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='no_resume', full_name='python.InfoDict.Format.no_resume', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='play_path', full_name='python.InfoDict.Format.play_path', index=31,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app', full_name='python.InfoDict.Format.app', index=32,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_url', full_name='python.InfoDict.Format.page_url', index=33,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rtmp_real_time', full_name='python.InfoDict.Format.rtmp_real_time', index=34,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format_name', full_name='python.InfoDict.Format.format_name', index=35,
number=61, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extra_param_to_segment_url', full_name='python.InfoDict.Format.extra_param_to_segment_url', index=36,
number=66, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='flash_version', full_name='python.InfoDict.Format.flash_version', index=37,
number=67, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rtmp_conn', full_name='python.InfoDict.Format.rtmp_conn', index=38,
number=68, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rtmp_live', full_name='python.InfoDict.Format.rtmp_live', index=39,
number=69, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='python.InfoDict.Format.id', index=40,
number=70, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_INFODICT_FORMAT_FRAGMENT, _INFODICT_FORMAT_HTTPHEADERSENTRY, ],
enum_types=[
_INFODICT_FORMAT_PROTOCOL,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3789,
serialized_end=6208,
)
# Generated protobuf descriptor for the nested message
# python.InfoDict.Thumbnail (thumbnail metadata: id, url, preference,
# width/height, resolution, filesize, aspect ratio, format id).
# NOTE(review): this appears to be protoc-generated *_pb2.py output — do not
# hand-edit field numbers or serialized offsets; regenerate from the .proto.
# All fields are type=11/cpp_type=10 (TYPE_MESSAGE), label=1 (optional),
# i.e. each scalar is modeled as a wrapper message rather than a primitive.
_INFODICT_THUMBNAIL = _descriptor.Descriptor(
  name='Thumbnail',
  full_name='python.InfoDict.Thumbnail',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='python.InfoDict.Thumbnail.id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url', full_name='python.InfoDict.Thumbnail.url', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='preference', full_name='python.InfoDict.Thumbnail.preference', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='width', full_name='python.InfoDict.Thumbnail.width', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='height', full_name='python.InfoDict.Thumbnail.height', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='resolution', full_name='python.InfoDict.Thumbnail.resolution', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='filesize', full_name='python.InfoDict.Thumbnail.filesize', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='aspect_ratio', full_name='python.InfoDict.Thumbnail.aspect_ratio', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='format_id', full_name='python.InfoDict.Thumbnail.format_id', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside the module's
  # serialized FileDescriptorProto — must match the serialized pool exactly.
  serialized_start=6211,
  serialized_end=6643,
)
# Generated protobuf descriptor for python.InfoDict.Subtitles.Subformat:
# one subtitle rendition (ext, inline data, or url). Nested inside the
# Subtitles message below. Generated code — do not edit by hand.
_INFODICT_SUBTITLES_SUBFORMAT = _descriptor.Descriptor(
  name='Subformat',
  full_name='python.InfoDict.Subtitles.Subformat',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ext', full_name='python.InfoDict.Subtitles.Subformat.ext', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='python.InfoDict.Subtitles.Subformat.data', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url', full_name='python.InfoDict.Subtitles.Subformat.url', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6761,
  serialized_end=6902,
)
# Generated protobuf descriptor for python.InfoDict.Subtitles: a language
# tag plus a repeated list of Subformat entries (label=3 = LABEL_REPEATED).
# Generated code — do not edit by hand.
_INFODICT_SUBTITLES = _descriptor.Descriptor(
  name='Subtitles',
  full_name='python.InfoDict.Subtitles',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tag', full_name='python.InfoDict.Subtitles.tag', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='subformats', full_name='python.InfoDict.Subtitles.subformats', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_INFODICT_SUBTITLES_SUBFORMAT, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6646,
  serialized_end=6902,
)
# Generated protobuf descriptor for python.InfoDict.Comment: a single user
# comment (author, ids, html/text bodies, timestamp, parent reference).
# Generated code — do not edit by hand.
_INFODICT_COMMENT = _descriptor.Descriptor(
  name='Comment',
  full_name='python.InfoDict.Comment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='author', full_name='python.InfoDict.Comment.author', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='author_id', full_name='python.InfoDict.Comment.author_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='comment_id', full_name='python.InfoDict.Comment.comment_id', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='html', full_name='python.InfoDict.Comment.html', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='text', full_name='python.InfoDict.Comment.text', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='python.InfoDict.Comment.timestamp', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='parent', full_name='python.InfoDict.Comment.parent', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id', full_name='python.InfoDict.Comment.id', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6905,
  serialized_end=7284,
)
# Generated protobuf descriptor for python.InfoDict.Chapter: one chapter
# marker within the media (start_time, end_time, title).
# Generated code — do not edit by hand.
_INFODICT_CHAPTER = _descriptor.Descriptor(
  name='Chapter',
  full_name='python.InfoDict.Chapter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='start_time', full_name='python.InfoDict.Chapter.start_time', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='end_time', full_name='python.InfoDict.Chapter.end_time', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='python.InfoDict.Chapter.title', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=7287,
  serialized_end=7439,
)
# Generated protobuf descriptor for python.InfoDict.ChapterInfo: identifies
# which chapter of a larger work this item belongs to (name, number, id) —
# distinct from Chapter above, which marks time ranges inside the media.
# Generated code — do not edit by hand.
_INFODICT_CHAPTERINFO = _descriptor.Descriptor(
  name='ChapterInfo',
  full_name='python.InfoDict.ChapterInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='chapter', full_name='python.InfoDict.ChapterInfo.chapter', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='chapter_number', full_name='python.InfoDict.ChapterInfo.chapter_number', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='chapter_id', full_name='python.InfoDict.ChapterInfo.chapter_id', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=7442,
  serialized_end=7605,
)
# Generated protobuf descriptor for python.InfoDict.SeriesInfo: TV-series
# style metadata (series name, season name/number/id, episode name/number/id).
# Generated code — do not edit by hand.
_INFODICT_SERIESINFO = _descriptor.Descriptor(
  name='SeriesInfo',
  full_name='python.InfoDict.SeriesInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='series', full_name='python.InfoDict.SeriesInfo.series', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='season', full_name='python.InfoDict.SeriesInfo.season', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='season_number', full_name='python.InfoDict.SeriesInfo.season_number', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='season_id', full_name='python.InfoDict.SeriesInfo.season_id', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='episode', full_name='python.InfoDict.SeriesInfo.episode', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='episode_number', full_name='python.InfoDict.SeriesInfo.episode_number', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='episode_id', full_name='python.InfoDict.SeriesInfo.episode_id', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=7608,
  serialized_end=7963,
)
# Generated protobuf descriptor for python.InfoDict.AlbumInfo: music-track
# metadata (track name/number/id, artist, genre, album name/type/artist,
# disc number, release year). Generated code — do not edit by hand.
_INFODICT_ALBUMINFO = _descriptor.Descriptor(
  name='AlbumInfo',
  full_name='python.InfoDict.AlbumInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='track', full_name='python.InfoDict.AlbumInfo.track', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='track_number', full_name='python.InfoDict.AlbumInfo.track_number', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='track_id', full_name='python.InfoDict.AlbumInfo.track_id', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='artist', full_name='python.InfoDict.AlbumInfo.artist', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='genre', full_name='python.InfoDict.AlbumInfo.genre', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='album', full_name='python.InfoDict.AlbumInfo.album', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='album_type', full_name='python.InfoDict.AlbumInfo.album_type', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='album_artist', full_name='python.InfoDict.AlbumInfo.album_artist', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='disc_number', full_name='python.InfoDict.AlbumInfo.disc_number', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='release_year', full_name='python.InfoDict.AlbumInfo.release_year', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=7966,
  serialized_end=8460,
)
# Generated protobuf descriptor for python.InfoDict.HttpHeadersEntry: the
# synthetic key/value entry message behind a `map<string, string>` field.
# Unlike the other nested messages here, key and value are plain strings
# (type=9/cpp_type=9 = TYPE_STRING with '' defaults), not message wrappers.
# Generated code — do not edit by hand.
_INFODICT_HTTPHEADERSENTRY = _descriptor.Descriptor(
  name='HttpHeadersEntry',
  full_name='python.InfoDict.HttpHeadersEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='python.InfoDict.HttpHeadersEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='python.InfoDict.HttpHeadersEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # Serialized MessageOptions b'8\001' sets field 7 (map_entry) to true,
  # marking this as a generated map-entry type.
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6014,
  serialized_end=6064,
)
_INFODICT = _descriptor.Descriptor(
name='InfoDict',
full_name='python.InfoDict',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='formats', full_name='python.InfoDict.formats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='url', full_name='python.InfoDict.url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ext', full_name='python.InfoDict.ext', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format', full_name='python.InfoDict.format', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_url', full_name='python.InfoDict.player_url', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alt_title', full_name='python.InfoDict.alt_title', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_id', full_name='python.InfoDict.display_id', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='thumbnails', full_name='python.InfoDict.thumbnails', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='thumbnail', full_name='python.InfoDict.thumbnail', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='python.InfoDict.description', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uploader', full_name='python.InfoDict.uploader', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='license', full_name='python.InfoDict.license', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='creator', full_name='python.InfoDict.creator', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='release_date', full_name='python.InfoDict.release_date', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='python.InfoDict.timestamp', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upload_date', full_name='python.InfoDict.upload_date', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uploader_id', full_name='python.InfoDict.uploader_id', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uploader_url', full_name='python.InfoDict.uploader_url', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='location', full_name='python.InfoDict.location', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtitles', full_name='python.InfoDict.subtitles', index=19,
number=20, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='automatic_captions', full_name='python.InfoDict.automatic_captions', index=20,
number=21, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='duration', full_name='python.InfoDict.duration', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_count', full_name='python.InfoDict.view_count', index=22,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='like_count', full_name='python.InfoDict.like_count', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dislike_count', full_name='python.InfoDict.dislike_count', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='repost_count', full_name='python.InfoDict.repost_count', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='average_rating', full_name='python.InfoDict.average_rating', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comment_count', full_name='python.InfoDict.comment_count', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comments', full_name='python.InfoDict.comments', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age_limit', full_name='python.InfoDict.age_limit', index=29,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='webpage_url', full_name='python.InfoDict.webpage_url', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='categories', full_name='python.InfoDict.categories', index=31,
number=32, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tags', full_name='python.InfoDict.tags', index=32,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_live', full_name='python.InfoDict.is_live', index=33,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start_time', full_name='python.InfoDict.start_time', index=34,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end_time', full_name='python.InfoDict.end_time', index=35,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chapters', full_name='python.InfoDict.chapters', index=36,
number=38, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chapter_info', full_name='python.InfoDict.chapter_info', index=37,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='series_info', full_name='python.InfoDict.series_info', index=38,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='album_info', full_name='python.InfoDict.album_info', index=39,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='children', full_name='python.InfoDict.children', index=40,
number=42, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='child_urls', full_name='python.InfoDict.child_urls', index=41,
number=43, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extractor_name', full_name='python.InfoDict.extractor_name', index=42,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extractor', full_name='python.InfoDict.extractor', index=43,
number=89, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='title', full_name='python.InfoDict.title', index=44,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='python.InfoDict.id', index=45,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='resolution', full_name='python.InfoDict.resolution', index=46,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ie_key', full_name='python.InfoDict.ie_key', index=47,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='http_headers', full_name='python.InfoDict.http_headers', index=48,
number=49, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='python.InfoDict.height', index=49,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='python.InfoDict.width', index=50,
number=51, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vcodec', full_name='python.InfoDict.vcodec', index=51,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filesize', full_name='python.InfoDict.filesize', index=52,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='__x_forwarded_for_ip', full_name='python.InfoDict.__x_forwarded_for_ip', index=53,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_url', full_name='python.InfoDict.page_url', index=54,
number=59, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='direct', full_name='python.InfoDict.direct', index=55,
number=60, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filesize_approx', full_name='python.InfoDict.filesize_approx', index=56,
number=65, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='video_url', full_name='python.InfoDict.video_url', index=57,
number=71, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='playlist_id', full_name='python.InfoDict.playlist_id', index=58,
number=72, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='playlist_title', full_name='python.InfoDict.playlist_title', index=59,
number=73, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extractor_key', full_name='python.InfoDict.extractor_key', index=60,
number=74, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol', full_name='python.InfoDict.protocol', index=61,
number=75, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='abr', full_name='python.InfoDict.abr', index=62,
number=76, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acodec', full_name='python.InfoDict.acodec', index=63,
number=77, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='author', full_name='python.InfoDict.author', index=64,
number=78, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tbr', full_name='python.InfoDict.tbr', index=65,
number=79, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format_id', full_name='python.InfoDict.format_id', index=66,
number=80, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_code', full_name='python.InfoDict.session_code', index=67,
number=81, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_speakers', full_name='python.InfoDict.session_speakers', index=68,
number=82, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_room', full_name='python.InfoDict.session_room', index=69,
number=83, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='authors', full_name='python.InfoDict.authors', index=70,
number=84, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='likes', full_name='python.InfoDict.likes', index=71,
number=85, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stretched_ratio', full_name='python.InfoDict.stretched_ratio', index=72,
number=86, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='show', full_name='python.InfoDict.show', index=73,
number=87, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='category', full_name='python.InfoDict.category', index=74,
number=88, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='webpage_url_basename', full_name='python.InfoDict.webpage_url_basename', index=75,
number=90, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_INFODICT_FORMAT, _INFODICT_THUMBNAIL, _INFODICT_SUBTITLES, _INFODICT_COMMENT, _INFODICT_CHAPTER, _INFODICT_CHAPTERINFO, _INFODICT_SERIESINFO, _INFODICT_ALBUMINFO, _INFODICT_HTTPHEADERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=8512,
)
# Descriptor for the nested message `python.Request.URLResolveRequest`.
# NOTE(review): protoc-generated descriptor data (proto3). Field numbers,
# type codes (type=11 == TYPE_MESSAGE, cpp_type=10 == CPPTYPE_MESSAGE,
# label=1 == optional) and the serialized byte offsets must stay in sync
# with the file descriptor's serialized form -- do not hand-edit.
# The `message_type=None` slots are presumably patched to wrapper types
# in the post-construction wiring later in this module -- the wiring for
# this message is outside the visible region; verify there.
_REQUEST_URLRESOLVEREQUEST = _descriptor.Descriptor(
  name='URLResolveRequest',
  full_name='python.Request.URLResolveRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='python.Request.URLResolveRequest.url', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='username', full_name='python.Request.URLResolveRequest.username', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='password', full_name='python.Request.URLResolveRequest.password', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='resolver_name', full_name='python.Request.URLResolveRequest.resolver_name', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=8715,
  serialized_end=8926,
)
# Descriptor for the nested message `python.Request.URLIsResolvableRequest`:
# a single message-typed `url` field (type=11 == TYPE_MESSAGE).
# NOTE(review): protoc-generated descriptor data -- values are coupled to
# the serialized file descriptor; do not hand-edit.
_REQUEST_URLISRESOLVABLEREQUEST = _descriptor.Descriptor(
  name='URLIsResolvableRequest',
  full_name='python.Request.URLIsResolvableRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='python.Request.URLIsResolvableRequest.url', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=8928,
  serialized_end=8995,
)
# Descriptor for the top-level `python.Request` message: a job id plus a
# `request` oneof. The oneof's member list is empty here; protoc fills it
# in after construction (presumably with url_resolve_request /
# url_is_resolvable_request -- the patch site is outside the visible
# region; verify there).
# NOTE(review): generated descriptor data -- do not hand-edit values.
_REQUEST = _descriptor.Descriptor(
  name='Request',
  full_name='python.Request',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='job_id', full_name='python.Request.job_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url_resolve_request', full_name='python.Request.url_resolve_request', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url_is_resolvable_request', full_name='python.Request.url_is_resolvable_request', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_REQUEST_URLRESOLVEREQUEST, _REQUEST_URLISRESOLVABLEREQUEST, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='request', full_name='python.Request.request',
      index=0, containing_type=None, fields=[]),
  ],
  # Byte span covers the whole message including its nested types
  # (8715-8995 above fall inside 8515-9006).
  serialized_start=8515,
  serialized_end=9006,
)
# Descriptor for `python.Response.URLResolveResponse.Redirect`: a target
# `url` and the `resolver` that should handle it (both message-typed
# wrapper fields, type=11).
# NOTE(review): protoc-generated descriptor data -- do not hand-edit.
_RESPONSE_URLRESOLVERESPONSE_REDIRECT = _descriptor.Descriptor(
  name='Redirect',
  full_name='python.Response.URLResolveResponse.Redirect',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='python.Response.URLResolveResponse.Redirect.url', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='resolver', full_name='python.Response.URLResolveResponse.Redirect.resolver', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=9491,
  serialized_end=9592,
)
# Descriptor for `python.Response.URLResolveResponse`: repeated
# `info_dict` results (label=3 == repeated) plus success /
# password_required / geo_restricted flags and an optional `redirect`
# pointing at the nested Redirect message above.
# NOTE(review): protoc-generated descriptor data -- do not hand-edit.
_RESPONSE_URLRESOLVERESPONSE = _descriptor.Descriptor(
  name='URLResolveResponse',
  full_name='python.Response.URLResolveResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='info_dict', full_name='python.Response.URLResolveResponse.info_dict', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='success', full_name='python.Response.URLResolveResponse.success', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='password_required', full_name='python.Response.URLResolveResponse.password_required', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='geo_restricted', full_name='python.Response.URLResolveResponse.geo_restricted', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='redirect', full_name='python.Response.URLResolveResponse.redirect', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSE_URLRESOLVERESPONSE_REDIRECT, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # serialized_end matches the nested Redirect's (9592): the parent's
  # byte span encloses its nested type.
  serialized_start=9216,
  serialized_end=9592,
)
# Descriptor for `python.Response.URLIsResolvableResponse`: an
# `is_resolvable` flag plus the repeated (label=3) `resolver_names` that
# can handle the URL.
# NOTE(review): protoc-generated descriptor data -- do not hand-edit.
_RESPONSE_URLISRESOLVABLERESPONSE = _descriptor.Descriptor(
  name='URLIsResolvableResponse',
  full_name='python.Response.URLIsResolvableResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='is_resolvable', full_name='python.Response.URLIsResolvableResponse.is_resolvable', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='resolver_names', full_name='python.Response.URLIsResolvableResponse.resolver_names', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=9595,
  serialized_end=9725,
)
# Descriptor for the top-level `python.Response` message: mirrors
# `python.Request` -- a job id plus a `response` oneof. The oneof's
# member list is empty here and is filled in post-construction
# (presumably with url_resolve_response / url_is_resolvable_response --
# the patch site is outside the visible region; verify there).
# NOTE(review): generated descriptor data -- do not hand-edit values.
_RESPONSE = _descriptor.Descriptor(
  name='Response',
  full_name='python.Response',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='job_id', full_name='python.Response.job_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url_resolve_response', full_name='python.Response.url_resolve_response', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url_is_resolvable_response', full_name='python.Response.url_is_resolvable_response', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSE_URLRESOLVERESPONSE, _RESPONSE_URLISRESOLVABLERESPONSE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='response', full_name='python.Response.response',
      index=0, containing_type=None, fields=[]),
  ],
  # Byte span covers the whole message including its nested types.
  serialized_start=9009,
  serialized_end=9737,
)
# ---------------------------------------------------------------------------
# Post-construction wiring (protoc pattern): descriptors above are built with
# message_type/enum_type/containing_type left as None, then cross-referenced
# here once every descriptor object exists. Fields use well-known wrapper
# types (StringValue, DoubleValue, ...) so "unset" is distinguishable from a
# zero value in proto3.
# ---------------------------------------------------------------------------
# InfoDict.Format.Fragment field types and parent.
_INFODICT_FORMAT_FRAGMENT.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT_FRAGMENT.fields_by_name['path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT_FRAGMENT.fields_by_name['duration'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT_FRAGMENT.fields_by_name['filesize'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT_FRAGMENT.containing_type = _INFODICT_FORMAT
_INFODICT_FORMAT_HTTPHEADERSENTRY.containing_type = _INFODICT_FORMAT
# InfoDict.Format field types.
_INFODICT_FORMAT.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['manifest_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['ext'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['format'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['format_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['format_note'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['width'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT_FORMAT.fields_by_name['height'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT_FORMAT.fields_by_name['resolution'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['tbr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['abr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['acodec'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['asr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['vbr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['fps'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['vcodec'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['container'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['filesize'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['filesize_approx'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['player_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
# `protocol` is the one enum-typed field; it points at the nested Protocol enum.
_INFODICT_FORMAT.fields_by_name['protocol'].enum_type = _INFODICT_FORMAT_PROTOCOL
_INFODICT_FORMAT.fields_by_name['fragment_base_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['fragments'].message_type = _INFODICT_FORMAT_FRAGMENT
_INFODICT_FORMAT.fields_by_name['preference'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_FORMAT.fields_by_name['language'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['language_preference'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_FORMAT.fields_by_name['quality'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_FORMAT.fields_by_name['source_preference'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_FORMAT.fields_by_name['http_headers'].message_type = _INFODICT_FORMAT_HTTPHEADERSENTRY
_INFODICT_FORMAT.fields_by_name['stretched_ratio'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_FORMAT.fields_by_name['no_resume'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_INFODICT_FORMAT.fields_by_name['play_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['app'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['page_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['rtmp_real_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_INFODICT_FORMAT.fields_by_name['format_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['extra_param_to_segment_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['flash_version'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['rtmp_conn'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_FORMAT.fields_by_name['rtmp_live'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_INFODICT_FORMAT.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
# Parent links for Format and its nested Protocol enum.
_INFODICT_FORMAT.containing_type = _INFODICT
_INFODICT_FORMAT_PROTOCOL.containing_type = _INFODICT_FORMAT
# InfoDict.Thumbnail field types and parent.
_INFODICT_THUMBNAIL.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_THUMBNAIL.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_THUMBNAIL.fields_by_name['preference'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_THUMBNAIL.fields_by_name['width'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_THUMBNAIL.fields_by_name['height'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
# NOTE(review): `resolution` is Int64Value here but StringValue on Format
# (above) and on InfoDict itself -- an inconsistency in the .proto schema
# worth confirming upstream; the wiring faithfully reflects it.
_INFODICT_THUMBNAIL.fields_by_name['resolution'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_THUMBNAIL.fields_by_name['filesize'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_THUMBNAIL.fields_by_name['aspect_ratio'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_THUMBNAIL.fields_by_name['format_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_THUMBNAIL.containing_type = _INFODICT
# InfoDict.Subtitles and its nested SubFormat: a subtitle track is a tag
# plus repeated subformats (ext / inline data / url).
_INFODICT_SUBTITLES_SUBFORMAT.fields_by_name['ext'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SUBTITLES_SUBFORMAT.fields_by_name['data'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SUBTITLES_SUBFORMAT.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SUBTITLES_SUBFORMAT.containing_type = _INFODICT_SUBTITLES
_INFODICT_SUBTITLES.fields_by_name['tag'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SUBTITLES.fields_by_name['subformats'].message_type = _INFODICT_SUBTITLES_SUBFORMAT
_INFODICT_SUBTITLES.containing_type = _INFODICT
# InfoDict.Comment field types and parent.
_INFODICT_COMMENT.fields_by_name['author'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['author_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['comment_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['html'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['text'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_COMMENT.fields_by_name['parent'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_COMMENT.containing_type = _INFODICT
# InfoDict.Chapter: a titled [start_time, end_time] segment.
_INFODICT_CHAPTER.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_CHAPTER.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT_CHAPTER.fields_by_name['title'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_CHAPTER.containing_type = _INFODICT
# InfoDict.ChapterInfo field types and parent.
_INFODICT_CHAPTERINFO.fields_by_name['chapter'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_CHAPTERINFO.fields_by_name['chapter_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_CHAPTERINFO.fields_by_name['chapter_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_CHAPTERINFO.containing_type = _INFODICT
# InfoDict.SeriesInfo: series / season / episode identification.
_INFODICT_SERIESINFO.fields_by_name['series'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SERIESINFO.fields_by_name['season'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SERIESINFO.fields_by_name['season_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_SERIESINFO.fields_by_name['season_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SERIESINFO.fields_by_name['episode'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SERIESINFO.fields_by_name['episode_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_SERIESINFO.fields_by_name['episode_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_SERIESINFO.containing_type = _INFODICT
# InfoDict.AlbumInfo: track / album / artist metadata. Note release_year
# is the lone Int32Value field in this section.
_INFODICT_ALBUMINFO.fields_by_name['track'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['track_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_ALBUMINFO.fields_by_name['track_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['artist'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['genre'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['album'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['album_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['album_artist'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT_ALBUMINFO.fields_by_name['disc_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_INFODICT_ALBUMINFO.fields_by_name['release_year'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_INFODICT_ALBUMINFO.containing_type = _INFODICT
_INFODICT_HTTPHEADERSENTRY.containing_type = _INFODICT
# Top-level InfoDict field wiring: nested-message fields point at the
# descriptors defined above; scalar-like fields use well-known wrapper
# types. (This run continues beyond this section of the module.)
_INFODICT.fields_by_name['formats'].message_type = _INFODICT_FORMAT
_INFODICT.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['ext'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['format'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['player_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['alt_title'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['display_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['thumbnails'].message_type = _INFODICT_THUMBNAIL
_INFODICT.fields_by_name['thumbnail'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['description'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['uploader'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['license'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['creator'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['release_date'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['upload_date'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['uploader_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['uploader_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['location'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
# `subtitles` and `automatic_captions` share the same Subtitles message type.
_INFODICT.fields_by_name['subtitles'].message_type = _INFODICT_SUBTITLES
_INFODICT.fields_by_name['automatic_captions'].message_type = _INFODICT_SUBTITLES
_INFODICT.fields_by_name['duration'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['view_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT.fields_by_name['like_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT.fields_by_name['dislike_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT.fields_by_name['repost_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT.fields_by_name['average_rating'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['comment_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_INFODICT.fields_by_name['comments'].message_type = _INFODICT_COMMENT
_INFODICT.fields_by_name['age_limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_INFODICT.fields_by_name['webpage_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['categories'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['tags'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['is_live'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_INFODICT.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['chapters'].message_type = _INFODICT_CHAPTER
_INFODICT.fields_by_name['chapter_info'].message_type = _INFODICT_CHAPTERINFO
_INFODICT.fields_by_name['series_info'].message_type = _INFODICT_SERIESINFO
_INFODICT.fields_by_name['album_info'].message_type = _INFODICT_ALBUMINFO
_INFODICT.fields_by_name['children'].message_type = _INFODICT
_INFODICT.fields_by_name['child_urls'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['extractor_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['extractor'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['title'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['resolution'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['ie_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['http_headers'].message_type = _INFODICT_HTTPHEADERSENTRY
_INFODICT.fields_by_name['height'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['width'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['vcodec'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['filesize'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['__x_forwarded_for_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['page_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['direct'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_INFODICT.fields_by_name['filesize_approx'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['video_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['playlist_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['playlist_title'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['extractor_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['protocol'].enum_type = _INFODICT_FORMAT_PROTOCOL
_INFODICT.fields_by_name['abr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['acodec'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['author'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['tbr'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['format_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['session_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['session_speakers'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['session_room'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['authors'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['likes'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['stretched_ratio'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_INFODICT.fields_by_name['show'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['category'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INFODICT.fields_by_name['webpage_url_basename'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLRESOLVEREQUEST.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLRESOLVEREQUEST.fields_by_name['username'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLRESOLVEREQUEST.fields_by_name['password'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLRESOLVEREQUEST.fields_by_name['resolver_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLRESOLVEREQUEST.containing_type = _REQUEST
_REQUEST_URLISRESOLVABLEREQUEST.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REQUEST_URLISRESOLVABLEREQUEST.containing_type = _REQUEST
_REQUEST.fields_by_name['job_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_REQUEST.fields_by_name['url_resolve_request'].message_type = _REQUEST_URLRESOLVEREQUEST
_REQUEST.fields_by_name['url_is_resolvable_request'].message_type = _REQUEST_URLISRESOLVABLEREQUEST
_REQUEST.oneofs_by_name['request'].fields.append(
_REQUEST.fields_by_name['url_resolve_request'])
_REQUEST.fields_by_name['url_resolve_request'].containing_oneof = _REQUEST.oneofs_by_name['request']
_REQUEST.oneofs_by_name['request'].fields.append(
_REQUEST.fields_by_name['url_is_resolvable_request'])
_REQUEST.fields_by_name['url_is_resolvable_request'].containing_oneof = _REQUEST.oneofs_by_name['request']
_RESPONSE_URLRESOLVERESPONSE_REDIRECT.fields_by_name['url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_RESPONSE_URLRESOLVERESPONSE_REDIRECT.fields_by_name['resolver'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_RESPONSE_URLRESOLVERESPONSE_REDIRECT.containing_type = _RESPONSE_URLRESOLVERESPONSE
_RESPONSE_URLRESOLVERESPONSE.fields_by_name['info_dict'].message_type = _INFODICT
_RESPONSE_URLRESOLVERESPONSE.fields_by_name['success'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_RESPONSE_URLRESOLVERESPONSE.fields_by_name['password_required'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_RESPONSE_URLRESOLVERESPONSE.fields_by_name['geo_restricted'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_RESPONSE_URLRESOLVERESPONSE.fields_by_name['redirect'].message_type = _RESPONSE_URLRESOLVERESPONSE_REDIRECT
_RESPONSE_URLRESOLVERESPONSE.containing_type = _RESPONSE
_RESPONSE_URLISRESOLVABLERESPONSE.fields_by_name['is_resolvable'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_RESPONSE_URLISRESOLVABLERESPONSE.fields_by_name['resolver_names'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_RESPONSE_URLISRESOLVABLERESPONSE.containing_type = _RESPONSE
_RESPONSE.fields_by_name['job_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_RESPONSE.fields_by_name['url_resolve_response'].message_type = _RESPONSE_URLRESOLVERESPONSE
_RESPONSE.fields_by_name['url_is_resolvable_response'].message_type = _RESPONSE_URLISRESOLVABLERESPONSE
_RESPONSE.oneofs_by_name['response'].fields.append(
_RESPONSE.fields_by_name['url_resolve_response'])
_RESPONSE.fields_by_name['url_resolve_response'].containing_oneof = _RESPONSE.oneofs_by_name['response']
_RESPONSE.oneofs_by_name['response'].fields.append(
_RESPONSE.fields_by_name['url_is_resolvable_response'])
_RESPONSE.fields_by_name['url_is_resolvable_response'].containing_oneof = _RESPONSE.oneofs_by_name['response']
DESCRIPTOR.message_types_by_name['InfoDict'] = _INFODICT
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InfoDict = _reflection.GeneratedProtocolMessageType('InfoDict', (_message.Message,), dict(
Format = _reflection.GeneratedProtocolMessageType('Format', (_message.Message,), dict(
Fragment = _reflection.GeneratedProtocolMessageType('Fragment', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_FORMAT_FRAGMENT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Format.Fragment)
))
,
HttpHeadersEntry = _reflection.GeneratedProtocolMessageType('HttpHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_FORMAT_HTTPHEADERSENTRY,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Format.HttpHeadersEntry)
))
,
DESCRIPTOR = _INFODICT_FORMAT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Format)
))
,
Thumbnail = _reflection.GeneratedProtocolMessageType('Thumbnail', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_THUMBNAIL,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Thumbnail)
))
,
Subtitles = _reflection.GeneratedProtocolMessageType('Subtitles', (_message.Message,), dict(
Subformat = _reflection.GeneratedProtocolMessageType('Subformat', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_SUBTITLES_SUBFORMAT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Subtitles.Subformat)
))
,
DESCRIPTOR = _INFODICT_SUBTITLES,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Subtitles)
))
,
Comment = _reflection.GeneratedProtocolMessageType('Comment', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_COMMENT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Comment)
))
,
Chapter = _reflection.GeneratedProtocolMessageType('Chapter', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_CHAPTER,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.Chapter)
))
,
ChapterInfo = _reflection.GeneratedProtocolMessageType('ChapterInfo', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_CHAPTERINFO,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.ChapterInfo)
))
,
SeriesInfo = _reflection.GeneratedProtocolMessageType('SeriesInfo', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_SERIESINFO,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.SeriesInfo)
))
,
AlbumInfo = _reflection.GeneratedProtocolMessageType('AlbumInfo', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_ALBUMINFO,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.AlbumInfo)
))
,
HttpHeadersEntry = _reflection.GeneratedProtocolMessageType('HttpHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _INFODICT_HTTPHEADERSENTRY,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict.HttpHeadersEntry)
))
,
DESCRIPTOR = _INFODICT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.InfoDict)
))
_sym_db.RegisterMessage(InfoDict)
_sym_db.RegisterMessage(InfoDict.Format)
_sym_db.RegisterMessage(InfoDict.Format.Fragment)
_sym_db.RegisterMessage(InfoDict.Format.HttpHeadersEntry)
_sym_db.RegisterMessage(InfoDict.Thumbnail)
_sym_db.RegisterMessage(InfoDict.Subtitles)
_sym_db.RegisterMessage(InfoDict.Subtitles.Subformat)
_sym_db.RegisterMessage(InfoDict.Comment)
_sym_db.RegisterMessage(InfoDict.Chapter)
_sym_db.RegisterMessage(InfoDict.ChapterInfo)
_sym_db.RegisterMessage(InfoDict.SeriesInfo)
_sym_db.RegisterMessage(InfoDict.AlbumInfo)
_sym_db.RegisterMessage(InfoDict.HttpHeadersEntry)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
URLResolveRequest = _reflection.GeneratedProtocolMessageType('URLResolveRequest', (_message.Message,), dict(
DESCRIPTOR = _REQUEST_URLRESOLVEREQUEST,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Request.URLResolveRequest)
))
,
URLIsResolvableRequest = _reflection.GeneratedProtocolMessageType('URLIsResolvableRequest', (_message.Message,), dict(
DESCRIPTOR = _REQUEST_URLISRESOLVABLEREQUEST,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Request.URLIsResolvableRequest)
))
,
DESCRIPTOR = _REQUEST,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Request)
))
_sym_db.RegisterMessage(Request)
_sym_db.RegisterMessage(Request.URLResolveRequest)
_sym_db.RegisterMessage(Request.URLIsResolvableRequest)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
URLResolveResponse = _reflection.GeneratedProtocolMessageType('URLResolveResponse', (_message.Message,), dict(
Redirect = _reflection.GeneratedProtocolMessageType('Redirect', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE_URLRESOLVERESPONSE_REDIRECT,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Response.URLResolveResponse.Redirect)
))
,
DESCRIPTOR = _RESPONSE_URLRESOLVERESPONSE,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Response.URLResolveResponse)
))
,
URLIsResolvableResponse = _reflection.GeneratedProtocolMessageType('URLIsResolvableResponse', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE_URLISRESOLVABLERESPONSE,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Response.URLIsResolvableResponse)
))
,
DESCRIPTOR = _RESPONSE,
__module__ = 'python_pb2'
# @@protoc_insertion_point(class_scope:python.Response)
))
_sym_db.RegisterMessage(Response)
_sym_db.RegisterMessage(Response.URLResolveResponse)
_sym_db.RegisterMessage(Response.URLResolveResponse.Redirect)
_sym_db.RegisterMessage(Response.URLIsResolvableResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031cheap.hella.gobi.pybridgeB\016PyBridgeProtos'))
_INFODICT_FORMAT_HTTPHEADERSENTRY.has_options = True
_INFODICT_FORMAT_HTTPHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_INFODICT_HTTPHEADERSENTRY.has_options = True
_INFODICT_HTTPHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
|
# Keyboard-driven turtle sketchpad: arrow keys move/turn the pen,
# "c" resets and clears, clicking the window exits.
from turtle import Turtle, Screen
t = Turtle()
screen = Screen()
def move_forwards():
    # Advance 10 units in the current heading.
    t.forward(10)
def move_backwards():
    # Retreat 10 units without changing heading.
    t.backward(10)
def turn_left():
    # Rotate counter-clockwise by 10 degrees.
    t.left(10)
def turn_right():
    # Rotate clockwise by 10 degrees.
    t.right(10)
def clear():
    # Send the pen back to the origin, then erase the drawing.
    t.home()
    t.clear()
screen.listen()  # start receiving keyboard events
screen.onkey(key="Up", fun=move_forwards)
screen.onkey(key="Down", fun=move_backwards)
screen.onkey(key="Left", fun=turn_left)
screen.onkey(key="Right", fun=turn_right)
screen.onkey(key="c", fun=clear)
screen.exitonclick()  # keep the window open until it is clicked
|
#!/usr/bin/python3
# -*-coding:utf-8 -*-
# ProjectName:Zhihaofans的文件模块
# Author:zhihaofans
# PythonVersion:3.x
import os
import shutil
def read(filePath):
    """Return the entire text content of *filePath*."""
    with open(filePath) as handle:
        return handle.read()
def write(filePath, fileData, stopIfExisted=False):
    """Write *fileData* to *filePath* and return the character count.

    When *stopIfExisted* is true and the file is already present,
    nothing is written and None is returned.
    """
    if stopIfExisted and exists(filePath):
        return None
    with open(filePath, "w") as handle:
        return handle.write(fileData)
def cp(old, new, force=False):
    """Copy a file or directory tree from *old* to *new*.

    Returns True on success, False on any failure (missing source or a
    copy error).  When *force* is true, an existing destination is
    removed before copying.

    Bug fixed: the original tested the function object ``os.path.isfile``
    (always truthy) instead of calling it, so directory sources were
    passed to ``shutil.copyfile`` and failed.  The duplicated
    ``os.path.exists(old)`` check and the copy-pasted force/non-force
    branches (which behaved identically) are collapsed.
    """
    if not os.path.exists(old):
        return False
    try:
        if force and os.path.exists(new):
            # Clear the destination so the copy cannot collide with it.
            if os.path.isfile(new):
                os.remove(new)
            else:
                shutil.rmtree(new)
        if os.path.isfile(old):
            shutil.copyfile(old, new)
        else:
            # Third positional argument preserves symlinks, as before.
            shutil.copytree(old, new, True)
        return True
    except (OSError, shutil.Error):
        return False
def mv(old, new, force=False):
    """Move *old* to *new*; True on success, False otherwise.

    With *force*, an existing destination is deleted first.

    Bug fixed: the original always attempted to delete the destination
    when *force* was set — if *new* did not exist, ``os.path.isfile(new)``
    was False, ``shutil.rmtree(new)`` raised, and the move wrongly
    returned False.  Missing sources now return False (falsy, as before,
    where the original implicitly returned None).
    """
    if not os.path.exists(old):
        return False
    if force and os.path.exists(new):
        # Remove the destination only when it actually exists.
        try:
            if os.path.isfile(new):
                os.remove(new)
            else:
                shutil.rmtree(new)
        except OSError:
            return False
    try:
        shutil.move(old, new)
        return True
    except (OSError, shutil.Error):
        return False
def mk(path, mode=0o777):
    """Create *path* (including any missing parents); True on success.

    Existing directories are not an error (``exist_ok=True``).
    """
    try:
        os.makedirs(name=path, mode=mode, exist_ok=True)
    except BaseException:
        return False
    else:
        return True
def rm(path):
    """Delete *path* — a file or an entire directory tree; True on success.

    Bug fixed: the original used ``os.removedirs``, which can only delete
    *empty* directories and then keeps pruning empty *parent* directories
    upward — both surprising for an "rm".  ``shutil.rmtree`` removes
    exactly the named tree and nothing above it.
    """
    if not os.path.exists(path):
        return False
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
        return True
    except OSError:
        return False
def exists(path):
    """True when *path* refers to an existing file or directory."""
    return os.path.exists(path)
def isdir(path):
    """True when *path* is an existing directory."""
    return os.path.isdir(path)
def getDir(path):
    """Directory portion of *path* (``dirname``)."""
    return os.path.dirname(path)
def getFilename(path):
    """Final path component of *path* (``basename``)."""
    return os.path.basename(path)
def getExtname(path):
    """File extension of *path*, dot included; empty string if none."""
    stem, extension = os.path.splitext(path)
    return extension
def getMyPyPath():
    """Absolute directory containing this module's source file."""
    return os.path.dirname(os.path.realpath(__file__))
def getUpPath(path):
    """Return the parent directory of *path*.

    A single trailing slash or backslash is stripped first so that
    ``getUpPath('a/b/')`` equals ``getUpPath('a/b')``.

    Bug fixed: the original indexed ``path[-1]`` unconditionally and
    raised IndexError on an empty string; an empty path now yields ''.
    """
    trimmed = path
    if trimmed and trimmed[-1] in ('/', '\\'):
        trimmed = trimmed[:-1]
    return os.path.split(trimmed)[0]
|
import os
import sys
import launch
import launch_ros.actions
from ament_index_python.packages import get_package_share_directory
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
    """Launch a V4L2 camera and the YOLOX detector together inside one
    composable-node container, plus an rqt_graph viewer."""
    yolox_ros_share_dir = get_package_share_directory('yolox_ros_cpp')
    # Parameter file for the detector; filename suggests the Nano model
    # converted with trtexec — confirm against the package contents.
    yolox_param_yaml = os.path.join(yolox_ros_share_dir, "param", "nano_trtexec.yaml")
    # Both nodes share one rclcpp component container.
    container = ComposableNodeContainer(
        name='yolox_container',
        namespace='',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[
            # Camera source publishing 640x480 frames.
            ComposableNode(
                package='v4l2_camera',
                plugin='v4l2_camera::V4L2Camera',
                name='v4l2_camera',
                parameters=[{
                    "image_size": [640,480]
                }]),
            # YOLOX detector configured from the YAML parameter file.
            ComposableNode(
                package='yolox_ros_cpp',
                plugin='yolox_ros_cpp::YoloXNode',
                name='yolox_ros_cpp',
                parameters=[yolox_param_yaml],
            )
        ],
        output='screen',
    )
    # Node-graph viewer launched alongside the pipeline.
    rqt_graph = launch_ros.actions.Node(
        package="rqt_graph", executable="rqt_graph",
    )
    return launch.LaunchDescription([
        container,
        rqt_graph,
    ])
# -*- coding: utf-8 -*-
from datetime import datetime
from pytz import timezone, utc
from pytz.exceptions import UnknownTimeZoneError
from bleach import linkify
from flask import g, render_template, redirect, request, Markup, abort, flash, escape
from flask_mail import Message
from coaster.utils import make_name
from coaster.views import jsonp, load_models, requestargs
from coaster.gfm import markdown
from baseframe import _
from .. import app, mail, lastuser
from ..models import db, ProposalSpace, ProposalSpaceSection, Proposal, Comment, Vote, ProposalFeedback, FEEDBACK_AUTH_TYPE
from ..forms import ProposalForm, CommentForm, DeleteCommentForm, ConfirmDeleteForm, ConfirmSessionForm
# Column order for flat (CSV-style) proposal exports; proposal_data_flat()
# emits values in this order (minus the vote-breakdown fields).
proposal_headers = [
    'id',
    'title',
    'url',
    'proposer',
    'speaker',
    'email',
    'phone',
    'section',
    'type',
    'level',
    'votes',
    'comments',
    'submitted',
    'confirmed'
]
def send_mail(sender, to, body, subject):
    """Send a single-recipient email; *body* is Markdown text and is also
    rendered to HTML for the rich part of the message."""
    msg = Message(sender=sender, subject=subject, recipients=[to])
    msg.body = body
    msg.html = markdown(msg.body)  # FIXME: This does not include HTML head/body tags
    mail.send(msg)
def proposal_data(proposal):
    """
    Return proposal data suitable for a JSON dump. Request helper, not to be used standalone.

    Vote totals and per-group tallies are only computed for site admins;
    for everyone else those fields come back as None.  An optional ?tz=
    request argument localises vote dates (400 for unknown zones).
    """
    votes_count = None
    votes_groups = None
    # One per-date tally dict for every user group, even when no votes exist.
    votes_bydate = dict([(group.name, {}) for group in proposal.proposal_space.usergroups])
    if 'tz' in request.args:
        try:
            tz = timezone(request.args['tz'])
        except UnknownTimeZoneError:
            abort(400)
    else:
        tz = None
    if lastuser.has_permission('siteadmin'):
        votes_count = len(proposal.votes.votes)
        votes_groups = dict([(group.name, 0) for group in proposal.proposal_space.usergroups])
        groupuserids = dict([(group.name, [user.userid for user in group.users])
            for group in proposal.proposal_space.usergroups])
        # Tally each vote into every group its voter belongs to, bucketed
        # by (optionally timezone-adjusted) date; downvotes count as -1.
        for vote in proposal.votes.votes:
            for groupname, userids in groupuserids.items():
                if vote.user.userid in userids:
                    votes_groups[groupname] += -1 if vote.votedown else +1
                    if tz:
                        date = tz.normalize(vote.updated_at.replace(tzinfo=utc).astimezone(tz)).strftime('%Y-%m-%d')
                    else:
                        date = vote.updated_at.strftime('%Y-%m-%d')
                    votes_bydate[groupname].setdefault(date, 0)
                    votes_bydate[groupname][date] += -1 if vote.votedown else +1
    return {'id': proposal.id,
            'name': proposal.url_name,
            'title': proposal.title,
            'url': proposal.url_for(_external=True),
            'json_url': proposal.url_for('json', _external=True),
            'proposer': proposal.user.fullname,
            'speaker': proposal.speaker.fullname if proposal.speaker else None,
            # Contact details are only exposed to site admins.
            'email': proposal.email if lastuser.has_permission('siteadmin') else None,
            'phone': proposal.phone if lastuser.has_permission('siteadmin') else None,
            'section': proposal.section.title if proposal.section else None,
            'type': proposal.session_type,
            'level': proposal.technical_level,
            'objective': proposal.objective.html,
            'description': proposal.description.html,
            'requirements': proposal.requirements.html,
            'slides': proposal.slides,
            'links': proposal.links,
            'bio': proposal.bio.html,
            'votes': proposal.votes.count,
            'votes_count': votes_count,
            'votes_groups': votes_groups,
            'votes_bydate': votes_bydate,
            'comments': proposal.comments.count,
            'submitted': proposal.created_at.isoformat() + 'Z',
            'confirmed': proposal.confirmed,
        }
def proposal_data_flat(proposal, groups=()):
    """Return proposal data as a flat list of columns matching
    ``proposal_headers``, followed by one vote-tally column per group
    name in *groups*.

    Idiom fix: *groups* was a mutable default argument (``[]``).  It is
    only iterated here, so behaviour is unchanged, but an immutable
    default tuple is the safe convention.
    """
    data = proposal_data(proposal)
    cols = [data[header] for header in proposal_headers
        if header not in ('votes_groups', 'votes_bydate')]
    # Per-group vote tallies are appended after the standard columns.
    cols.extend(data['votes_groups'][name] for name in groups)
    return cols
# --- Routes ------------------------------------------------------------------
@app.route('/<space>/new', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    permission='new-proposal', addlperms=lastuser.permissions)
def proposal_new(space):
    """Present and process the new-proposal form for a proposal space."""
    form = ProposalForm(model=Proposal, parent=space)
    # del form.session_type # We don't use this anymore # But The Goa Project still does, so commented out
    form.section.query = ProposalSpaceSection.query.filter_by(proposal_space=space, public=True).order_by('title')
    if len(list(form.section.query.all())) == 0:
        # Don't bother with sections when there aren't any
        del form.section
    if request.method == 'GET':
        # Pre-fill contact details from the logged-in user.
        form.email.data = g.user.email
        form.phone.data = g.user.phone
    if form.validate_on_submit():
        proposal = Proposal(user=g.user, proposal_space=space)
        if form.speaking.data:
            proposal.speaker = g.user
        else:
            proposal.speaker = None
        with db.session.no_autoflush:
            proposal.votes.vote(g.user)  # Vote up your own proposal by default
        form.populate_obj(proposal)
        proposal.name = make_name(proposal.title)
        db.session.add(proposal)
        db.session.commit()
        flash(_("Your new session has been saved"), 'info')
        # 303 so the browser Back button doesn't re-submit the form.
        return redirect(proposal.url_for(), code=303)
    return render_template('baseframe/autoform.html', form=form, title=_("Submit a session proposal"),
        submit=_("Submit proposal"),
        breadcrumbs=[(space.url_for(), space.title)],
        message=Markup(
            _('This form uses <a href="http://daringfireball.net/projects/markdown/">Markdown</a> for formatting.')))
@app.route('/<space>/<proposal>/edit', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission=('edit-proposal', 'siteadmin'), addlperms=lastuser.permissions)
def proposal_edit(space, proposal):
    """Edit an existing proposal; only its proposer may toggle speaker status."""
    form = ProposalForm(obj=proposal, model=Proposal, parent=space)
    if not proposal.session_type:
        del form.session_type  # Remove this if we're editing a proposal that had no session type
    form.section.query = ProposalSpaceSection.query.filter_by(proposal_space=space, public=True).order_by('title')
    if len(list(form.section.query.all())) == 0:
        # Don't bother with sections when there aren't any
        del form.section
    if proposal.user != g.user:
        # Editors other than the proposer never see the 'speaking' field.
        del form.speaking
    elif request.method == 'GET':
        form.speaking.data = proposal.speaker == g.user
    if form.validate_on_submit():
        form.populate_obj(proposal)
        proposal.name = make_name(proposal.title)
        if proposal.user == g.user:
            # Only allow the speaker to change this status
            if form.speaking.data:
                proposal.speaker = g.user
            else:
                if proposal.speaker == g.user:
                    proposal.speaker = None
        proposal.edited_at = datetime.utcnow()
        db.session.commit()
        flash(_("Your changes have been saved"), 'info')
        # 303 so the browser Back button doesn't re-submit the form.
        return redirect(proposal.url_for(), code=303)
    return render_template('baseframe/autoform.html', form=form, title=_("Edit session proposal"), submit=_("Save changes"),
        breadcrumbs=[(space.url_for(), space.title),
            (proposal.url_for(), proposal.title)],
        message=Markup(
            _('This form uses <a href="http://daringfireball.net/projects/markdown/">Markdown</a> for formatting.')))
@app.route('/<space>/<proposal>/confirm', methods=['POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission=('confirm-proposal', 'siteadmin'), addlperms=lastuser.permissions)
def proposal_confirm(space, proposal):
    """Toggle a proposal's confirmed flag; the same endpoint also cancels."""
    form = ConfirmSessionForm()
    if form.validate_on_submit():
        # Toggle rather than set, so repeated posts flip the status.
        proposal.confirmed = not proposal.confirmed
        db.session.commit()
        if proposal.confirmed:
            flash(_("This proposal has been confirmed."), 'success')
        else:
            flash(_("This session has been cancelled."), 'success')
    return redirect(proposal.url_for())
@app.route('/<space>/<proposal>/delete', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission=('delete-proposal', 'siteadmin'), addlperms=lastuser.permissions)
def proposal_delete(space, proposal):
    """Ask for confirmation, then delete a proposal with its comments and votes."""
    form = ConfirmDeleteForm()
    if form.validate_on_submit():
        if 'delete' in request.form:
            # Remove dependent comments and votes before the proposal itself.
            comments = Comment.query.filter_by(commentspace=proposal.comments).order_by('created_at').all()
            for comment in comments:
                db.session.delete(comment)
            db.session.delete(proposal.comments)
            votes = Vote.query.filter_by(votespace=proposal.votes).all()
            for vote in votes:
                db.session.delete(vote)
            db.session.delete(proposal.votes)
            db.session.delete(proposal)
            db.session.commit()
            flash(_("Your proposal has been deleted"), "info")
            return redirect(space.url_for())
        else:
            # Any other submit button counts as a cancel.
            return redirect(proposal.url_for())
    return render_template('delete.html', form=form, title=_(u"Confirm delete"),
        message=_(u"Do you really wish to delete your proposal ‘{title}’? "
            u"This will remove all votes and comments as well. This operation "
            u"is permanent and cannot be undone.").format(title=proposal.title))
@app.route('/<space>/<proposal>', methods=['GET', 'POST'])
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission='view', addlperms=lastuser.permissions)
def proposal_view(space, proposal):
    """Show a proposal; POST handles posting, editing and deleting comments."""
    if proposal.proposal_space != space:
        # Loaded from a different space: redirect to the canonical URL.
        return redirect(proposal.url_for(), code=301)
    # Top-level comments, most-voted first.
    comments = sorted(Comment.query.filter_by(commentspace=proposal.comments, parent=None).order_by('created_at').all(),
        key=lambda c: c.votes.count, reverse=True)
    commentform = CommentForm(model=Comment)
    delcommentform = DeleteCommentForm()
    if request.method == 'POST':
        if request.form.get('form.id') == 'newcomment' and commentform.validate() and 'new-comment' in g.permissions:
            send_mail_info = []
            if commentform.comment_edit_id.data:
                # A comment_edit_id means this is an edit of an existing comment.
                comment = Comment.query.get(int(commentform.comment_edit_id.data))
                if comment:
                    if 'edit-comment' in comment.permissions(g.user, g.permissions):
                        comment.message = commentform.message.data
                        comment.edited_at = datetime.utcnow()
                        flash(_("Your comment has been edited"), 'info')
                    else:
                        flash(_("You can only edit your own comments"), 'info')
                else:
                    flash(_("No such comment"), 'error')
            else:
                # New comment; queue notification emails depending on who replied to whom.
                comment = Comment(user=g.user, commentspace=proposal.comments,
                    message=commentform.message.data)
                if commentform.parent_id.data:
                    parent = Comment.query.get(int(commentform.parent_id.data))
                    if parent.user.email:
                        if parent.user == proposal.user:  # check if parent comment & proposal owner are same
                            if not g.user == parent.user:  # check if parent comment is by proposal owner
                                send_mail_info.append({'to': proposal.user.email or proposal.email,
                                    'subject': u"{space} Funnel: {proposal}".format(space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_reply_email.md'})
                        else:  # send mail to parent comment owner & proposal owner
                            if not parent.user == g.user:
                                send_mail_info.append({'to': parent.user.email,
                                    'subject': u"{space} Funnel: {proposal}".format(space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_to_proposer_email.md'})
                            if not proposal.user == g.user:
                                send_mail_info.append({'to': proposal.user.email or proposal.email,
                                    'subject': u"{space} Funnel: {proposal}".format(space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_email.md'})
                    if parent and parent.commentspace == proposal.comments:
                        comment.parent = parent
                else:  # for top level comment
                    if not proposal.user == g.user:
                        send_mail_info.append({'to': proposal.user.email or proposal.email,
                            'subject': u"{space} Funnel: {proposal}".format(space=space.title, proposal=proposal.title),
                            'template': 'proposal_comment_email.md'})
                proposal.comments.count += 1
                comment.votes.vote(g.user)  # Vote for your own comment
                db.session.add(comment)
                flash(_("Your comment has been posted"), 'info')
            db.session.commit()
            to_redirect = comment.url_for(proposal=proposal, _external=True)
            # Render and send each queued notification email.
            for item in send_mail_info:
                email_body = render_template(item.pop('template'), proposal=proposal, comment=comment, link=to_redirect)
                if item.get('to'):
                    # Sender is set to None to prevent revealing email.
                    send_mail(sender=None, body=email_body, **item)
            # Redirect despite this being the same page because HTTP 303 is required to not break
            # the browser Back button
            return redirect(to_redirect, code=303)
        elif request.form.get('form.id') == 'delcomment' and delcommentform.validate():
            comment = Comment.query.get(int(delcommentform.comment_id.data))
            if comment:
                if 'delete-comment' in comment.permissions(g.user, g.permissions):
                    comment.delete()
                    proposal.comments.count -= 1
                    db.session.commit()
                    flash(_("Your comment was deleted"), 'info')
                else:
                    flash(_("You did not post that comment"), 'error')
            else:
                flash(_("No such comment"), 'error')
            return redirect(proposal.url_for(), code=303)
    # Linkify each non-empty line of the proposal's links field, escaping first.
    links = [Markup(linkify(unicode(escape(l)))) for l in proposal.links.replace('\r\n', '\n').split('\n') if l]
    confirmform = ConfirmSessionForm()
    return render_template('proposal.html', space=space, proposal=proposal,
        comments=comments, commentform=commentform, delcommentform=delcommentform,
        breadcrumbs=[(space.url_for(), space.title)],
        links=links, confirmform=confirmform)
@app.route('/<space>/<proposal>/feedback', methods=['POST'])
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission='view', addlperms=lastuser.permissions)
@requestargs('id_type', 'userid', ('content', int), ('presentation', int), ('min_scale', int), ('max_scale', int))
def session_feedback(space, proposal, id_type, userid, content, presentation, min_scale=0, max_scale=2):
    """
    Record anonymous audience feedback (content and presentation scores) for
    a proposal.

    Returns 201 on save, 403 when the same (id_type, userid) pair has already
    submitted feedback, and aborts with 400 on out-of-range scores or an
    unrecognised id_type.
    """
    # Validate the submission before touching the database.
    for score in (content, presentation):
        if not min_scale <= score <= max_scale:
            abort(400)
    if id_type not in ('email', 'deviceid'):
        abort(400)
    # Reject duplicate feedback from the same anonymous identity.
    existing = ProposalFeedback.query.filter_by(
        proposal=proposal,
        auth_type=FEEDBACK_AUTH_TYPE.NOAUTH,
        id_type=id_type,
        userid=userid).first()
    if existing is not None:
        return "Dupe\n", 403
    db.session.add(ProposalFeedback(
        proposal=proposal,
        auth_type=FEEDBACK_AUTH_TYPE.NOAUTH,
        id_type=id_type,
        userid=userid,
        min_scale=min_scale,
        max_scale=max_scale,
        content=content,
        presentation=presentation))
    db.session.commit()
    return "Saved\n", 201
@app.route('/<space>/<proposal>/json', methods=['GET', 'POST'])
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission='view', addlperms=lastuser.permissions)
def proposal_json(space, proposal):
    """Return this proposal's data as a JSON/JSONP response."""
    data = proposal_data(proposal)
    return jsonp(data)
@app.route('/<space>/<proposal>/next')
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission='view', addlperms=lastuser.permissions)
def proposal_next(space, proposal):
    """
    Redirect to the next proposal in this space, or back to the space page
    with a notice when this is the last one.
    """
    # Renamed local from `next` to avoid shadowing the builtin of that name.
    next_proposal = proposal.getnext()
    if next_proposal:
        return redirect(next_proposal.url_for())
    flash(_("You were at the last proposal"), 'info')
    return redirect(space.url_for())
@app.route('/<space>/<proposal>/prev')
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (Proposal, {'url_name': 'proposal', 'proposal_space': 'space'}, 'proposal'),
    permission='view', addlperms=lastuser.permissions)
def proposal_prev(space, proposal):
    """
    Redirect to the previous proposal in this space, or back to the space
    page with a notice when this is the first one.
    """
    previous = proposal.getprev()
    if not previous:
        flash(_("You were at the first proposal"), 'info')
        return redirect(space.url_for())
    return redirect(previous.url_for())
|
# Require a sufficiently recent PyTorch before re-exporting the package API.
import torch as _torch
# NOTE(review): this is a lexicographic string comparison, not a numeric
# version check. It happens to hold for the torch versions released so far,
# but would mis-order unusual version strings — consider a parsed comparison.
assert _torch.__version__ >= '1.1.0'
# Re-export the public names of each submodule as this package's namespace.
from .acts import *
from .op import *
from .layers import *
from .blocks import *
from .utils import *
from .optim import *
|
#!/usr/bin/env python
# Crawler service: an XML-RPC server/client pair plus the search -> visit ->
# score worker pipeline used by buzzbot. Python 2 code (xmlrpclib, Queue,
# SimpleXMLRPCServer, print statements).
from __future__ import with_statement
import logging
# Library path
import os
import sys
# Make the parent package importable when this file is run as a script.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# Standard libraries
import atexit
import random
import re
import socket
import threading
import time
import traceback
import weakref
import xmlrpclib
import Queue
import SimpleXMLRPCServer
#import multiprocessing
# Import third-party libraries
import turbogears
# Custom libraries
#import buzzbot.searcher
#import buzzbot.visitor
#import buzzbot.model
#import buzzbot.bot
#import buzzbot.botUtilities
import buzzbot
# The buzzbot package layout differs between the dev and production boxes;
# fall back through progressively more explicit import styles until one works.
try:
    from buzzbot import *
    print "importing all buzzbot modules"
except:
    from buzzbot import cpu_core_counter
    from buzzbot import searcher
    from buzzbot import visitor
    from buzzbot import model
    from buzzbot import bot
    from buzzbot import botUtilities
    from buzzbot import commands
    myBotRoutines = bot.BotRoutines()
    myBotUtilities = botUtilities.Utilities()
    print "importing some buzzbot modules"
# Re-bind the shared helper singletons via the fully qualified package path
# when available; best-effort, so failures are ignored.
try:
    myBotRoutines = buzzbot.bot.BotRoutines()
    myBotUtilities = buzzbot.botUtilities.Utilities()
except:
    pass
#I haven't quite grokked the differences in namespaces between the dev and production box
# this insures the visitor module is available
try:
    import bot
    import botUtilities
    import visitor
    myBotRoutines = bot.BotRoutines()
    myBotUtilities = botUtilities.Utilities()
except:
    pass
# When True, the pipeline runs serially in-process instead of spawning workers.
DEBUG_RUN_SERIALLY = False
import logging
class CrawlerBase(object):
    """
    Configuration shared by CrawlerClient and CrawlerServer: the XML-RPC
    host/port and a common rotating-file logger.
    """
    def host(self):
        """
        Return the connection host (config key "crawler.socket_host",
        default "localhost").
        """
        return turbogears.config.get("crawler.socket_host", "localhost")

    def port(self):
        """
        Return the connection port as an int (config key
        "crawler.socket_port", default 50015).
        """
        return int(turbogears.config.get("crawler.socket_port", 50015))

    def logger(self):
        """
        Return the shared 'crawler' logger writing to
        /var/log/buzz/crawler.log with rotation.

        Fixes relative to the original:
        * RotatingFileHandler lives in logging.handlers, which must be
          imported explicitly (plain `import logging` does not load it).
        * The Formatter was created but never attached to the handler.
        * A new handler was appended on every call, so each call multiplied
          the log output; now a handler is added only once.
        """
        import logging
        import logging.handlers
        name = 'crawler'
        fname = '/var/log/buzz/crawler.log'
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        # Only attach a handler the first time; repeated calls reuse it.
        if not logger.handlers:
            handler = logging.handlers.RotatingFileHandler(
                fname, maxBytes=100000, backupCount=5)
            formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        return logger
# Module-level shared logger, built once at import time and used throughout.
cb = CrawlerBase()
logger = cb.logger()
def has_fork():
    """
    Return True if this OS provides the `fork` system call (i.e. `os.fork`
    exists), False otherwise (e.g. on Windows).
    """
    # hasattr is the idiomatic equivalent of probing os.__dict__ directly.
    return hasattr(os, "fork")
class CrawlerServerFunctions(object):
"""
Set of functions exposed by the CrawlerServer to the CrawlerClient.
In English: the crawler server is a simple xmlrpc server. We have a proxy server
attached to the smlrpc server that we're passing these methods to. Things are a little strange
because not everything can get passed between the proxy and the real server. For instance,
you can easily pass xml or html, but not objects (they could be repr-ed or pickled, of course)
So, for instance, we can ask the proxy server to "enqueue" a request. This involves passing
the proxy server a dict holding the search_id and other stuff like a deletion flag. The
proxy is supposed to relay that to the real server, which is supposed to process it. The
processing involves doing the search, visit, scoring processes/threads and reporting the results
"""
def enqueue(self, item):
"""
Enqueue the item into the Crawler.
"""
#print "enqueue method of class CrawlerServerFunctions called"
# NOTE: The XML-RPC proxy can't accept a bare **kwargs, so it's passed in as a `dict`.
global server
server.crawler.enqueue(**item)
def stop(self):
"""
Stop the Crawler.
"""
global server
server.keep_running = False
def prepare_results(self):
"""
Prepare scored results.
"""
global server
server.crawler.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
global server
return server.crawler.results_for(search_id)
def evaluate(self, code):
"""
Return the result of eval'ing the string of `code`.
"""
global server
if self.allow_dangerous_operations():
return eval(code)
else:
raise SecurityError("Dangerous operations not allowed on server")
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
global server
if self.allow_dangerous_operations():
exec code
else:
raise SecurityError("Dangerous operations not allowed on server")
def allow_dangerous_operations(self):
"""
Does this server allow dangerous operations? Returns true if the
`DANGEROUS` environmental variable has a value.
"""
global server
if os.environ.has_key('DANGEROUS'):
return True
else:
return False
def ping(self):
"""
Respond with true, to indicate server is alive.
"""
print "pinging crawlerServiceFunctions.ping"
return server.crawler.ping()
def dogma(self):
return "hello from crawlerServiceFunctions.dogma"
def dogmaFromServer(self):
return server.crawler.dogma()
# NOTE: these derive from StandardError, which exists only in Python 2
# (removed in Python 3, where Exception would be the base).
class proxyServerError(StandardError):
    # Proxy-server failure; not raised in the visible portion of this file —
    # presumably available for callers. TODO confirm usage.
    pass
class SecurityError(StandardError):
    # Raised by CrawlerServerFunctions.evaluate/execute when dangerous
    # operations are not enabled on the server.
    pass
class WrapperError(StandardError):
    # NOTE(review): not raised in this file; ResilentXmlRpcServerProxy raises
    # a TimeoutError that is never defined here — possibly this class was the
    # intended exception. TODO confirm.
    pass
class CrawlerServer(CrawlerBase):
    """
    Hosts a Crawler behind a SimpleXMLRPCServer so remote CrawlerClients can
    enqueue work and fetch scored results.
    """
    def __init__(self, items_completed=True, concurrency_library=None, run_as_cron = False):
        """
        Instantiate a server that hosts a Crawler.
        """
        #added option to change the xmlrpc port if the crawler is running as an independent
        # process, as in the case of a nightly cron job
        # NOTE(review): run_as_cron is accepted but not used anywhere in this
        # constructor — TODO confirm whether the alternate-port behaviour was
        # ever wired up.
        logger.debug( "crawler.CrawlerServer.__init__ xmlrpc server initiated on %s:%s" % (str(self.host()), str(self.port()) ))
        self.service = SimpleXMLRPCServer.SimpleXMLRPCServer(addr=(self.host(), self.port()), logRequests=False, allow_none=True)
        self.service.register_instance(CrawlerServerFunctions())
        #keep_running = True turns it on; keep_running = False shuts it down)
        self.keep_running = True
        #this instansiates a crawler process manager and its processes
        logger.debug("crawler.CrawlerServer.__init__ instansiating a crawler process server")
        self.crawler = Crawler(items_completed=items_completed, concurrency_library=concurrency_library)
        logger.debug("crawler.CrawlerServer.__init__ Success in instansiating a crawler process server")
    def start(self):
        """
        Start the crawler workers, then serve XML-RPC requests one at a time
        (blocking) until keep_running is cleared by the stop() RPC.
        """
        logger.debug( "starting the crawler process server")
        self.crawler.start()
        while self.keep_running:
            self.service.handle_request()
        self.stop()
        logger.debug("crawler_runner (crawler/proxy thread) stopped")
    def stop(self):
        """
        Stop the server loop and shut down the crawler's worker pools.
        """
        self.keep_running = False
        self.crawler.stop()
class ResilentXmlRpcServerProxy(object):
    """
    Provides a wrapper around the XmlRpc proxy that retries the connection.

    Every attribute access returns a callable that forwards the call to the
    wrapped xmlrpclib.ServerProxy, retrying on any exception until a total
    per-call deadline of `timeout_seconds` expires.
    """
    def __init__(self, proxy, timeout_seconds=5.0, pause_seconds=0.25):
        # proxy: an xmlrpclib.ServerProxy instance.
        # timeout_seconds: total retry budget per call.
        # pause_seconds: sleep between retries.
        #logger.debug("ResilentXmlRpcServerProxy __init--")
        self.proxy = proxy
        #logger.debug("proxy is of type %s" % type (proxy))
        self.timeout_seconds = timeout_seconds
        self.pause_seconds = pause_seconds
    def __getattr__(self, name):
        # Build and return a retrying wrapper for the remote method `name`.
        # Note: any exception (including genuine XML-RPC Faults) triggers a
        # retry until the deadline passes.
        DEBUG = False
        if DEBUG: print "GA: %s" % name
        proxy = self.proxy
        def wrapper(*args):
            init_time = time.time()
            deadline = init_time + self.timeout_seconds
            while deadline > time.time():
                try:
                    #logger.debug("returning function %s from the xmlrpc proxy wrapper with args: %s" %( name, repr(args) ))
                    return proxy.__getattr__(name)(*args)
                except Exception, e:
                    if DEBUG: print "WA: retry"
                    logger.error("xmlrpc server returned error: %s" %e)
                    time.sleep(self.pause_seconds)
                #if this process is too slow, give it a bit more priority (nice is the priority -20 = highest +20 = lowest )
                #if time.time() > init_time + 3:
                ##TODO: fix this niceness thing ;it's a good idea, but too quickly sets *something* to -20 which freezes the system
                #try:
                #nice = os.nice(0)
                #os.nice(nice-1)
                #newnice = os.nice(0)
                #print "changed nice from %i to %i" %(nice, newnice)
                #except:
                # pass
            logger.info("gave up trying to connect to the XML-RPC server")
            # NOTE(review): TimeoutError is not defined in this file and is not
            # a Python 2 builtin; unless `from buzzbot import *` supplies it,
            # reaching this line raises NameError instead of the intended
            # timeout error — TODO confirm (WrapperError may have been meant).
            raise TimeoutError("Couldn't connect to XML-RPC server, no response after %s seconds" % str(self.timeout_seconds))
        return wrapper
class CrawlerClient(CrawlerBase):
"""
Client that connects to the CrawlerServer.
"""
def __init__(self, run_as_cron = False):
"""
Instantiate a client.
"""
#note, don't use a logger in __init__ unless its installed in __init__
self.raw_service = xmlrpclib.ServerProxy(uri="http://%s:%s/" % (self.host(), self.port()), allow_none=True)
self.service = ResilentXmlRpcServerProxy(proxy=self.raw_service)
def enqueue(self, **item):
"""
Enqueue an item for crawling. The `item` is a dict with the same
parameters accepted by `Crawler.enqueue`.
"""
str(item['search_id'])
logger.debug( "crawler client method enqueue called for item: %s" % str(item['search_id']) )
# NOTE: The XML-RPC proxy can't accept a bare **kwargs, so pass them as a `dict`.
testme = self.service.enqueue(item)
return self.service.enqueue(item)
def stop(self):
"""
Stop the CrawlerServer.
"""
print "stopping the xmlrpc server proxy"
try:
self.raw_service.stop()
except:
pass
return self.raw_service.stop()
def dogma(self):
#print "hello from CrawlerClient.dogma"
pass
def evaluate(self, code):
"""
Return the result of evaluating the code on the server.
"""
#print "evaluating " + code
return self.service.evaluate(code)
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
self.service.execute(code)
def prepare_results(self):
"""
Prepare scored results.
"""
#logger.debug( "initiating CrawlerClient prepare results process")
self.service.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
#logger.debug( "initiating CrawlerClient results for search id %s" %str(search_id))
return self.service.results_for(search_id)
def ping(self):
"""
Is the server responding?
"""
try:
print "pinging crawler client"
return self.raw_service.ping()
except:
return False
class Crawler(object):
    """
    The crawling pipeline: search -> visit -> score, run as pools of worker
    processes/threads fed by manager-hosted queues. Each worker pool reads an
    input queue and feeds the next stage's input queue; scored items land in
    items_to_finalize and are folded into items_completed by
    prepare_results().
    """
    # Sentinel value placed on a queue to flag workers to stop.
    STOP = 'STOP'
    def __init__(self, items_completed=True, searchers_count=None, visitors_count=None,
            scorers_count=None, concurrency_library=None):
        self.debug = False
        self.debug_run_serially = DEBUG_RUN_SERIALLY
        cb = CrawlerBase()
        self.logger = cb.logger()
        #get the concurrency library
        ##TODO: set this in the turbogears.config files
        self._concurrency_library = concurrency_library
        #import the library
        # NOTE(review): exec/eval of the configured library name; acceptable
        # only because concurrency_library comes from trusted config, never
        # from user input.
        exec "import %s" % self._concurrency_library
        #this returns the *object* representing the concurrency library
        self._concurrency_library_module = eval("%s" % self._concurrency_library)
        #figure out how many CPUs we have to work with
        try:
            self.cpu_core_count = self._concurrency_library_module.cpu_count() #works with multiprocessing
        except:
            try: self.cpu_core_count = cpu_core_counter.cpu_core_count() # works with threading and pyprocessing
            except: self.cpu_core_count = 1
        '''
        the manager herds the cats (processes), providing locks, semaphores and the such;
        it runs on its own process
        '''
        self.manager = self._concurrency_library_module.Manager()
        self.lock = self.manager.Lock()
        '''
        These objects are queues to be managed within - you guessed it - the manager; it runs on its own process.
        If we ever switch to a theading library, these would just be Queue.Queue() objects.
        '''
        self.items_to_search = self.manager.Queue()
        self.items_to_visit= self.manager.Queue()
        self.items_to_score= self.manager.Queue()
        self.items_to_finalize= self.manager.Queue()
        self.items_completed = None
        if items_completed:
            self.items_completed = self.manager.dict()
        '''
        the following is a bit convoluted but produces a dict (queue) with three items (searcher, visitor, scorer);
        Each of these three items is in itself a dict with the same two items (input, output).
        searcher = queue.get('searcher') evaluates to: { input : AutoProxy[Queue], output : AutoProxy[Queue]
        myinput = searcher.get('input') evaluates to: AutoProxy[Queue] object
        '''
        self.queue = {}
        self.queue['searcher'] = {}
        self.queue['searcher']['input'] = self.items_to_search
        self.queue['searcher']['output'] = self.items_to_visit
        self.queue['visitor'] = {}
        self.queue['visitor']['input'] = self.items_to_visit
        self.queue['visitor']['output'] = self.items_to_score
        self.queue['scorer'] = {}
        self.queue['scorer']['input'] = self.items_to_score
        self.queue['scorer']['output'] = self.items_to_finalize
        '''
        Figure out how many processes to spawn as a function of the CPUs available; the optimal number
        is at least partly a function of the real time performance desired - a smaller number provides
        faster response
        '''
        # Worker counts
        self.searchers_count = searchers_count or max(2, self.cpu_core_count)
        #TODO: experiment with the visitor counts
        self.visitors_count = visitors_count or min(5, self.cpu_core_count * 5)
        self.scorers_count = scorers_count or min(2, self.cpu_core_count)
        # Workers pools
        self.searchers = []
        self.visitors = []
        self.scorers = []
    def __del__(self):
        #this is the destructor method called after object is killed, so we need to re-import logger
        try:
            print ("trying to stop crawler process manager")
            self.stop()
        except Exception, e:
            print ("crawler.Crawler failed to stop normally: %s" % e)
            pass
        finally:
            #logger.debug("destroyed")
            pass
    def start(self, kind=None):
        """
        Start the crawler. It will begin processing any entries in the queues
        immediately. This starts all types of processes, unless we ask it only to
        run one (kind = "searcher", say)
        """
        #this logic is to run the program serially for debugging purposes (independent
        # processes are hideous to work with).
        if not DEBUG_RUN_SERIALLY:
            if kind:
                logger.info ("crawler start method called for kind = " + kind)
                '''
                the following statements use strange pythonic syntax to dig evaluate variables; compact
                but arcane.
                self__dict__ is a dict of object:value pair known to self (i.e., this class)
                the term ['%ss' % kind] uses text formatting strings: %s is replaced with the value for kind
                self.__dict__['%ss' % kind], then pulls the value for "kind" from the dictionary
                ...
                so, say "kind" is searcher
                so, count = searcher_count and workers = searcher
                ...
                this makes it work with any type of process. but *who cares*?
                '''
                count = self.__dict__['%ss_count' % kind] #this is dict of all objects known to self
                workers = self.__dict__['%ss' % kind]
                for i in range(count):
                    worker = None
                    '''
                    Here, the "target" i.e., the thing executed by the process; this will be a search process, a
                    visitor processs, or whatever TBD by the process_wrapper routine. The list of "workers" gets
                    the process appended to it. The last step is to actually run the process(using the start method).
                    '''
                    if self._concurrency_library == "processing":
                        worker = self._concurrency_library_module.Process(target=self._processes_wrapper, args=[kind])
                    elif self._concurrency_library == "threading":
                        worker = self._concurrency_library_module.Thread(target=self._processes_wrapper, args=[kind])
                    elif self._concurrency_library == "multiprocessing":
                        worker = self._concurrency_library_module.Process(target=self._processes_wrapper, args=[kind])
                    else:
                        raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
                    workers.append(worker)
                    logger.info("starting %s process" % (kind))
                    worker.start()
                    logger.info("started as %s " % (worker.name))
                    # NOTE(review): leftover debugging assignments; no effect.
                    a=1
                    a=2
            else:
                '''
                Recursively calls the logic above to initaite "worker processes" for the requested number of
                searchers, visitors, and scorers (invoked when no worker type is specified in the call).
                '''
                logger.info( "starting processes for all - searcher, visitor, and scorer")
                self.start("searcher")
                self.start("visitor")
                self.start("scorer")
        else:
            logger.debug("running serially for debugging")
    def dogma(self):
        # Debugging stub used to verify the server round-trip.
        return "hello from Crawler.dogma"
    def testQueues(self):
        #having an issue with being able to add items to the queues
        pass
    def _processes_wrapper(self, kind): #line 449
        """
        This routine serves as a container for the worker (searcher, visitor, scorer) processes. The idea is
        that the calling routine can iterate over all processes using the same logic, because the statement
        in the calling routine can be agnostic as to exactly which proces it's calling.
        The calling routine has loaded up a set of queues, one each for the searchers, visitors, and scorers. These
        queues are stored in a dict structure called queue. The "queue" dict has three objects, each of which is another
        dict: searcher, visitor, and scorer. Each of these secondary objects has two entries: input and output (both Queues).
        The logic uses the input argument "kind" to find the correct input/output queue combination, and also to figure
        out which processing routine to pass control to. For instance, if the "kind" argument is "searcher", it digs out
        the searcher input and output queues from the "queue" dict object. Then, using the self._worker_name routine,
        discovers that it needs to pass control to the self._searcher_process method embedded in a Process object. Then, by
        invoking the "target" method, it launches the process, passing along the specifications for this particular search
        and the right output queue (in this case "scorer" input queue dug out of the "queue" dict mentioned above)
        """
        #discern the name of the process to be invoked from the "kind" input argument
        tagline = "%s %s:" % (kind, self._worker_name())
        #creat an alias for the actual Process object
        target = self.__getattribute__('%s_process' % kind)
        #create aliases for the correct input/output Queue objects to be used
        input = self.queue[kind]['input']
        output = self.queue[kind]['output']
        logger.debug("process wrapper processing a %s queue; currently " %kind)
        logger.debug("input queue is %s long; output queue is %s long" %(str(input.qsize()), str(output.qsize())))
        #self.debug_run_serially is used for debugging only
        logger.info("self.debug_run_serially is set to : %s" %str(self.debug_run_serially) )
        if not self.debug_run_serially:
            # Block on the input queue until the STOP sentinel arrives.
            for item in iter(input.get, self.STOP):
                logger.debug("input dict to %s is: %s" %(kind, repr(item)))
                stop_requested = False
                ##TODO: implement graceful stop for runaway bots using search.stop_requested
                try:
                    search_id = item.get('search_id')
                    search = model.Search.get(int(search_id))
                    stop_requested = search.stoprequested
                except:
                    logger.error("process_wrapper failed to get search_id for handoff to %s" %kind)
                    logger.error(traceback.print_stack())
                if not stop_requested:
                    #result = None
                    logger.info("launching process: %s" %kind)
                    result = target(item=item, output=output)
                    if result:
                        logger.debug("process %s found a result" %kind)
                    else:
                        logger.debug("process %s result is None" %kind)
                    if not output:
                        logger.debug("output is %s" %repr(output))
                    if result and output != None:
                        output.put(result)
                        if kind == 'scorer':
                            logger.info ("outputting %s to results_for" %result)
                    else:
                        try:
                            logger.debug("couldn't place item in queue; this would clear queue")
                            #logger.debug(traceback.print_stack())
                            input.queue.clear()
                        except:
                            pass
                else:
                    logger.debug("stop requested in processes_wrapper")
    def _worker_name(self):
        """
        Returns string name uniquely identifying this worker. Actual name will
        depend on the underlying concurrency library.
        """
        if self._concurrency_library == "processing":
            return self._concurrency_library_module.currentProcess().getName()
        elif self._concurrency_library == "threading":
            return self._concurrency_library_module.currentThread().getName()
        elif self._concurrency_library == "multiprocessing":
            return self._concurrency_library_module.current_process().name
        else:
            raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
    def searcher_process(self, item, output=None):
        """
        This process is invoked by the _process_wrapper routine. It runs the search engine queries via the
        routine "searcher.SearcherRunner". It's run as a self-contained process/thread. It's important that
        any changes be thoroughly vetted because if it has problems, it will likely die silently.
        """
        #check to see if we're running serially for debugging
        if item.has_key('debug_run_serially'):
            self.debug_run_serially=item.get('debug_run_serially')
        serial_return = []
        logger.info( "running searcher process for item %s" % repr(item))
        #PB if user requested stop, don't bother
        search = model.Search.get(item['search_id'])
        stop_requested = search.stoprequested
        #logger.debug("checked stoprequested")
        targetword = search.targetword
        #logger.debug("entering stoprequested loop")
        if not stop_requested:
            print "deploying searcher.SearchRunner"
            myresult = searcher.SearchRunner(
                #each "result" is a raw url returned from a search engine
                delete_existing = item['delete_existing'],
                search_id = item['search_id'],
                max_results = item['max_results'],
                debug_run_serially = self.debug_run_serially
                )
            logger.debug("myresult is %s" % repr(myresult))
            for result in searcher.SearchRunner(
                #each "result" is a raw url returned from a search engine
                delete_existing = item['delete_existing'],
                search_id = item['search_id'],
                max_results = item['max_results']):
                urlid = None
                logger.debug("searcher_process found: %s" % repr(result))
                ##TODO: move this processing logic outside the crawler
                #clean the url up (first-order validity checks, etc.) Below returns a list or nothing
                logger.debug("evaluating %s" %result)
                fixedUrl = myBotUtilities.fixUrls(targetword, urlList = [result])
                #logger.info("fixed url is %s" %fixedUrl)
                cleanResult = ""
                if len(fixedUrl)>0: #cleanResult is null if the url failed our tests
                    cleanResult= fixedUrl[0]
                    #logger.debug("checking if url %s is sponsor site %s" %(str(fixedUrl), str(targetword)))
                    if myBotUtilities.isTargetWordSite(cleanResult, targetword): #sponsor site
                        cleanResult = ""
                        logger.debug("%s is from the sponsor site" % str(fixedUrl))
                    if not myBotUtilities.goodUrl(cleanResult): #known junk, videos, etc
                        cleanResult = ""
                        logger.debug("%s is known junk" % str(fixedUrl))
                if len(cleanResult) > 0:
                    #if we have this id for this search, we'll grab its id (content specs may have changed)
                    dupReturn = myBotUtilities.isDupUrl(item['search_id'], cleanResult) #returns 0 or the ID of thedup
                    if dupReturn > 0:
                        urlid = dupReturn
                        logger.debug("we already have url %s" %str(fixedUrl))
                    else:
                        try:
                            urlid = myBotUtilities.addUrl(item['search_id'], cleanResult)
                        except:
                            logger.debug("tried but failed to have botUtilites add this url %s" %cleanResult)
                if urlid:
                    logger.debug("attempting to output this url to visitor process queue: %s" %str(cleanResult))
                    # NOTE(review): `item['parseFast'] or True` always yields
                    # True whenever the stored value is falsy (and raises
                    # KeyError when the key is missing); item.get('parseFast',
                    # True) was probably intended — TODO confirm before changing.
                    subitem = dict(
                        delete_existing = item['delete_existing'],
                        search_id = item['search_id'],
                        max_results = item['max_results'],
                        url_id = urlid,
                        parseFast = item['parseFast'] or True
                        )
                    output.put(subitem)
                    logger.debug("visitor process queue fed searchid: %s and urlid: %s " %(str(item['search_id']), str(urlid)))
        if self.debug_run_serially:
            return serial_return
    def visitor_process(self, item, output=None):
        '''
        This process is invoked by the _process_wrapper routine. It runs the visitors (they read the web sites)
        engine queries via the
        routine "visitor.Visitor". It's run as a self-contained process/thread. It's important that
        any changes be thoroughly vetted because if it has problems, it will likely die silently.
        logger.debug ("visitor process started")
        '''
        logger.debug("visitor_process invoked ... working on item: %s" %repr(item))
        return_dict = None; search = None
        #check to see if we're running serially for debugging
        if item.has_key('debug_run_serially'):
            self.debug_run_serially=item.get('debug_run_serially')
        serial_return=[]
        if item.has_key('parseFast'):
            parseFast = item.get('parseFast')
        else:
            parseFast = True
        logger.debug("trying to retrieve search " + str(item['search_id']))
        #make sure we can find the search in the database
        try:
            search = model.Search.get(item['search_id'])
            stop_requested = search.stoprequested
        except:
            logger.error("crawler.visitor_process couldn't load search")
            pass
        if not search:
            logger.error("visitor_process can't find a search")
        else:
            #we *do have a valid search
            logger.debug( "visitor process checking for URLs to visit")
            visitorContent = None; url_record = None
            try:
                url_record = model.URLS.get(item['url_id'])
            except:
                pass
            #visitor.Visitor returns a list object containing model.Content objects
            logger.debug("pinging visitor.Visitor")
            if url_record:
                visitorContent = visitor.Visitor(search, url_record, parseFast)
            if visitorContent:
                logger.debug("**enqueing a visitor.Visitor object")
                for content in visitorContent:
                    logger.debug("crawler.visitor_process viewing content: %s" %repr(content))
                    try:
                        logger.info("we have content for search %s : content: %s" %(str(item['search_id']), str(content.id)))
                        subitem = dict(
                            delete_existing = item['delete_existing'],
                            search_id = item['search_id'],
                            max_results = item['max_results'],
                            content_id = content.id,
                            parseFast = parseFast
                            )
                    except:
                        logger.warn("crawler.visitor_process couldn't parse the input dict")
                    #debug_run_serially is for debugging - allows serial processing
                    if self.debug_run_serially and subitem:
                        serial_return.append(subitem)
                    #for production - passes this on to the scorer
                    else:
                        try:
                            output.put(subitem)
                        except:
                            # NOTE(review): `urlid` is never defined in this
                            # method, so this logging line itself raises
                            # NameError if this except branch is reached.
                            logger.error("scorer not loaded for urlid %s, content %s" %(str(urlid), str(content.id)))
        return None
    def enqueue(self, search_id, max_results=8, delete_existing=False, queue_name="items_to_search", **kwargs):
        """
        Add a job to the crawler.
        Keyword arguments:
        * search_id: Crawl this search record.
        * max_results: Return approximately this many results. Default is to
          let the searcher decide how many to return.
        * delete_existing: Delete existing records for this search record?
          Defaults to False.
        * queue_name: Name of queue to use. Defaults to "items_to_search".
        """
        queue = self.__getattribute__(queue_name)
        item = kwargs
        item['search_id'] = search_id
        item['max_results'] = max_results
        item['delete_existing'] = delete_existing
        logger.debug("enqueued into `%s`: %s" % (queue_name, item))
        queue.put(item)
    def scorer_process(self, item, output=None):
        """
        Score a single item. Looks up the content record named by the item
        and hands it to the shared bot routines for scoring; returns the item
        unchanged so the wrapper can forward it to the finalize queue.
        """
        logger.info( "scorer process started")
        content = None; stop_requested = None; search = None
        try:
            search_id = item['search_id']
            content_id = item['content_id']
        except Exception, e:
            logger.error("bad item passed to crawler.scorer")
        try:
            #these may be null or placeholder objects
            search = model.Search.get(item['search_id'])
        except:
            logger.debug("scorer couldn't retrieve search")
        try:
            content = model.Content.get(content_id) #a search object (db record)
        except:
            logger.info("scorer couldn't retrieve content")
        try:
            stop_requested = search.stoprequested
        except:
            logger.debug("scorer couldn't retrieve stop_requested")
        #TODO: implement the "stop_requested" feature to kill runaway bots
        if content:
            try:
                myBotRoutines.addScoreToContent(content)
                logger.info("adding score to content %s" %content)
            except Exception, e:
                logger.debug( "bot.addScoreToContent has a problem")
                logger.error(traceback.format_exc(e))
                raise
        return item
    def prepare_results(self):
        """
        load the scored results from the output of the scorer process (a Queue object
        called items_to_finalize) into a dict called items completed
        """
        while True:
            item = None
            try:
                item = self.items_to_finalize.get_nowait()
            except Queue.Empty:
                pass # Handle below
            if not item:
                #logger.debug("results_for: no items")
                break
            leaf = None
            # Serialize updates to the shared items_completed dict.
            self.lock.acquire()
            if self.items_completed.has_key(item['search_id']):
                leaf = self.items_completed[item['search_id']]
            else:
                #logger.debug("results_for: creating array for item:" % repr(item))
                leaf = []
            #logger.debug("results_for: appending Search#%s/Content#%s" % (item['search_id'], item['content_id']))
            try:
                leaf.append(item['content_id'])
                self.items_completed[item['search_id']] = leaf
                #logger.debug("leaf (items completed list) is %s" %repr(leaf))
            except:
                pass
            self.lock.release()
    def results_for(self, search_id):
        """
        Calls the prepare_results method to unload the scorer output queue. When
        finished, it removes and returns this search's completed entries
        (an empty list when there are none).
        """
        #logger.debug("results_for called")
        #logger.debug("prepare_results search %s" %str(search_id))
        self.prepare_results()
        #logger.debug("prepare_results returned")
        #logger.debug("self.items_completed: %s" %repr(self.items_completed))
        if self.items_completed.has_key(search_id):
            results = self.items_completed[search_id]
            del self.items_completed[search_id]
            #logger.debug("results_for: returning results for Search#%s: %s" % (str(search_id), repr(results)))
            return results
        else:
            #logger.debug("results_for returned no results for Search#%s" % str(search_id))
            return []
    def ping(self):
        """
        Is the server alive? Yes, always because this is a local object.
        """
        return True
    def stop(self, kind=None):
        """
        This is a generic routine to stop processes. If no "kind" argument is provided, it iterates over the
        top block of logic for searcher, visitor and scorer process types. The syntax is a bit convoluted here
        and noted in the comments
        """
        print ("crawler.Crawler.stop called")
        cb = CrawlerBase()
        logger = cb.logger()
        if kind:
            #this aliases the variable called <kind>s_count e.g., count = searchers_count
            count = self.__dict__['%ss_count' % kind]
            #alias for the input queue associated with this process
            queue = self.queue[kind]['input']
            stopped = False
            #throws a "stop" sentinal into the queue
            for i in range(count):
                try:
                    logger.info("stopping queue %s" %kind)
                    #traceback()
                    queue.put(self.STOP)
                except Exception, e:
                    # Ignore if the queue is already stopped
                    pass
            """
            The next equation assigns an alias for the variable that represents the "kind"
            of process we're going to stop. If we passed in "scorer", the variable workers
            would be set to "scorers".
            """
            workers = self.__dict__['%ss' % kind]
            for worker in workers:
                try:
                    #tell it to stop accepting new work until done with what it's doing
                    worker.join()
                except Exception, e:
                    # Ignore if worker is already dead
                    pass
            #clear the stack of active workers
            while len(workers) != 0:
                logger.debug("clearing worker stack in crawler.stop")
                workers.pop()
            stopped = True
            if stopped:
                try:
                    import logging
                    cb = CrawlerBase()
                    logger = cb.logger()
                    logger.info("stopped %i %s processes" % (count, kind))
                except:
                    # Logging and logger aren't available otherwise if stop() is called from destructor.
                    print ("Crawler: stopped %i %s processes" % (count, kind))
                    pass
        else:
            """
            If this routine is called without a "kind" it recursively calls itself
            to stop each type of active process; this is sort of the main loop for the
            method.
            """
            self.stop("searcher")
            self.stop("visitor")
            self.stop("scorer")
class CrawlerRunner(object):
"""
This is the main entry point for the crawler module.
"""
_instance = None
_instance_lock = threading.Lock()
#grab the logger from the server base
# TODO collapse container_location and concurrency_library to single value
def __init__(self, concurrency_library=None, container_location=None, manager=True, run_as_cron = False, **kwargs):
#note, don't use a logger in __init__ unless its installed in __init__
self._concurrency_library = self._get_concurrency_library(concurrency_library)
self._container_location = self._get_container_location(container_location)
self._manager = self._container_location == "local" or manager
self._lock = threading.Lock()
#run_as_cron will spawn a completely new instance of the crawler, hosted on a different
# xmlrpc server than the mainline web app
crawler_kwargs = dict(
concurrency_library=self._concurrency_library
)
crawler_kwargs.update(kwargs)
self.crawler_kwargs = crawler_kwargs
if self._container_location == "local":
crawler_kwargs.update(run_as_cron = run_as_cron)
self.crawler = Crawler( run_as_cron = run_as_cron, **crawler_kwargs)
elif self._container_location == "remote":
self.crawler = CrawlerClient(run_as_cron = run_as_cron)
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def __del__(self):
"""
The destructor method for a CrawlerRunner object
"""
self._crawler = None
self._lock = None
def run_visitor_serially(self, **kwargs):
self.run_serially = True
self.crawler = Crawler( run_as_cron = run_as_cron, **self.crawler_kwargs)
item = kwargs
item.update(debug_run_serially = True)
visitReturn = self.crawler.visitor_process(kwargs)
if visitReturn:
aFewMore = 3
for j in range(0, min(len(visitReturn), aFewMore-1)):
v = visitReturn[j]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
def run_serially(self, **kwargs):
'''
this is or debugging, and is used the same as enqueue. Instead of directing processing to
the process queues, it runs them serially i.e. the searcher routine hands off to the visitor
routine then the scorer routine. It's much slower, but allows access to the running code.
'''
self.run_serially = True
self.crawler = Crawler(**self.crawler_kwargs)
item = kwargs
item.update(debug_run_serially = True)
searchReturn = self.crawler.searcher_process(item) #a list
if searchReturn:
#try one to see if it works generally
s =searchReturn[0]
s.update(debug_run_serially = True)
s.update(parseFast = kwargs['parseFast'])
visitReturn = self.crawler.visitor_process(s)
if len(visitReturn) >0 :
#visitReturn.update(debug_run_serially = True)
v=visitReturn[0]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
#try a few more
aFewMore = 10
if searchReturn:
for i in range(0, min(len(searchReturn), aFewMore-1)):
s=searchReturn[i]
s.update(debug_run_serially = True)
visitReturn = self.crawler.visitor_process(s)
if visitReturn:
for j in range(0, min(len(visitReturn), aFewMore-1)):
v = visitReturn[j]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
def start(self):
print "%s.start" % self
if self._manager:
"""
The next line signs up this object for garbage collection if (and only if)
the program terminates normally. If it crashes, or is stopped during debugging
there may be an orphaned process. If so, to process may need to be killed manually;
use sudo netstat - tap to look for network connections (host/port specifications
are set in CrawlerBase).
"""
atexit.register(self.stop) #sets up to kill be object upon normal termination
if self._container_location == "remote":
pass
'''
*** We'll start the crawler server from a terminal window - at least for debugging; when
the main (client) program shuts down ungracefully, it doesn't kill the server. This
means we have to kill it manually.
killing_crawler = False
try:
pause_seconds = 0.5
#pat - why are we trying to kill the xmlrpc proxy server?
killCrawler= False
if killCrawler:
while True:
logger.debug("stopping crawler (this is normal)")
logger.debug("for debugging, don't stop the server")
#self.crawler.stop() # Will throw exception when down to end loop
#logger.info("CrawlerRunner.start: killing stale remote crawler...")
killing_crawler = True
time.sleep(pause_seconds)
except Exception, e:
if killing_crawler:
print "killing crawler"
logger.info("CrawlerRunner.start: killed stale remote crawler")
pass # Ignore because service may not be running already
logger.info("CrawlerRunner.start: launching remote xmlrpc server in os")
filename = re.sub("\.pyc$", ".py", __file__, 1)
# TODO safely quote paths
cmd = "'%s' --server --config '%s'" % (filename, commands.configuration)
logger.info(cmd)
#logger.debug("not starting the server from crawler - relying on externally-started one")
os.system("%s &" % cmd)
'''
elif self._container_location == "local":
logger.info("CrawlerRunner.start: launching local crawler")
return self.crawler.start()
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def stop(self):
print "%s.stop" % self
if self._manager:
with self._lock:
if self.crawler:
try:
return self.crawler.stop()
except Exception, e:
print "CrawlerRunner.stop failed: %s" % e
def enqueue(self, **item):
#logger.debug("CrawlerRunner enqueueing item %s into a %s object" %(repr(item), type(self.crawler)))
return self.crawler.enqueue(**item)
def results_for(self, search_id):
#logger.debug("CrawlerRunner.results_for for search %s" % str(search_id))
return self.crawler.results_for(search_id)
def ping(self):
return self.crawler.ping()
@classmethod
def _get_concurrency_library(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.concurrency_library", has_fork() and "processing" or "threading")
@classmethod
def _get_container_location(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.container_location", has_fork() and "remote" or "local")
@classmethod
def get_instance(self):
with self._instance_lock:
if not self._instance:
self._instance = self()
self._instance.start()
return self._instance
class SearcherError(StandardError):
    """Error raised by the crawler's searcher machinery."""
    pass
# NOTE(review): this class shadows the builtin TypeError for the rest of this
# module -- any later "except TypeError" here catches this class, not the
# builtin.  Renaming would be safer but could break external callers.
class TypeError(StandardError):
    """Crawler-specific type error (shadows the builtin TypeError)."""
    pass
class TimeoutError(StandardError):
    """
    Raised when a timeout is reached.
    """
    pass
if __name__ == "__main__":
    #Command-line entry point: run this file directly to start either the
    # xmlrpc client or the crawler process server.
    import logging
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--config", dest="configfile", help="Optional configuration file", metavar="FILE")
    parser.add_option("-c", "--client", action="store_true", dest="client", help="Start client")
    parser.add_option("-s", "--server", action="store_true", dest="server", help="Start server")
    #re-enabled: the client branch below reads options.run_as_cron; with the
    # option commented out, "run_as_cron" was an undefined name (NameError)
    parser.add_option("-n", "--nightly", action="store_true", dest="run_as_cron", default=False, help="Run as cron")
    parser.add_option("-k", "--concurrency", dest="concurrency_library", help="threading OR processing OR multiprocessing", metavar="LIBRARY")
    (options, args) = parser.parse_args()
    #set up two possibilites for logging so contemporaneously-executing real-time
    # and chron files won't step on each other. Simultaneaty shouldn't be a problem with
    # the stuff running as processes because each is on its own thread
    cb = CrawlerBase()
    logger = cb.logger()
    if options.configfile:
        logger.debug("booting configfile")
        commands.boot(options.configfile)
    else:
        logger.debug("booting commands")
        commands.boot()
    if options.client:
        logger.info("Starting client...")
        client = CrawlerClient(run_as_cron = options.run_as_cron)
        #prefer ipdb when installed; fall back to the stdlib debugger
        try:
            from ipdb import set_trace
        except ImportError:
            from pdb import set_trace
        #set_trace()
        # TODO figure out how to make session exit without exceptions
    else:
        logger.info("Starting server from crawler.__main__")
        logger.debug("forcing concurrency library to be multiprocessing")
        server = CrawlerServer(concurrency_library='multiprocessing')
        #server = CrawlerServer(concurrency_library=options.concurrency_library)
        try:
            server.start()
        except KeyboardInterrupt:
            logger.info("Shutting down crawler process server due to keyboard interrupt...")
            server.stop()
            logger.debug("crawler process server shut down successfully")
        logger.info("Stopped server")
#| (non-Python residue from file extraction, commented out so the module parses)
#Subsets and Splits
#No community queries yet
#The top public SQL queries from the community will appear here once available.