text stringlengths 4 1.02M | meta dict |
|---|---|
import datetime
import logging
from flask import (
Blueprint,
request,
current_app,
session,
send_from_directory,
render_template,
jsonify,
)
from werkzeug.exceptions import BadRequest, Forbidden, NotFound, Unauthorized
from geonature.core.gn_commons.models import TAdditionalFields
from sqlalchemy import or_, func, distinct, case
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import joinedload
from geojson import Feature, FeatureCollection
from shapely.geometry import asShape
from geoalchemy2.shape import from_shape, to_shape
from marshmallow import ValidationError
from utils_flask_sqla_geo.utilsgeometry import remove_third_dimension
from geonature.utils.env import DB, db, ROOT_DIR
from pypnusershub.db.models import User, Organisme
from utils_flask_sqla_geo.generic import GenericTableGeo
from geonature.utils import filemanager
from .models import (
TRelevesOccurrence,
TOccurrencesOccurrence,
CorCountingOccurrence,
VReleveOccurrence,
DefaultNomenclaturesValue
)
from .repositories import (
ReleveRepository,
get_query_occtax_filters,
get_query_occtax_order,
)
from .schemas import OccurrenceSchema, ReleveCruvedSchema, ReleveSchema
from .utils import as_dict_with_add_cols
from utils_flask_sqla.response import to_csv_resp, to_json_resp, json_resp
from geonature.utils.errors import GeonatureApiError
from geonature.utils.utilsgeometrytools import export_as_geo_file
from geonature.core.users.models import UserRigth
from geonature.core.gn_permissions import decorators as permissions
from geonature.core.gn_permissions.tools import get_or_fetch_user_cruved
blueprint = Blueprint("pr_occtax", __name__)
log = logging.getLogger(__name__)
@blueprint.route("/releves", methods=["GET"])
@permissions.check_cruved_scope("R", True, module_code="OCCTAX")
@json_resp
def getReleves(info_role):
    """
    Route for map list web interface

    .. :quickref: Occtax;
    """
    repo = ReleveRepository(TRelevesOccurrence)
    query = repo.get_filtered_query(info_role)

    params = request.args
    limit = int(params.get("limit", 100))
    page = int(params.get("offset", 0))

    # Only "asc" is honoured explicitly; any other value falls back to "desc".
    order = params.get("order", "desc").lower()
    orderby = {
        "orderby": params.get("orderby", "date_max").lower(),
        "order": order if order == "asc" else "desc",
    }

    # Apply URL filters, keeping a handle on the unpaginated query so the
    # total row count can be computed without LIMIT/OFFSET.
    query = get_query_occtax_filters(params, TRelevesOccurrence, query)
    unpaginated_query = query
    query = get_query_occtax_order(orderby, TRelevesOccurrence, query)
    releves = query.limit(limit).offset(page * limit).all()
    total = unpaginated_query.count()

    user_cruved = get_or_fetch_user_cruved(
        session=session, id_role=info_role.id_role, module_code="OCCTAX"
    )

    features = []
    for releve in releves:
        feature = releve.get_geofeature(
            fields=[
                "t_occurrences_occtax",
                "t_occurrences_occtax.cor_counting_occtax",
                "t_occurrences_occtax.taxref",
                "observers",
                "digitiser",
                "dataset",
                "t_occurrences_occtax.cor_counting_occtax.medias",
            ]
        )
        # Attach the per-releve CRUVED rights to each GeoJSON feature.
        feature["properties"]["rights"] = releve.get_releve_cruved(info_role, user_cruved)
        features.append(feature)

    return {
        "total": total,
        "total_filtered": len(releves),
        "page": page,
        "limit": limit,
        "items": FeatureCollection(features),
    }
@blueprint.route("/occurrences", methods=["GET"])
@permissions.check_cruved_scope("R", module_code="OCCTAX")
@json_resp
def getOccurrences():
    """
    Get all Occurrences

    .. :quickref: Occtax;

    :returns: `dict<TOccurrencesOccurrence>`
    """
    occurrences = DB.session.query(TOccurrencesOccurrence).all()
    return [occurrence.as_dict() for occurrence in occurrences]
@blueprint.route("/counting/<int:id_counting>", methods=["GET"])
@json_resp
def getOneCounting(id_counting):
    """
    Get one counting record, with its id_counting

    .. :quickref: Occtax;

    :param id_counting: the pr_occtax.cor_counting_occtax PK
    :type id_counting: int
    :returns: a dict representing a counting record
    :rtype: dict<CorCountingOccurrence>
    """
    # Walk up from the counting to its occurrence, then to the parent releve,
    # so the releve PK can be attached to the result.
    query = (
        DB.session.query(CorCountingOccurrence, TRelevesOccurrence.id_releve_occtax)
        .join(
            TOccurrencesOccurrence,
            TOccurrencesOccurrence.id_occurrence_occtax
            == CorCountingOccurrence.id_occurrence_occtax,
        )
        .join(
            TRelevesOccurrence,
            TRelevesOccurrence.id_releve_occtax
            == TOccurrencesOccurrence.id_releve_occtax,
        )
        .filter(CorCountingOccurrence.id_counting_occtax == id_counting)
    )
    try:
        counting_row, id_releve = query.one()
    except NoResultFound:
        return None
    result = counting_row.as_dict()
    result["id_releve"] = id_releve
    return result
@blueprint.route("/releve/<int:id_releve>", methods=["GET"])
@permissions.check_cruved_scope("R", True, module_code="OCCTAX")
def getOneReleve(id_releve, info_role):
    """
    Get one releve

    .. :quickref: Occtax;

    :param id_releve: the id releve from pr_occtax.t_releve_occtax
    :type id_releve: int
    :returns: Return a releve with its attached Cruved
    :rtype: `dict{'releve':<TRelevesOccurrence>, 'cruved': Cruved}`
    """
    releve = DB.session.query(TRelevesOccurrence).get(id_releve)
    if releve is None:
        raise NotFound('The releve "{}" does not exist'.format(id_releve))
    # Raises if the current user is not allowed to read this releve.
    releve = releve.get_releve_if_allowed(info_role)
    user_cruved = get_or_fetch_user_cruved(
        session=session, id_role=info_role.id_role, module_code="OCCTAX"
    )
    payload = {
        "releve": {
            "properties": releve,
            "id": releve.id_releve_occtax,
            "geometry": releve.geom_4326,
        },
        "cruved": releve.get_releve_cruved(info_role, user_cruved),
    }
    return ReleveCruvedSchema().dump(payload)
@blueprint.route("/vreleveocctax", methods=["GET"])
@permissions.check_cruved_scope("R", True, module_code="OCCTAX")
@json_resp
def getViewReleveOccurrence(info_role):
    """
    Deprecated

    Paginated list of VReleveOccurrence rows as a GeoJSON FeatureCollection,
    filtered and ordered by query-string parameters.
    """
    releve_repository = ReleveRepository(VReleveOccurrence)
    q = releve_repository.get_filtered_query(info_role)
    parameters = request.args
    # Total row count before any filtering (reported as "total" below).
    nbResultsWithoutFilter = DB.session.query(VReleveOccurrence).count()
    limit = int(parameters.get("limit")) if parameters.get("limit") else 100
    page = int(parameters.get("offset")) if parameters.get("offset") else 0
    # Filters: every query-string key that matches a view column becomes an
    # equality filter.
    for param in parameters:
        if param in VReleveOccurrence.__table__.columns:
            col = getattr(VReleveOccurrence.__table__.columns, param)
            q = q.filter(col == parameters[param])
    # Order by
    if "orderby" in parameters:
        if parameters.get("orderby") in VReleveOccurrence.__table__.columns:
            orderCol = getattr(
                VReleveOccurrence.__table__.columns, parameters["orderby"]
            )
        # NOTE(review): if "orderby" names something that is not a view
        # column, orderCol is never bound and order_by below raises
        # NameError — confirm this is the intended behavior.
        if "order" in parameters:
            if parameters["order"] == "desc":
                orderCol = orderCol.desc()
        q = q.order_by(orderCol)
    data = q.limit(limit).offset(page * limit).all()
    user = info_role
    user_cruved = get_or_fetch_user_cruved(
        session=session,
        id_role=info_role.id_role,
        module_code="OCCTAX",
        id_application_parent=current_app.config["ID_APPLICATION_GEONATURE"],
    )
    featureCollection = []
    for n in data:
        # Attach the per-releve CRUVED rights to each GeoJSON feature.
        releve_cruved = n.get_releve_cruved(user, user_cruved)
        feature = n.get_geofeature()
        feature["properties"]["rights"] = releve_cruved
        featureCollection.append(feature)
    if data:
        return {
            "items": FeatureCollection(featureCollection),
            "total": nbResultsWithoutFilter,
        }
    return {"message": "not found"}, 404
@blueprint.route("/releve", methods=["POST"])
@permissions.check_cruved_scope("C", True, module_code="OCCTAX")
@json_resp
def insertOrUpdateOneReleve(info_role):
    """
    Route used by the mobile app => deprecated and not used by the web app
    Post one Occtax data (Releve + Occurrence + Counting)

    .. :quickref: Occtax; Post one Occtax data (Releve + Occurrence + Counting)

    **Request JSON object:** a GeoJSON feature whose ``properties`` carry the
    releve fields plus optional ``observers`` (list of id_role) and an
    optional ``t_occurrences_occtax`` list; each occurrence may itself carry
    a ``cor_counting_occtax`` list. Unknown properties are silently dropped.

    :returns: GeoJson<TRelevesOccurrence>
    :raises Forbidden: on creation, when a scope-restricted user is not an
        actor of the target dataset
    """
    releveRepository = ReleveRepository(TRelevesOccurrence)
    data = dict(request.get_json())
    depth = data.pop("depth", None)

    # Extract nested occurrences and observers before building the releve.
    occurrences_occtax = None
    if "t_occurrences_occtax" in data["properties"]:
        occurrences_occtax = data["properties"].pop("t_occurrences_occtax")
    observersList = None
    if "observers" in data["properties"]:
        observersList = data["properties"].pop("observers")

    # Drop payload properties that do not exist on TRelevesOccurrence.
    for att in list(data["properties"]):
        if not getattr(TRelevesOccurrence, att, False):
            data["properties"].pop(att)

    releve = TRelevesOccurrence(**data["properties"])
    # Geometries are stored in 2D WGS84; strip any Z coordinate.
    shape = asShape(data["geometry"])
    two_dimension_geom = remove_third_dimension(shape)
    releve.geom_4326 = from_shape(two_dimension_geom, srid=4326)

    if observersList is not None:
        observers = DB.session.query(User).filter(User.id_role.in_(observersList)).all()
        for o in observers:
            releve.observers.append(o)

    # Bug fix: a payload without "t_occurrences_occtax" used to crash here
    # with a TypeError (iteration over None); treat it as "no occurrence".
    for occ in occurrences_occtax or []:
        cor_counting_occtax = occ.pop("cor_counting_occtax", [])
        # Drop payload properties that do not exist on TOccurrencesOccurrence.
        for att in list(occ):
            if not getattr(TOccurrencesOccurrence, att, False):
                occ.pop(att)
        # pop the id if None, otherwise DB.merge is not OK
        if "id_occurrence_occtax" in occ and occ["id_occurrence_occtax"] is None:
            occ.pop("id_occurrence_occtax")
        occtax = TOccurrencesOccurrence(**occ)
        for cnt in cor_counting_occtax:
            # Drop payload properties that do not exist on CorCountingOccurrence.
            for att in list(cnt):
                if not getattr(CorCountingOccurrence, att, False):
                    cnt.pop(att)
            # pop the id if None, otherwise DB.merge is not OK
            if "id_counting_occtax" in cnt and cnt["id_counting_occtax"] is None:
                cnt.pop("id_counting_occtax")
            occtax.cor_counting_occtax.append(CorCountingOccurrence(**cnt))
        releve.t_occurrences_occtax.append(occtax)

    # Update if the payload carried a primary key, plain insert otherwise.
    if releve.id_releve_occtax:
        user_cruved = get_or_fetch_user_cruved(
            session=session, id_role=info_role.id_role, module_code="OCCTAX"
        )
        # Narrow the user to its "U" (update) scope for the repository check.
        user = UserRigth(
            id_role=info_role.id_role,
            value_filter=user_cruved["U"],
            code_action="U",
            id_organisme=info_role.id_organisme,
        )
        releve = releveRepository.update(releve, user, shape)
    else:
        # set id_digitiser
        releve.id_digitiser = info_role.id_role
        if info_role.value_filter in ("0", "1", "2"):
            # Check if user can add a releve in the current dataset
            if not releve.user_is_in_dataset_actor(info_role):
                raise Forbidden(
                    "User {} has no right in dataset {}".format(
                        info_role.id_role, releve.id_dataset
                    )
                )
        DB.session.add(releve)

    DB.session.commit()
    DB.session.flush()
    return releve.get_geofeature(depth=depth)
def releveHandler(request, *, releve, info_role):
    """Load, authorize and persist a releve from a GeoJSON request.

    Shared by createReleve and updateReleve: deserializes the request body
    into *releve* (a new or pre-loaded TRelevesOccurrence), checks the
    user's rights, then commits.

    :param request: the incoming Flask request with a GeoJSON body
    :param releve: TRelevesOccurrence instance to merge the payload into
    :param info_role: the current user with its CRUVED attributes
    :returns: the persisted TRelevesOccurrence
    :raises BadRequest: when the payload fails schema validation
    :raises Forbidden: on creation, when a scope-restricted user is not an
        actor of the target dataset
    """
    releveSchema = ReleveSchema()
    # Turn the GeoJSON request into a releve: copy the geometry into the
    # properties so the schema can load it as geom_4326.
    json_req = request.get_json()
    json_req["properties"]["geom_4326"] = json_req["geometry"]
    # Load the POSTed data and merge it into the initial releve
    try:
        releve = releveSchema.load(json_req["properties"], instance=releve)
    except ValidationError as error:
        log.exception(error.messages)
        raise BadRequest(error.messages)
    # Update case: check the user's edit rights on the releve
    if releve.id_releve_occtax is not None:
        user_cruved = get_or_fetch_user_cruved(
            session=session, id_role=info_role.id_role, module_code="OCCTAX"
        )
        # Narrow the user to its "U" (update) scope
        user = UserRigth(
            id_role=info_role.id_role,
            value_filter=user_cruved["U"],
            code_action="U",
            id_organisme=info_role.id_organisme,
        )
        # Raises if not allowed; reaching past this line means access is OK
        releve = releve.get_releve_if_allowed(user)
    # if creation
    else:
        # Scopes 0/1/2 are restricted: the user must be an actor of the
        # target dataset to create a releve in it.
        if info_role.value_filter in ("0", "1", "2"):
            allowed = releve.user_is_in_dataset_actor(info_role)
            if not allowed:
                raise Forbidden(
                    "User {} has no right in dataset {}".format(
                        info_role.id_role, releve.id_dataset
                    )
                )
        # set id_digitiser (creation only — updates keep the original one)
        releve.id_digitiser = info_role.id_role
    DB.session.add(releve)
    DB.session.commit()
    DB.session.flush()
    return releve
@blueprint.route("/only/releve", methods=["POST"])
@permissions.check_cruved_scope("C", True, module_code="OCCTAX")
def createReleve(info_role):
    """
    Post one Occtax data (Releve + Occurrence + Counting)

    .. :quickref: Occtax; Post one Occtax data (Releve + Occurrence + Counting)

    Expects a GeoJSON feature whose properties carry the releve fields,
    an optional "t_occurrences_occtax" list (each occurrence with its own
    "cor_counting_occtax" list) and an optional "observers" id list.

    :returns: GeoJson<TRelevesOccurrence>
    """
    # Start from an empty releve; releveHandler does the load/auth/persist.
    new_releve = TRelevesOccurrence()
    saved = releveHandler(request=request, releve=new_releve, info_role=info_role)
    dumped = ReleveSchema().dump(saved)
    return {
        "geometry": dumped.pop("geom_4326", None),
        "properties": dumped,
        "id": dumped["id_releve_occtax"],
    }
@blueprint.route("/only/releve/<int:id_releve>", methods=["POST"])
@permissions.check_cruved_scope("U", True, module_code="OCCTAX")
def updateReleve(id_releve, info_role):
    """
    Post one Occurrence data (Occurrence + Counting) for add to Releve
    """
    # Load the existing releve; releveHandler merges the payload into it.
    existing = DB.session.query(TRelevesOccurrence).get(id_releve)
    if existing is None:
        return {"message": "not found"}, 404
    dumped = ReleveSchema().dump(
        releveHandler(request=request, releve=existing, info_role=info_role)
    )
    return {
        "geometry": dumped.pop("geom_4326", None),
        "properties": dumped,
        "id": dumped["id_releve_occtax"],
    }
def occurrenceHandler(request, *, occurrence, info_role):
    """Load, authorize and persist an occurrence from the request JSON.

    Shared by createOccurrence and updateOccurrence.

    :param occurrence: TOccurrencesOccurrence to merge the payload into;
        its id_releve_occtax must already be set
    :param info_role: the current user with its CRUVED attributes
    :returns: the persisted TOccurrencesOccurrence
    :raises BadRequest: when the payload fails schema validation
    """
    # 404 when the parent releve does not exist.
    releve = TRelevesOccurrence.query.get_or_404(occurrence.id_releve_occtax)
    # Update case: check the user's edit rights on the parent releve
    if occurrence.id_occurrence_occtax is not None:
        user_cruved = get_or_fetch_user_cruved(
            session=session, id_role=info_role.id_role, module_code="OCCTAX"
        )
        # Narrow info_role to its "U" (update) scope
        info_role = UserRigth(
            id_role=info_role.id_role,
            value_filter=user_cruved["U"],
            code_action="U",
            id_organisme=info_role.id_organisme,
        )
        # Raises if not allowed; reaching past this line means access is OK
        releve = releve.get_releve_if_allowed(info_role)
    occurrenceSchema = OccurrenceSchema()
    try:
        occurrence = occurrenceSchema.load(request.get_json(), instance=occurrence)
    except ValidationError as error:
        log.exception(error.messages)
        raise BadRequest(error.messages)
    DB.session.add(occurrence)
    DB.session.commit()
    return occurrence
@blueprint.route("/releve/<int:id_releve>/occurrence", methods=["POST"])
@permissions.check_cruved_scope("C", True, module_code="OCCTAX")
def createOccurrence(id_releve, info_role):
    """
    Post one Occurrence data (Occurrence + Counting) for add to Releve
    """
    # Fresh occurrence attached to the target releve.
    new_occurrence = TOccurrencesOccurrence()
    new_occurrence.id_releve_occtax = id_releve
    saved = occurrenceHandler(
        request=request, occurrence=new_occurrence, info_role=info_role
    )
    return OccurrenceSchema().dump(saved)
@blueprint.route("/occurrence/<int:id_occurrence>", methods=["POST"])
@permissions.check_cruved_scope("U", True, module_code="OCCTAX")
def updateOccurrence(id_occurrence, info_role):
    """
    Post one Occurrence data (Occurrence + Counting) for add to Releve
    """
    # 404 when the occurrence does not exist.
    existing = TOccurrencesOccurrence.query.get_or_404(id_occurrence)
    saved = occurrenceHandler(
        request=request, occurrence=existing, info_role=info_role
    )
    return OccurrenceSchema().dump(saved)
@blueprint.route("/releve/<int:id_releve>", methods=["DELETE"])
@permissions.check_cruved_scope("D", True, module_code="OCCTAX")
@json_resp
def deleteOneReleve(id_releve, info_role):
    """Delete one releve and its associated occurrences and counting

    .. :quickref: Occtax;

    :params int id_releve: ID of the releve to delete
    """
    # The repository enforces the user's delete scope before removing.
    ReleveRepository(TRelevesOccurrence).delete(id_releve, info_role)
    return {"message": "delete with success"}, 200
@blueprint.route("/occurrence/<int:id_occ>", methods=["DELETE"])
@permissions.check_cruved_scope("D", module_code="OCCTAX")
def deleteOneOccurence(id_occ):
    """Delete one occurrence and associated counting

    .. :quickref: Occtax;

    :params int id_occ: ID of the occurrence to delete
    """
    occurrence = TOccurrencesOccurrence.query.get_or_404(id_occ)
    # TODO: check occ ownership!
    DB.session.delete(occurrence)
    DB.session.commit()
    return '', 204
@blueprint.route("/releve/occurrence_counting/<int:id_count>", methods=["DELETE"])
@permissions.check_cruved_scope("D", module_code="OCCTAX")
def deleteOneOccurenceCounting(id_count):
    """Delete one counting

    .. :quickref: Occtax;

    :params int id_count: ID of the counting to delete
    """
    ccc = CorCountingOccurrence.query.get_or_404(id_count)
    # TODO check ccc ownership!
    # Bug fix: the previous code called DB.session.delete(data) with an
    # undefined name ``data``, raising a NameError instead of deleting the
    # fetched counting.
    DB.session.delete(ccc)
    DB.session.commit()
    return '', 204
@blueprint.route("/defaultNomenclatures", methods=["GET"])
def getDefaultNomenclatures():
    """Get default nomenclatures define in occtax module

    .. :quickref: Occtax;

    :returns: dict: {'MODULE_CODE': 'ID_NOMENCLATURE'}
    """
    organism = request.args.get("organism")
    regne = request.args.get("regne", '0')
    group2_inpn = request.args.get("group2_inpn", '0')
    requested_types = request.args.getlist("id_type")

    # One row per nomenclature type: (mnemonique, default id) computed by
    # the pr_occtax.get_default_nomenclature_value SQL function.
    query = db.session.query(
        distinct(DefaultNomenclaturesValue.mnemonique_type),
        func.pr_occtax.get_default_nomenclature_value(
            DefaultNomenclaturesValue.mnemonique_type, organism, regne, group2_inpn
        ),
    )
    if requested_types:
        query = query.filter(
            DefaultNomenclaturesValue.mnemonique_type.in_(tuple(requested_types))
        )

    rows = query.all()
    if not rows:
        raise NotFound
    return jsonify(dict(rows))
@blueprint.route("/export", methods=["GET"])
@permissions.check_cruved_scope(
    "E",
    True,
    module_code="OCCTAX",
)
def export(info_role):
    """Export data from pr_occtax.v_export_occtax view (parameter)

    .. :quickref: Occtax; Export data from pr_occtax.v_export_occtax

    :query str format: format of the export ('csv', 'geojson', 'shapefile', 'gpkg')
    """
    # Export configuration: view name, geometry column, column list, SRID.
    export_view_name = blueprint.config["export_view_name"]
    export_geom_column = blueprint.config["export_geom_columns_name"]
    export_columns = blueprint.config["export_columns"]
    export_srid = blueprint.config["export_srid"]
    export_format = request.args.get("format", "geojson")
    export_col_name_additional_data = blueprint.config["export_col_name_additional_data"]

    export_view = GenericTableGeo(
        tableName=export_view_name,
        schemaName="pr_occtax",
        engine=DB.engine,
        geometry_field=export_geom_column,
        srid=export_srid,
    )
    # Use every view column when no explicit column list is configured.
    columns = (
        export_columns
        if len(export_columns) > 0
        else [db_col.key for db_col in export_view.db_cols]
    )

    # Scope the query to the user's CRUVED, then apply the URL filters.
    releve_repository = ReleveRepository(export_view)
    q = releve_repository.get_filtered_query(info_role, from_generic_table=True)
    q = get_query_occtax_filters(
        request.args,
        export_view,
        q,
        from_generic_table=True,
        obs_txt_column=blueprint.config["export_observer_txt_column"],
    )
    if current_app.config["OCCTAX"]["ADD_MEDIA_IN_EXPORT"]:
        q, columns = releve_repository.add_media_in_export(q, columns)
    data = q.all()

    file_name = datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%S")
    file_name = filemanager.removeDisallowedFilenameChars(file_name)

    # Additional (dynamic) columns: the globally exportable fields plus, when
    # exporting a single dataset, the dataset-specific ones.
    query_add_fields = (
        DB.session.query(TAdditionalFields)
        .filter(TAdditionalFields.modules.any(module_code="OCCTAX"))
        .filter(TAdditionalFields.exportable == True)
    )
    global_add_fields = query_add_fields.filter(~TAdditionalFields.datasets.any()).all()
    if "id_dataset" in request.args:
        dataset_add_fields = query_add_fields.filter(
            TAdditionalFields.datasets.any(id_dataset=request.args["id_dataset"])
        ).all()
        global_add_fields = [*global_add_fields, *dataset_add_fields]
    additional_col_names = [field.field_name for field in global_add_fields]

    if export_format == "csv":
        # Put the flattened additional columns, then the raw additional-data
        # column, at the end of the header.
        # Robustness fix: the removal is now guarded — a custom
        # ``export_columns`` configuration may not include the
        # additional-data column, and an unconditional ``list.remove``
        # raised ValueError in that case.
        if export_col_name_additional_data in columns:
            columns.remove(export_col_name_additional_data)
            columns = columns + additional_col_names
            columns.append(export_col_name_additional_data)
        else:
            columns = columns + additional_col_names
        if additional_col_names:
            serialize_result = [
                as_dict_with_add_cols(
                    export_view, row, export_col_name_additional_data, additional_col_names
                )
                for row in data
            ]
        else:
            serialize_result = [export_view.as_dict(row) for row in data]
        return to_csv_resp(file_name, serialize_result, columns, ";")
    elif export_format == "geojson":
        if additional_col_names:
            features = []
            for row in data:
                properties = as_dict_with_add_cols(
                    export_view, row, export_col_name_additional_data, additional_col_names
                )
                features.append(
                    Feature(
                        properties=properties,
                        geometry=to_shape(getattr(row, export_geom_column)),
                    )
                )
            serialize_result = FeatureCollection(features)
        else:
            serialize_result = FeatureCollection(
                [export_view.as_geofeature(d, fields=export_columns) for d in data]
            )
        return to_json_resp(
            serialize_result, as_file=True, filename=file_name, indent=4, extension="geojson"
        )
    else:
        # Shapefile / GPKG: restrict to the configured columns and delegate
        # the file generation.
        db_cols = [
            db_col for db_col in export_view.db_cols if db_col.key in export_columns
        ]
        dir_name, file_name = export_as_geo_file(
            export_format=export_format,
            export_view=export_view,
            db_cols=db_cols,
            geojson_col=None,
            data=data,
            file_name=file_name,
        )
        # Fix: removed a duplicated, unused recomputation of ``db_cols``
        # that followed the export call in the original code.
        return send_from_directory(dir_name, file_name, as_attachment=True)
| {
"content_hash": "d0c538cdc18476976420b838cc0e8688",
"timestamp": "",
"source": "github",
"line_count": 775,
"max_line_length": 569,
"avg_line_length": 35.38193548387097,
"alnum_prop": 0.6381240654972467,
"repo_name": "PnEcrins/GeoNature",
"id": "d3925b57101e22f07bceb18f63166c736f29efd5",
"size": "27437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/occtax/backend/occtax/blueprint.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1931"
},
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "CSS",
"bytes": "763718"
},
{
"name": "HTML",
"bytes": "651"
},
{
"name": "JavaScript",
"bytes": "16182773"
},
{
"name": "PHP",
"bytes": "4058658"
},
{
"name": "PLpgSQL",
"bytes": "893372"
},
{
"name": "Shell",
"bytes": "33147"
}
],
"symlink_target": ""
} |
import os
from collections import namedtuple
from celery.signals import worker_process_init
from scrapi import settings
from scrapi.processing.base import BaseProcessor
# ``Document`` pairs a harvested raw document with its normalized form;
# ``DocumentTuple`` is kept as an alias to the same type.
DocumentTuple = Document = namedtuple('Document', ['raw', 'normalized'])

# Build ``__all__`` from the sibling modules of this package (every .py file
# except __init__ and base) so that ``from . import *`` below imports each
# processor module, which registers its BaseProcessor subclass(es).
__all__ = []
for mod in os.listdir(os.path.dirname(__file__)):
    root, ext = os.path.splitext(mod)
    if ext == '.py' and root not in ['__init__', 'base']:
        __all__.append(root)

from . import *
def get_processor(processor_name):
    """Return a new instance of the BaseProcessor subclass whose NAME matches."""
    match = next(
        (cls for cls in BaseProcessor.__subclasses__() if cls.NAME == processor_name),
        None,
    )
    if match is None:
        raise NotImplementedError('No Processor {}'.format(processor_name))
    return match()
def process_normalized(raw_doc, normalized, kwargs):
    """Run every configured normalized-document processor on the pair.

    *kwargs* is a dictionary of per-processor keyword arguments, keyed by
    processor name — it exists so that check-archive runs can mark certain
    files as safe to overwrite.
    """
    assert (raw_doc and normalized), 'Raw and normalized documents must be provided to process_normalized'
    for name in settings.NORMALIZED_PROCESSING:
        processor_kwargs = kwargs.get(name, {})
        get_processor(name).process_normalized(raw_doc, normalized, **processor_kwargs)
def process_raw(raw_doc, kwargs):
    """Run every configured raw-document processor, with per-processor kwargs."""
    assert raw_doc, 'A raw document must be provided to process_raw'
    for name in settings.RAW_PROCESSING:
        processor_kwargs = kwargs.get(name, {})
        get_processor(name).process_raw(raw_doc, **processor_kwargs)
# Response model used to record harvester HTTP responses; which backend
# provides it is selected by the RESPONSE_PROCESSOR setting.
HarvesterResponse = get_processor(settings.RESPONSE_PROCESSOR).HarvesterResponseModel

# Instantiate every configured processor exactly once (the set() removes
# duplicates across the three settings lists).
all_processors = list(map(get_processor, list(set(
    settings.NORMALIZED_PROCESSING +
    settings.RAW_PROCESSING +
    [settings.RESPONSE_PROCESSOR]
))))

# Run each manager's setup now, and register celery_setup to run again in
# every new Celery worker process — presumably so per-process resources
# (e.g. connections) are re-created after fork; confirm with the managers.
for processor in all_processors:
    processor.manager.setup()
    worker_process_init.connect(processor.manager.celery_setup)
| {
"content_hash": "4e7d2da87f08b388f13c89060aecd925",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 106,
"avg_line_length": 32.07142857142857,
"alnum_prop": 0.6965478841870824,
"repo_name": "CenterForOpenScience/scrapi",
"id": "27d7e14de0dd48d01a79331669da90d9c766ec8b",
"size": "1796",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "scrapi/processing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "964"
},
{
"name": "HTML",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "460450"
}
],
"symlink_target": ""
} |
import functools
import logging
from google.appengine.api import memcache
def cached(time=1200):
    """
    Decorator that caches the result of a method for the specified time in seconds.

    Use it as:
    @cached(time=1200)
    def functionToCache(arguments):
        ...

    The cache key is built from the function name and the repr of its
    arguments, so arguments must have a stable repr for lookups to hit.
    NOTE(review): memcache keys have a length limit — very long argument
    lists may exceed it; confirm against the App Engine memcache docs.
    """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            key = '%s%s%s' % (function.__name__, str(args), str(kwargs))
            value = memcache.get(key)
            logging.debug('Cache lookup for %s, found? %s', key, value is not None)
            # Bug fix: test for a cache *miss* (None) rather than falsiness,
            # so falsy results (0, '', False, []) are cached too instead of
            # being recomputed on every call. A function that legitimately
            # returns None is still recomputed each time (memcache cannot
            # distinguish a stored None from a miss).
            if value is None:
                value = function(*args, **kwargs)
                memcache.set(key, value, time=time)
            return value
        return wrapper
    return decorator
| {
"content_hash": "b022a9886b36532903e26a6250f0c150",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 26.51851851851852,
"alnum_prop": 0.63268156424581,
"repo_name": "voidabhi/python-scripts",
"id": "16d05b94def12e8383680db1ce91c08087bc1335",
"size": "717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "decorator-memcache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "446"
},
{
"name": "Go",
"bytes": "330"
},
{
"name": "JavaScript",
"bytes": "1728"
},
{
"name": "Python",
"bytes": "282732"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
} |
from ..args import ConversionConfigArg
from ..conversion import Converter
from kao_command.args import Arg
class SetPreset:
    """Command that stores a preset name into a conversion config file."""
    description = "Set the preset in a config file"
    args = [
        Arg('preset', action='store', help='Preset to store'),
        ConversionConfigArg(),
    ]

    def run(self, *, preset, config):
        """Assign *preset* to the config and persist it."""
        config.preset = preset
        config.save()
"content_hash": "f2a1059111a277166d5480688315764b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 32.86666666666667,
"alnum_prop": 0.6267748478701826,
"repo_name": "cloew/Kaovert",
"id": "e64d582b0c289947deaf24e0a438c401d71d2ad2",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaovert/commands/set_preset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27917"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import pygments
import re
from datetime import datetime
from pkg_resources import resource_filename
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles, get_style_by_name
from trac.core import *
from trac.config import ConfigSection, ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo, lazy
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web.api import IRequestHandler, HTTPNotFound
from trac.web.chrome import ITemplateProvider, add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
"""HTML renderer for syntax highlighting based on Pygments."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer,
IPreferencePanelProvider, IRequestHandler,
ITemplateProvider)
is_valid_default_handler = False
pygments_lexer_options = ConfigSection('pygments-lexer',
"""Configure Pygments [%(url)s lexer] options.
For example, to set the
[%(url)s#lexers-for-php-and-related-languages PhpLexer] options
`startinline` and `funcnamehighlighting`:
{{{#!ini
[pygments-lexer]
php.startinline = True
php.funcnamehighlighting = True
}}}
The lexer name is derived from the class name, with `Lexer` stripped
from the end. The lexer //short names// can also be used in place
of the lexer name.
""" % {'url': 'http://pygments.org/docs/lexers/'})
default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
"""The default style to use for Pygments syntax highlighting.""")
pygments_modes = ListOption('mimeviewer', 'pygments_modes',
'', doc=
"""List of additional MIME types known by Pygments.
For each, a tuple `mimetype:mode:quality` has to be
specified, where `mimetype` is the MIME type,
`mode` is the corresponding Pygments mode to be used
for the conversion and `quality` is the quality ratio
associated to this conversion. That can also be used
to override the default quality ratio used by the
Pygments render.""")
expand_tabs = True
returns_source = True
QUALITY_RATIO = 7
EXAMPLE = """<!DOCTYPE html>
<html lang="en">
<head>
<title>Hello, world!</title>
<script>
jQuery(document).ready(function($) {
$("h1").fadeIn("slow");
});
</script>
</head>
<body>
<h1>Hello, world!</h1>
</body>
</html>"""
# ISystemInfoProvider methods
def get_system_info(self):
version = get_pkginfo(pygments).get('version')
# if installed from source, fallback to the hardcoded version info
if not version and hasattr(pygments, '__version__'):
version = pygments.__version__
yield 'Pygments', version
# IHTMLPreviewRenderer methods
def get_extra_mimetypes(self):
for _, aliases, _, mimetypes in get_all_lexers():
for mimetype in mimetypes:
yield mimetype, aliases
def get_quality_ratio(self, mimetype):
# Extend default MIME type to mode mappings with configured ones
try:
return self._types[mimetype][1]
except KeyError:
return 0
    def render(self, context, mimetype, content, filename=None, rev=None):
        """Render *content* as syntax-highlighted HTML.

        Registers the user's preferred Pygments stylesheet on the request,
        maps the MIME type (parameters stripped) to a lexer language, and
        generates the markup. Any lookup failure is reported as a generic
        "no lexer found" error.
        """
        req = context.req
        style = req.session.get('pygments_style', self.default_style)
        add_stylesheet(req, '/pygments/%s.css' % style)
        try:
            if len(content) > 0:
                # Drop MIME parameters such as "; charset=..." before lookup.
                mimetype = mimetype.split(';', 1)[0]
                language = self._types[mimetype][0]
                return self._generate(language, content, context)
            # NOTE(review): empty content falls through and returns None —
            # presumably handled by the caller; confirm.
        except (KeyError, ValueError):
            raise Exception("No Pygments lexer found for mime-type '%s'."
                            % mimetype)
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
    """IPreferencePanelProvider: advertise the syntax-highlighting panel."""
    panel = ('pygments', _('Syntax Highlighting'))
    yield panel
def render_preference_panel(self, req, panel):
    """Render (and on POST, save) the Pygments style preference panel."""
    styles = list(get_all_styles())
    if req.method == 'POST':
        style = req.args.get('style')
        # Only persist known style names.
        if style and style in styles:
            req.session['pygments_style'] = style
            add_notice(req, _("Your preferences have been saved."))
        # Redirect after POST regardless of validity.
        req.redirect(req.href.prefs(panel or None))
    # Register one alternate stylesheet per style so the preview can switch.
    for style in sorted(styles):
        add_stylesheet(req, '/pygments/%s.css' % style, title=style.title())
    # Highlighted sample document shown as a live preview.
    output = self._generate('html', self.EXAMPLE)
    return 'prefs_pygments.html', {
        'output': output,
        'selection': req.session.get('pygments_style', self.default_style),
        'styles': styles
    }
# IRequestHandler methods
def match_request(self, req):
    """IRequestHandler: claim requests for /pygments/<style>.css.

    On a match the style name is stored in ``req.args['style']`` for
    ``process_request``.
    """
    matched = re.match(r'/pygments/(\w+)\.css', req.path_info)
    if matched is None:
        return None
    req.args['style'] = matched.group(1)
    return True
def process_request(self, req):
    """Serve the CSS for the requested Pygments style.

    Sends 304 when the client's cached copy is current (based on the
    mtime of the style's source module), otherwise 200 with the CSS.
    """
    style = req.args['style']
    try:
        style_cls = get_style_by_name(style)
    except ValueError as e:
        raise HTTPNotFound(e)
    # Locate the .py file defining the style to derive a Last-Modified date.
    parts = style_cls.__module__.split('.')
    filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
    mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
    last_modified = http_date(mtime)
    if last_modified == req.get_header('If-Modified-Since'):
        req.send_response(304)
        req.end_headers()
        return
    formatter = HtmlFormatter(style=style_cls)
    # Style rules for both the <pre>-based and table-based code views.
    content = u'\n\n'.join([
        formatter.get_style_defs('div.code pre'),
        formatter.get_style_defs('table.code td')
    ]).encode('utf-8')
    req.send_response(200)
    req.send_header('Content-Type', 'text/css; charset=utf-8')
    req.send_header('Last-Modified', last_modified)
    req.send_header('Content-Length', len(content))
    req.write(content)
# ITemplateProvider methods
def get_htdocs_dirs(self):
    """ITemplateProvider: this component ships no static resources."""
    return list()
def get_templates_dirs(self):
    """ITemplateProvider: directory containing the preference-panel template."""
    return [resource_filename('trac.mimeview', 'templates')]
# Internal methods
@lazy
def _lexer_alias_name_map(self):
    """Map every Pygments lexer alias to its canonical name.

    The canonical name is the lexer's first alias when it has any
    (otherwise the lexer defines no aliases and contributes nothing).
    Computed once and cached by ``@lazy``.
    """
    return {
        alias: (aliases[0] if aliases else lexer_name)
        for lexer_name, aliases, _, _ in get_all_lexers()
        for alias in aliases
    }
@lazy
def _lexer_options(self):
    """Per-lexer option dicts parsed from configuration.

    Config keys look like "<lexer-or-alias>.<option>"; malformed keys
    (no single dot) are ignored. Aliases are resolved to canonical
    lexer names, so options set via different aliases merge.
    """
    lexer_options = {}
    # NOTE(review): self.pygments_lexer_options is defined elsewhere in
    # this class — presumably a ConfigSection; confirm.
    for key, lexer_option_value in self.pygments_lexer_options.options():
        try:
            lexer_name_or_alias, lexer_option_name = key.split('.')
        except ValueError:
            pass
        else:
            lexer_name = self._lexer_alias_to_name(lexer_name_or_alias)
            lexer_option = {lexer_option_name: lexer_option_value}
            lexer_options.setdefault(lexer_name, {}).update(lexer_option)
    return lexer_options
@lazy
def _types(self):
    """Map MIME type -> (canonical lexer name, quality ratio).

    Built from all Pygments lexers, then patched for old Pygments
    versions and merged with the modes configured in trac.ini.
    """
    types = {}
    for lexer_name, aliases, _, mimetypes in get_all_lexers():
        name = aliases[0] if aliases else lexer_name
        for mimetype in mimetypes:
            types[mimetype] = (name, self.QUALITY_RATIO)
    # Pygments < 1.4 doesn't know application/javascript
    if 'application/javascript' not in types:
        # Bug fix: look the alias up in the local dict being built.
        # The original read `self._types`, which re-enters this lazy
        # property while it is still computing and recurses forever.
        js_entry = types.get('text/javascript')
        if js_entry:
            types['application/javascript'] = js_entry
    # Configured overrides take precedence over Pygments defaults.
    types.update(Mimeview(self.env).configured_modes_mapping('pygments'))
    return types
def _generate(self, language, content, context=None):
    """Tokenize `content` with the lexer for `language` and build a stream.

    Lexer options come from (lowest to highest precedence) the default
    ``stripnl=False``, the configured per-lexer options, and any
    'lexer_options' hint carried by the rendering context.
    """
    lexer_name = self._lexer_alias_to_name(language)
    lexer_options = {'stripnl': False}
    lexer_options.update(self._lexer_options.get(lexer_name, {}))
    if context:
        lexer_options.update(context.get_hint('lexer_options', {}))
    lexer = get_lexer_by_name(lexer_name, **lexer_options)
    return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
def _lexer_alias_to_name(self, alias):
return self._lexer_alias_name_map.get(alias, alias)
class GenshiHtmlFormatter(HtmlFormatter):
    """A Pygments formatter subclass that generates a Python stream instead
    of writing markup as strings to an output file.
    """

    def _chunk(self, tokens):
        """Groups tokens with the same CSS class in the token stream
        and yields them one by one, along with the CSS class, with the
        values chunked together."""
        last_class = None
        text = []
        for ttype, value in tokens:
            c = self._get_css_class(ttype)
            # 'n' (plain name) is treated as unstyled text.
            if c == 'n':
                c = ''
            # Same class as the running chunk: accumulate and continue.
            if c == last_class:
                text.append(value)
                continue
            # If no value, leave the old <span> open.
            if value:
                # Flush the previous chunk (first flush yields the
                # initial (None, '') pair) and start a new one.
                yield last_class, u''.join(text)
                text = [value]
                last_class = c
        # Flush whatever is left at end of stream.
        if text:
            yield last_class, u''.join(text)

    def generate(self, tokens):
        """Turn a Pygments token stream into a Genshi event Stream."""
        # Genshi events carry a (filename, line, col) position; unknown here.
        pos = None, -1, -1
        span = QName('span')
        class_ = QName('class')

        def _generate():
            for c, text in self._chunk(tokens):
                if c:
                    # Styled chunk: wrap in <span class="...">.
                    attrs = Attrs([(class_, c)])
                    yield START, (span, attrs), pos
                    yield TEXT, text, pos
                    yield END, span, pos
                else:
                    # Unstyled chunk: bare text event.
                    yield TEXT, text, pos
        return Stream(_generate())
| {
"content_hash": "0dbc7bd73897824d7a78007f8c5a6832",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 80,
"avg_line_length": 34.50342465753425,
"alnum_prop": 0.5996029776674938,
"repo_name": "pkdevbox/trac",
"id": "0a318342a0ad3b9e930d7b151df9d58ef9d7f229",
"size": "10474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trac/mimeview/pygments.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3268"
},
{
"name": "CSS",
"bytes": "71129"
},
{
"name": "HTML",
"bytes": "356464"
},
{
"name": "JavaScript",
"bytes": "85641"
},
{
"name": "Makefile",
"bytes": "18957"
},
{
"name": "Python",
"bytes": "3830196"
},
{
"name": "Shell",
"bytes": "9573"
}
],
"symlink_target": ""
} |
from south.db import db
from django.db import models
from cms.models import *
class Migration:
    """South schema migration: add the M2M join table for
    GlobalPagePermission.sites."""

    def forwards(self, orm):
        # Adding ManyToManyField 'GlobalPagePermission.sites'
        db.create_table('cms_globalpagepermission_sites', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('globalpagepermission', models.ForeignKey(orm.GlobalPagePermission, null=False)),
            ('site', models.ForeignKey(orm['sites.Site'], null=False))
        ))

    def backwards(self, orm):
        # Dropping ManyToManyField 'GlobalPagePermission.sites'
        db.delete_table('cms_globalpagepermission_sites')

    # Frozen ORM model definitions South uses to construct the `orm`
    # object passed to forwards()/backwards(). Auto-generated; must stay
    # in sync with the schema at the time this migration was created.
    models = {
        'sites.site': {
            'Meta': {'db_table': "'django_site'"},
            'domain': ('models.CharField', [], {'max_length': '100'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'name': ('models.CharField', [], {'max_length': '50'})
        },
        'cms.pageuser': {
            'created_by': ('models.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['auth.User']"}),
            'user_ptr': ('models.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.publiccmsplugin': {
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 2, 192128)'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
            'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'to': "orm['cms.PublicPage']"}),
            'parent': ('models.ForeignKey', [], {'to': "orm['cms.PublicCMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'auth.user': {
            'date_joined': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 2, 672263)'}),
            'email': ('models.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('models.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('models.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'is_active': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 2, 672114)'}),
            'last_name': ('models.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('models.CharField', [], {'max_length': '128'}),
            'user_permissions': ('models.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('models.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'cms.publictitle': {
            'Meta': {'unique_together': "(('language', 'page'),)"},
            'application_urls': ('models.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 3, 921248)'}),
            'has_url_overwrite': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
            'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'menu_title': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('models.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_keywords': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.PublicPage']"}),
            'page_title': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('models.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'redirect': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('models.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('models.CharField', [], {'max_length': '255'})
        },
        'cms.globalpagepermission': {
            'can_add': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_change': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_change_permissions': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'can_change_softroot': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'can_delete': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_moderate': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_move_page': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_publish': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_recover_page': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'group': ('models.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'sites': ('models.ManyToManyField', [], {'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('models.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.publicpage': {
            'changed_by': ('models.CharField', [], {'max_length': '70'}),
            'created_by': ('models.CharField', [], {'max_length': '70'}),
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 1, 786313)'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'in_navigation': ('models.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
            'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('models.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.PublicPage']"}),
            'publication_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'reverse_id': ('models.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'template': ('models.CharField', [], {'max_length': '100'}),
            'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagepermission': {
            'can_add': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_change': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_change_permissions': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'can_change_softroot': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'can_delete': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_moderate': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_move_page': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'can_publish': ('models.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'grant_on': ('models.IntegerField', [], {'default': '5'}),
            'group': ('models.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('models.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.cmsplugin': {
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 0, 511048)'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'inherited_public': ('models.OneToOneField', [], {'blank': 'True', 'related_name': "'inherited_origin'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.PublicCMSPlugin']"}),
            'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
            'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('models.CharField', [], {'max_length': '100'}),
            'content_type': ('models.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'name': ('models.CharField', [], {'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('models.CharField', [], {'max_length': '100'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'model': ('models.CharField', [], {'max_length': '100'}),
            'name': ('models.CharField', [], {'max_length': '100'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)"},
            'application_urls': ('models.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 50, 2, 559416)'}),
            'has_url_overwrite': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
            'menu_title': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('models.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_keywords': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('models.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'public': ('models.OneToOneField', [], {'blank': 'True', 'related_name': "'origin'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.PublicTitle']"}),
            'redirect': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('models.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('models.CharField', [], {'max_length': '255'})
        },
        'cms.page': {
            'changed_by': ('models.CharField', [], {'max_length': '70'}),
            'created_by': ('models.CharField', [], {'max_length': '70'}),
            'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 30, 9, 49, 58, 942360)'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'in_navigation': ('models.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
            'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('models.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'public': ('models.OneToOneField', [], {'blank': 'True', 'related_name': "'origin'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.PublicPage']"}),
            'publication_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'reverse_id': ('models.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'template': ('models.CharField', [], {'max_length': '100'}),
            'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagemoderatorstate': {
            'action': ('models.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('models.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'message': ('models.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('models.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'cms.pagemoderator': {
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'moderate_children': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'moderate_descendants': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'moderate_page': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('models.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'auth.group': {
            'id': ('models.AutoField', [], {'primary_key': 'True', 'blank': 'True'}),
            'name': ('models.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('models.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'cms.pageusergroup': {
            'created_by': ('models.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['auth.User']"}),
            'group_ptr': ('models.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['cms']
| {
"content_hash": "bce46f291dd183985449d3e35dfdb403",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 188,
"avg_line_length": 78.24034334763948,
"alnum_prop": 0.5052111903455841,
"repo_name": "jjanssen/django-cms-patches",
"id": "5dda3a68bb7ed45ebb27dd4107cdddf575aeb612",
"size": "18231",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cms/migrations/0018_site_permissions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "481850"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Perl",
"bytes": "37998"
},
{
"name": "Python",
"bytes": "1161924"
},
{
"name": "Shell",
"bytes": "239"
}
],
"symlink_target": ""
} |
"""Program reporting counts for the Home Page
Business rules:
reporting only if:
- LOP indicator in closed program with lop_target
- Midline/Endline/Event with set target and 1+ data
- Time aware indicator with completed target period and set target and 1+ data"""
import unittest
import datetime
from factories import (
workflow_models as w_factories,
indicators_models as i_factories
)
from indicators.models import Indicator, PeriodicTarget
from indicators.queries import ProgramWithMetrics
from safedelete.models import HARD_DELETE
from django import test
class ReportingIndicatorBase(test.TestCase):
    """Fixture helpers shared by the Home Page 'reporting' count tests."""

    # Frequencies whose target periods are derived from calendar dates.
    TIME_AWARE_FREQUENCIES = [
        Indicator.ANNUAL,
        Indicator.SEMI_ANNUAL,
        Indicator.TRI_ANNUAL,
        Indicator.QUARTERLY,
        Indicator.MONTHLY
    ]

    # For each frequency, a function mapping a period start date to that
    # period's end date (the day before the next period starts).
    # NOTE(review): ANNUAL returns the same day one year later without
    # subtracting a day, unlike the other frequencies — confirm intended.
    DATE_FUNCS = {
        Indicator.ANNUAL: lambda x: datetime.date(x.year+1, x.month, x.day),
        Indicator.SEMI_ANNUAL: lambda x: datetime.date(
            x.year + 1 if x.month > 6 else x.year,
            x.month - 6 if x.month > 6 else x.month + 6,
            1
        ) - datetime.timedelta(days=1),
        Indicator.TRI_ANNUAL: lambda x: datetime.date(
            x.year + 1 if x.month > 8 else x.year,
            x.month - 8 if x.month > 8 else x.month + 4,
            1
        ) - datetime.timedelta(days=1),
        Indicator.QUARTERLY: lambda x: datetime.date(
            x.year + 1 if x.month > 9 else x.year,
            x.month - 9 if x.month > 9 else x.month + 3,
            1
        ) - datetime.timedelta(days=1),
        Indicator.MONTHLY: lambda x: datetime.date(
            x.year + 1 if x.month == 12 else x.year,
            1 if x.month == 12 else x.month + 1,
            1
        ) - datetime.timedelta(days=1)
    }

    # NOTE(review): mutable class-level attributes are shared between
    # instances; the tests reset them in place (e.g. `self.targets = []`).
    indicator = None
    program = None
    targets = []
    data = []

    def load_base_indicator(self):
        """loads a bare indicator in this program"""
        self.indicator = i_factories.IndicatorFactory()
        if self.program is not None:
            self.indicator.program = self.program
            self.indicator.save()

    def load_data(self, indicator=None, achieved=None, date=None, target=None):
        """adds data to the indicator"""
        # Defaults: current indicator, achieved=800, five days into the
        # program's reporting period.
        indicator = self.indicator if indicator is None else indicator
        achieved = 800 if achieved is None else achieved
        date = self.program.reporting_period_start + datetime.timedelta(days=5) if date is None else date
        datum = i_factories.ResultFactory(
            indicator=indicator,
            achieved=achieved,
            date_collected=date,
            periodic_target=target
        )
        self.data.append(datum)

    def load_target(self, indicator, target, period=None, start_date=None):
        """Create one periodic target; dated targets get a computed end date."""
        end_date = start_date
        period = "Period {0}".format(len(self.targets)) if period is None else period
        if start_date is not None:
            end_date = self.DATE_FUNCS[indicator.target_frequency](start_date)
        target = i_factories.PeriodicTargetFactory(
            indicator=indicator,
            period=period,
            target=target,
            start_date=start_date,
            end_date=end_date
        )
        self.targets.append(target)
        return target

    def get_time_aware_dates(self, target_frequency):
        """Return each period's start date across the program reporting period."""
        start_date = self.program.reporting_period_start
        date_func = self.DATE_FUNCS[target_frequency]
        end_date = date_func(start_date)
        dates = [start_date,]
        while end_date < self.program.reporting_period_end:
            start_date = end_date + datetime.timedelta(days=1)
            end_date = date_func(start_date)
            dates.append(start_date)
        return dates

    def load_targets(self, indicator=None, targets=None):
        """Create frequency-appropriate targets for the indicator."""
        indicator = self.indicator if indicator is None else indicator
        target_frequency = indicator.target_frequency
        loaded_targets = []
        if target_frequency == Indicator.MID_END:
            targets = [500, 800] if targets is None else targets
            loaded_targets.append(self.load_target(indicator, targets[0], period=PeriodicTarget.MIDLINE))
            loaded_targets.append(self.load_target(indicator, targets[1], period=PeriodicTarget.ENDLINE))
        elif target_frequency == Indicator.EVENT:
            targets = [1200,] if targets is None else targets
            for target in targets:
                loaded_targets.append(self.load_target(indicator, target))
        elif target_frequency in self.TIME_AWARE_FREQUENCIES:
            dates = self.get_time_aware_dates(target_frequency)
            targets = [400]*len(dates) if targets is None else targets
            for target, start_date in zip(targets, dates):
                loaded_targets.append(self.load_target(indicator, target, start_date=start_date))
        return loaded_targets

    def get_closed_program(self):
        """Create a program whose reporting period is entirely in the past."""
        self.program = w_factories.ProgramFactory(
            reporting_period_start=datetime.date(2016, 1, 1),
            reporting_period_end=datetime.date(2016, 12, 31)
        )

    def get_annotated_program(self):
        """Reload the program with the Home Page 'reporting' annotations."""
        return ProgramWithMetrics.home_page.with_annotations('reporting').get(pk=self.program.pk)
class TestSingleNonReportingIndicator(ReportingIndicatorBase):
    """test conditions under which indicator should report reporting as false"""

    def one_incomplete_assert(self, program, scenario):
        # NOTE(review): not called by any visible test, and it reads the
        # 'nonreporting' key while the tests read 'nonreporting_count' —
        # confirm which key ProgramWithMetrics actually exposes.
        nonreporting = program.scope_counts['nonreporting']
        reporting = program.scope_counts['indicator_count'] - nonreporting
        self.assertEqual(
            nonreporting, 1,
            "For {0}, program should have 1 incomplete indicator, got {1}".format(
                scenario, nonreporting
            )
        )
        self.assertEqual(
            reporting, 0,
            "For {0}, program should have 0 complete indicators, got {1}".format(
                scenario, reporting
            )
        )

    def test_lop_indicator_in_open_program(self):
        # get open (reporting period not over) program:
        # set dates from today so test doesn't become obsolete at some arbitrary future date:
        start_date = datetime.date.today() - datetime.timedelta(days=10)
        end_date = datetime.date.today() + datetime.timedelta(days=100)
        self.program = w_factories.ProgramFactory(
            reporting_period_start=start_date,
            reporting_period_end=end_date
        )
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.LOP
        # lop_target and data should be set to ensure it's only program openness restricting from "complete"
        self.indicator.lop_target = 1000
        self.indicator.save()
        self.load_data()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_lop_indicator_no_lop_target(self):
        # program is complete:
        self.get_closed_program()
        # add indicator with data:
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.LOP
        self.indicator.save()
        self.load_data()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_lop_indicator_no_data(self):
        # target set but no results collected -> nonreporting.
        self.get_closed_program()
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.LOP
        self.indicator.lop_target = 1400
        self.indicator.save()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_midend_indicator_no_targets(self):
        self.get_closed_program()
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.MID_END
        self.indicator.save()
        # ensure indicator has data for midline indicator:
        self.load_data()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_midend_indicator_no_data(self):
        self.get_closed_program()
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.MID_END
        self.indicator.save()
        # ensure indicator has targets for midline indicator:
        self.load_targets()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_event_indicator_no_target(self):
        self.get_closed_program()
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.EVENT
        self.indicator.save()
        self.load_data()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_event_indicator_no_data(self):
        self.get_closed_program()
        self.load_base_indicator()
        self.indicator.target_frequency = Indicator.EVENT
        self.indicator.save()
        self.load_targets()
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 1)

    def test_time_aware_indicators_no_targets(self):
        # One indicator per time-aware frequency; cleaned up between loops
        # so only a single indicator is ever attached to the program.
        self.get_closed_program()
        for frequency in self.TIME_AWARE_FREQUENCIES:
            self.load_base_indicator()
            self.indicator.target_frequency = frequency
            self.indicator.save()
            self.load_data()
            program = self.get_annotated_program()
            self.assertEqual(program.scope_counts['nonreporting_count'], 1)
            for datum in self.data:
                datum.delete()
            self.data = []
            self.indicator.delete()
            self.indicator = None

    def test_time_aware_indicators_no_data(self):
        self.targets = []
        self.get_closed_program()
        for frequency in self.TIME_AWARE_FREQUENCIES:
            self.load_base_indicator()
            self.indicator.target_frequency = frequency
            self.indicator.save()
            self.load_targets()
            program = self.get_annotated_program()
            self.assertEqual(program.scope_counts['nonreporting_count'], 1)
            for target in self.targets:
                target.delete()
            self.targets = []
            self.indicator.delete()
            self.indicator = None

    def test_time_aware_indicators_no_completed_periods(self):
        # if program started yesterday then no targets will be finished by today:
        today = datetime.date.today() - datetime.timedelta(days=1)
        self.program = w_factories.ProgramFactory(
            reporting_period_start=datetime.date(today.year, today.month, 1),
            reporting_period_end=datetime.date(today.year+1, today.month, 1) - datetime.timedelta(days=1)
        )
        for frequency in [freq for freq in self.TIME_AWARE_FREQUENCIES if freq != Indicator.MONTHLY]:
            # the above hack brought to you by the fact that once a month it is impossible to make a monthly indicator
            # with no completed programs. I apologize.
            self.load_base_indicator()
            self.indicator.target_frequency = frequency
            self.indicator.save()
            targets = self.load_targets()
            for target in targets:
                self.load_data(date=datetime.date.today()-datetime.timedelta(days=1), target=target)
            program = self.get_annotated_program()
            self.assertEqual(
                program.scope_counts['nonreporting_count'], 1,
                '{frequency} frequency indicator got scope counts {sc} instead of 1 nonreporting'.format(
                    frequency=frequency,
                    sc=program.scope_counts
                ))
            for target in self.targets:
                target.delete()
            self.targets = []
            self.indicator.delete()
            self.indicator = None
class TestSingleReportingIndicator(ReportingIndicatorBase):
def test_lop_indicator_closed_program_target_set_with_data(self):
    """LOP indicator in a closed program with a target and data: reporting."""
    # program is complete:
    self.get_closed_program()
    # add indicator with data:
    self.load_base_indicator()
    self.indicator.target_frequency = Indicator.LOP
    self.indicator.lop_target = 1400
    self.indicator.save()
    # LOP target spans the whole reporting period.
    target = i_factories.PeriodicTargetFactory(
        indicator=self.indicator,
        target=self.indicator.lop_target,
        start_date=self.indicator.program.reporting_period_start,
        end_date=self.indicator.program.reporting_period_end
    )
    self.load_data(target=target)
    program = self.get_annotated_program()
    self.assertEqual(program.scope_counts['nonreporting_count'], 0)
    self.assertEqual(program.scope_counts['indicator_count'], 1)
def test_midend_indicator_midline_target_and_data(self):
self.get_closed_program()
self.load_base_indicator()
self.indicator.target_frequency = Indicator.MID_END
self.indicator.save()
# ensure indicator has targets for midline indicator:
target = self.load_target(self.indicator, 800, period=PeriodicTarget.MIDLINE)
self.load_data(indicator=self.indicator, achieved=500, date=None, target=target)
program = self.get_annotated_program()
self.assertEqual(program.scope_counts['nonreporting_count'], 0)
self.assertEqual(program.scope_counts['indicator_count'], 1)
def test_midend_indicator_both_targets_and_data(self):
self.get_closed_program()
self.load_base_indicator()
self.indicator.target_frequency = Indicator.MID_END
self.indicator.save()
# ensure indicator has targets for midline indicator:
target1 = self.load_target(self.indicator, 800, period=PeriodicTarget.MIDLINE)
target2 = self.load_target(self.indicator, 400, period=PeriodicTarget.ENDLINE)
self.load_data(indicator=self.indicator, achieved=500, date=None, target=target1)
self.load_data(indicator=self.indicator, achieved=700, date=None, target=target2)
program = self.get_annotated_program()
self.assertEqual(program.scope_counts['nonreporting_count'], 0)
self.assertEqual(program.scope_counts['indicator_count'], 1)
def test_event_indicator_with_target_and_data(self):
self.get_closed_program()
self.load_base_indicator()
self.indicator.target_frequency = Indicator.EVENT
self.indicator.save()
# ensure indicator has targets for midline indicator:
target = self.load_target(self.indicator, 800)
self.load_data(indicator=self.indicator, achieved=500, date=None, target=target)
program = self.get_annotated_program()
self.assertEqual(program.scope_counts['nonreporting_count'], 0)
self.assertEqual(program.scope_counts['indicator_count'], 1)
def test_time_aware_indicators_with_completed_targets_and_data(self):
self.get_closed_program()
self.targets = []
self.data = []
for frequency in self.TIME_AWARE_FREQUENCIES:
self.load_base_indicator()
self.indicator.target_frequency = frequency
self.indicator.save()
self.load_targets()
for target in self.targets:
self.load_data(indicator=target.indicator, target=target)
program = self.get_annotated_program()
self.assertEqual(program.scope_counts['nonreporting_count'], 0)
self.assertEqual(program.scope_counts['indicator_count'], 1)
for target in self.targets:
target.delete()
for datum in self.data:
datum.delete()
self.data = []
self.targets = []
self.indicator.delete(force_policy=HARD_DELETE)
self.indicator = None
class TestMixedReportingAndNonIndicators(ReportingIndicatorBase):
    """Scope counts for programs mixing several indicator target frequencies."""
    def test_lop_and_midend_just_mid_reporting(self):
        self.get_closed_program()
        indicator_lop = i_factories.IndicatorFactory(
            target_frequency=Indicator.LOP,
            lop_target=1000,
            program=self.program
        )
        i_lop_target = i_factories.PeriodicTargetFactory(
            indicator=indicator_lop,
            target=indicator_lop.lop_target,
            start_date=self.program.reporting_period_start,
            end_date=self.program.reporting_period_end
        )
        # bound but unused: the factory call persists the result row, which is
        # what the annotation under test reads
        lop_data = i_factories.ResultFactory(
            indicator=indicator_lop,
            periodic_target=i_lop_target,
            achieved=400,
            date_collected=self.program.reporting_period_end - datetime.timedelta(days=10)
        )
        indicator_midend = i_factories.IndicatorFactory(
            target_frequency=Indicator.MID_END,
            program=self.program
        )
        mid_target = i_factories.PeriodicTargetFactory(
            indicator=indicator_midend,
            period=PeriodicTarget.MIDLINE,
            target=1000
        )
        # unused binding, same as lop_data above: created for its DB side effect
        mid_data = i_factories.ResultFactory(
            indicator=indicator_midend,
            periodic_target=mid_target,
            achieved=400,
            date_collected=self.program.reporting_period_start + datetime.timedelta(days=20)
        )
        program = self.get_annotated_program()
        # both indicators have targets and data, so neither counts as non-reporting
        self.assertEqual(program.scope_counts['nonreporting_count'], 0)
        self.assertEqual(program.scope_counts['indicator_count'], 2)
    def test_multiple_time_aware_indicators(self):
        self.get_closed_program()
        for frequency in self.TIME_AWARE_FREQUENCIES:
            indicator = i_factories.IndicatorFactory(
                target_frequency=frequency,
                program=self.program
            )
            self.load_targets(indicator=indicator)
            for target in self.targets:
                self.load_data(indicator=target.indicator, target=target)
        program = self.get_annotated_program()
        self.assertEqual(program.scope_counts['nonreporting_count'], 0)
        self.assertEqual(program.scope_counts['indicator_count'], len(self.TIME_AWARE_FREQUENCIES))
class TestProgramReportingPeriodCorrect(test.TestCase):
    """reporting_period_correct is True only when the period starts on the first
    of a month and ends on the last day of a month."""

    @staticmethod
    def _annotated_program(start, end):
        """Create a program with the given reporting period and return its
        home-page-annotated counterpart."""
        program = w_factories.ProgramFactory(
            reporting_period_start=start,
            reporting_period_end=end,
        )
        return ProgramWithMetrics.home_page.with_annotations().get(pk=program.pk)

    def test_reporting_period_correct_shows_correct(self):
        annotated = self._annotated_program(
            datetime.date(2015, 1, 1), datetime.date(2017, 2, 28))
        self.assertTrue(annotated.reporting_period_correct)

    def test_reporting_period_bad_start_shows_incorrect(self):
        annotated = self._annotated_program(
            datetime.date(2015, 1, 15), datetime.date(2017, 2, 28))
        self.assertFalse(annotated.reporting_period_correct)

    def test_reporting_period_bad_end_shows_incorrect(self):
        annotated = self._annotated_program(
            datetime.date(2015, 1, 1), datetime.date(2017, 2, 15))
        self.assertFalse(annotated.reporting_period_correct)

    def test_reporting_period_bad_both_shows_incorrect(self):
        annotated = self._annotated_program(
            datetime.date(2015, 1, 15), datetime.date(2017, 2, 15))
        self.assertFalse(annotated.reporting_period_correct)
class TestProgramHasTimeAwareIndicators(test.TestCase):
    """has_time_aware_targets is True iff at least one indicator on the program
    has a regular (time-aware) target frequency."""

    @staticmethod
    def _program_with_indicators(frequencies, end):
        """Create a program plus one indicator per frequency in *frequencies*."""
        program = w_factories.ProgramFactory(
            reporting_period_start=datetime.date(2015, 1, 1),
            reporting_period_end=end,
        )
        for frequency in frequencies:
            i_factories.IndicatorFactory(
                target_frequency=frequency,
                program=program,
            )
        return program

    def test_program_with_no_indicators_returns_false(self):
        program = self._program_with_indicators([], datetime.date(2017, 2, 28))
        self.assertFalse(program.has_time_aware_targets)

    def test_program_with_non_time_aware_indicators_returns_false(self):
        for frequency in [Indicator.LOP, Indicator.MID_END, Indicator.EVENT]:
            program = self._program_with_indicators(
                [frequency], datetime.date(2017, 2, 28))
            self.assertFalse(program.has_time_aware_targets)

    def test_program_with_time_aware_indicators_returns_true(self):
        for frequency in Indicator.REGULAR_TARGET_FREQUENCIES:
            program = self._program_with_indicators(
                [frequency], datetime.date(2017, 12, 31))
            self.assertTrue(program.has_time_aware_targets)

    def test_program_with_all_time_aware_indicators_returns_true(self):
        program = self._program_with_indicators(
            Indicator.REGULAR_TARGET_FREQUENCIES, datetime.date(2017, 12, 31))
        self.assertTrue(program.has_time_aware_targets)

    def test_program_with_all_indicators_returns_true(self):
        all_frequencies = [frequency for frequency, _ in Indicator.TARGET_FREQUENCIES]
        program = self._program_with_indicators(
            all_frequencies, datetime.date(2017, 12, 31))
        self.assertTrue(program.has_time_aware_targets)
class TestProgramLastTimeAwareStartDate(test.TestCase):
    """last_time_aware_indicator_start_date is the latest periodic-target start
    across the program's time-aware indicators, or None when there are none."""

    @staticmethod
    def _make_program(end):
        """Create a program starting 2015-01-01 and ending on *end*."""
        return w_factories.ProgramFactory(
            reporting_period_start=datetime.date(2015, 1, 1),
            reporting_period_end=end,
        )

    @staticmethod
    def _add_targets(indicator, periods):
        """Attach one periodic target per (start, end) pair in *periods*."""
        for start_date, end_date in periods:
            i_factories.PeriodicTargetFactory(
                indicator=indicator,
                start_date=start_date,
                end_date=end_date,
            )

    def test_no_time_aware_indicators_returns_none(self):
        program = self._make_program(datetime.date(2017, 12, 31))
        self.assertIsNone(program.last_time_aware_indicator_start_date)

    def test_program_with_non_time_aware_indicators_returns_none(self):
        for frequency in [Indicator.LOP, Indicator.MID_END, Indicator.EVENT]:
            program = self._make_program(datetime.date(2017, 12, 31))
            i_factories.IndicatorFactory(
                target_frequency=frequency,
                program=program,
            )
            self.assertIsNone(program.last_time_aware_indicator_start_date)

    def test_program_with_annual_indicator_returns_correct_date(self):
        program = self._make_program(datetime.date(2017, 12, 31))
        indicator = i_factories.IndicatorFactory(
            target_frequency=Indicator.ANNUAL,
            program=program,
        )
        self._add_targets(indicator, [
            (datetime.date(year, 1, 1), datetime.date(year, 12, 31))
            for year in (2015, 2016, 2017)
        ])
        self.assertEqual(program.last_time_aware_indicator_start_date,
                         datetime.date(2017, 1, 1))

    def test_program_with_multiple_indicators_returns_correct_date(self):
        program = self._make_program(datetime.date(2015, 12, 31))
        annual = i_factories.IndicatorFactory(
            target_frequency=Indicator.ANNUAL,
            program=program,
        )
        self._add_targets(annual, [
            (datetime.date(2015, 1, 1), datetime.date(2015, 12, 31)),
        ])
        tri_annual = i_factories.IndicatorFactory(
            target_frequency=Indicator.TRI_ANNUAL,
            program=program,
        )
        self._add_targets(tri_annual, [
            (datetime.date(2015, 1, 1), datetime.date(2015, 4, 30)),
            (datetime.date(2015, 5, 1), datetime.date(2015, 8, 31)),
            (datetime.date(2015, 9, 1), datetime.date(2015, 12, 31)),
        ])
        # the tri-annual indicator's final period starts latest
        self.assertEqual(program.last_time_aware_indicator_start_date,
                         datetime.date(2015, 9, 1))
| {
"content_hash": "b6a80824f050f87593efe2a0c4e694a9",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 118,
"avg_line_length": 43.89948006932409,
"alnum_prop": 0.6302013422818792,
"repo_name": "mercycorps/TolaActivity",
"id": "5e831cd5377332740ed53c6d051721315299db94",
"size": "25330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indicators/tests/program_metric_tests/program_unit/program_reporting_count_unit_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
"""Representation of a deCONZ gateway."""
import asyncio
import async_timeout
from pydeconz import DeconzSession, errors
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DEFAULT_ALLOW_CLIP_SENSOR,
DEFAULT_ALLOW_DECONZ_GROUPS,
DEFAULT_ALLOW_NEW_DEVICES,
DOMAIN as DECONZ_DOMAIN,
LOGGER,
NEW_GROUP,
NEW_LIGHT,
NEW_SCENE,
NEW_SENSOR,
PLATFORMS,
)
from .deconz_event import async_setup_events, async_unload_events
from .errors import AuthenticationRequired, CannotConnect
@callback
def get_gateway_from_config_entry(hass, config_entry):
    """Return the DeconzGateway registered under this config entry's unique id."""
    deconz_gateways = hass.data[DECONZ_DOMAIN]
    return deconz_gateways[config_entry.unique_id]
class DeconzGateway:
    """Manages a single deCONZ gateway.

    Wraps a pydeconz ``DeconzSession`` (``self.api``), tracks the entities
    created from it, and relays pydeconz callbacks to Home Assistant's
    dispatcher.
    """
    def __init__(self, hass, config_entry) -> None:
        """Initialize the system."""
        self.hass = hass
        self.config_entry = config_entry
        # pydeconz session; set by async_setup()
        self.api = None
        self.available = True
        self.ignore_state_updates = False
        # entity_id -> deCONZ resource path, filled in by the platforms
        self.deconz_ids = {}
        self.entities = {}
        self.events = []
    @property
    def bridgeid(self) -> str:
        """Return the unique identifier of the gateway."""
        return self.config_entry.unique_id
    @property
    def host(self) -> str:
        """Return the host of the gateway."""
        return self.config_entry.data[CONF_HOST]
    @property
    def master(self) -> bool:
        """Gateway which is used with deCONZ services without defining id."""
        return self.config_entry.options[CONF_MASTER_GATEWAY]
    # Options
    @property
    def option_allow_clip_sensor(self) -> bool:
        """Allow loading clip sensor from gateway."""
        return self.config_entry.options.get(
            CONF_ALLOW_CLIP_SENSOR, DEFAULT_ALLOW_CLIP_SENSOR
        )
    @property
    def option_allow_deconz_groups(self) -> bool:
        """Allow loading deCONZ groups from gateway."""
        return self.config_entry.options.get(
            CONF_ALLOW_DECONZ_GROUPS, DEFAULT_ALLOW_DECONZ_GROUPS
        )
    @property
    def option_allow_new_devices(self) -> bool:
        """Allow automatic adding of new devices."""
        return self.config_entry.options.get(
            CONF_ALLOW_NEW_DEVICES, DEFAULT_ALLOW_NEW_DEVICES
        )
    # Signals
    @property
    def signal_reachable(self) -> str:
        """Gateway specific event to signal a change in connection status."""
        return f"deconz-reachable-{self.bridgeid}"
    @callback
    def async_signal_new_device(self, device_type) -> str:
        """Gateway specific event to signal new device.

        Raises KeyError for an unknown device_type.
        """
        new_device = {
            NEW_GROUP: f"deconz_new_group_{self.bridgeid}",
            NEW_LIGHT: f"deconz_new_light_{self.bridgeid}",
            NEW_SCENE: f"deconz_new_scene_{self.bridgeid}",
            NEW_SENSOR: f"deconz_new_sensor_{self.bridgeid}",
        }
        return new_device[device_type]
    # Callbacks
    @callback
    def async_connection_status_callback(self, available) -> None:
        """Handle signals of gateway connection status."""
        self.available = available
        # a (re)connection invalidates any "ignore updates" state
        self.ignore_state_updates = False
        async_dispatcher_send(self.hass, self.signal_reachable, True)
    @callback
    def async_add_device_callback(
        self, device_type, device=None, force: bool = False
    ) -> None:
        """Handle event of new device creation in deCONZ."""
        if not force and not self.option_allow_new_devices:
            return
        args = []
        # normalize a single device into a one-element list for the listeners
        if device is not None and not isinstance(device, list):
            args.append([device])
        async_dispatcher_send(
            self.hass,
            self.async_signal_new_device(device_type),
            *args,  # Don't send device if None, it would override default value in listeners
        )
    async def async_update_device_registry(self) -> None:
        """Update device registry."""
        device_registry = await self.hass.helpers.device_registry.async_get_registry()
        # Host device
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id,
            connections={(CONNECTION_NETWORK_MAC, self.api.config.mac)},
        )
        # Gateway service
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id,
            identifiers={(DECONZ_DOMAIN, self.api.config.bridgeid)},
            manufacturer="Dresden Elektronik",
            model=self.api.config.modelid,
            name=self.api.config.name,
            sw_version=self.api.config.swversion,
            via_device=(CONNECTION_NETWORK_MAC, self.api.config.mac),
        )
    async def async_setup(self) -> bool:
        """Set up a deCONZ gateway.

        Raises ConfigEntryNotReady / ConfigEntryAuthFailed so Home Assistant
        retries or triggers reauth respectively.
        """
        try:
            self.api = await get_gateway(
                self.hass,
                self.config_entry.data,
                self.async_add_device_callback,
                self.async_connection_status_callback,
            )
        except CannotConnect as err:
            raise ConfigEntryNotReady from err
        except AuthenticationRequired as err:
            raise ConfigEntryAuthFailed from err
        self.hass.config_entries.async_setup_platforms(self.config_entry, PLATFORMS)
        await async_setup_events(self)
        self.api.start()
        self.config_entry.add_update_listener(self.async_config_entry_updated)
        return True
    @staticmethod
    async def async_config_entry_updated(hass, entry) -> None:
        """Handle signals of config entry being updated.
        This is a static method because a class method (bound method), can not be used with weak references.
        Causes for this is either discovery updating host address or config entry options changing.
        """
        gateway = get_gateway_from_config_entry(hass, entry)
        # host changed (discovery update): restart the session, skip options
        if gateway.api.host != gateway.host:
            gateway.api.close()
            gateway.api.host = gateway.host
            gateway.api.start()
            return
        await gateway.options_updated()
    async def options_updated(self):
        """Manage entities affected by config entry options."""
        # deCONZ ids of entities that the new options no longer allow
        deconz_ids = []
        if self.option_allow_clip_sensor:
            self.async_add_device_callback(NEW_SENSOR)
        else:
            deconz_ids += [
                sensor.deconz_id
                for sensor in self.api.sensors.values()
                if sensor.type.startswith("CLIP")
            ]
        if self.option_allow_deconz_groups:
            self.async_add_device_callback(NEW_GROUP)
        else:
            deconz_ids += [group.deconz_id for group in self.api.groups.values()]
        entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
        for entity_id, deconz_id in self.deconz_ids.items():
            if deconz_id in deconz_ids and entity_registry.async_is_registered(
                entity_id
            ):
                # Removing an entity from the entity registry will also remove them
                # from Home Assistant
                entity_registry.async_remove(entity_id)
    @callback
    def shutdown(self, event) -> None:
        """Wrap the call to deconz.close.
        Used as an argument to EventBus.async_listen_once.
        """
        self.api.close()
    async def async_reset(self):
        """Reset this gateway to default state."""
        # detach the status callback first so close() does not dispatch
        self.api.async_connection_status_callback = None
        self.api.close()
        await self.hass.config_entries.async_unload_platforms(
            self.config_entry, PLATFORMS
        )
        async_unload_events(self)
        self.deconz_ids = {}
        return True
async def get_gateway(
    hass, config, async_add_device_callback, async_connection_status_callback
) -> DeconzSession:
    """Create a gateway object and verify configuration.

    Raises AuthenticationRequired when the API key is rejected and
    CannotConnect on timeout or request errors.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    deconz = DeconzSession(
        session,
        config[CONF_HOST],
        config[CONF_PORT],
        config[CONF_API_KEY],
        async_add_device=async_add_device_callback,
        connection_status=async_connection_status_callback,
    )
    try:
        # cap the initial REST sync at 10 seconds
        with async_timeout.timeout(10):
            await deconz.initialize()
        return deconz
    except errors.Unauthorized as err:
        LOGGER.warning("Invalid key for deCONZ at %s", config[CONF_HOST])
        raise AuthenticationRequired from err
    except (asyncio.TimeoutError, errors.RequestError) as err:
        LOGGER.error("Error connecting to deCONZ gateway at %s", config[CONF_HOST])
        raise CannotConnect from err
| {
"content_hash": "9702b97bc2ef472d39bfb92b407ab0d2",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 108,
"avg_line_length": 32.159010600706715,
"alnum_prop": 0.6303702889792331,
"repo_name": "kennedyshead/home-assistant",
"id": "8b057ab9e51f3c0935227a81c576966fde7cafb2",
"size": "9101",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/deconz/gateway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceView(Model):
    """The instance view of a virtual machine scale set.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar virtual_machine: The instance view status summary for the virtual
     machine scale set.
    :vartype virtual_machine:
     ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetInstanceViewStatusesSummary
    :ivar extensions: The extensions information.
    :vartype extensions:
     list[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetVMExtensionsSummary]
    :param statuses: The resource status information.
    :type statuses:
     list[~azure.mgmt.compute.v2016_03_30.models.InstanceViewStatus]
    """
    # read-only fields: the msrest serializer drops them from outgoing requests
    _validation = {
        'virtual_machine': {'readonly': True},
        'extensions': {'readonly': True},
    }
    # maps Python attribute names to wire (JSON) keys and msrest type strings
    _attribute_map = {
        'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
        'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
        'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
    }
    def __init__(self, statuses=None):
        super(VirtualMachineScaleSetInstanceView, self).__init__()
        # server-populated, read-only attributes start out as None
        self.virtual_machine = None
        self.extensions = None
        self.statuses = statuses
| {
"content_hash": "c9234af142a87c8359fcb7f567ce49ab",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 114,
"avg_line_length": 38.5945945945946,
"alnum_prop": 0.696078431372549,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "82dbb90ac32ff85bf31f502cd2f7b73068244bc7",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_scale_set_instance_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.polynomial.legendre import legval
from scipy import linalg
from ..utils import logger, warn
from ..io.pick import pick_types, pick_channels, pick_info
from ..surface import _normalize_vectors
from ..bem import _fit_sphere
from ..forward import _map_meg_channels
def _calc_g(cosang, stiffness=4, num_lterms=50):
"""Calculate spherical spline g function between points on a sphere.
Parameters
----------
cosang : array-like of float, shape(n_channels, n_channels)
cosine of angles between pairs of points on a spherical surface. This
is equivalent to the dot product of unit vectors.
stiffness : float
stiffness of the spline.
num_lterms : int
number of Legendre terms to evaluate.
Returns
-------
G : np.ndrarray of float, shape(n_channels, n_channels)
The G matrix.
"""
factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness *
4 * np.pi) for n in range(1, num_lterms + 1)]
return legval(cosang, [0] + factors)
def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
    """Compute the spherical-spline interpolation matrix.

    Implementation based on [1].

    Parameters
    ----------
    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
        The positions to interpolate from.
    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
        The positions to interpolate.
    alpha : float
        Regularization parameter. Defaults to 1e-5.

    Returns
    -------
    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
        The interpolation matrix that maps good signals to the location
        of bad signals.

    References
    ----------
    [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).
        Spherical splines for scalp potential and current density mapping.
        Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7.
    """
    pos_from = pos_from.copy()
    pos_to = pos_to.copy()
    # project both sensor sets onto the unit sphere
    _normalize_vectors(pos_from)
    _normalize_vectors(pos_to)
    # cosines of inter-sensor angles == dot products of the unit vectors
    G_from = _calc_g(pos_from.dot(pos_from.T))
    G_to_from = _calc_g(pos_to.dot(pos_from.T))
    if alpha is not None:
        # regularize by loading the diagonal
        G_from.flat[::len(G_from) + 1] += alpha
    n_from = G_from.shape[0]  # G_from is square
    # bordered system enforcing the spline's constant-term constraint
    C = np.r_[np.c_[G_from, np.ones((n_from, 1))],
              np.c_[np.ones((1, n_from)), 0]]
    C_inv = linalg.pinv(C)
    mapping = np.c_[G_to_from, np.ones((G_to_from.shape[0], 1))]
    return mapping.dot(C_inv[:, :-1])
def _do_interp_dots(inst, interpolation, goods_idx, bads_idx):
    """Apply the channel mapping matrix to the good-channel data in place."""
    from ..io.base import BaseRaw
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if isinstance(inst, (BaseRaw, Evoked)):
        # 2D data (n_channels, n_times): a plain matrix product suffices
        inst._data[bads_idx] = interpolation.dot(inst._data[goods_idx])
        return
    if isinstance(inst, BaseEpochs):
        # 3D data (n_epochs, n_channels, n_times): contract the channel axis
        good_data = inst._data[:, goods_idx, :]
        inst._data[:, bads_idx, :] = np.einsum('ij,xjy->xiy', interpolation,
                                               good_data)
        return
    raise ValueError('Inputs of type {0} are not supported'
                     .format(type(inst)))
def _interpolate_bads_eeg(inst):
    """Interpolate bad EEG channels using spherical splines.

    Operates in place.

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `bool` is the documented, equivalent replacement.
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)
    picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
    inst.info._check_consistency()
    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]
    # Bail out when there are no EEG channels or no *bad* EEG channels.
    # The previous test `len(bads_idx) == 0` compared the total channel count
    # (always nonzero here) and therefore never short-circuited the no-bads
    # case; `bads_idx.any()` checks what was intended.
    if len(picks) == 0 or not bads_idx.any():
        return
    goods_idx[picks] = True
    goods_idx[bads_idx] = False
    pos = inst._get_channel_positions(picks)
    # Make sure only EEG channel positions are used from here on
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]
    pos_good = pos[goods_idx_pos]
    pos_bad = pos[bads_idx_pos]
    # Check how spherical the sensor cloud is: a poor fit degrades the
    # spherical-spline interpolation, so warn the user.
    radius, center = _fit_sphere(pos_good)
    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
    distance = np.mean(distance / radius)
    if np.abs(1. - distance) > 0.1:
        warn('Your spherical fit is poor, interpolation results are '
             'likely to be inaccurate.')
    logger.info('Computing interpolation matrix from {0} sensor '
                'positions'.format(len(pos_good)))
    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
    logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
def _interpolate_bads_meg(inst, mode='accurate', verbose=None):
    """Interpolate bad MEG channels from the data in good channels.

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    mode : str
        Either `'accurate'` or `'fast'`, determines the quality of the
        Legendre polynomial expansion used for interpolation. `'fast'` should
        be sufficient for most applications.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).
    """
    meg_picks = pick_types(inst.info, meg=True, eeg=False, exclude=[])
    good_picks = pick_types(inst.info, meg=True, eeg=False, exclude='bads')
    meg_names = [inst.info['ch_names'][idx] for idx in meg_picks]
    # only MEG channels listed in info['bads'] are interpolated
    meg_bads = [name for name in inst.info['bads'] if name in meg_names]
    if not meg_bads:
        bad_picks = []
    else:
        bad_picks = pick_channels(inst.info['ch_names'], meg_bads,
                                  exclude=[])
    # nothing to do without MEG channels or without bad MEG channels
    if len(meg_picks) == 0 or len(bad_picks) == 0:
        return
    info_from = pick_info(inst.info, good_picks)
    info_to = pick_info(inst.info, bad_picks)
    mapping = _map_meg_channels(info_from, info_to, mode=mode)
    _do_interp_dots(inst, mapping, good_picks, bad_picks)
| {
"content_hash": "2ef8d885305ae7c8457d48dc44b9ffbc",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 34.956756756756754,
"alnum_prop": 0.6248646976959951,
"repo_name": "nicproulx/mne-python",
"id": "ca37aff0d7398ff1b83fe53d22b47da50ef6b10b",
"size": "6549",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "mne/channels/interpolation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""
System Core Router File
Defines the verbs and the routes dictionary for use in the routes config file
"""
routes = {}
routes['GET'] = {}
routes['POST'] = {}
routes['PUT'] = {}
routes['PATCH'] = {}
routes['DELETE'] = {}
| {
"content_hash": "553158d1db1129ef9e224eb6059da7c0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 21.09090909090909,
"alnum_prop": 0.6077586206896551,
"repo_name": "authman/Python201609",
"id": "f0ca972cef52ea2ede327af3cf8341acf4a6d674",
"size": "232",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Nguyen_Ken/Assignments/AJAX and API/ajax_posts/system/core/router.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1231"
},
{
"name": "C",
"bytes": "430679"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "22689"
},
{
"name": "HTML",
"bytes": "168012"
},
{
"name": "JavaScript",
"bytes": "3734"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "590654"
},
{
"name": "Shell",
"bytes": "9350"
}
],
"symlink_target": ""
} |
import functools
import threading
from pyqryptonight.pyqryptonight import PoWHelper
from qrl.core.Singleton import Singleton
class CNv1PoWValidator(object, metaclass=Singleton):
    """Caches CryptoNight-v1 proof-of-work verification results.

    Thin singleton wrapper around pyqryptonight's PoWHelper with a small LRU
    cache so repeated verification of the same (mining_blob, target) pair is
    answered without re-running the native verifier.
    """

    def __init__(self):
        # NOTE(review): this lock is created but never acquired in this class;
        # kept for backward compatibility -- confirm whether callers rely on it.
        self.lock = threading.Lock()
        self._powv = PoWHelper()
        # Build the cache per instance instead of decorating the method with
        # functools.lru_cache: a class-level decorated method would key the
        # cache on `self` and keep the instance alive for the cache's lifetime
        # (ruff B019). With the Singleton metaclass only one instance exists,
        # but the per-instance cache is correct either way.
        self._verify_input_cached = functools.lru_cache(maxsize=5)(
            self._verify_input_uncached
        )

    def verify_input(self, mining_blob, target):
        """Return the (cached) PoW verification result for blob/target."""
        return self._verify_input_cached(mining_blob, target)

    def _verify_input_uncached(self, mining_blob, target):
        # Delegate to the native verifier; results are cached by the wrapper.
        return self._powv.verifyInput(mining_blob, target)
| {
"content_hash": "440fd5cf95693d1fe0c7800ce92903ed",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 28.473684210526315,
"alnum_prop": 0.7153419593345656,
"repo_name": "theQRL/QRL",
"id": "3a30913dfdf8246d5d43cfff934b6c53424e011a",
"size": "693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/qrl/core/miners/qryptonight7/CNv1PoWValidator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "185833"
},
{
"name": "Python",
"bytes": "1938166"
},
{
"name": "Shell",
"bytes": "2126"
}
],
"symlink_target": ""
} |
"""
@brief A base class for constructing shapes
@author alexander@gokliya.net
"""
from __future__ import print_function
from csg.core import CSG
from csg.geom import Vector
import numpy
# Fallback parameter values shared by the shape constructors in this module.
# NOTE(review): the list values are mutable; copy them before modifying if
# this dict is read directly.
DEFAULTS = dict(origin=[0.0, 0.0, 0.0],
                lengths=[1.0, 1.0, 1.0],
                radius=1.0,
                angle=90.0,
                n_theta=16,
                n_phi=8)
def Box(origin, lengths):
    """
    Create box
    @param origin low end of the box
    @param lengths lengths in x, y, and z
    """
    # CSG.cube takes the box's center and half-sizes ("radius")
    center = [lo + 0.5 * size for lo, size in zip(origin, lengths)]
    half_sizes = [0.5 * size for size in lengths]
    return CSG.cube(center=center, radius=half_sizes)
def Cone(radius, origin, lengths, n_theta=16):
    """
    Create cone
    @param radius radius
    @param origin location of the focal point
    @param lengths lengths of the cone
    @param n_theta number of theta cells
    """
    apex = Vector(*origin)
    base_center = Vector(*[o + d for o, d in zip(origin, lengths)])
    return CSG.cone(start=apex,
                    end=base_center,
                    radius=radius,
                    slices=n_theta)
def Cylinder(radius, origin, lengths, n_theta=16):
    """
    Create cylinder
    @param radius radius
    @param origin center of low end disk
    @param lengths lengths of the cylinder along each axis
    @param n_theta number of theta cells
    """
    low_center = Vector(*origin)
    high_center = Vector(*[o + d for o, d in zip(origin, lengths)])
    return CSG.cylinder(start=low_center,
                        end=high_center,
                        radius=radius,
                        slices=n_theta)
def Sphere(radius, origin, n_theta=16, n_phi=8):
    """
    Create sphere
    @param radius radius
    @param origin center of the sphere
    @param n_theta number of theta cells
    @param n_phi number of azimuthal cells
    """
    sphere_kwargs = dict(center=origin, radius=radius,
                         slices=n_theta, stacks=n_phi)
    return CSG.sphere(**sphere_kwargs)
def CompositeShape(shape_tuples=(), expression=''):
    """
    Combine named shapes into a composite shape.

    @param shape_tuples list of (variable_name, shape) pairs
    @param expression expression combining the variable names with
                      +, -, and * operations
    @return the evaluated composite shape
    """
    # Evaluate the expression against a minimal namespace of the named shapes
    # instead of exec-ing assignments into the local frame: in Python 3,
    # exec() writes into a snapshot of locals(), which is fragile and also
    # lets expression names shadow arbitrary locals of this function.
    # NOTE: eval() on an untrusted expression is still unsafe -- expressions
    # must come from a trusted source.
    namespace = dict(shape_tuples)
    return eval(expression, {"__builtins__": {}}, namespace)
| {
"content_hash": "dc740706d8c53c4f6154be68419fa0bd",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 69,
"avg_line_length": 29.102272727272727,
"alnum_prop": 0.5607184693479109,
"repo_name": "pletzer/icqsol",
"id": "4d317e179d0c87d618dbd52ffdcd7025ab7e273b",
"size": "2584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shapes/icqShape.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "191"
},
{
"name": "C++",
"bytes": "26317"
},
{
"name": "CMake",
"bytes": "22482"
},
{
"name": "Python",
"bytes": "171866"
},
{
"name": "Shell",
"bytes": "557"
}
],
"symlink_target": ""
} |
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst", encoding="utf-8") as readme_file, open(
"HISTORY.rst", encoding="utf-8"
) as history_file:
long_description = readme_file.read() + "\n\n" + history_file.read()
install_requires = [
"click>=6.0",
"rabird.core",
"selenium",
"selenium-requests",
"six>=1.10.0",
"whichcraft",
"arrow",
"Pillow",
"docker",
"lxml",
"attrdict",
]
setup_requires = [
"pytest-runner",
# TODO(starofrainnight): put setup requirements (distutils extensions, etc.) here
]
tests_requires = [
"pytest",
# TODO: put package test requirements here
]
setup(
name="rabird.selenium",
version="0.12.5",
description="An extension library for selenium",
long_description=long_description,
author="Hong-She Liang",
author_email="starofrainnight@gmail.com",
url="https://github.com/starofrainnight/rabird.selenium",
packages=find_packages(),
entry_points={
"console_scripts": ["rabird.selenium=rabird.selenium.__main__:main"]
},
include_package_data=True,
install_requires=install_requires,
license="Apache Software License",
zip_safe=False,
keywords="rabird.selenium,selenium",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite="tests",
tests_require=tests_requires,
setup_requires=setup_requires,
)
| {
"content_hash": "ad8fea8b97762b239ba8273dc1912b81",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 85,
"avg_line_length": 27.58730158730159,
"alnum_prop": 0.6334867663981588,
"repo_name": "starofrainnight/rabird.selenium",
"id": "b614778f183570688c453d70574217e709c05911",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "61159"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms import ImageField
from django.forms import ChoiceField
from django.forms import CharField
from django.forms import Textarea
from apps.common.shortcuts import uslugify
from apps.team.models import STATUS_CHOICES
from apps.team.models import Team
from apps.common.shortcuts import COUNTRIES
from apps.link.models import SITE_CHOICES
from apps.team import control
from apps.link import control as link_control
# Slugs that _validate_name rejects outright as team names.
_RESERVED_NAMES = [
    u"team",
]
def _validate_name(value):
    """Validate a prospective team name.

    Raises ValidationError if the slugified name is too short, reserved,
    or already taken (either as a link slug or as an exact name).
    """
    name = value.strip()
    link = uslugify(name)
    if len(link) < 3:
        raise ValidationError(_("ERROR_NAME_TO_SHORT"))
    if link in _RESERVED_NAMES:
        raise ValidationError(_("ERROR_NAME_RESERVED"))
    # .exists() issues an efficient EXISTS query; the previous
    # bool(len(queryset)) fetched every matching row just to count it.
    if Team.objects.filter(link=link).exists():
        raise ValidationError(_("ERROR_NAME_USED"))
    if Team.objects.filter(name=name).exists():
        raise ValidationError(_("ERROR_NAME_USED"))
class ReplaceLogo(Form):
    """Upload form for replacing a team's logo image."""
    logo = ImageField(label=_("LOGO"))
class CreateTeam(Form):
    """Form for founding a new team; the name must pass _validate_name."""
    name = CharField(label=_('TEAM_NAME'), validators=[_validate_name])
    country = ChoiceField(choices=COUNTRIES, label=_('COUNTRY'))
    logo = ImageField(label=_("LOGO"))
    application = CharField(label=_('APPLICATION'), widget=Textarea)
class CreateJoinRequest(Form):
    """Form for asking to join an existing team."""
    application = CharField(label=_('JOIN_REQUEST_REASON'), widget=Textarea)
class ProcessJoinRequest(Form):
    """Form for answering a pending join request."""
    response = CharField(label=_('RESPONSE'), widget=Textarea)
    # STATUS_CHOICES[1:] drops the first choice -- presumably the initial
    # "pending" state, which is not a valid decision; confirm in team.models.
    status = ChoiceField(choices=STATUS_CHOICES[1:], label=_('STATUS'))
class CreateRemoveRequest(Form):
    """Form for requesting removal (of a member/team -- confirm in views)."""
    reason = CharField(label=_('REASON'), widget=Textarea)
class ProcessRemoveRequest(Form):
    """Form for answering a pending remove request."""
    response = CharField(label=_('RESPONSE'), widget=Textarea)
    # STATUS_CHOICES[1:] drops the first choice -- presumably the initial
    # "pending" state, which is not a valid decision; confirm in team.models.
    status = ChoiceField(choices=STATUS_CHOICES[1:], label=_('STATUS'))
class LinkCreate(Form):
    """Form for attaching a social-site profile link to a team."""

    site = ChoiceField(choices=SITE_CHOICES, label=_("SITE"), required=True)
    profile = CharField(max_length=1024, label=_("URL"), required=True)

    def __init__(self, *args, **kwargs):
        # 'account' and 'team' are injected by the calling view; pop them
        # before the base Form sees the kwargs.
        self.account = kwargs.pop("account")
        self.team = kwargs.pop("team")
        super(LinkCreate, self).__init__(*args, **kwargs)

    def clean(self):
        cleaned_data = super(LinkCreate, self).clean()
        # Field-level validation may already have removed these keys
        # (e.g. empty required field); indexing with [] would raise
        # KeyError and mask the real field errors, so use .get() and
        # only run cross-field checks when both values survived.
        profile = cleaned_data.get("profile")
        site = cleaned_data.get("site")
        if profile and site:
            if control.site_link_exists(self.team, site):
                raise ValidationError(_("ERROR_LINK_PROFILE_FOR_SITE_EXISTS"))
            if not link_control.valid_profile_format(profile, site):
                raise ValidationError(_("ERROR_BAD_PROFILE_FORMAT"))
            if not control.can_create_link(self.account, self.team, site, profile):
                raise ValidationError(_("ERROR_CANNOT_CREATE_LINK"))
        return cleaned_data
class LinkDelete(Form):
    """Confirmation form for removing a team's profile link."""

    def __init__(self, *args, **kwargs):
        # The calling view supplies these; strip them before delegating.
        self.account = kwargs.pop("account")
        self.link = kwargs.pop("link")
        self.team = kwargs.pop("team")
        super(LinkDelete, self).__init__(*args, **kwargs)

    def clean(self):
        data = super(LinkDelete, self).clean()
        allowed = control.can_delete_link(self.account, self.team, self.link)
        if not allowed:
            raise ValidationError(_("ERROR_CANNOT_DELETE_LINK"))
        return data
| {
"content_hash": "ee05348ab58e2db17b441bb50e8852e6",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 32.179245283018865,
"alnum_prop": 0.6754617414248021,
"repo_name": "serdardalgic/bikesurf.org",
"id": "057998892d1b04d805a2db43677eb556e92999bf",
"size": "3557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/team/forms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1950764"
},
{
"name": "HTML",
"bytes": "6083522"
},
{
"name": "JavaScript",
"bytes": "284942"
},
{
"name": "Makefile",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "214905"
},
{
"name": "Ruby",
"bytes": "4418"
},
{
"name": "Shell",
"bytes": "553"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from StringIO import StringIO
from mock import patch, Mock
from tests import TestCase, with_settings, logged_in
from nose.tools import eq_
from catsnap import Client
from catsnap.table.album import Album
from catsnap.table.image import Image
class TestAlbum(TestCase):
    """Functional tests for the album endpoints (Python 2 / nose style)."""
    def test_new_album_requires_login(self):
        # Anonymous POSTs are redirected to the landing page.
        response = self.app.post('/new_album', data={'name': 'malicious'})
        eq_(response.status_code, 302, response.data)
        eq_(response.headers['Location'], 'http://localhost/')
    def test_get_the_new_album_page(self):
        response = self.app.get('/new_album')
        eq_(response.status_code, 200)
    @logged_in
    def test_add_an_album(self):
        response = self.app.post('/new_album', data={'name': 'my pics'})
        eq_(response.status_code, 302, response.data)
        eq_(response.headers['Location'], 'http://localhost/add')
        # The album must have been persisted.
        session = Client().session()
        albums = session.query(Album.name).all()
        eq_(albums, [('my pics',)])
    @logged_in
    def test_add_an_album__with_json_format(self):
        # The .json variant answers 200 with the new id instead of redirecting.
        response = self.app.post('/new_album.json', data={'name': 'my pics'})
        eq_(response.status_code, 200, response.data)
        body = json.loads(response.data)
        assert 'album_id' in body, body
    @logged_in
    def test_whitespace_is_trimmed(self):
        response = self.app.post('/new_album.json', data={'name': ' photoz '})
        eq_(response.status_code, 200, response.data)
        session = Client().session()
        album = session.query(Album).one()
        eq_(album.name, 'photoz')
    @logged_in
    def test_album_names_must_be_unique(self):
        session = Client().session()
        session.add(Album(name='portrait sesh'))
        session.flush()
        response = self.app.post('/new_album.json',
                                 data={'name': 'portrait sesh'})
        eq_(response.status_code, 409, response.data)
        body = json.loads(response.data)
        eq_(body['error'], "There is already an album with that name.")
    @with_settings(aws={'bucket': 'cattysnap'})
    def test_get_album_in_json_format(self):
        session = Client().session()
        album = Album(name='my pix')
        session.add(album)
        session.flush()
        cat = Image(album_id=album.album_id, filename='CA7')
        dog = Image(album_id=album.album_id, filename='D06')
        session.add(cat)
        session.add(dog)
        session.flush()
        response = self.app.get('/album/{0}.json'.format(album.album_id))
        eq_(response.status_code, 200, response.data)
        body = json.loads(response.data)
        # Each image is reported with its page URL, S3 source URL, and
        # its filename as the caption.
        eq_(body, [
            {
                'page_url': '/image/{0}'.format(cat.image_id),
                'source_url': 'https://s3.amazonaws.com/cattysnap/CA7',
                'caption': 'CA7'
            },
            {
                'page_url': '/image/{0}'.format(dog.image_id),
                'source_url': 'https://s3.amazonaws.com/cattysnap/D06',
                'caption': 'D06'
            },
        ])
| {
"content_hash": "b38a7cd1889bd719e919265ac1911b41",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 35.34090909090909,
"alnum_prop": 0.5855305466237942,
"repo_name": "ErinCall/catsnap",
"id": "7303e8cb2b940032927ae5d3832fdd478b913225",
"size": "3110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/web/test_album.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4509"
},
{
"name": "HTML",
"bytes": "15632"
},
{
"name": "JavaScript",
"bytes": "19584"
},
{
"name": "Python",
"bytes": "208716"
}
],
"symlink_target": ""
} |
from ._LaserRange import *
| {
"content_hash": "b96022dcc5f766b3c18d1298cb2152a5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.7407407407407407,
"repo_name": "ros-industrial/acuity",
"id": "dffa724bf77faf59f2753ecaae47520d2c93d130",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/fuerte-devel",
"path": "acuity_ar1000/src/Acuity/msg/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8970"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.privatedns import PrivateDnsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-privatedns
# USAGE
python put_private_dns_zone_cname_record_set.py
    Before running the sample, set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create or update a CNAME record set in a private DNS zone (sample)."""
    dns_client = PrivateDnsManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subscriptionId",
    )
    # Record payload: a single CNAME with metadata and a one-hour TTL.
    record_set = {
        "properties": {
            "cnameRecord": {"cname": "contoso.com"},
            "metadata": {"key1": "value1"},
            "ttl": 3600,
        }
    }
    result = dns_client.record_sets.create_or_update(
        resource_group_name="resourceGroup1",
        private_zone_name="privatezone1.com",
        record_type="CNAME",
        relative_record_set_name="recordCNAME",
        parameters=record_set,
    )
    print(result)


# x-ms-original-file: specification/privatedns/resource-manager/Microsoft.Network/stable/2020-06-01/examples/RecordSetCNAMEPut.json
if __name__ == "__main__":
    main()
| {
"content_hash": "957d65e7771e9eeff7c9a3d5e80611ac",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 131,
"avg_line_length": 35.26315789473684,
"alnum_prop": 0.7029850746268657,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a5d40cf45b281b561d810fb3fbc6f70c70cd5fa2",
"size": "1808",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/network/azure-mgmt-privatedns/generated_samples/put_private_dns_zone_cname_record_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Erlang port protocol."""
__author__ = "Dmitry Vasiliev <dima@hlabs.org>"
__version__ = "1.0"
from erlport.erlterms import Atom, List, ImproperList
| {
"content_hash": "bab908169ecad88ebf0fb00c1c7f86ac",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 53,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6842105263157895,
"repo_name": "276361270/erlport",
"id": "d3b2367f6739d84bcebf852e38d526e8ad04c076",
"size": "1728",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "priv/python2/erlport/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "159672"
},
{
"name": "Makefile",
"bytes": "12362"
},
{
"name": "Python",
"bytes": "151554"
},
{
"name": "Ruby",
"bytes": "99046"
},
{
"name": "Shell",
"bytes": "5003"
}
],
"symlink_target": ""
} |
import re
import requests
from datetime import date
from datetime import datetime
from juriscraper.opinions.united_states.state import nd
from juriscraper.DeferringList import DeferringList
class Site(nd.Site):
    """Backscraper for North Dakota appellate opinions, one month per page.

    The court site used different HTML layouts over time, so the parsers
    branch on ``self.crawl_date``:

    * before Nov 1996: list items with the date glued onto the case name
    * Nov 1996 - Sep 1998: ``<h4>`` date headers over ``<li>`` case links
    * Oct 1998 onward: ``<font>`` date headers over ``<a>`` case links

    Fixes vs. the previous revision: ``date(1997, 02, 01)``-style literals
    (invalid leading-zero integers under Python 3; same values in Python 2)
    and non-raw regex strings with ``\\d`` escapes.
    """

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        today = date.today()
        # e.g. http://www.ndcourts.gov/opinions/month/Jan2015.htm
        self.url = 'http://www.ndcourts.gov/opinions/month/%s.htm' % (today.strftime("%b%Y"))

    def _get_download_urls(self):
        """We use a fetcher and a DeferringList object and a HEAD request
        to test whether the wpd exists for a case"""
        def fetcher(html_link):
            if self.method == "LOCAL":
                return html_link  # Can't fetch remote during tests
            case_number = re.search(r'(\d+)', html_link).group(0)
            wpd_link = 'http://www.ndcourts.gov/wp/%s.wpd' % case_number
            r = requests.head(wpd_link,
                              allow_redirects=False,
                              headers={'User-Agent': 'Juriscraper'})
            # Prefer the WordPerfect original when the court serves one.
            if r.status_code == 200:
                return wpd_link
            else:
                return html_link
        if self.crawl_date >= date(1998, 10, 1):
            path = '//a/@href[contains(., "/court/opinions/")]'
        else:
            path = '//ul//a[text()]/@href'
        seed = list(self.html.xpath(path))
        return DeferringList(seed=seed, fetcher=fetcher)

    def _get_case_names(self):
        if self.crawl_date >= date(1998, 10, 1):
            path = '//a[contains(@href, "/court/opinions/")]/text()'
            return list(self.html.xpath(path))
        path = '//ul//a/text()'
        names = self.html.xpath(path)
        if self.crawl_date < date(1996, 11, 1):
            # A bad time: the date is appended to the name after a dash,
            # so keep only the part before it.
            case_names = []
            for name in names:
                case_names.append(name.rsplit('-')[0])
            return case_names
        return list(names)

    def _get_case_dates(self):
        # A tricky one. We get the case dates, but each can have a
        # different number of cases below it, so we have to count them:
        # 'dt' is only bound once the first date header is seen, and the
        # NameError catch skips case links that precede any header.
        case_dates = []
        if self.crawl_date >= date(1998, 10, 1):
            test_path = '//body/a'
            if len(self.html.xpath(test_path)) == 0:
                # It's a month with no cases (like Jan, 2009)
                return []
            path = '//body/a|//body/font'
            for e in self.html.xpath(path):
                if e.tag == 'font':
                    date_str = e.text
                    dt = datetime.strptime(date_str, '%B %d, %Y').date()
                elif e.tag == 'a':
                    try:
                        case_dates.append(dt)
                    except NameError:
                        # When we don't yet have the date
                        continue
        else:
            path = '//h4|//li'
            for e in self.html.xpath(path):
                if e.tag == 'h4':
                    # We make dates on h4's because there's one h4 per date.
                    date_str = e.text.strip()
                    dt = datetime.strptime(date_str, '%B %d, %Y').date()
                elif e.tag == 'li':
                    try:
                        # We append on li's, because there's one li per case.
                        case_dates.append(dt)
                    except NameError:
                        # When we don't yet have the date
                        continue
        return case_dates

    def _get_precedential_statuses(self):
        return ['Published'] * len(self.case_names)

    def _get_docket_numbers(self):
        if self.crawl_date >= date(1998, 10, 1):
            path = '//a/@href[contains(., "/court/opinions/")]'
        else:
            path = '//ul//a[text()]/@href'
        docket_numbers = []
        for html_link in self.html.xpath(path):
            try:
                docket_numbers.append(re.search(r'(\d+)', html_link).group(0))
            except AttributeError:
                # Link without digits: not a docket link.
                continue
        return docket_numbers

    def _get_neutral_citations(self):
        if self.crawl_date < date(1997, 2, 1):
            # Old format, but no neutral cites, thus short circuit the function.
            return None
        elif self.crawl_date < date(1998, 10, 1):
            # Old format with: 1997 ND 30 - Civil No. 960157 or 1997 ND 30
            path = '//li/text()'
        else:
            # New format with: 1997 ND 30
            path = '//body/text()'
        neutral_cites = []
        for t in self.html.xpath(path):
            try:
                neutral_cites.append(
                    re.search(r'^.{0,5}(\d{4} ND (?:App )?\d{1,4})', t, re.MULTILINE).group(1))
            except AttributeError:
                continue
        return neutral_cites

    def _post_parse(self):
        # Remove any information that applies to non-appellate cases.
        if self.neutral_citations:
            delete_items = []
            for i in range(0, len(self.neutral_citations)):
                if 'App' in self.neutral_citations[i]:
                    delete_items.append(i)
            # Delete from the highest index down so earlier indexes stay valid.
            for i in sorted(delete_items, reverse=True):
                del self.download_urls[i]
                del self.case_names[i]
                del self.case_dates[i]
                del self.precedential_statuses[i]
                del self.docket_numbers[i]
                del self.neutral_citations[i]
        else:
            # When there aren't any neutral cites that means they're all
            # supreme court cases.
            pass

    def _download_backwards(self, d):
        # The backscraper driver sets the month to crawl here.
        self.crawl_date = d
        self.url = 'http://www.ndcourts.gov/opinions/month/%s.htm' % (d.strftime("%b%Y"))
        self.html = self._download()
| {
"content_hash": "cc2daac77632560dff13aa11e0aa2554",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 114,
"avg_line_length": 39.39473684210526,
"alnum_prop": 0.5,
"repo_name": "m4h7/juriscraper",
"id": "c6fec652bc3a2d1189b4af8b8ad7b61dfccadb4b",
"size": "6042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "juriscraper/opinions/united_states_backscrapers/state/nd.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "27160373"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "623951"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.sensors.sagemaker_transform import SageMakerTransformSensor
# Canned describe_transform_job responses used to drive the sensor
# through each SageMaker transform-job state.
DESCRIBE_TRANSFORM_INPROGRESS_RESPONSE = {
    'TransformJobStatus': 'InProgress',
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
    },
}
DESCRIBE_TRANSFORM_COMPLETED_RESPONSE = {
    'TransformJobStatus': 'Completed',
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
    },
}
# The failed variant also carries a FailureReason, as the real API does.
DESCRIBE_TRANSFORM_FAILED_RESPONSE = {
    'TransformJobStatus': 'Failed',
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
    },
    'FailureReason': 'Unknown',
}
DESCRIBE_TRANSFORM_STOPPING_RESPONSE = {
    'TransformJobStatus': 'Stopping',
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
    },
}
class TestSageMakerTransformSensor(unittest.TestCase):
    """Unit tests for SageMakerTransformSensor's job-status polling."""
    @mock.patch.object(SageMakerHook, 'get_conn')
    @mock.patch.object(SageMakerHook, 'describe_transform_job')
    def test_sensor_with_failure(self, mock_describe_job, mock_client):
        """A 'Failed' job status must surface as an AirflowException."""
        mock_describe_job.side_effect = [DESCRIBE_TRANSFORM_FAILED_RESPONSE]
        sensor = SageMakerTransformSensor(
            task_id='test_task', poke_interval=2, aws_conn_id='aws_test', job_name='test_job_name'
        )
        with pytest.raises(AirflowException):
            sensor.execute(None)
        mock_describe_job.assert_called_once_with('test_job_name')
    @mock.patch.object(SageMakerHook, 'get_conn')
    @mock.patch.object(SageMakerHook, '__init__')
    @mock.patch.object(SageMakerHook, 'describe_transform_job')
    def test_sensor(self, mock_describe_job, hook_init, mock_client):
        """The sensor keeps poking until the job reaches 'Completed'."""
        hook_init.return_value = None
        # One response per poke: InProgress -> Stopping -> Completed.
        mock_describe_job.side_effect = [
            DESCRIBE_TRANSFORM_INPROGRESS_RESPONSE,
            DESCRIBE_TRANSFORM_STOPPING_RESPONSE,
            DESCRIBE_TRANSFORM_COMPLETED_RESPONSE,
        ]
        sensor = SageMakerTransformSensor(
            task_id='test_task', poke_interval=2, aws_conn_id='aws_test', job_name='test_job_name'
        )
        sensor.execute(None)
        # make sure we called 3 times(terminated when its completed)
        assert mock_describe_job.call_count == 3
        # make sure the hook was initialized with the specific params
        calls = [mock.call(aws_conn_id='aws_test')]
        hook_init.assert_has_calls(calls)
| {
"content_hash": "bbe61b69a071a02834ebb5f82673eed8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 98,
"avg_line_length": 34.49295774647887,
"alnum_prop": 0.6786443446304614,
"repo_name": "dhuang/incubator-airflow",
"id": "a3e23d8a505c7937e35579931b08319b7bb3289d",
"size": "3237",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/providers/amazon/aws/sensors/test_sagemaker_transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the rhumba distribution.
setup(
    name="rhumba",
    version='0.2.0',
    url='http://github.com/calston/rhumba',
    license='MIT',
    description="An asynchronous job queue written in Twisted",
    author='Colin Alston',
    author_email='colin.alston@gmail.com',
    # NOTE(review): "twisted.plugins" is appended explicitly -- it appears
    # find_packages() does not pick up that namespace; confirm before removing.
    packages=find_packages() + [
        "twisted.plugins",
    ],
    package_data={
        'twisted.plugins': ['twisted/plugins/rhumba_plugin.py']
    },
    include_package_data=True,
    install_requires=[
        'Twisted',
        'txredis',
        'PyYaml',
        'redis'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: System :: Distributed Computing',
    ],
)
| {
"content_hash": "6ddf40b3b005d5233ad58b033fc59772",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 26.78787878787879,
"alnum_prop": 0.5871040723981901,
"repo_name": "calston/rhumba",
"id": "e5e46caf625a9bf9983c1f9ab343664fd458cca7",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76197"
}
],
"symlink_target": ""
} |
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
# This will work in 4.0.1 and beyond
# from org.sleuthkit.autopsy.casemodule.services import Blackboard
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
# TODO: Rename this to something more specific. Search and replace for it because it is used a few times
class SampleJythonDataSourceIngestModuleFactory(IngestModuleFactoryAdapter):
    """Describes the module to Autopsy and creates instances of it."""

    # TODO: give it a unique name. Will be shown in module list, logs, etc.
    moduleName = "Sample Data Source Module"

    def getModuleDisplayName(self):
        """Return the human-readable module name."""
        return self.moduleName

    def getModuleDescription(self):
        # TODO: Give it a description
        return "Sample module that does X, Y, and Z."

    def getModuleVersionNumber(self):
        return "1.0"

    def isDataSourceIngestModuleFactory(self):
        # This factory only produces data source-level ingest modules.
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        # TODO: Change the class name to the name you'll make below
        return SampleJythonDataSourceIngestModule()
# Data Source-level ingest module. One gets created per data source.
# TODO: Rename this to something more specific. Could just remove "Factory" from above name.
class SampleJythonDataSourceIngestModule(DataSourceIngestModule):
    """Data source-level ingest module; one instance is created per data source.

    Finds files whose name contains "test", tags each with an
    "interesting file" artifact, and reads each one to count its bytes.
    """

    _logger = Logger.getLogger(SampleJythonDataSourceIngestModuleFactory.moduleName)

    def log(self, level, msg):
        # Include class and calling-method names so log lines are traceable.
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self):
        self.context = None

    def startUp(self, context):
        """Set-up hook; 'context' is an IngestJobContext.

        Raise IngestModule.IngestModuleException here if set-up fails.
        """
        self.context = context

    def process(self, dataSource, progressBar):
        """Analyze 'dataSource' (a Content object), reporting progress.

        Returns IngestModule.ProcessResult.OK when finished or cancelled.
        """
        # We don't know how much work there is yet.
        progressBar.switchToIndeterminate()

        # In Autopsy 4.0.1+ you can index artifacts for keyword search:
        # blackboard = Case.getCurrentCase().getServices().getBlackboard()

        # Use FileManager to find all files with "test" in the name.
        fileManager = Case.getCurrentCase().getServices().getFileManager()
        files = fileManager.findFiles(dataSource, "%test%")
        numFiles = len(files)
        self.log(Level.INFO, "found " + str(numFiles) + " files")
        progressBar.switchToDeterminate(numFiles)

        fileCount = 0
        for file in files:
            # Check if the user pressed cancel while we were busy.
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK
            self.log(Level.INFO, "Processing file: " + file.getName())
            fileCount += 1

            # Tag the file with a generic "interesting file" artifact.
            art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
            att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
                                      SampleJythonDataSourceIngestModuleFactory.moduleName,
                                      "Test file")
            art.addAttribute(att)

            # In 4.0.1+, index the artifact for keyword search, wrapped in
            # try/except Blackboard.BlackboardException:
            # blackboard.indexArtifact(art)

            # To further the example, read the file's contents and count
            # its bytes (the total is not used further).
            inputStream = ReadContentInputStream(file)
            buffer = jarray.zeros(1024, "b")
            totLen = 0
            readLen = inputStream.read(buffer)
            while readLen != -1:
                totLen = totLen + readLen
                readLen = inputStream.read(buffer)

            # Update the progress bar.
            progressBar.progress(fileCount)

        # Post a summary message to the ingest messages in-box.
        message = IngestMessage.createMessage(
            IngestMessage.MessageType.DATA,
            "Sample Jython Data Source Ingest Module",
            "Found %d files" % fileCount)
        IngestServices.getInstance().postMessage(message)
        return IngestModule.ProcessResult.OK
"content_hash": "23996ebec2f3f961042c2bf3a091defa",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 165,
"avg_line_length": 46.44604316546763,
"alnum_prop": 0.7163878562577447,
"repo_name": "mhmdfy/autopsy",
"id": "6e99ee3cee7a38a0147b7ad50f7cc8dbbd72a9ee",
"size": "8071",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pythonExamples/dataSourceIngestModule.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5199"
},
{
"name": "CSS",
"bytes": "2953"
},
{
"name": "HTML",
"bytes": "8763"
},
{
"name": "Java",
"bytes": "6099326"
},
{
"name": "Perl",
"bytes": "1145199"
},
{
"name": "Python",
"bytes": "199334"
}
],
"symlink_target": ""
} |
import httplib
from httplib import BadStatusLine
import os
import sys
import threading
import time
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
engine = cherrypy.engine
thisdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
class Root:
    """Handlers used to poke the engine from inside live requests."""

    def index(self):
        return "Hello World"
    index.exposed = True

    def ctrlc(self):
        # Simulate a Ctrl-C arriving inside a request handler.
        raise KeyboardInterrupt()
    ctrlc.exposed = True

    def graceful(self):
        engine.graceful()
        return "app was (gracefully) restarted succesfully"
    graceful.exposed = True

    def block_explicit(self):
        # Spin until the response is flagged as timed out, then clear the
        # flag and report it.
        while not cherrypy.response.timed_out:
            time.sleep(0.01)
        cherrypy.response.timed_out = False
        return "broken!"
    block_explicit.exposed = True

    def block_implicit(self):
        # Sleep past the response timeout so the engine flags us.
        time.sleep(0.5)
        return "response.timeout = %s" % cherrypy.response.timeout
    block_implicit.exposed = True
# Mount the app at the root and force the test-suite environment; poll
# frequently for deadlocked worker threads so timeout tests finish quickly.
cherrypy.tree.mount(Root())
cherrypy.config.update({
    'environment': 'test_suite',
    'engine.deadlock_poll_freq': 0.1,
})
class Dependency:
    """Fake engine plugin that records lifecycle events for the tests."""

    def __init__(self, bus):
        self.bus = bus
        self.running = False
        self.startcount = 0
        self.gracecount = 0
        self.threads = {}

    def subscribe(self):
        # Wire each lifecycle channel to its recording callback.
        channels = [
            ('start', self.start),
            ('stop', self.stop),
            ('graceful', self.graceful),
            ('start_thread', self.startthread),
            ('stop_thread', self.stopthread),
        ]
        for channel, callback in channels:
            self.bus.subscribe(channel, callback)

    def start(self):
        self.startcount += 1
        self.running = True

    def stop(self):
        self.running = False

    def graceful(self):
        self.gracecount += 1

    def startthread(self, thread_id):
        self.threads[thread_id] = None

    def stopthread(self, thread_id):
        del self.threads[thread_id]
# Module-level plugin instance shared by every test case below.
db_connection = Dependency(engine)
db_connection.subscribe()
# ------------ Enough helpers. Time for real live test cases. ------------ #
from cherrypy.test import helper
class ServerStateTests(helper.CPWebCase):
    def test_0_NormalStateFlow(self):
        """Walk the engine through start -> stop -> exit and verify that
        the Dependency plugin sees every transition."""
        if not self.server_class:
            # Without having called "engine.start()", we should
            # get a 503 Service Unavailable response.
            self.getPage("/")
            self.assertStatus(503)
            # And our db_connection should not be running
            self.assertEqual(db_connection.running, False)
            self.assertEqual(db_connection.startcount, 0)
            self.assertEqual(len(db_connection.threads), 0)
        # Test server start
        engine.start()
        self.assertEqual(engine.state, engine.states.STARTED)
        if self.server_class:
            # The port must now be bound, so a second bind attempt fails.
            host = cherrypy.server.socket_host
            port = cherrypy.server.socket_port
            self.assertRaises(IOError, cherrypy._cpserver.check_port, host, port)
        # The db_connection should be running now
        self.assertEqual(db_connection.running, True)
        self.assertEqual(db_connection.startcount, 1)
        self.assertEqual(len(db_connection.threads), 0)
        self.getPage("/")
        self.assertBody("Hello World")
        self.assertEqual(len(db_connection.threads), 1)
        # Test engine stop. This will also stop the HTTP server.
        engine.stop()
        self.assertEqual(engine.state, engine.states.STOPPED)
        # Verify that our custom stop function was called
        self.assertEqual(db_connection.running, False)
        self.assertEqual(len(db_connection.threads), 0)
        if not self.server_class:
            # Once the engine has stopped, we should get a 503
            # error again. (If we were running an HTTP server,
            # then the connection should not even be processed).
            self.getPage("/")
            self.assertStatus(503)
        # Block the main thread now and verify that exit() works.
        def exittest():
            self.getPage("/")
            self.assertBody("Hello World")
            engine.exit()
        cherrypy.server.start()
        engine.start_with_callback(exittest)
        engine.block()
        self.assertEqual(engine.state, engine.states.EXITING)
def test_1_Restart(self):
cherrypy.server.start()
engine.start()
# The db_connection should be running now
self.assertEqual(db_connection.running, True)
grace = db_connection.gracecount
self.getPage("/")
self.assertBody("Hello World")
self.assertEqual(len(db_connection.threads), 1)
# Test server restart from this thread
engine.graceful()
self.assertEqual(engine.state, engine.states.STARTED)
self.getPage("/")
self.assertBody("Hello World")
self.assertEqual(db_connection.running, True)
self.assertEqual(db_connection.gracecount, grace + 1)
self.assertEqual(len(db_connection.threads), 1)
# Test server restart from inside a page handler
self.getPage("/graceful")
self.assertEqual(engine.state, engine.states.STARTED)
self.assertBody("app was (gracefully) restarted succesfully")
self.assertEqual(db_connection.running, True)
self.assertEqual(db_connection.gracecount, grace + 2)
# Since we are requesting synchronously, is only one thread used?
# Note that the "/graceful" request has been flushed.
self.assertEqual(len(db_connection.threads), 0)
engine.stop()
self.assertEqual(engine.state, engine.states.STOPPED)
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
def test_2_KeyboardInterrupt(self):
if self.server_class:
# Raise a keyboard interrupt in the HTTP server's main thread.
# We must start the server in this, the main thread
engine.start()
cherrypy.server.start()
self.persistent = True
try:
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody("Hello World")
self.assertNoHeader("Connection")
cherrypy.server.httpserver.interrupt = KeyboardInterrupt
engine.block()
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
self.assertEqual(engine.state, engine.states.EXITING)
finally:
self.persistent = False
# Raise a keyboard interrupt in a page handler; on multithreaded
# servers, this should occur in one of the worker threads.
# This should raise a BadStatusLine error, since the worker
# thread will just die without writing a response.
engine.start()
cherrypy.server.start()
try:
self.getPage("/ctrlc")
except BadStatusLine:
pass
else:
print self.body
self.fail("AssertionError: BadStatusLine not raised")
engine.block()
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
def test_3_Deadlocks(self):
cherrypy.config.update({'response.timeout': 0.2})
engine.start()
cherrypy.server.start()
try:
self.assertNotEqual(engine.timeout_monitor.thread, None)
# Request a "normal" page.
self.assertEqual(engine.timeout_monitor.servings, [])
self.getPage("/")
self.assertBody("Hello World")
# request.close is called async.
while engine.timeout_monitor.servings:
print ".",
time.sleep(0.01)
# Request a page that explicitly checks itself for deadlock.
# The deadlock_timeout should be 2 secs.
self.getPage("/block_explicit")
self.assertBody("broken!")
# Request a page that implicitly breaks deadlock.
# If we deadlock, we want to touch as little code as possible,
# so we won't even call handle_error, just bail ASAP.
self.getPage("/block_implicit")
self.assertStatus(500)
self.assertInBody("raise cherrypy.TimeoutError()")
finally:
engine.exit()
def test_4_Autoreload(self):
if not self.server_class:
print "skipped (no server) ",
return
# Start the demo script in a new process
p = helper.CPProcess(ssl=(self.scheme.lower()=='https'))
p.write_conf()
p.start(imports='cherrypy.test.test_states_demo')
try:
self.getPage("/start")
start = float(self.body)
# Give the autoreloader time to cache the file time.
time.sleep(2)
# Touch the file
os.utime(os.path.join(thisdir, "test_states_demo.py"), None)
# Give the autoreloader time to re-exec the process
time.sleep(2)
cherrypy._cpserver.wait_for_occupied_port(host, port)
self.getPage("/start")
self.assert_(float(self.body) > start)
finally:
# Shut down the spawned process
self.getPage("/exit")
p.join()
def test_5_Start_Error(self):
if not self.server_class:
print "skipped (no server) ",
return
# If a process errors during start, it should stop the engine
# and exit with a non-zero exit code.
p = helper.CPProcess(ssl=(self.scheme.lower()=='https'),
wait=True)
p.write_conf(extra="starterror: True")
p.start(imports='cherrypy.test.test_states_demo')
if p.exit_code == 0:
self.fail("Process failed to return nonzero exit code.")
class PluginTests(helper.CPWebCase):
    """Tests for engine plugins: currently only POSIX daemonization."""

    def test_daemonize(self):
        if not self.server_class:
            print "skipped (no server) ",
            return
        if os.name not in ['posix']:
            print "skipped (not on posix) ",
            return

        # Spawn the process and wait, when this returns, the original process
        # is finished. If it daemonized properly, we should still be able
        # to access pages.
        p = helper.CPProcess(ssl=(self.scheme.lower()=='https'),
                             wait=True, daemonize=True)
        p.write_conf()
        p.start(imports='cherrypy.test.test_states_demo')
        try:
            # Just get the pid of the daemonization process.
            self.getPage("/pid")
            self.assertStatus(200)
            page_pid = int(self.body)
            self.assertEqual(page_pid, p.get_pid())
        finally:
            # Shut down the spawned process
            self.getPage("/exit")
        p.join()

        # Wait until here to test the exit code because we want to ensure
        # that we wait for the daemon to finish running before we fail.
        if p.exit_code != 0:
            self.fail("Daemonized parent process failed to exit cleanly.")
class SignalHandlingTests(helper.CPWebCase):
    """Verify SIGHUP/SIGTERM handling for tty and daemonized servers,
    including unsubscribing the default signal handlers."""

    def test_SIGHUP_tty(self):
        # When not daemonized, SIGHUP should shut down the server.
        if not self.server_class:
            print "skipped (no server) ",
            return
        try:
            from signal import SIGHUP
        except ImportError:
            print "skipped (no SIGHUP) ",
            return

        # Spawn the process.
        p = helper.CPProcess(ssl=(self.scheme.lower()=='https'))
        p.write_conf()
        p.start(imports='cherrypy.test.test_states_demo')
        # Send a SIGHUP
        os.kill(p.get_pid(), SIGHUP)
        # This might hang if things aren't working right, but meh.
        p.join()

    def test_SIGHUP_daemonized(self):
        # When daemonized, SIGHUP should restart the server.
        if not self.server_class:
            print "skipped (no server) ",
            return
        try:
            from signal import SIGHUP
        except ImportError:
            print "skipped (no SIGHUP) ",
            return
        if os.name not in ['posix']:
            print "skipped (not on posix) ",
            return

        # Spawn the process and wait, when this returns, the original process
        # is finished. If it daemonized properly, we should still be able
        # to access pages.
        p = helper.CPProcess(ssl=(self.scheme.lower()=='https'),
                             wait=True, daemonize=True)
        p.write_conf()
        p.start(imports='cherrypy.test.test_states_demo')

        pid = p.get_pid()
        try:
            # Send a SIGHUP
            os.kill(pid, SIGHUP)
            # Give the server some time to restart
            time.sleep(2)
            # A restarted daemon must answer from a different pid.
            self.getPage("/pid")
            self.assertStatus(200)
            new_pid = int(self.body)
            self.assertNotEqual(new_pid, pid)
        finally:
            # Shut down the spawned process
            self.getPage("/exit")
        p.join()

    def test_SIGTERM(self):
        # SIGTERM should shut down the server whether daemonized or not.
        if not self.server_class:
            print "skipped (no server) ",
            return
        try:
            from signal import SIGTERM
        except ImportError:
            print "skipped (no SIGTERM) ",
            return
        try:
            from os import kill
        except ImportError:
            print "skipped (no os.kill) ",
            return

        # Spawn a normal, undaemonized process.
        p = helper.CPProcess(ssl=(self.scheme.lower()=='https'))
        p.write_conf()
        p.start(imports='cherrypy.test.test_states_demo')
        # Send a SIGTERM
        os.kill(p.get_pid(), SIGTERM)
        # This might hang if things aren't working right, but meh.
        p.join()

        if os.name in ['posix']:
            # Spawn a daemonized process and test again.
            p = helper.CPProcess(ssl=(self.scheme.lower()=='https'),
                                 wait=True, daemonize=True)
            p.write_conf()
            p.start(imports='cherrypy.test.test_states_demo')
            # Send a SIGTERM
            os.kill(p.get_pid(), SIGTERM)
            # This might hang if things aren't working right, but meh.
            p.join()

    def test_signal_handler_unsubscribe(self):
        if not self.server_class:
            print "skipped (no server) ",
            return
        try:
            from signal import SIGTERM
        except ImportError:
            print "skipped (no SIGTERM) ",
            return
        try:
            from os import kill
        except ImportError:
            print "skipped (no os.kill) ",
            return

        # Spawn a normal, undaemonized process.
        p = helper.CPProcess(ssl=(self.scheme.lower()=='https'))
        # "unsubsig: True" makes the demo app replace the default handler.
        p.write_conf(extra="unsubsig: True")
        p.start(imports='cherrypy.test.test_states_demo')
        # Send a SIGTERM
        os.kill(p.get_pid(), SIGTERM)
        # This might hang if things aren't working right, but meh.
        p.join()

        # Assert the old handler ran.
        target_line = open(p.error_log, 'rb').readlines()[-10]
        if not "I am an old SIGTERM handler." in target_line:
            self.fail("Old SIGTERM handler did not run.\n%r" % target_line)
# Every CPWebCase subclass defined above; run()/run_all() configure these
# in bulk (server_class, HOST, PORT, scheme).
cases = [v for v in globals().values()
         if isinstance(v, type) and issubclass(v, helper.CPWebCase)]
def run(server, conf):
    """Run the three suites against *server* (a server class name or None)
    using configuration *conf*.

    If the optional 'pyconquer' tracing module is importable, the run is
    traced into test_states_conquer.log.  The engine is always exited.
    """
    helper.setConfig(conf)
    # Every test case needs to know which server flavor is under test.
    for tc in cases:
        tc.server_class = server
    suites = [helper.CPTestLoader.loadTestsFromTestCase(tc) for tc in
              (ServerStateTests, PluginTests, SignalHandlingTests)]
    try:
        try:
            import pyconquer
        except ImportError:
            for suite in suites:
                helper.CPTestRunner.run(suite)
        else:
            # pyconquer available: trace the whole run into a log file.
            tr = pyconquer.Logger("cherrypy")
            tr.out = open(os.path.join(os.path.dirname(__file__), "test_states_conquer.log"), "wb")
            try:
                tr.start()
                for suite in suites:
                    helper.CPTestRunner.run(suite)
            finally:
                tr.stop()
                tr.out.close()
    finally:
        engine.exit()
def run_all(host, port, ssl=False):
    """Configure host/port/SSL on every test case and run the suites
    against the CherryPy WSGI server."""
    conf = {'server.socket_host': host,
            'server.socket_port': port,
            'server.thread_pool': 10,
            'environment': "test_suite",
            }
    if host:
        for tc in cases:
            tc.HOST = host
    if port:
        for tc in cases:
            tc.PORT = port
    if ssl:
        # Reuse the test certificate as both cert and private key.
        localDir = os.path.dirname(__file__)
        serverpem = os.path.join(os.getcwd(), localDir, 'test.pem')
        conf['server.ssl_certificate'] = serverpem
        conf['server.ssl_private_key'] = serverpem
        for tc in cases:
            tc.scheme = "https"
            tc.HTTP_CONN = httplib.HTTPSConnection

    def _run(server):
        print
        print "Testing %s on %s:%s..." % (server, host, port)
        run(server, conf)
    _run("cherrypy._cpwsgi.CPWSGIServer")
if __name__ == "__main__":
    # Command-line entry point: parse -ssl / -host= / -port= and run.
    import sys
    host = '127.0.0.1'
    port = 8000
    ssl = False

    argv = sys.argv[1:]
    if argv:
        help_args = [prefix + atom for atom in ("?", "h", "help")
                     for prefix in ("", "-", "--", "\\")]
        for arg in argv:
            if arg in help_args:
                print
                print "test_states.py -? -> this help page"
                print "test_states.py [-host=h] [-port=p] -> run the tests on h:p"
                print "test_states.py -ssl [-host=h] [-port=p] -> run the tests using SSL on h:p"
                sys.exit(0)

            if arg == "-ssl":
                ssl = True
            elif arg.startswith("-host="):
                host = arg[6:].strip("\"'")
            elif arg.startswith("-port="):
                port = int(arg[6:].strip())

    run_all(host, port, ssl)
| {
"content_hash": "4f7ce94b5881a676fbf1a9553ef97bf1",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 99,
"avg_line_length": 34.768402154398565,
"alnum_prop": 0.5367654652483734,
"repo_name": "cread/ec2id",
"id": "90e03dbde5c2b13fe9e54784d1974dc0526ff36e",
"size": "19366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cherrypy/test/test_states.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "807550"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from django import forms
from django.forms import widgets
import commonware.log
import happyforms
import jinja2
from django.utils.translation import ugettext as _
import mkt
from mkt.files.models import File
from mkt.versions.models import Version
log = commonware.log.getLogger('z.files')
class FileSelectWidget(widgets.Select):
    """Select widget that renders one <option> per distinct file hash of
    each version, grouping a version's files in an <optgroup>."""

    def render_options(self, choices, selected_choices):

        def option(files, label=None):
            # Make sure that if there's a non-disabled version,
            # that's the one we use for the ID.
            # (Python 2 cmp-style sort: disabled files sort last.)
            files.sort(lambda a, b: ((a.status == mkt.STATUS_DISABLED) -
                                     (b.status == mkt.STATUS_DISABLED)))
            if label is None:
                label = _('All')
            output = [u'<option value="', jinja2.escape(files[0].id), u'" ']
            if files[0].status == mkt.STATUS_DISABLED:
                # Disabled files can be diffed on Marketplace.
                output.append(u' disabled')
            if selected in files:
                output.append(u' selected="true"')
            status = set(u'status-%s' % mkt.STATUS_CHOICES_API[f.status]
                         for f in files)
            output.extend((u' class="', jinja2.escape(' '.join(status)), u'"'))
            # Extend apps to show file status in selects.
            # NOTE(review): ``f`` here is not bound by this function; it
            # resolves via closure to the last file iterated in the
            # enclosing loop below -- verify this is the intended file.
            label += ' (%s)' % mkt.STATUS_CHOICES_API[f.status]
            output.extend((u'>', jinja2.escape(label), u'</option>\n'))
            return output

        # Resolve the currently-selected File (if any) once up front.
        if selected_choices[0]:
            selected = File.objects.get(id=selected_choices[0])
        else:
            selected = None

        file_ids = [int(c[0]) for c in self.choices if c[0]]

        output = []
        output.append(u'<option></option>')

        vers = Version.objects.filter(files__id__in=file_ids).distinct()
        for ver in vers.order_by('-created'):
            # Group this version's files by content hash so identical
            # files collapse into a single option.
            hashes = defaultdict(list)
            for f in ver.files.filter(id__in=file_ids):
                hashes[f.hash].append(f)
            distinct_files = hashes.values()
            if len(distinct_files) == 1:
                output.extend(option(distinct_files[0], ver.version))
            elif distinct_files:
                output.extend((u'<optgroup label="',
                               jinja2.escape(ver.version), u'">'))
                for f in distinct_files:
                    output.extend(option(f))
                output.append(u'</optgroup>')

        return jinja2.Markup(u''.join(output))
class FileCompareForm(happyforms.Form):
    """Form choosing two files of one add-on to diff ('right' optional)."""

    left = forms.ModelChoiceField(queryset=File.objects.all(),
                                  widget=FileSelectWidget)
    right = forms.ModelChoiceField(queryset=File.objects.all(),
                                   widget=FileSelectWidget, required=False)

    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon')
        super(FileCompareForm, self).__init__(*args, **kw)
        # Restrict both choice fields to files belonging to this add-on.
        addon_files = File.objects.filter(version__addon=self.addon)
        for field_name in ('left', 'right'):
            self.fields[field_name].queryset = addon_files

    def clean(self):
        data = self.cleaned_data
        # Comparing a file against itself is meaningless; reject it.
        if not self.errors and data.get('right') == data['left']:
            raise forms.ValidationError(
                _('Cannot diff a version against itself'))
        return data
| {
"content_hash": "d9427d742648deaa461421edc05b3a6d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 35.915789473684214,
"alnum_prop": 0.567409144196952,
"repo_name": "ingenioustechie/zamboni",
"id": "87bce086c636c3625778aec57291f785531365b7",
"size": "3412",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mkt/files/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354315"
},
{
"name": "HTML",
"bytes": "2379391"
},
{
"name": "JavaScript",
"bytes": "529996"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4672122"
},
{
"name": "Shell",
"bytes": "11147"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
} |
'''
Runners are classes which are responsible for consuming sample blocks from
sound generator (see the generator package) and directing them to output
for playback, file writing, etc. See documentation for the runner module
for further explanation.
'''
from .file_writer import FileWriter
from .pyaudio import PyAudioRunner
from .runner import Runner
| {
"content_hash": "faa60bdd68076f439c9b1330ddf35409",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 36.9,
"alnum_prop": 0.7723577235772358,
"repo_name": "bracket/ratchet",
"id": "535f8efc9ee9e29c9301c9c29bc3ccabe2318a18",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ratchet/runner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28082"
}
],
"symlink_target": ""
} |
import sys
import string
import os
import time
from osgeo import gdal
from osgeo import osr
# Name of the test script currently being run (set by setup_run()).
cur_name = 'default'

# Global pass/fail counters maintained by run_tests() and reported by
# summarize().
success_counter = 0
failure_counter = 0
expected_failure_counter = 0
blow_counter = 0
skip_counter = 0
failure_summary = []

# Human-readable reason for the last failure (set via post_reason()).
reason = None

count_skipped_tests_download = 0
count_skipped_tests_slow = 0

# Wall-clock bounds of the current run, used by summarize().
start_time = None
end_time = None

# JPEG2000 driver handles and deregistration flags -- presumably used by
# tests elsewhere to temporarily disable competing drivers (not visible
# in this chunk).
jp2kak_drv = None
jpeg2000_drv = None
jp2ecw_drv = None
jp2mrsid_drv = None
jp2kak_drv_unregistered = False
jpeg2000_drv_unregistered = False
jp2ecw_drv_unregistered = False
jp2mrsid_drv_unregistered = False

# Pull in the Python-version-specific helpers (e.g. run_func,
# is_file_open) from the matching compatibility module.
from sys import version_info
if version_info >= (3,0,0):
    from gdaltest_python3 import *
else:
    from gdaltest_python2 import *

# Process commandline arguments for stuff like --debug, --locale, --config
argv = gdal.GeneralCmdLineProcessor( sys.argv )
###############################################################################
def setup_run(name):
    """Record *name* as the currently-running test script.

    When the APPLY_LOCALE environment variable is set, the process locale
    is switched to the user's default so locale-dependent bugs surface.
    """
    global cur_name
    if 'APPLY_LOCALE' in os.environ:
        import locale
        locale.setlocale(locale.LC_ALL, '')
    cur_name = name
###############################################################################
def run_tests( test_list ):
    """Run each (func, name) pair (or bare func) in *test_list*, printing a
    per-test line and updating the global pass/fail/skip counters and the
    failure_summary list.  Each test returns one of 'success',
    'expected_fail', 'fail' or 'skip'; any other value counts as 'blew'.
    """
    global success_counter, failure_counter, expected_failure_counter, blow_counter, skip_counter
    global reason, failure_summary, cur_name
    global start_time, end_time

    # Only start the clock if no outer caller (run_all) already did.
    set_time = start_time is None
    if set_time:
        start_time = time.time()
    had_errors_this_script = 0
    for test_item in test_list:
        if test_item is None:
            continue

        try:
            # Preferred form: a (callable, display-name) tuple.
            (func, name) = test_item
            if func.__name__[:4] == 'test':
                outline = ' TEST: ' + func.__name__[4:] + ': ' + name + ' ... '
            else:
                outline = ' TEST: ' + func.__name__ + ': ' + name + ' ... '
        except:
            # Fallback: a bare callable; use its __name__ as the label.
            func = test_item
            name = func.__name__
            outline = ' TEST: ' + name + ' ... '

        sys.stdout.write( outline )
        sys.stdout.flush()

        reason = None
        # run_func (from gdaltest_python2/3) invokes the test and maps
        # exceptions to a result string.
        result = run_func(func)

        if result[:4] == 'fail':
            # First failure of this script: record the script header once.
            if had_errors_this_script == 0:
                failure_summary.append( 'Script: ' + cur_name )
                had_errors_this_script = 1
            failure_summary.append( outline + result )
            if reason is not None:
                failure_summary.append( '    ' + reason )

        if reason is not None:
            print(('    ' + reason))

        if result == 'success':
            success_counter = success_counter + 1
        elif result == 'expected_fail':
            expected_failure_counter = expected_failure_counter + 1
        elif result == 'fail':
            failure_counter = failure_counter + 1
        elif result == 'skip':
            skip_counter = skip_counter + 1
        else:
            blow_counter = blow_counter + 1

    if set_time:
        end_time = time.time()
###############################################################################
def get_lineno_2framesback( frames ):
    """Return the source line number *frames* levels up the call stack.

    Returns -1 when the stack is shallower than *frames* levels or when
    frame introspection is unavailable.
    """
    try:
        import inspect
        frame = inspect.currentframe()
        # Walk up the stack; stop early if we run out of frames instead of
        # dereferencing None (the original relied on a bare except to
        # swallow that AttributeError).
        while frames > 0 and frame is not None:
            frame = frame.f_back
            frames = frames - 1

        if frame is None:
            return -1
        return frame.f_lineno
    except Exception:
        return -1
###############################################################################
def post_reason(msg, frames=2):
    """Record *msg* as the global failure ``reason``, prefixed with the
    caller's source line number when it can be determined."""
    global reason
    lineno = get_lineno_2framesback(frames)
    if lineno < 0:
        reason = msg
    else:
        reason = 'line %d: %s' % (lineno, msg)
###############################################################################
def summarize():
    """Print the final pass/fail/skip report, run GDAL/OGR/OSR cleanup
    twice, and return the number of failed tests (failures + blowups)."""
    global count_skipped_tests_download, count_skipped_tests_slow
    global success_counter, failure_counter, blow_counter, skip_counter
    global cur_name
    global start_time, end_time

    print('')
    if cur_name is not None:
        print('Test Script: %s' % cur_name)
    print('Succeeded: %d' % success_counter)
    print('Failed: %d (%d blew exceptions)' \
        % (failure_counter+blow_counter, blow_counter))
    print('Skipped: %d' % skip_counter)
    print('Expected fail:%d' % expected_failure_counter)
    if start_time is not None:
        duration = end_time - start_time
        if duration >= 60:
            print('Duration: %02dm%02.1fs' % (duration / 60., duration % 60.))
        else:
            print('Duration: %02.2fs' % duration)
    if count_skipped_tests_download != 0:
        print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, %d tests relying on data to downloaded from the Web have been skipped' % count_skipped_tests_download)
    if count_skipped_tests_slow != 0:
        print('As GDAL_RUN_SLOW_TESTS environment variable is not defined, %d "slow" tests have been skipped' % count_skipped_tests_slow)
    print('')

    # testnonboundtoswig lives in gcore; extend the path so it imports
    # whether we run from the autotest root or a subdirectory.
    sys.path.append( 'gcore' )
    sys.path.append( '../gcore' )
    import testnonboundtoswig
    # Do it twice to ensure that cleanup routines properly do their jobs
    for i in range(2):
        testnonboundtoswig.OSRCleanup()
        testnonboundtoswig.GDALDestroyDriverManager()
        testnonboundtoswig.OGRCleanupAll()

    return failure_counter + blow_counter
###############################################################################
def run_all( dirlist, option_list ):
    """Discover and run every *.py test script in each directory of
    *dirlist*, then print the accumulated failure summary.

    Each script is imported and its module-level ``gdaltest_list`` is fed
    to run_tests().  The cwd is switched into each directory for the
    duration of its scripts.  (``option_list`` is currently unused here.)
    """
    global start_time, end_time
    global cur_name

    start_time = time.time()
    for dir_name in dirlist:
        files = os.listdir(dir_name)

        old_path = sys.path
        sys.path.append('.')

        for file in files:
            if not file[-3:] == '.py':
                continue

            module = file[:-3]
            try:
                wd = os.getcwd()
                os.chdir( dir_name )

                # Import by name built at runtime, hence exec().
                exec("import " + module)
                try:
                    print('Running tests from %s/%s' % (dir_name,file))
                    setup_run( '%s/%s' % (dir_name,file) )
                    exec("run_tests( " + module + ".gdaltest_list)")
                except:
                    # A broken gdaltest_list must not abort the whole run.
                    pass
                os.chdir( wd )

            except:
                os.chdir( wd )
                print('... failed to load %s ... skipping.' % file)

                import traceback
                traceback.print_exc()

        # We only add the tool directory to the python path long enough
        # to load the tool files.
        sys.path = old_path

    end_time = time.time()

    cur_name = None

    if len(failure_summary) > 0:
        print('')
        print(' ------------ Failures ------------')
        for item in failure_summary:
            print(item)
        print(' ----------------------------------')
###############################################################################
def clean_tmp():
    """Delete every file in ./tmp except CVS bookkeeping and the
    'do-not-remove' sentinel; always returns 'success'."""
    for entry in os.listdir('tmp'):
        if entry in ('CVS', 'do-not-remove'):
            continue
        try:
            os.remove('tmp/' + entry)
        except:
            # Best-effort cleanup: ignore files we cannot delete.
            pass
    return 'success'
###############################################################################
def testCreateCopyInterruptCallback(pct, message, user_data):
if pct > 0.5:
return 0 # to stop
else:
return 1 # to continue
###############################################################################
class GDALTest:
def __init__(self, drivername, filename, band, chksum,
             xoff = 0, yoff = 0, xsize = 0, ysize = 0, options = None,
             filename_absolute = 0 ):
    """Describe one GDAL driver test case.

    drivername -- GDAL driver short name (e.g. 'GTiff').
    filename -- test file, relative to ./data unless filename_absolute.
    band -- band number the checks operate on.
    chksum -- expected band checksum, or None to skip the comparison.
    xoff/yoff/xsize/ysize -- checksum window (0 sizes = whole raster).
    options -- creation options passed to Create()/CreateCopy().
    """
    self.driver = None
    self.drivername = drivername
    self.filename = filename
    self.filename_absolute = filename_absolute
    self.band = band
    self.chksum = chksum
    self.xoff = xoff
    self.yoff = yoff
    self.xsize = xsize
    self.ysize = ysize
    # Fixed: default was a mutable [] shared by every instance created
    # without explicit options; use None and allocate per instance.
    self.options = [] if options is None else options
def testDriver(self):
    """Locate (and cache on self.driver) the driver self.drivername.

    Returns 'success', or 'fail' (with reason posted) when the driver is
    not available in this GDAL build.
    """
    if self.driver is None:
        self.driver = gdal.GetDriverByName( self.drivername )
        if self.driver is None:
            post_reason( self.drivername + ' driver not found!' )
            return 'fail'
    return 'success'
def testOpen(self, check_prj = None, check_gt = None, gt_epsilon = None, \
             check_stat = None, check_approx_stat = None, \
             stat_epsilon = None, skip_checksum = None):
    """check_prj - projection reference, check_gt - geotransformation
    matrix (tuple), gt_epsilon - geotransformation tolerance,
    check_stat - band statistics (tuple), stat_epsilon - statistics
    tolerance.

    Opens the test file read-only and verifies driver identity, optional
    projection/geotransform/statistics, and the band checksum.
    Returns 'success', 'fail' or 'skip'.
    """
    if self.testDriver() == 'fail':
        return 'skip'

    if self.filename_absolute:
        wrk_filename = self.filename
    else:
        wrk_filename = 'data/' + self.filename

    ds = gdal.Open( wrk_filename, gdal.GA_ReadOnly )

    if ds is None:
        post_reason( 'Failed to open dataset: ' + wrk_filename )
        return 'fail'

    if ds.GetDriver().ShortName != gdal.GetDriverByName( self.drivername ).ShortName:
        post_reason( 'The driver of the returned dataset is %s instead of %s.' % ( ds.GetDriver().ShortName, self.drivername ) )
        return 'fail'

    if self.xsize == 0 and self.ysize == 0:
        self.xsize = ds.RasterXSize
        self.ysize = ds.RasterYSize

    # Do we need to check projection?
    if check_prj is not None:
        new_prj = ds.GetProjection()

        src_osr = osr.SpatialReference()
        src_osr.SetFromUserInput( check_prj )

        new_osr = osr.SpatialReference( wkt=new_prj )

        if not src_osr.IsSame(new_osr):
            print('')
            print('old = %s' % src_osr.ExportToPrettyWkt())
            print('new = %s' % new_osr.ExportToPrettyWkt())
            post_reason( 'Projections differ' )
            return 'fail'

    # Do we need to check geotransform?
    if check_gt:
        # Default to 100th of pixel as our test value.
        if gt_epsilon is None:
            gt_epsilon = (abs(check_gt[1])+abs(check_gt[2])) / 100.0

        new_gt = ds.GetGeoTransform()
        for i in range(6):
            if abs(new_gt[i]-check_gt[i]) > gt_epsilon:
                print('')
                print('old = ', check_gt)
                print('new = ', new_gt)
                post_reason( 'Geotransform differs.' )
                return 'fail'

    oBand = ds.GetRasterBand(self.band)
    if skip_checksum is None:
        chksum = oBand.Checksum(self.xoff, self.yoff, self.xsize, self.ysize)

    # Do we need to check approximate statistics?
    if check_approx_stat:
        # Default to 1000th of pixel value range as our test value.
        if stat_epsilon is None:
            stat_epsilon = \
                abs(check_approx_stat[1] - check_approx_stat[0]) / 1000.0

        new_stat = oBand.GetStatistics(1, 1)
        for i in range(4):

            # NOTE - mloskot: Poor man Nan/Inf value check. It's poor
            # because we need to support old and buggy Python 2.3.
            # Tested on Linux, Mac OS X and Windows, with Python 2.3/2.4/2.5.
            sv = str(new_stat[i]).lower()
            if sv.find('n') >= 0 or sv.find('i') >= 0 or sv.find('#') >= 0:
                # Fixed: the format string was malformed
                # ("'...encountered '%'.' % sv") and raised TypeError at
                # runtime instead of reporting the bad value; also fixed
                # the 'Invinite' typo.
                post_reason( 'NaN or Infinite value encountered \'%s\'.' % sv )
                return 'fail'

            if abs(new_stat[i]-check_approx_stat[i]) > stat_epsilon:
                print('')
                print('old = ', check_approx_stat)
                print('new = ', new_stat)
                post_reason( 'Approximate statistics differs.' )
                return 'fail'

    # Do we need to check statistics?
    if check_stat:
        # Default to 1000th of pixel value range as our test value.
        if stat_epsilon is None:
            stat_epsilon = abs(check_stat[1] - check_stat[0]) / 1000.0

        # FIXME: how to test approximate statistic results?
        new_stat = oBand.GetStatistics(1, 1)

        new_stat = oBand.GetStatistics(0, 1)
        for i in range(4):

            sv = str(new_stat[i]).lower()
            if sv.find('n') >= 0 or sv.find('i') >= 0 or sv.find('#') >= 0:
                # Fixed: same malformed format string as above.
                post_reason( 'NaN or Infinite value encountered \'%s\'.' % sv )
                return 'fail'

            if abs(new_stat[i]-check_stat[i]) > stat_epsilon:
                print('')
                print('old = ', check_stat)
                print('new = ', new_stat)
                post_reason( 'Statistics differs.' )
                return 'fail'

    ds = None

    if is_file_open(wrk_filename):
        post_reason('file still open after dataset closing')
        return 'fail'

    if skip_checksum is not None:
        return 'success'
    elif self.chksum is None or chksum == self.chksum:
        return 'success'
    else:
        post_reason('Checksum for band %d in "%s" is %d, but expected %d.' \
            % (self.band, self.filename, chksum, self.chksum) )
        return 'fail'
def testCreateCopy(self, check_minmax = 1, check_gt = 0, check_srs = None,
                   vsimem = 0, new_filename = None, strict_in = 0,
                   skip_preclose_test = 0, delete_copy = 1, gt_epsilon = None,
                   check_checksum_not_null = None, interrupt_during_copy = False):
    """Exercise the driver's CreateCopy(): copy the source file and verify
    checksum, min/max, geotransform and SRS both on the still-open copy
    and after reopening it.  With interrupt_during_copy, instead verify
    that a cancelling progress callback makes CreateCopy() fail.
    Returns 'success', 'fail' or 'skip'.
    """
    if self.testDriver() == 'fail':
        return 'skip'

    if self.filename_absolute:
        wrk_filename = self.filename
    else:
        wrk_filename = 'data/' + self.filename

    src_ds = gdal.Open( wrk_filename )
    if self.band > 0:
        minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()

    src_prj = src_ds.GetProjection()
    src_gt = src_ds.GetGeoTransform()

    if new_filename is None:
        if vsimem:
            new_filename = '/vsimem/' + self.filename + '.tst'
        else:
            new_filename = 'tmp/' + self.filename + '.tst'

    # Silence expected driver errors (e.g. deliberate interruption).
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    if interrupt_during_copy:
        new_ds = self.driver.CreateCopy( new_filename, src_ds,
                                         strict = strict_in,
                                         options = self.options,
                                         callback = testCreateCopyInterruptCallback)
    else:
        new_ds = self.driver.CreateCopy( new_filename, src_ds,
                                         strict = strict_in,
                                         options = self.options )
    gdal.PopErrorHandler()

    if interrupt_during_copy:
        # Success here means the copy FAILED (was interrupted) as expected.
        if new_ds is None:
            gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
            self.driver.Delete( new_filename )
            gdal.PopErrorHandler()
            return 'success'
        else:
            post_reason( 'CreateCopy() should have failed due to interruption')
            new_ds = None
            self.driver.Delete( new_filename )
            return 'fail'

    if new_ds is None:
        post_reason( 'Failed to create test file using CreateCopy method.'\
                     + '\n' + gdal.GetLastErrorMsg() )
        return 'fail'

    if new_ds.GetDriver().ShortName != gdal.GetDriverByName( self.drivername ).ShortName:
        post_reason( 'The driver of the returned dataset is %s instead of %s.' % ( new_ds.GetDriver().ShortName, self.drivername ) )
        return 'fail'

    # Checks on the copy while it is still open (before flush/close).
    if self.band > 0 and skip_preclose_test == 0:
        bnd = new_ds.GetRasterBand(self.band)
        if check_checksum_not_null is True:
            if bnd.Checksum() == 0:
                post_reason('Got null checksum on still-open file.')
                return 'fail'
        elif self.chksum is not None and bnd.Checksum() != self.chksum:
            post_reason(
                'Did not get expected checksum on still-open file.\n' \
                '    Got %d instead of %d.' % (bnd.Checksum(),self.chksum))
            return 'fail'
        if check_minmax:
            got_minmax = bnd.ComputeRasterMinMax()
            if got_minmax != minmax:
                post_reason( \
                'Did not get expected min/max values on still-open file.\n' \
                '    Got %g,%g instead of %g,%g.' \
                % ( got_minmax[0], got_minmax[1], minmax[0], minmax[1] ) )
                return 'fail'

    bnd = None
    new_ds = None

    # hopefully it's closed now!

    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'

    # Repeat the checks on the reopened copy.
    if self.band > 0:
        bnd = new_ds.GetRasterBand(self.band)
        if check_checksum_not_null is True:
            if bnd.Checksum() == 0:
                post_reason('Got null checksum on reopened file.')
                return 'fail'
        elif self.chksum is not None and bnd.Checksum() != self.chksum:
            post_reason( 'Did not get expected checksum on reopened file.\n'
                         '    Got %d instead of %d.' \
                         % (bnd.Checksum(), self.chksum) )
            return 'fail'

        if check_minmax:
            got_minmax = bnd.ComputeRasterMinMax()
            if got_minmax != minmax:
                post_reason( \
                    'Did not get expected min/max values on reopened file.\n' \
                    '    Got %g,%g instead of %g,%g.' \
                    % ( got_minmax[0], got_minmax[1], minmax[0], minmax[1] ) )
                return 'fail'

    # Do we need to check the geotransform?
    if check_gt:
        if gt_epsilon is None:
            eps = 0.00000001
        else:
            eps = gt_epsilon

        new_gt = new_ds.GetGeoTransform()
        if abs(new_gt[0] - src_gt[0]) > eps \
           or abs(new_gt[1] - src_gt[1]) > eps \
           or abs(new_gt[2] - src_gt[2]) > eps \
           or abs(new_gt[3] - src_gt[3]) > eps \
           or abs(new_gt[4] - src_gt[4]) > eps \
           or abs(new_gt[5] - src_gt[5]) > eps:
            print('')
            print('old = ', src_gt)
            print('new = ', new_gt)
            post_reason( 'Geotransform differs.' )
            return 'fail'

    # Do we need to check the geotransform?
    if check_srs is not None:
        new_prj = new_ds.GetProjection()

        src_osr = osr.SpatialReference( wkt=src_prj )
        new_osr = osr.SpatialReference( wkt=new_prj )

        if not src_osr.IsSame(new_osr):
            print('')
            print('old = %s' % src_osr.ExportToPrettyWkt())
            print('new = %s' % new_osr.ExportToPrettyWkt())
            post_reason( 'Projections differ' )
            return 'fail'

    bnd = None
    new_ds = None
    src_ds = None

    # Keep the copy around for inspection when CPL_DEBUG=ON.
    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON' and delete_copy == 1 :
        self.driver.Delete( new_filename )

    return 'success'
def testCreate(self, vsimem = 0, new_filename = None, out_bands = 1,
               check_minmax = 1 ):
    """Exercise the driver's Create(): create a dataset with *out_bands*
    bands, write the source band's raster into each, and verify checksum
    and min/max both before and after reopening.
    Returns 'success', 'fail' or 'skip'.
    """
    if self.testDriver() == 'fail':
        return 'skip'

    if self.filename_absolute:
        wrk_filename = self.filename
    else:
        wrk_filename = 'data/' + self.filename

    src_ds = gdal.Open( wrk_filename )
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize

    src_img = src_ds.GetRasterBand(self.band).ReadRaster(0,0,xsize,ysize)
    minmax = src_ds.GetRasterBand(self.band).ComputeRasterMinMax()

    if new_filename is None:
        if vsimem:
            new_filename = '/vsimem/' + self.filename + '.tst'
        else:
            new_filename = 'tmp/' + self.filename + '.tst'

    new_ds = self.driver.Create( new_filename, xsize, ysize, out_bands,
                                 src_ds.GetRasterBand(self.band).DataType,
                                 options = self.options  )
    if new_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'

    src_ds = None

    try:
        # Replicate the source band's pixels into every output band.
        for band in range(1,out_bands+1):
            new_ds.GetRasterBand(band).WriteRaster( 0, 0, xsize, ysize, src_img )
    except:
        post_reason( 'Failed to write raster bands to test file.' )
        return 'fail'

    for band in range(1,out_bands+1):
        if self.chksum is not None \
           and new_ds.GetRasterBand(band).Checksum() != self.chksum:
            post_reason(
                'Did not get expected checksum on still-open file.\n' \
                '    Got %d instead of %d.' \
                % (new_ds.GetRasterBand(band).Checksum(),self.chksum))

            return 'fail'

        computed_minmax = new_ds.GetRasterBand(band).ComputeRasterMinMax()
        if computed_minmax != minmax and check_minmax:
            post_reason( 'Did not get expected min/max values on still-open file.' )
            print('expect: ', minmax)
            print('got: ', computed_minmax)
            return 'fail'

    new_ds = None

    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'

    # Same verification after a close/reopen round-trip.
    for band in range(1,out_bands+1):
        if self.chksum is not None \
           and new_ds.GetRasterBand(band).Checksum() != self.chksum:
            post_reason( 'Did not get expected checksum on reopened file.' \
                         '    Got %d instead of %d.' \
                         % (new_ds.GetRasterBand(band).Checksum(),self.chksum))
            return 'fail'

        if new_ds.GetRasterBand(band).ComputeRasterMinMax() != minmax and check_minmax:
            post_reason( 'Did not get expected min/max values on reopened file.' )
            return 'fail'

    new_ds = None

    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
        self.driver.Delete( new_filename )

    return 'success'
def testSetGeoTransform(self):
    """Create a copy of the test file, set a geotransform on it and verify
    it survives a close/reopen cycle.

    Returns 'success', 'fail' or 'skip' (gdaltest-style result strings).
    """
    if self.testDriver() == 'fail':
        return 'skip'
    src_ds = gdal.Open( 'data/' + self.filename )
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    new_filename = 'tmp/' + self.filename + '.tst'
    new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
                                 src_ds.GetRasterBand(self.band).DataType,
                                 options = self.options )
    if new_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'
    gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18 )
    if new_ds.SetGeoTransform( gt ) is not gdal.CE_None:
        post_reason( 'Failed to set geographic transformation.' )
        return 'fail'
    # Close both datasets so the new file is flushed before being reopened.
    src_ds = None
    new_ds = None
    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'
    # Compare each of the six geotransform coefficients within a tolerance.
    eps = 0.00000001
    new_gt = new_ds.GetGeoTransform()
    if abs(new_gt[0] - gt[0]) > eps \
       or abs(new_gt[1] - gt[1]) > eps \
       or abs(new_gt[2] - gt[2]) > eps \
       or abs(new_gt[3] - gt[3]) > eps \
       or abs(new_gt[4] - gt[4]) > eps \
       or abs(new_gt[5] - gt[5]) > eps:
        print('')
        print('old = ', gt)
        print('new = ', new_gt)
        post_reason( 'Did not get expected geotransform.' )
        return 'fail'
    new_ds = None
    # Keep the output file around for inspection when CPL_DEBUG is ON.
    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
        self.driver.Delete( new_filename )
    return 'success'
def testSetProjection(self, prj = None, expected_prj = None ):
    """Create a copy of the test file, set a projection on it and verify
    the projection survives a close/reopen cycle.

    prj          -- WKT to write; defaults to a challenging SRS with
                    non-meter linear units.
    expected_prj -- WKT expected on read-back; defaults to *prj*.

    Returns 'success', 'fail' or 'skip' (gdaltest-style result strings).
    """
    if self.testDriver() == 'fail':
        return 'skip'
    src_ds = gdal.Open( 'data/' + self.filename )
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    new_filename = 'tmp/' + self.filename + '.tst'
    new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
                                 src_ds.GetRasterBand(self.band).DataType,
                                 options = self.options )
    if new_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'
    gt = (123.0, 1.18, 0.0, 456.0, 0.0, -1.18 )
    if prj is None:
        # This is a challenging SRS since it has non-meter linear units.
        prj='PROJCS["NAD83 / Ohio South",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",40.03333333333333],PARAMETER["standard_parallel_2",38.73333333333333],PARAMETER["latitude_of_origin",38],PARAMETER["central_meridian",-82.5],PARAMETER["false_easting",1968500],PARAMETER["false_northing",0],UNIT["feet",0.3048006096012192]]'
    src_osr = osr.SpatialReference()
    src_osr.ImportFromWkt(prj)
    new_ds.SetGeoTransform( gt )
    if new_ds.SetProjection( prj ) is not gdal.CE_None:
        post_reason( 'Failed to set geographic projection string.' )
        return 'fail'
    # Close both datasets so the new file is flushed before being reopened.
    src_ds = None
    new_ds = None
    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'
    expected_osr = osr.SpatialReference()
    if expected_prj is None:
        # By default the SRS written is the SRS expected back.
        expected_osr = src_osr
    else:
        expected_osr.ImportFromWkt( expected_prj )
    # Compare as SpatialReference objects, not raw strings, since drivers
    # may legitimately rewrite the WKT.
    new_osr = osr.SpatialReference()
    new_osr.ImportFromWkt(new_ds.GetProjection())
    if not new_osr.IsSame(expected_osr):
        post_reason( 'Did not get expected projection reference.' )
        print('Got: ')
        print(new_osr.ExportToPrettyWkt())
        print('Expected:')
        print(expected_osr.ExportToPrettyWkt())
        return 'fail'
    new_ds = None
    # Keep the output file around for inspection when CPL_DEBUG is ON.
    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
        self.driver.Delete( new_filename )
    return 'success'
def testSetMetadata(self):
    """Create a copy of the test file, set a metadata item on it and verify
    the item survives a close/reopen cycle.

    Returns 'success', 'fail' or 'skip' (gdaltest-style result strings).
    """
    if self.testDriver() == 'fail':
        return 'skip'
    src_ds = gdal.Open( 'data/' + self.filename )
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    new_filename = 'tmp/' + self.filename + '.tst'
    new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
                                 src_ds.GetRasterBand(self.band).DataType,
                                 options = self.options )
    if new_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'
    # Renamed from 'dict', which shadowed the builtin of the same name.
    md = {}
    md['TEST_KEY'] = 'TestValue'
    new_ds.SetMetadata( md )
    # FIXME: SetMetadata()'s return value is not checked here because it is
    # not reliably gdal.CE_None across drivers:
    # if new_ds.SetMetadata( md ) is not gdal.CE_None: return 'fail'
    # Close both datasets so the new file is flushed before being reopened.
    src_ds = None
    new_ds = None
    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'
    md_dict = new_ds.GetMetadata()
    if 'TEST_KEY' not in md_dict:
        post_reason( 'Metadata item TEST_KEY does not exist.')
        return 'fail'
    if md_dict['TEST_KEY'] != 'TestValue':
        post_reason( 'Did not get expected metadata item.' )
        return 'fail'
    new_ds = None
    # Keep the output file around for inspection when CPL_DEBUG is ON.
    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
        self.driver.Delete( new_filename )
    return 'success'
def testSetNoDataValue(self):
    """Create a copy of the test file, set a NoData value on band 1 and
    verify it survives a close/reopen cycle.

    Returns 'success', 'fail' or 'skip' (gdaltest-style result strings).
    """
    if self.testDriver() == 'fail':
        return 'skip'
    src_ds = gdal.Open( 'data/' + self.filename )
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    new_filename = 'tmp/' + self.filename + '.tst'
    new_ds = self.driver.Create( new_filename, xsize, ysize, 1,
                                 src_ds.GetRasterBand(self.band).DataType,
                                 options = self.options )
    if new_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'
    nodata = 11
    if new_ds.GetRasterBand(1).SetNoDataValue(nodata) is not gdal.CE_None:
        post_reason( 'Failed to set NoData value.' )
        return 'fail'
    # Close both datasets so the new file is flushed before being reopened.
    src_ds = None
    new_ds = None
    new_ds = gdal.Open( new_filename )
    if new_ds is None:
        post_reason( 'Failed to open dataset: ' + new_filename )
        return 'fail'
    if nodata != new_ds.GetRasterBand(1).GetNoDataValue():
        post_reason( 'Did not get expected NoData value.' )
        return 'fail'
    new_ds = None
    # Keep the output file around for inspection when CPL_DEBUG is ON.
    if gdal.GetConfigOption( 'CPL_DEBUG', 'OFF' ) != 'ON':
        self.driver.Delete( new_filename )
    return 'success'
def testSetDescription(self):
    """Create a copy of the test file, set a band description and verify it
    survives a close/reopen cycle.

    Returns 'success', 'fail' or 'skip' (gdaltest-style result strings).
    """
    if self.testDriver() == 'fail':
        return 'skip'
    src_ds = gdal.Open('data/' + self.filename)
    xsize = src_ds.RasterXSize
    ysize = src_ds.RasterYSize
    out_filename = 'tmp/' + self.filename + '.tst'
    out_ds = self.driver.Create(out_filename, xsize, ysize, 1,
                                src_ds.GetRasterBand(self.band).DataType,
                                options=self.options)
    if out_ds is None:
        post_reason( 'Failed to create test file using Create method.' )
        return 'fail'
    description = "Description test string"
    out_ds.GetRasterBand(1).SetDescription(description)
    # Flush to disk by dropping both handles, then reopen and compare.
    src_ds = None
    out_ds = None
    out_ds = gdal.Open(out_filename)
    if out_ds is None:
        post_reason( 'Failed to open dataset: ' + out_filename )
        return 'fail'
    if out_ds.GetRasterBand(1).GetDescription() != description:
        post_reason( 'Did not get expected description string.' )
        return 'fail'
    out_ds = None
    # Keep the output file around for inspection when CPL_DEBUG is ON.
    if gdal.GetConfigOption('CPL_DEBUG', 'OFF') != 'ON':
        self.driver.Delete(out_filename)
    return 'success'
def approx_equal( a, b ):
    """Return 1 when *a* and *b* agree to within ~11 significant digits,
    else 0.

    The comparison is relative to *a*.  Zero is handled explicitly so the
    relative test never divides by zero: previously approx_equal(0, 0)
    raised ZeroDivisionError instead of returning 1.
    """
    a = float(a)
    b = float(b)
    if a == 0:
        # Zero only matches zero exactly; avoids b/a below.
        return 1 if b == 0 else 0
    if abs(b/a - 1.0) > .00000000001:
        return 0
    else:
        return 1
def user_srs_to_wkt( user_text ):
    """Convert a user-style SRS description (whatever SetFromUserInput
    accepts: EPSG codes, PROJ strings, WKT, ...) to its WKT form."""
    spatial_ref = osr.SpatialReference()
    spatial_ref.SetFromUserInput(user_text)
    return spatial_ref.ExportToWkt()
def equal_srs_from_wkt( expected_wkt, got_wkt ):
    """Compare two WKT strings as spatial references.

    Returns 1 when they describe the same SRS; otherwise prints both,
    records the failure via post_reason() and returns 0.
    """
    expected_srs = osr.SpatialReference()
    expected_srs.ImportFromWkt(expected_wkt)
    got_srs = osr.SpatialReference()
    got_srs.ImportFromWkt(got_wkt)
    if got_srs.IsSame(expected_srs):
        return 1
    print('Expected:\n%s' % expected_wkt)
    print('Got:     \n%s' % got_wkt)
    post_reason( 'SRS differs from expected.' )
    return 0
###############################################################################
# Compare two sets of RPC metadata, and establish if they are essentially
# equivalent or not.
def rpcs_equal( md1, md2 ):
    """Compare two RPC metadata dicts; return 1 when essentially equivalent,
    else 0 (with the difference reported via post_reason).

    Scalar offset/scale fields are compared with approx_equal(); the four
    coefficient fields are split and compared term by term.
    """
    simple_fields = [ 'LINE_OFF', 'SAMP_OFF', 'LAT_OFF', 'LONG_OFF',
                      'HEIGHT_OFF', 'LINE_SCALE', 'SAMP_SCALE', 'LAT_SCALE',
                      'LONG_SCALE', 'HEIGHT_SCALE' ]
    coef_fields = [ 'LINE_NUM_COEFF', 'LINE_DEN_COEFF',
                    'SAMP_NUM_COEFF', 'SAMP_DEN_COEFF' ]
    for sf in simple_fields:
        try:
            if not approx_equal(float(md1[sf]),float(md2[sf])):
                post_reason( '%s values differ.' % sf )
                print(md1[sf])
                print(md2[sf])
                return 0
        except:
            # Missing key or a value that does not convert to float.
            post_reason( '%s value missing or corrupt.' % sf )
            print(md1)
            print(md2)
            return 0
    for cf in coef_fields:
        try:
            list1 = md1[cf].split()
            list2 = md2[cf].split()
        except:
            post_reason( '%s value missing or corrupt.' % cf )
            print(md1[cf])
            print(md2[cf])
            return 0
        # RPC coefficient lists always carry exactly 20 terms.
        if len(list1) != 20:
            post_reason( '%s value list length wrong(1)' % cf )
            print(list1)
            return 0
        if len(list2) != 20:
            post_reason( '%s value list length wrong(2)' % cf )
            print(list2)
            return 0
        for i in range(20):
            if not approx_equal(float(list1[i]),float(list2[i])):
                post_reason( '%s[%d] values differ.' % (cf,i) )
                print(list1[i], list2[i])
                return 0
    return 1
###############################################################################
# Test if geotransforms are equal with an epsilon tolerance
#
def geotransform_equals(gt1, gt2, gt_epsilon):
    """Return True when the two 6-coefficient geotransforms match within
    *gt_epsilon*; otherwise report the difference and return False."""
    for idx in range(6):
        if abs(gt1[idx] - gt2[idx]) > gt_epsilon:
            print('')
            print('gt1 = ', gt1)
            print('gt2 = ', gt2)
            post_reason( 'Geotransform differs.' )
            return False
    return True
###############################################################################
# Download file at url 'url' and put it as 'filename' in 'tmp/cache/'
#
# If 'filename' already exists in 'tmp/cache/', it is not downloaded
# If GDAL_DOWNLOAD_TEST_DATA is not defined, the function fails
# If GDAL_DOWNLOAD_TEST_DATA is defined, 'url' is downloaded as 'filename' in 'tmp/cache/'
def download_file(url, filename, download_size = -1):
    """Download *url* into 'tmp/cache/<filename>'; return True on success.

    The download is skipped (returning True) when the cached copy already
    exists.  Unless GDAL_DOWNLOAD_TEST_DATA is set in the environment the
    function refuses to download, prints a one-time explanatory message and
    returns False.
    download_size -- number of bytes to fetch, or -1 for the whole resource.
    """
    global count_skipped_tests_download
    try:
        # Already cached: nothing to do.
        os.stat( 'tmp/cache/' + filename )
        return True
    except:
        if 'GDAL_DOWNLOAD_TEST_DATA' in os.environ:
            val = None
            try:
                handle = gdalurlopen(url)
                if download_size == -1:
                    try:
                        # Best effort: report the advertised length.
                        handle_info = handle.info()
                        content_length = handle_info['content-length']
                        print('Downloading %s (length = %s bytes)...' % (url, content_length))
                    except:
                        print('Downloading %s...' % (url))
                    val = handle.read()
                else:
                    print('Downloading %d bytes from %s...' % (download_size, url))
                    val = handle.read(download_size)
            except:
                return False
            try:
                os.stat( 'tmp/cache' )
            except:
                # Create the cache directory on first use.
                os.mkdir('tmp/cache')
            try:
                open( 'tmp/cache/' + filename, 'wb').write(val)
                return True
            except:
                print('Cannot write %s' % (filename))
                return False
        else:
            if count_skipped_tests_download == 0:
                print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, some tests relying on data to downloaded from the Web will be skipped')
            count_skipped_tests_download = count_skipped_tests_download + 1
            return False
###############################################################################
# GDAL data type to python struct format
def gdal_data_type_to_python_struct_format(datatype):
    """Map a GDAL raster data type to the matching struct format character.

    Any type not listed below (including byte types) falls back to 'B'.
    """
    format_by_type = {
        gdal.GDT_Int16:   'h',
        gdal.GDT_UInt16:  'H',
        gdal.GDT_Int32:   'i',
        gdal.GDT_UInt32:  'I',
        gdal.GDT_Float32: 'f',
        gdal.GDT_Float64: 'd',
    }
    return format_by_type.get(datatype, 'B')
###############################################################################
# Compare the values of the pixels
def compare_ds(ds1, ds2, xoff = 0, yoff = 0, width = 0, height = 0, verbose=1):
    """Compare band 1 of two datasets pixel by pixel over a window.

    A width/height of 0 means "use ds1's full raster size".  Returns the
    maximum absolute per-pixel difference (0 means the windows are
    identical); when *verbose*, individual differences are printed.
    """
    import struct
    if width == 0:
        width = ds1.RasterXSize
    if height == 0:
        height = ds1.RasterYSize
    # Unpack each window into a flat tuple of numbers, using each band's
    # own data type to pick the struct format character.
    data1 = ds1.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)
    type_char = gdal_data_type_to_python_struct_format(ds1.GetRasterBand(1).DataType)
    val_array1 = struct.unpack(type_char * width * height, data1)
    data2 = ds2.GetRasterBand(1).ReadRaster(xoff, yoff, width, height)
    type_char = gdal_data_type_to_python_struct_format(ds2.GetRasterBand(1).DataType)
    val_array2 = struct.unpack(type_char * width * height, data2)
    maxdiff = 0.0
    ndiffs = 0
    for i in range(width*height):
        diff = val_array1[i] - val_array2[i]
        if diff != 0:
            ndiffs = ndiffs + 1
            if abs(diff) > maxdiff:
                maxdiff = abs(diff)
                # A new maximum difference is always reported...
                if verbose:
                    print("Diff at pixel (%d, %d) : %f" % (i % width, i / width, float(diff)))
            elif ndiffs < 10:
                # ...other differences only until ten have been seen.
                if verbose:
                    print("Diff at pixel (%d, %d) : %f" % (i % width, i / width, float(diff)))
    if maxdiff != 0 and verbose:
        print("Max diff : %d" % (maxdiff))
        print("Number of diffs : %d" % (ndiffs))
    return maxdiff
###############################################################################
# Deregister all JPEG2000 drivers, except the one passed as an argument
def deregister_all_jpeg2000_drivers_but(name_of_driver_to_keep):
    """Deregister every JPEG2000-capable driver except *name_of_driver_to_keep*.

    The driver objects and per-driver 'unregistered' flags are stored in
    module globals so reregister_all_jpeg2000_drivers() can undo this.
    Each lookup is wrapped in try/except so absent drivers are silently
    skipped.  Always returns True.

    NOTE(review): the debug category is 'gdaltest' in the first two blocks
    and 'gdaltest.' in the later ones -- looks like an unintended
    inconsistency, left untouched here.
    """
    global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, jp2mrsid_drv, jp2openjpeg_drv
    global jp2kak_drv_unregistered,jpeg2000_drv_unregistered,jp2ecw_drv_unregistered,jp2mrsid_drv_unregistered,jp2openjpeg_drv_unregistered
    # Deregister other potential conflicting JPEG2000 drivers that will
    # be re-registered in the cleanup
    try:
        jp2kak_drv = gdal.GetDriverByName('JP2KAK')
        if name_of_driver_to_keep != 'JP2KAK' and jp2kak_drv:
            gdal.Debug('gdaltest','Deregistering JP2KAK')
            jp2kak_drv.Deregister()
            jp2kak_drv_unregistered = True
    except:
        pass
    try:
        jpeg2000_drv = gdal.GetDriverByName('JPEG2000')
        if name_of_driver_to_keep != 'JPEG2000' and jpeg2000_drv:
            gdal.Debug('gdaltest','Deregistering JPEG2000')
            jpeg2000_drv.Deregister()
            jpeg2000_drv_unregistered = True
    except:
        pass
    try:
        jp2ecw_drv = gdal.GetDriverByName('JP2ECW')
        if name_of_driver_to_keep != 'JP2ECW' and jp2ecw_drv:
            gdal.Debug('gdaltest.','Deregistering JP2ECW')
            jp2ecw_drv.Deregister()
            jp2ecw_drv_unregistered = True
    except:
        pass
    try:
        jp2mrsid_drv = gdal.GetDriverByName('JP2MrSID')
        if name_of_driver_to_keep != 'JP2MrSID' and jp2mrsid_drv:
            gdal.Debug('gdaltest.','Deregistering JP2MrSID')
            jp2mrsid_drv.Deregister()
            jp2mrsid_drv_unregistered = True
    except:
        pass
    try:
        jp2openjpeg_drv = gdal.GetDriverByName('JP2OpenJPEG')
        if name_of_driver_to_keep != 'JP2OpenJPEG' and jp2openjpeg_drv:
            gdal.Debug('gdaltest.','Deregistering JP2OpenJPEG')
            jp2openjpeg_drv.Deregister()
            jp2openjpeg_drv_unregistered = True
    except:
        pass
    return True
###############################################################################
# Re-register all JPEG2000 drivers previously disabled by
# deregister_all_jpeg2000_drivers_but
def reregister_all_jpeg2000_drivers():
    """Re-register all JPEG2000 drivers previously disabled by
    deregister_all_jpeg2000_drivers_but().

    Each driver is re-registered only when its *_unregistered flag is set,
    and the flag is cleared again afterwards.  Always returns True.
    """
    global jp2kak_drv, jpeg2000_drv, jp2ecw_drv, jp2mrsid_drv, jp2openjpeg_drv
    global jp2kak_drv_unregistered,jpeg2000_drv_unregistered,jp2ecw_drv_unregistered,jp2mrsid_drv_unregistered, jp2openjpeg_drv_unregistered
    try:
        if jp2kak_drv_unregistered:
            jp2kak_drv.Register()
            jp2kak_drv_unregistered = False
            gdal.Debug('gdaltest','Registering JP2KAK')
    except:
        pass
    try:
        if jpeg2000_drv_unregistered:
            jpeg2000_drv.Register()
            jpeg2000_drv_unregistered = False
            gdal.Debug('gdaltest','Registering JPEG2000')
    except:
        pass
    try:
        if jp2ecw_drv_unregistered:
            jp2ecw_drv.Register()
            jp2ecw_drv_unregistered = False
            gdal.Debug('gdaltest','Registering JP2ECW')
    except:
        pass
    try:
        if jp2mrsid_drv_unregistered:
            jp2mrsid_drv.Register()
            jp2mrsid_drv_unregistered = False
            gdal.Debug('gdaltest','Registering JP2MrSID')
    except:
        pass
    try:
        if jp2openjpeg_drv_unregistered:
            jp2openjpeg_drv.Register()
            # Bug fix: this used to clobber the driver object itself
            # ('jp2openjpeg_drv = False') instead of clearing the flag,
            # leaving the flag set and losing the driver reference.
            jp2openjpeg_drv_unregistered = False
            gdal.Debug('gdaltest','Registering JP2OpenJPEG')
    except:
        pass
    return True
###############################################################################
# Determine if the filesystem supports sparse files.
# Currently, this will only work on Linux (or any *NIX that has the stat
# command line utility)
def filesystem_supports_sparse_files(path):
    """Return True when the filesystem holding *path* is believed to support
    sparse files.

    Relies on the 'stat -f' command-line utility, so this effectively only
    works on Linux/*NIX; it also always returns False under Travis CI.
    """
    if skip_on_travis():
        return False
    try:
        (ret, err) = runexternal_out_and_err('stat -f -c "%T" ' + path)
    except:
        return False
    if err != '':
        post_reason('Cannot determine if filesystem supports sparse files')
        return False
    if ret.find('fat32') != -1:
        post_reason('File system does not support sparse files')
        return False
    # Add here any missing filesystem supporting sparse files
    # See http://en.wikipedia.org/wiki/Comparison_of_file_systems
    if ret.find('ext3') == -1 and \
       ret.find('ext4') == -1 and \
       ret.find('reiser') == -1 and \
       ret.find('xfs') == -1 and \
       ret.find('jfs') == -1 and \
       ret.find('zfs') == -1 and \
       ret.find('ntfs') == -1 :
        post_reason('Filesystem %s is not believed to support sparse files' % ret)
        return False
    return True
###############################################################################
# Unzip a file
def unzip(target_dir, zipfilename, verbose = False):
    """Extract *zipfilename* into *target_dir*, creating directories as
    needed.

    Falls back to the external 'unzip' utility when the zipfile module is
    unavailable or cannot open the archive.  Unlike the original version,
    both the archive and each output file are closed even on error.
    """
    try:
        import zipfile
        zf = zipfile.ZipFile(zipfilename)
    except:
        # Fall back to the command-line tool.
        os.system('unzip -d ' + target_dir + ' ' + zipfilename)
        return
    try:
        for filename in zf.namelist():
            if verbose:
                print(filename)
            outfilename = os.path.join(target_dir, filename)
            if filename.endswith('/'):
                # Directory entry: just ensure the directory exists.
                if not os.path.exists(outfilename):
                    os.makedirs(outfilename)
            else:
                outdirname = os.path.dirname(outfilename)
                if not os.path.exists(outdirname):
                    os.makedirs(outdirname)
                outfile = open(outfilename, 'wb')
                try:
                    outfile.write(zf.read(filename))
                finally:
                    outfile.close()
    finally:
        zf.close()
    return
###############################################################################
# Return if a number is the NaN number
def isnan(val):
    """Return True when *val* is the IEEE NaN value, else False."""
    if val != val:
        # NaN is the only value that compares unequal to itself.
        return True
    # Python 2.3 unlike later versions returned True for nan == nan, so
    # double-check the textual representation as well.
    return ('%f' % val) == 'nan'
###############################################################################
# Return NaN
def NaN():
    """Return a float NaN, tolerating interpreters without float('nan')."""
    try:
        return float('nan')    # Python >= 2.6
    except:
        # Very old interpreters: produce NaN arithmetically (inf / inf).
        return 1e400 / 1e400
###############################################################################
# Return positive infinity
def posinf():
    """Return positive infinity, tolerating interpreters without float('inf')."""
    try:
        return float('inf')    # Python >= 2.6
    except:
        # Very old interpreters: an overflowing literal yields +inf.
        return 1e400
###############################################################################
# Return negative infinity
def neginf():
    """Return negative infinity, tolerating interpreters without float('-inf')."""
    try:
        return float('-inf')    # Python >= 2.6
    except:
        # Very old interpreters: a negated overflowing literal yields -inf.
        return -1e400
###############################################################################
# Has the user requested to run the slow tests
def run_slow_tests():
    """Return True when GDAL_RUN_SLOW_TESTS is set to 'yes' or 'YES'.

    Otherwise returns False, printing a one-time explanatory message and
    incrementing the module-global skip counter.
    """
    global count_skipped_tests_slow
    val = gdal.GetConfigOption('GDAL_RUN_SLOW_TESTS', None)
    if val != 'yes' and val != 'YES':
        if count_skipped_tests_slow == 0:
            print('As GDAL_RUN_SLOW_TESTS environment variable is not defined, some "slow" tests will be skipped')
        count_skipped_tests_slow = count_skipped_tests_slow + 1
        return False
    return True
###############################################################################
# Return true if the platform support symlinks
def support_symlink():
    """Return True when the current platform is known to support symlinks."""
    plat = sys.platform
    if plat.startswith('linux') or plat == 'darwin':
        return True
    # BSD and Solaris flavours embed the OS name inside sys.platform.
    return 'freebsd' in plat or 'sunos' in plat
###############################################################################
# Return True if the test must be skipped
def skip_on_travis():
    """Return True (and record the skip reason) when the TRAVIS
    configuration option / environment variable is set, i.e. when running
    under Travis CI."""
    val = gdal.GetConfigOption('TRAVIS', None)
    if val is not None:
        post_reason('Test skipped on Travis')
        return True
    return False
###############################################################################
# find_lib_linux()
# Parse /proc/self/maps to find an occurrence of libXXXXX.so.*
def find_lib_linux(libname):
    """Search /proc/self/maps for a mapping of lib<libname>.so*.

    Returns the full path of the first matching shared object, or None when
    the library is not mapped into the current process.  (Linux only.)
    """
    # 'with' guarantees the handle is closed even if readlines() fails.
    with open('/proc/self/maps') as f:
        lines = f.readlines()
    for line in lines:
        # Quick rejection: the line must mention both /lib<libname> and .so.
        if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:
            continue
        # A maps line is: address perms offset dev inode pathname.
        # Skip the first five space-separated fields; the remainder is the
        # pathname (the original skipped them one find(' ') at a time).
        parts = line.split(' ', 5)
        if len(parts) < 6:
            continue
        soname = parts[5].lstrip().rstrip('\n')
        if soname.rfind('/lib' + libname) == -1:
            continue
        return soname
    return None
###############################################################################
# find_lib_sunos()
# Parse output of pmap to find an occurrence of libXXX.so.*
def find_lib_sunos(libname):
    """Scan 'pmap <pid>' output for a mapped lib<libname>.so* and return its
    full path, or None when not found.  (Solaris only.)"""
    pid = os.getpid()
    (lines, err) = runexternal_out_and_err('pmap %d' % pid)
    for line in lines.split('\n'):
        # The line must mention both /lib<libname> and .so.
        if line.rfind('/lib' + libname) == -1 or line.find('.so') == -1:
            continue
        slash = line.find('/')
        if slash < 0:
            continue
        soname = line[slash:].lstrip().rstrip('\n')
        if soname.rfind('/lib' + libname) != -1:
            return soname
    return None
###############################################################################
# find_lib_windows()
# use Module32First() / Module32Next() API on the current process
def find_lib_windows(libname):
    """Return the full path of the loaded module whose basename starts with
    *libname*, or None.

    Walks the current process's module list via the Win32 Toolhelp32
    snapshot API (Module32First/Module32Next) using ctypes.
    """
    try:
        import ctypes
    except:
        return None
    kernel32 = ctypes.windll.kernel32
    MAX_MODULE_NAME32 = 255
    MAX_PATH = 260
    TH32CS_SNAPMODULE = 0x00000008
    class MODULEENTRY32(ctypes.Structure):
        # Mirrors the Win32 MODULEENTRY32 structure layout.
        _fields_ = [
            ("dwSize", ctypes.c_int),
            ("th32ModuleID", ctypes.c_int),
            ("th32ProcessID", ctypes.c_int),
            ("GlblcntUsage", ctypes.c_int),
            ("ProccntUsage", ctypes.c_int),
            ("modBaseAddr", ctypes.c_char_p),
            ("modBaseSize", ctypes.c_int),
            ("hModule", ctypes.c_void_p),
            ("szModule", ctypes.c_char * (MAX_MODULE_NAME32 + 1)),
            ("szExePath", ctypes.c_char * MAX_PATH)
        ]
    # Declare argument/return types for the API calls we use.
    Module32First = kernel32.Module32First
    Module32First.argtypes = [ ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32) ]
    Module32First.rettypes = ctypes.c_int
    Module32Next = kernel32.Module32Next
    Module32Next.argtypes = [ ctypes.c_void_p, ctypes.POINTER(MODULEENTRY32) ]
    Module32Next.rettypes = ctypes.c_int
    CreateToolhelp32Snapshot = kernel32.CreateToolhelp32Snapshot
    CreateToolhelp32Snapshot.argtypes = [ ctypes.c_int, ctypes.c_int ]
    CreateToolhelp32Snapshot.rettypes = ctypes.c_void_p
    CloseHandle = kernel32.CloseHandle
    CloseHandle.argtypes = [ ctypes.c_void_p ]
    CloseHandle.rettypes = ctypes.c_int
    GetLastError = kernel32.GetLastError
    GetLastError.argtypes = []
    GetLastError.rettypes = ctypes.c_int
    snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE,0)
    if snapshot is None:
        return None
    soname = None
    i = 0
    while True:
        # Module32First for the first entry, Module32Next afterwards.
        entry = MODULEENTRY32()
        entry.dwSize = ctypes.sizeof(MODULEENTRY32)
        pentry = ctypes.pointer(entry)
        if i == 0:
            ret = Module32First(snapshot, pentry)
        else:
            ret = Module32Next(snapshot, pentry)
        i = i + 1
        if ret == 0:
            # End of the module list (or an error).
            break
        try:
            path = entry.szExePath.decode('latin1')
        except:
            continue
        # Accept only paths whose LAST component starts with libname.
        i = path.rfind('\\' + libname)
        if i < 0:
            continue
        if path[i+1:].find('\\') >= 0:
            continue
        soname = path
        break
    CloseHandle(snapshot)
    return soname
###############################################################################
# find_lib()
def find_lib(mylib):
    """Dispatch to the platform-specific finder and return the path of the
    loaded lib<mylib> shared object, or None on unsupported platforms."""
    platform = sys.platform
    if platform.startswith('linux'):
        return find_lib_linux(mylib)
    if platform.startswith('sunos'):
        return find_lib_sunos(mylib)
    if platform.startswith('win32'):
        return find_lib_windows(mylib)
    # sorry mac users or other BSDs
    # should be doable, but not in a blindless way
    return None
###############################################################################
# get_opened_files()
def get_opened_files():
    """Return the paths of files currently opened by this process.

    Implemented via /proc, so only Linux is supported; on other platforms
    an empty list is returned.  /dev and pipe pseudo-files are filtered out.
    """
    if not sys.platform.startswith('linux'):
        return []
    fdpath = '/proc/%d/fd' % os.getpid()
    filenames = []
    for fd in os.listdir(fdpath):
        try:
            target = os.readlink('%s/%s' % (fdpath, fd))
        except:
            # The descriptor may have been closed between listdir and here.
            continue
        if not target.startswith('/dev/') and not target.startswith('pipe:'):
            filenames.append(target)
    return filenames
###############################################################################
# is_file_open()
def is_file_open(filename):
    """Return True when any file opened by this process contains *filename*
    as a substring of its path."""
    return any(opened.find(filename) >= 0 for opened in get_opened_files())
| {
"content_hash": "7f3d71ee2f38a13a70c66a112f1fcfd6",
"timestamp": "",
"source": "github",
"line_count": 1547,
"max_line_length": 634,
"avg_line_length": 33.506140917905626,
"alnum_prop": 0.512848709341359,
"repo_name": "avalentino/gdal-pixfun-plugin",
"id": "4802818fc606fc8e03728eb3abbe951a33c6eeae",
"size": "53453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autotest/pymod/gdaltest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33834"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Python",
"bytes": "115135"
}
],
"symlink_target": ""
} |
import unittest
import template
class TemplateTest(unittest.TestCase):
    """Unit tests for the template module's Template class."""

    def setUp(self):
        self.input = 'foo'
        # Bug fix: the module is imported as lowercase 'template', so the
        # class must be referenced through it; 'Template.Template(...)'
        # raised NameError.
        self.t = template.Template(self.input)

    def test_calc(self):
        # calc(False) returns the raw input unchanged.
        d = self.t.calc(False)
        self.assertEqual(self.input, d)

    def test_calc_pretty(self):
        # calc(True) prefixes the input with a human-readable label.
        pretty = 'One arg is: ' + self.input
        d = self.t.calc(True)
        self.assertEqual(pretty, d)

if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "317920bc0085823c927bff53b989d702",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 46,
"avg_line_length": 22.85,
"alnum_prop": 0.5951859956236324,
"repo_name": "dougsweetser/QProcessing",
"id": "d9ac87209139d1d9789e0ca381cd32c076db3db8",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Archive/Python_devo/TemplateTest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "86"
},
{
"name": "Groovy",
"bytes": "1480"
},
{
"name": "Java",
"bytes": "26280"
},
{
"name": "Objective-C",
"bytes": "469"
},
{
"name": "Perl",
"bytes": "7898"
},
{
"name": "Python",
"bytes": "52546"
},
{
"name": "Shell",
"bytes": "1200"
}
],
"symlink_target": ""
} |
'''
Created on May 7, 2021
@author: 828567
'''
from datetime import datetime as dateTime
from dateutil import parser
import datetime
import copy
import re
import os
import hashlib
from dateutil import parser
import uuid
from ....core.BaseAgent import BaseAgent
import json
class GitLabIssueWebhookAgent(BaseAgent):
    """Agent that consumes GitLab issue webhooks: publishes the issue
    snapshot and, for non-'open' actions, one change record per changed
    field taken from the payload's 'changes' section."""

    @BaseAgent.timed
    def processWebhook(self, data):
        """Entry point: parse the raw webhook body (a JSON string) and
        process it when it is an issue event."""
        self.baseLogger.info(" inside GitlabIssue processWebhook ======")
        dataReceived = json.loads(data)
        if "event_type" in dataReceived and dataReceived["event_type"] == "issue":
            self.processIssueDetails(dataReceived)

    def processIssueDetails(self, dataReceived):
        """Parse the issue payload with the configured response template,
        publish it, and delegate updates to processChangelog()."""
        self.baseLogger.info(" inside processIssueDetails method ======")
        dynamicTemplate = self.config.get('dynamicTemplate', {})
        issueTemplate = dynamicTemplate.get('issue', {})
        responseTemplate = issueTemplate.get('issueResponseTemplate', {})
        issueMetadata = issueTemplate.get('issueMetadata', {})
        timeXMapping = issueTemplate.get('insightsTimeXFieldMapping', {})
        timeStampField = timeXMapping.get('timefield', None)
        timeStampFormat = timeXMapping.get('timeformat', None)
        isEpoch = timeXMapping.get('isEpoch', False)
        # Normalize GitLab's updated_at timestamp to a tz-naive ISO form.
        updatedTime = dataReceived.get("object_attributes", {}).get("updated_at", None)
        updatedAt = parser.parse(updatedTime, ignoretz=True).strftime("%Y-%m-%dT%H:%M:%SZ")
        dataReceived["object_attributes"]["updated_at"] = updatedAt
        self.baseLogger.info(" before parseResponse issue data ======")
        parsedIssueResponse = self.parseResponse(responseTemplate, dataReceived)
        # Backfill mandatory multi-value fields absent from the payload with
        # empty values so downstream consumers always see the keys.
        if "assignees" not in dataReceived:
            parsedIssueResponse[0].update(self.updateMandatoryFields("assignees", responseTemplate))
        if "labels" not in dataReceived:
            parsedIssueResponse[0].update(self.updateMandatoryFields("labels", responseTemplate))
        closed_at = dataReceived.get("object_attributes", {}).get("closed_at")
        if closed_at is None:
            parsedIssueResponse[0]["issueClosedDate"] = ""
        due_date = dataReceived.get("object_attributes", {}).get("due_date")
        if due_date is None:
            parsedIssueResponse[0]["issueDueDate"] = ""
        self.baseLogger.info(" before publish issue data ======")
        self.publishToolsData(parsedIssueResponse, issueMetadata, timeStampField, timeStampFormat, isEpoch, True)
        action = dataReceived["object_attributes"]["action"]
        if action != "open":
            changesMetaData = {"issueId": parsedIssueResponse[0]["issueId"],
                               "issueDisplayId": parsedIssueResponse[0]["issueDisplayId"],
                               "updatedById": parsedIssueResponse[0]["updatedById"],
                               "updatedAt": parsedIssueResponse[0]["updatedAt"]}
            self.processChangelog(dataReceived, changesMetaData)
        self.baseLogger.info(" issue details processing completed ======")

    def processChangelog(self, dataReceived, changesMetaData):
        """Build and publish one change record per changed field in the
        webhook's 'changes' section, plus relation records tagged with the
        webhook type."""
        self.baseLogger.info(" inside processChangelog method ======")
        changeLogData = dataReceived.get("changes", {})
        dynamicTemplate = self.config.get('dynamicTemplate', {})
        responseTemplate = dynamicTemplate.get('issue', {}).get('issueResponseTemplate', {})
        changes = dynamicTemplate.get("Changes", {})
        metaData = changes.get("metadata", {})
        relationMetaData = changes.get("relationMetadata", {})
        # Bug fix: 'unicode' exists only on Python 2; referencing it
        # unconditionally raised NameError on Python 3.
        scalarTypes = (str, int)
        try:
            scalarTypes = scalarTypes + (unicode,)  # noqa: F821 (Python 2 only)
        except NameError:
            pass
        finalData = []
        for changedfield in changeLogData:
            fieldDict = {}
            issueChangeDict = changesMetaData.copy()
            issueChangeDict["changedfield"] = changedfield
            issueChangeDict["changeId"] = str(uuid.uuid1())
            prev_details = changeLogData[changedfield]["previous"]
            current_details = changeLogData[changedfield]["current"]
            # Infer the field's type from whichever side is present.
            if prev_details is not None:
                keyType = type(prev_details)
            else:
                keyType = type(current_details)
            if keyType is list:
                # Multi-value field: run both sides through the template and
                # flatten them with prev_/current_ key prefixes.
                if not prev_details:
                    previousData = [self.updateMandatoryFields(changedfield, responseTemplate)]
                else:
                    fieldDict[changedfield] = prev_details
                    previousData = self.parseResponse(responseTemplate, fieldDict)
                if not current_details:
                    currentData = [self.updateMandatoryFields(changedfield, responseTemplate)]
                else:
                    fieldDict[changedfield] = current_details
                    currentData = self.parseResponse(responseTemplate, fieldDict)
                processed_prevDetails = {"prev_" + str(key): val for key, val in previousData[0].items()}
                issueChangeDict.update(processed_prevDetails)
                processed_currentDetails = {"current_" + str(key): val for key, val in currentData[0].items()}
                issueChangeDict.update(processed_currentDetails)
                finalData.append(issueChangeDict)
            elif keyType in scalarTypes:
                # Scalar field: record both sides directly, mapping a
                # missing side to the empty string.
                if prev_details is None:
                    prev_details = ""
                if current_details is None:
                    current_details = ""
                issueChangeDict["prev_" + changedfield] = prev_details
                issueChangeDict["current_" + changedfield] = current_details
                finalData.append(issueChangeDict)
        self.baseLogger.info(" before publish issue changelog data ======")
        self.publishToolsData(finalData, metaData)
        for item in finalData:
            item.update({"gitlab_webhookType": "issue"})
        self.publishToolsData(finalData, relationMetaData)
        self.baseLogger.info(" issue changelog processing completed ======")

    def updateMandatoryFields(self, field, template):
        """Return a dict mapping each template value name configured for
        *field* to an empty string (used when the payload omits the field)."""
        self.baseLogger.info(" inside updateMandatoryFields method ======")
        field_details = {}
        if field in template and type(template[field]) is list:
            field_details = template.get(field, list())[0].copy()
            field_details = {field_details[key]: "" for key in field_details}
        return field_details
if __name__ == "__main__":
GitLabIssueWebhookAgent() | {
"content_hash": "33c3831a399b9a7d98982f9ad5f6801e",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 118,
"avg_line_length": 50.229007633587784,
"alnum_prop": 0.6227963525835867,
"repo_name": "CognizantOneDevOps/Insights",
"id": "9c61c488f72c821ea3bfc6d444f729e65c1d9b99",
"size": "7339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PlatformAgents/com/cognizant/devops/platformagents/agents/alm/gitlabissue/GitLabIssueWebhookAgent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "93761"
},
{
"name": "CSS",
"bytes": "362982"
},
{
"name": "Dockerfile",
"bytes": "30938"
},
{
"name": "HTML",
"bytes": "1118798"
},
{
"name": "Java",
"bytes": "4099059"
},
{
"name": "JavaScript",
"bytes": "39094"
},
{
"name": "Python",
"bytes": "1518111"
},
{
"name": "SCSS",
"bytes": "218059"
},
{
"name": "Shell",
"bytes": "541300"
},
{
"name": "TypeScript",
"bytes": "2097909"
}
],
"symlink_target": ""
} |
import asyncio
async def foo(y):
    """Coroutine returning y incremented by one."""
    incremented = y + 1
    return incremented
async def print_foo():
    """Await foo(1) and print the awaited result."""
    result = await foo(1)
    print(result)

asyncio.run(print_foo())
| {
"content_hash": "2cd3d14b98a99d1557040b185b396c43",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 24,
"avg_line_length": 10.615384615384615,
"alnum_prop": 0.6014492753623188,
"repo_name": "JetBrains/intellij-community",
"id": "870f25693d20b05edf91f8532c157336e5b070b5",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/testData/debug/test_asyncio_debugger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import re,urllib,urlparse,base64
from liveresolver.modules import client
from liveresolver.modules.log_utils import log
def resolve(url):
    """Resolve an ustream.tv page URL to its iPhone HLS playlist URL.

    The channel id is scraped from the page HTML; on any failure the
    function logs and returns None.
    """
    try:
        # Prefer an explicit referer carried in the query string, falling
        # back to the page URL itself.
        try:
            query = urlparse.urlparse(url).query
            referer = urlparse.parse_qs(query)['referer'][0]
        except:
            referer = url
        html = client.request(url, referer=referer)
        channel_id = re.findall('.*ustream.vars.(?:channelId|cId)=([^;]+).*', html)[0]
        return 'http://iphone-streaming.ustream.tv/uhls/' + channel_id + '/streams/live/iphone/playlist.m3u8'
    except:
        log('Ustream: Resolver failed')
        return
| {
"content_hash": "0d49a513727f3d4507b1db64e87392c6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 116,
"avg_line_length": 33.95454545454545,
"alnum_prop": 0.6318607764390897,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "60f52077336d0f39b8dbe48a59294e1a770c8dc7",
"size": "774",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "script.module.liveresolver/lib/liveresolver/resolvers/ustream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import pytz
from django.db import models
from django.db.models import Q
from django.utils import timezone
from osf.models import Node
from osf.models import NodeLog
from osf.models.base import GuidMixin, Guid, BaseModel
from osf.models.mixins import CommentableMixin
from osf.models.spam import SpamMixin
from osf.models import validators
from osf.utils.fields import NonNaiveDateTimeField
from framework.exceptions import PermissionsError
from website import settings
from website.util import api_v2_url
from website.project import signals as project_signals
from website.project.model import get_valid_mentioned_users_guids
class Comment(GuidMixin, SpamMixin, CommentableMixin, BaseModel):
    """A user comment attached to a node, file, or wiki page.

    Comments form a tree: ``target`` points at the direct parent (another
    Comment for replies), while ``root_target`` points at the page-level
    object the whole thread hangs off.
    """
    __guid_min_length__ = 12
    # Values stored in `page`: which kind of object `root_target` refers to.
    OVERVIEW = 'node'
    FILES = 'files'
    WIKI = 'wiki'
    user = models.ForeignKey('OSFUser', null=True, on_delete=models.CASCADE)
    # the node that the comment belongs to
    node = models.ForeignKey('AbstractNode', null=True, on_delete=models.CASCADE)
    # The file or project overview page that the comment is for
    root_target = models.ForeignKey(Guid, on_delete=models.SET_NULL,
                                    related_name='comments',
                                    null=True, blank=True)
    # the direct 'parent' of the comment (e.g. the target of a comment reply is another comment)
    target = models.ForeignKey(Guid, on_delete=models.SET_NULL,
                               related_name='child_comments',
                               null=True, blank=True)
    edited = models.BooleanField(default=False)
    # Soft-delete: deleted comments stay in the table (see delete()/undelete()).
    is_deleted = models.BooleanField(default=False)
    deleted = NonNaiveDateTimeField(blank=True, null=True)
    # The type of root_target: node/files
    page = models.CharField(max_length=255, blank=True)
    content = models.TextField(
        validators=[validators.CommentMaxLength(settings.COMMENT_MAXLENGTH),
                    validators.string_required]
    )
    # The mentioned users
    ever_mentioned = models.ManyToManyField(blank=True, related_name='mentioned_in', to='OSFUser')
    @property
    def url(self):
        """Relative web URL for this comment."""
        return '/{}/'.format(self._id)
    @property
    def absolute_api_v2_url(self):
        """Absolute URL of this comment in the OSF v2 API."""
        path = '/comments/{}/'.format(self._id)
        return api_v2_url(path)
    @property
    def target_type(self):
        """The object "type" used in the OSF v2 API."""
        return 'comments'
    @property
    def root_target_page(self):
        """The page type associated with the object/Comment.root_target."""
        return None
    def belongs_to_node(self, node_id):
        """Check whether the comment is attached to the specified node."""
        return self.node._id == node_id
    # used by django and DRF
    def get_absolute_url(self):
        """Canonical URL (the v2 API URL) used by Django and DRF."""
        return self.absolute_api_v2_url
    def get_comment_page_url(self):
        """Return the URL of the page this comment thread lives on."""
        if isinstance(self.root_target.referent, Node):
            return self.node.absolute_url
        return settings.DOMAIN + str(self.root_target._id) + '/'
    def get_content(self, auth):
        """ Returns the comment content if the user is allowed to see it. Deleted comments
        can only be viewed by the user who created the comment."""
        if not auth and not self.node.is_public:
            raise PermissionsError
        # Deleted comments are only visible to their author.
        if self.is_deleted and ((not auth or auth.user.is_anonymous) or
                                (auth and not auth.user.is_anonymous and self.user._id != auth.user._id)):
            return None
        return self.content
    def get_comment_page_title(self):
        """Title of the page the comment is on (file name, wiki page name, or '')."""
        if self.page == Comment.FILES:
            return self.root_target.referent.name
        elif self.page == Comment.WIKI:
            return self.root_target.referent.page_name
        return ''
    def get_comment_page_type(self):
        """Human-readable page type: 'file', 'wiki', or the node's kind."""
        if self.page == Comment.FILES:
            return 'file'
        elif self.page == Comment.WIKI:
            return 'wiki'
        return self.node.project_or_component
    @classmethod
    def find_n_unread(cls, user, node, page, root_id=None):
        """Count comments on *page* of *node* that *user* has not yet seen.

        Only contributors/group members get unread counts; everyone else gets 0.
        A comment is unread when it was created or modified after the user's
        recorded view timestamp, excluding the user's own and deleted comments.
        """
        if node.is_contributor_or_group_member(user):
            if page == Comment.OVERVIEW:
                view_timestamp = user.get_node_comment_timestamps(target_id=node._id)
                root_target = Guid.load(node._id)
            elif page == Comment.FILES or page == Comment.WIKI:
                view_timestamp = user.get_node_comment_timestamps(target_id=root_id)
                root_target = Guid.load(root_id)
            else:
                raise ValueError('Invalid page')
            # Stored timestamps may be naive; treat them as UTC for comparison.
            if not view_timestamp.tzinfo:
                view_timestamp = view_timestamp.replace(tzinfo=pytz.utc)
            return cls.objects.filter(
                Q(node=node) & ~Q(user=user) & Q(is_deleted=False) &
                (Q(created__gt=view_timestamp) | Q(modified__gt=view_timestamp)) &
                Q(root_target=root_target)
            ).count()
        return 0
    @classmethod
    def create(cls, auth, **kwargs):
        """Create and persist a comment, add a node log, and fire signals.

        Raises PermissionsError when *auth* may not comment on the node, and
        ValueError when the resolved root target has no valid page type.
        """
        comment = cls(**kwargs)
        if not comment.node.can_comment(auth):
            raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
        log_dict = {
            'project': comment.node.parent_id,
            'node': comment.node._id,
            'user': comment.user._id,
            'comment': comment._id,
        }
        # Replies inherit the root target from their parent comment.
        if isinstance(comment.target.referent, Comment):
            comment.root_target = comment.target.referent.root_target
        else:
            comment.root_target = comment.target
        page = getattr(comment.root_target.referent, 'root_target_page', None)
        if not page:
            raise ValueError('Invalid root target.')
        comment.page = page
        log_dict.update(comment.root_target.referent.get_extra_log_params(comment))
        new_mentions = []
        if comment.content:
            if not comment.id:
                # must have id before accessing M2M
                comment.save()
            new_mentions = get_valid_mentioned_users_guids(comment, comment.node.contributors_and_group_members)
            if new_mentions:
                project_signals.mention_added.send(comment, new_mentions=new_mentions, auth=auth)
                comment.ever_mentioned.add(*comment.node.contributors.filter(guids___id__in=new_mentions))
        comment.save()
        comment.node.add_log(
            NodeLog.COMMENT_ADDED,
            log_dict,
            auth=auth,
            save=False,
        )
        comment.node.save()
        project_signals.comment_added.send(comment, auth=auth, new_mentions=new_mentions)
        return comment
    def edit(self, content, auth, save=False):
        """Replace the comment's content; only the author may edit.

        When *save* is True, also persists, notifies new mentions, and logs
        COMMENT_UPDATED on the node.
        """
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            raise PermissionsError('{0!r} does not have permission to edit this comment'.format(auth.user))
        log_dict = {
            'project': self.node.parent_id,
            'node': self.node._id,
            'user': self.user._id,
            'comment': self._id,
        }
        log_dict.update(self.root_target.referent.get_extra_log_params(self))
        self.content = content
        self.edited = True
        self.modified = timezone.now()
        new_mentions = get_valid_mentioned_users_guids(self, self.node.contributors_and_group_members)
        if save:
            if new_mentions:
                project_signals.mention_added.send(self, new_mentions=new_mentions, auth=auth)
                self.ever_mentioned.add(*self.node.contributors.filter(guids___id__in=new_mentions))
            self.save()
            self.node.add_log(
                NodeLog.COMMENT_UPDATED,
                log_dict,
                auth=auth,
                save=False,
            )
            self.node.save()
    def delete(self, auth, save=False):
        """Soft-delete the comment (author only); optionally persist and log."""
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
        log_dict = {
            'project': self.node.parent_id,
            'node': self.node._id,
            'user': self.user._id,
            'comment': self._id,
        }
        self.is_deleted = True
        current_time = timezone.now()
        self.deleted = current_time
        log_dict.update(self.root_target.referent.get_extra_log_params(self))
        self.modified = current_time
        if save:
            self.save()
            self.node.add_log(
                NodeLog.COMMENT_REMOVED,
                log_dict,
                auth=auth,
                save=False,
            )
            self.node.save()
    def undelete(self, auth, save=False):
        """Reverse a soft delete (author only); optionally persist and log."""
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
        self.is_deleted = False
        self.deleted = None
        log_dict = {
            'project': self.node.parent_id,
            'node': self.node._id,
            'user': self.user._id,
            'comment': self._id,
        }
        log_dict.update(self.root_target.referent.get_extra_log_params(self))
        self.modified = timezone.now()
        if save:
            self.save()
            self.node.add_log(
                NodeLog.COMMENT_RESTORED,
                log_dict,
                auth=auth,
                save=False,
            )
            self.node.save()
| {
"content_hash": "95dd10e24d94996f8bed795d3af9f541",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 112,
"avg_line_length": 37.389763779527556,
"alnum_prop": 0.5949247130672844,
"repo_name": "cslzchen/osf.io",
"id": "10a380b6ef12a2dd50ed3d62ac1df6bbca4bab41",
"size": "9498",
"binary": false,
"copies": "10",
"ref": "refs/heads/develop",
"path": "osf/models/comment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11612029"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import json
import urllib
from tempest.common.rest_client import RestClient
class VolumeHostsClientJSON(RestClient):
    """JSON client for the Cinder os-hosts (volume hosts) admin API."""

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(VolumeHostsClientJSON, self).__init__(config, username, password,
                                                    auth_url, tenant_name)
        volume_cfg = self.config.volume
        self.service = volume_cfg.catalog_type
        self.build_interval = volume_cfg.build_interval
        self.build_timeout = volume_cfg.build_timeout

    def list_hosts(self, params=None):
        """GET os-hosts, optionally filtered by *params*; return (resp, hosts)."""
        url = 'os-hosts'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url)
        return resp, json.loads(body)['hosts']
| {
"content_hash": "5260c78db37be4afdaa26285b5a34d94",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 31.103448275862068,
"alnum_prop": 0.6119733924611973,
"repo_name": "armando-migliaccio/tempest",
"id": "fc28ada0f499802a8689552229b9d38cc9ac27e6",
"size": "1583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/services/volume/json/admin/volume_hosts_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1966096"
},
{
"name": "Shell",
"bytes": "5228"
}
],
"symlink_target": ""
} |
import spectral | {
"content_hash": "206532f068ddd23225b50bfd8e8d4e2a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 15,
"avg_line_length": 15,
"alnum_prop": 0.9333333333333333,
"repo_name": "parenthetical-e/neurosrc",
"id": "002f21a18bd477a95eca78afd2a5fe9f6d33e145",
"size": "15",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "25492"
},
{
"name": "Python",
"bytes": "14121"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import GlobalPublicDelegatedPrefixesTransport
from .rest import (
GlobalPublicDelegatedPrefixesRestInterceptor,
GlobalPublicDelegatedPrefixesRestTransport,
)
# Registry mapping transport names ("rest") to their concrete classes.
_transport_registry: Dict[str, Type[GlobalPublicDelegatedPrefixesTransport]] = OrderedDict()
_transport_registry["rest"] = GlobalPublicDelegatedPrefixesRestTransport
# Public names re-exported by this subpackage.
__all__ = (
    "GlobalPublicDelegatedPrefixesTransport",
    "GlobalPublicDelegatedPrefixesRestTransport",
    "GlobalPublicDelegatedPrefixesRestInterceptor",
)
| {
"content_hash": "e02e2a1b490ea20f28068357ff1636c7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 31.3,
"alnum_prop": 0.8099041533546326,
"repo_name": "googleapis/python-compute",
"id": "da5235a504296c4e9ce0a23cf32734da19692ce2",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
"""
Example DAG showing how to use Asana TaskOperators.
"""
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.providers.asana.operators.asana_tasks import (
AsanaCreateTaskOperator,
AsanaDeleteTaskOperator,
AsanaFindTaskOperator,
AsanaUpdateTaskOperator,
)
# GIDs of existing Asana tasks used by the update/delete examples below.
ASANA_TASK_TO_UPDATE = os.environ.get("ASANA_TASK_TO_UPDATE", "update_task")
ASANA_TASK_TO_DELETE = os.environ.get("ASANA_TASK_TO_DELETE", "delete_task")
# This example assumes a default project ID has been specified in the connection. If you
# provide a different id in ASANA_PROJECT_ID_OVERRIDE, it will override this default
# project ID in the AsanaFindTaskOperator example below
ASANA_PROJECT_ID_OVERRIDE = os.environ.get("ASANA_PROJECT_ID_OVERRIDE", "test_project")
# This connection should specify a personal access token and a default project ID
CONN_ID = os.environ.get("ASANA_CONNECTION_ID")
# NOTE(review): ENV_ID is not referenced in this chunk — presumably consumed
# by the system-test harness; confirm before removing.
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_asana"
# `conn_id` set in default_args is inherited by every operator below.
with DAG(
    DAG_ID,
    start_date=datetime(2021, 1, 1),
    default_args={"conn_id": CONN_ID},
    tags=["example"],
    catchup=False,
) as dag:
    # [START asana_example_dag]
    # [START run_asana_create_task_operator]
    # Create a task. `task_parameters` is used to specify attributes the new task should have.
    # You must specify at least one of 'workspace', 'projects', or 'parent' in `task_parameters`
    # unless these are specified in the connection. Any attributes you specify in
    # `task_parameters` will override values from the connection.
    create = AsanaCreateTaskOperator(
        task_id="run_asana_create_task",
        task_parameters={"notes": "Some notes about the task."},
        name="New Task Name",
    )
    # [END run_asana_create_task_operator]
    # [START run_asana_find_task_operator]
    # Find tasks matching search criteria. `search_parameters` is used to specify these criteria.
    # You must specify `project`, `section`, `tag`, `user_task_list`, or both
    # `assignee` and `workspace` in `search_parameters` or in the connection.
    # This example shows how you can override a project specified in the connection by
    # passing a different value for project into `search_parameters`
    one_week_ago = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d")
    find = AsanaFindTaskOperator(
        task_id="run_asana_find_task",
        search_parameters={"project": ASANA_PROJECT_ID_OVERRIDE, "modified_since": one_week_ago},
    )
    # [END run_asana_find_task_operator]
    # [START run_asana_update_task_operator]
    # Update a task. `task_parameters` is used to specify the new values of
    # task attributes you want to update.
    update = AsanaUpdateTaskOperator(
        task_id="run_asana_update_task",
        asana_task_gid=ASANA_TASK_TO_UPDATE,
        task_parameters={"notes": "This task was updated!", "completed": True},
    )
    # [END run_asana_update_task_operator]
    # [START run_asana_delete_task_operator]
    # Delete a task. This task will complete successfully even if `asana_task_gid` does not exist.
    delete = AsanaDeleteTaskOperator(
        task_id="run_asana_delete_task",
        asana_task_gid=ASANA_TASK_TO_DELETE,
    )
    # [END run_asana_delete_task_operator]
    # Linear pipeline: create -> find -> update -> delete.
    create >> find >> update >> delete
    # [END asana_example_dag]
    from tests.system.utils.watcher import watcher
    # This test needs watcher in order to properly mark success/failure
    # when "tearDown" task with trigger rule is part of the DAG
    list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run  # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "7e92488ffdd5218984207259fc736d60",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 98,
"avg_line_length": 41.92134831460674,
"alnum_prop": 0.7027606539801662,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "311445a01478628445e5b5367258270bc8839d57",
"size": "4590",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/system/providers/asana/example_asana.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
import sys
import urwid as uw
from urwid_timed_progress import TimedProgressBar
from urwid.html_fragment import screenshot_init, screenshot_collect
# Demo of timed progress bar.
if __name__ == '__main__':
    # Display attribute pairs: 'normal' for the empty part of a bar,
    # 'complete' for the filled part.
    palette = [
        ('normal', 'white', 'black', 'standout'),
        ('complete', 'white', 'dark magenta'),
    ]
    # capture as HTML screenshot if first arg is "screenshot"
    take_screenshot = len(sys.argv) > 1 and sys.argv[1] == 'screenshot'
    if take_screenshot:
        # 70x15 terminal; scripted input: seven 'x' presses then 'q' to quit.
        screenshot_init([(70, 15)], [['x'] * 7, ['q']])
    # Create two timed progress bars with labels and custom units.
    # Using the same label_width allows the bars to line up.
    bar1 = TimedProgressBar('normal', 'complete', label='Current File',
                            label_width=15, units='MB', done=10)
    bar2 = TimedProgressBar('normal', 'complete', label='Overall',
                            label_width=15, units='MB', done=100)
    # Advance the second bar
    bar2.add_progress(40)
    footer = uw.Text('q to exit, any other key adds to progress')
    progress = uw.Frame(uw.ListBox([bar1, uw.Divider(), bar2]), footer=footer)
    # Pressing a key other than 'q' advances the progress bars by 1
    # Calling add_progress() also updates the displayed rate and time
    # remaining.
    def keypress(key):
        if key in ('q', 'Q'):
            raise uw.ExitMainLoop()
        else:
            bar2.add_progress(1)
            # add_progress() appears to return truthy when the bar reaches
            # `done`; restart the per-file bar until the overall bar finishes
            # — TODO confirm against urwid_timed_progress docs.
            if bar1.add_progress(1) and bar2.current < bar2.done:
                bar1.reset()
    loop = uw.MainLoop(progress, palette, unhandled_input=keypress)
    loop.run()
    if take_screenshot:
        # Dump each captured frame to screenshot-<i>.html.
        for i, s in enumerate(screenshot_collect()):
            with open('screenshot-{}.html'.format(i), 'w') as f:
                f.write(s)
| {
"content_hash": "c4150991a71a8ac31ca8f960ee39671b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 35.72,
"alnum_prop": 0.6052631578947368,
"repo_name": "mgk/urwid_timed_progress",
"id": "7f07314cb17e803ac2db1b6fe86a3bf08229c4b8",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/current_file_and_overall_progress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "502"
},
{
"name": "Python",
"bytes": "8040"
}
],
"symlink_target": ""
} |
from django import template
from django.utils.html import format_html
register = template.Library()
@register.filter
def github_profile(login):
    """Render *login* as an escaped link to that GitHub profile."""
    anchor = '<a href="https://github.com/{0}">@{0}</a>'
    return format_html(anchor, login)
| {
"content_hash": "a44e243945248ede0ca1da43441f47dc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 18.923076923076923,
"alnum_prop": 0.6504065040650406,
"repo_name": "FundersClub/fire",
"id": "25d380365a5bf80549f514f5e09703b87c9e2417",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fb_github/templatetags/github.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6784"
},
{
"name": "HTML",
"bytes": "42701"
},
{
"name": "JavaScript",
"bytes": "8354"
},
{
"name": "Python",
"bytes": "100665"
},
{
"name": "SCSS",
"bytes": "5265"
},
{
"name": "Shell",
"bytes": "925"
},
{
"name": "TypeScript",
"bytes": "50973"
}
],
"symlink_target": ""
} |
from smart.interfaces.qt5 import getPixmap
from smart.const import INSTALL, REMOVE
from smart import *
from PyQt5 import QtGui as QtGui
from PyQt5 import QtWidgets as QtWidgets
from PyQt5 import QtCore as QtCore
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtCore.QCoreApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtCore.QCoreApplication.translate(context, text, disambig)
class PackageListViewItem(QtWidgets.QTreeWidgetItem):
    """Tree item that remembers the package (or group label) it displays."""

    def __init__(self, parent, package=None):
        QtWidgets.QTreeWidgetItem.__init__(self, parent)
        # Kept private; consumers read it directly as item._pkg.
        self._pkg = package
class QtPackageView(QtWidgets.QWidget):
    """Tree widget that lists packages with per-state icons.

    NOTE(review): methods below emit packageActivated, packagePopup and
    packageSelected, but no such signals are declared on this class (and a
    plain QWidget has none) — presumably they were lost in the Qt4->Qt5
    port or are provided elsewhere; verify before relying on them.
    """
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.show()
        # BUG FIX: this attribute used to be written as `_expandPackages`
        # (capital P) while setExpandPackage() and _doubleClicked() use
        # `_expandpackage`, so reading it before setExpandPackage() was
        # called raised AttributeError. Use one spelling everywhere.
        self._expandpackage = True
        self._changeset = {}
        self._vbox = QtWidgets.QVBoxLayout(self)
        # Tree View
        self._treeview = QtWidgets.QTreeWidget(self)
        # Tree View Style start
        self._treeview.setEnabled(True)
        self._treeview.setGeometry(QtCore.QRect(10, 10, 500, 200))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(5)
        sizePolicy.setVerticalStretch(4)
        sizePolicy.setHeightForWidth(self._treeview.sizePolicy().hasHeightForWidth())
        self._treeview.setSizePolicy(sizePolicy)
        self._treeview.setMinimumSize(QtCore.QSize(500, 200))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("FreeSans"))
        font.setPointSize(11)
        self._treeview.setFont(font)
        self._treeview.setMouseTracking(True)
        self._treeview.setAcceptDrops(True)
        self._treeview.setAutoFillBackground(False)
        self._treeview.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self._treeview.setFrameShadow(QtWidgets.QFrame.Raised)
        self._treeview.setLineWidth(2)
        self._treeview.setMidLineWidth(1)
        self._treeview.setAutoScroll(False)
        self._treeview.setTabKeyNavigation(True)
        self._treeview.setAlternatingRowColors(True)
        self._treeview.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self._treeview.setAnimated(True)
        self._treeview.setHeaderHidden(False)
        self._treeview.setExpandsOnDoubleClick(True)
        self._treeview.setObjectName(_fromUtf8("_treeview"))
        self._treeview.headerItem().setText(0, _fromUtf8("1"))
        self._treeview.header().setCascadingSectionResizes(True)
        self._treeview.header().setDefaultSectionSize(160)
        self._treeview.header().setHighlightSections(True)
        self._treeview.header().setMinimumSectionSize(50)
        self._treeview.header().setSortIndicatorShown(True)
        # Tree View Style end
        # BUG FIX: these three connections were left half-converted from the
        # Qt4 SIGNAL() syntax as `itemClicked[QTreeWidgetItem.connect(...)`,
        # an unbalanced-bracket SyntaxError that prevented this module from
        # even being imported. Use PyQt5 bound-signal connect instead.
        self._treeview.itemClicked.connect(self._clicked)
        self._treeview.itemDoubleClicked.connect(self._doubleClicked)
        self._treeview.itemPressed.connect(self._pressed)
        self._treeview.itemSelectionChanged.connect(self._selectionChanged)
        #self._treeview.setAllColumnsShowFocus(True)
        #self._treeview.setRootIsDecorated(True)
        self._treeview.show()
        self._vbox.addWidget(self._treeview)
        #self._treeview.setSelectionMode(QtGui.QTreeView.Extended)
        #self._treeview.addColumn("") # pixmap
        #self._treeview.addColumn(_("Package"))
        #self._treeview.addColumn(_("Version"))
        self._treeview.setHeaderLabels(["", _("Package"), _("Version")])
        # One pixmap per package state (installed/available/new x locked,
        # plus the pending install/remove/reinstall markers and group folder).
        self._ipixbuf = getPixmap("package-installed")
        self._ilpixbuf = getPixmap("package-installed-locked")
        self._apixbuf = getPixmap("package-available")
        self._alpixbuf = getPixmap("package-available-locked")
        self._npixbuf = getPixmap("package-new")
        self._nlpixbuf = getPixmap("package-new-locked")
        self._fpixbuf = getPixmap("folder")
        self._Ipixbuf = getPixmap("package-install")
        self._Rpixbuf = getPixmap("package-remove")
        self._rpixbuf = getPixmap("package-reinstall")
    def _getPixmap(self, pkg):
        """Pick the status icon for *pkg*; group rows get the folder icon."""
        if not hasattr(pkg, "name"):
            return self._fpixbuf
        else:
            if pkg.installed:
                if self._changeset.get(pkg) is REMOVE:
                    return self._Rpixbuf
                elif self._changeset.get(pkg) is INSTALL:
                    return self._rpixbuf
                elif pkgconf.testFlag("lock", pkg):
                    return self._ilpixbuf
                else:
                    return self._ipixbuf
            else:
                if self._changeset.get(pkg) is INSTALL:
                    return self._Ipixbuf
                elif pkgconf.testFlag("lock", pkg):
                    if pkgconf.testFlag("new", pkg):
                        return self._nlpixbuf
                    else:
                        return self._alpixbuf
                elif pkgconf.testFlag("new", pkg):
                    return self._npixbuf
                else:
                    return self._apixbuf
        return self._fpixbuf #default
    def _setPixmap(self, iter, pkg):
        """Apply the status icon for *pkg* to tree item *iter* (column 0)."""
        iter.setIcon(0, QtGui.QIcon(self._getPixmap(pkg)))
    def _setNameVersion(self, iter, pkg):
        """Fill the name (col 1) and version (col 2) cells for *pkg*."""
        if hasattr(pkg, "name"):
            iter.setText(1, pkg.name)
        else:
            # NOTE(review): `unicode` is Python 2 only — this module will
            # NameError here on Python 3; switch to str() when porting.
            iter.setText(1, unicode(pkg))
        if hasattr(pkg, "version"):
            iter.setText(2, pkg.version)
    def getTreeView(self):
        """Expose the underlying QTreeWidget."""
        return self._treeview
    def _doItem(self, item, what):
        """Apply callable *what* to *item* and, recursively, its children."""
        what(item)
        iter = 0
        while iter < item.childCount():
            self._doItem(item.child(iter), what)
            iter += 1
    def _doTree(self, tree, what):
        """Apply callable *what* to every item of *tree*, depth-first."""
        iter = 0
        while iter < tree.topLevelItemCount():
            self._doItem(tree.topLevelItem(iter), what)
            iter += 1
    def expandAll(self):
        """Expand every row of the tree."""
        self._doTree(self._treeview, self._treeview.expandItem)
    def setExpandAll(self):
        """Alias of expandAll() kept for callers of the old name."""
        self._doTree(self._treeview, self._treeview.expandItem)
        #self._doTree(self._treeview, self._treeview.expandAll)
    def collapseAll(self):
        """Collapse every row of the tree."""
        self._doTree(self._treeview, self._treeview.collapseItem)
    def getSelectedPkgs(self):
        """Return the packages of the selected top-level rows (groups excluded)."""
        iter = 0
        lst = []
        while iter < self._treeview.topLevelItemCount():
            item = self._treeview.topLevelItem(iter)
            if item.isSelected():
                value = item._pkg
                if hasattr(value, "name"):
                    lst.append(value)
            iter += 1
        return lst
    def setExpandPackage(self, flag):
        """Control whether double-click activates packages (see _doubleClicked)."""
        self._expandpackage = flag
    def getCursor(self):
        """Return the values along the current cursor path.

        NOTE(review): get_model()/get_cursor() are GTK-era APIs that
        QTreeWidget does not provide — this method looks unported and will
        raise AttributeError if called; left untouched pending a real port.
        """
        treeview = self._treeview
        model = treeview.get_model()
        path = treeview.get_cursor()[0]
        if not path:
            return None
        cursor = [None]*len(path)
        for i in range(len(path)):
            iter = model.get_iter(path[:i+1])
            cursor[i] = model.get_value(iter, 0)
        return cursor
    def setCursor(self, cursor):
        """Restore a cursor previously returned by getCursor().

        NOTE(review): the GTK implementation is commented out, so this is
        currently a no-op.
        """
        if not cursor:
            return
        treeview = self._treeview
        #model = treeview.get_model()
        #iter = None
        #bestiter = None
        #for i in range(len(cursor)):
        #    cursori = cursor[i]
        #    iter = model.iter_children(iter)
        #    while iter:
        #        value = model.get_value(iter, 0)
        #        if value == cursori:
        #            bestiter = iter
        #            break
        #        # Convert to str to protect against comparing
        #        # packages and strings.
        #        if str(value) < str(cursori):
        #            bestiter = iter
        #        iter = model.iter_next(iter)
        #    else:
        #        break
        #if bestiter:
        #    path = model.get_path(bestiter)
        #    treeview.set_cursor(path)
        #    treeview.scroll_to_cell(path)
    def getExpanded(self):
        """Collect the value-paths of all expanded rows.

        NOTE(review): still written against the GTK TreeView API
        (get_model/map_expanded_rows); will raise AttributeError if called.
        """
        expanded = []
        treeview = self._treeview
        model = treeview.get_model()
        def set(treeview, path, data):
            item = [None]*len(path)
            for i in range(len(path)):
                iter = model.get_iter(path[:i+1])
                item[i] = model.get_value(iter, 0)
            expanded.append(tuple(item))
        treeview.map_expanded_rows(set, None)
        return expanded
    def setExpanded(self, expanded):
        """Re-expand rows recorded by getExpanded().

        NOTE(review): the GTK row-walking is commented out, so only the
        cache bookkeeping remains and nothing is actually expanded.
        """
        if not expanded:
            return
        treeview = self._treeview
        cache = {}
        for item in expanded:
            item = tuple(item)
            iter = None
            for i in range(len(item)):
                cached = cache.get(item[:i+1])
                if cached:
                    iter = cached
                    continue
                itemi = item[i]
                #iter = model.iter_children(iter)
                #while iter:
                #    value = model.get_value(iter, 0)
                #    if value == itemi:
                #        cache[item[:i+1]] = iter
                #        treeview.expand_row(model.get_path(iter), False)
                #        break
                #    iter = model.iter_next(iter)
                #else:
                #    break
                break
    def setChangeSet(self, changeset):
        """Set the pending-operations mapping used to pick status icons."""
        if changeset is None:
            self._changeset = {}
        else:
            self._changeset = changeset
    def updatePackages(self, packages, changeset=None):
        """Refresh name/version/icon of rows showing any of *packages*.

        NOTE(review): the findItems() lookup is commented out and the code
        inspects only the current selection — rows outside the selection are
        not refreshed; confirm this is intended.
        """
        treeview = self._treeview
        for pkg in packages:
            if hasattr(pkg, "name"):
                name = pkg.name
            else:
                name = str(pkg)
            #iter = treeview.findItems(name, QtCore.Qt.MatchFixedString, 1)
            iter = treeview.selectedItems()
            if iter:
                iter = iter[0]
                if iter._pkg == pkg:
                    self._setNameVersion(iter, pkg)
                    self._setPixmap(iter, pkg)
        #treeview.adjustColumn(0)
    def setPackages(self, packages, changeset=None, keepstate=False):
        """Rebuild the tree from *packages* (list/dict nesting allowed)."""
        treeview = self._treeview
        if not packages:
            treeview.clear()
            return
        self.setChangeSet(changeset)
        if keepstate: ###TO IMPLEMENT IN QT
            if False: #treeview.get_model():
                expanded = self.getExpanded()
                #cursor = self.getCursor()
            else:
                keepstate = False
        # clear the model until the new one is ready
        treeview.clear()
        self._setPackage(None, None, packages)
        #if keepstate:
            #self.setExpanded(expanded)
            #self.setCursor(cursor)
        treeview.update()
    def _setPackage(self, report, parent, item):
        """Recursively add *item* (package, list, or dict of groups) under *parent*.

        NOTE(review): dict handling uses `keys = item.keys(); keys.sort()`,
        which only works on Python 2 — on Python 3 dict views have no
        sort(); the list/dict .sort() calls assume py2 semantics throughout.
        """
        if type(item) is list:
            item.sort()
            for subitem in item:
                self._setPackage(report, parent, subitem)
        elif type(item) is dict:
            keys = item.keys()
            keys.sort()
            for key in keys:
                iter = self._setPackage(report, parent, key)
                self._setPackage(report, iter, item[key])
        else:
            if parent is None:
                iter = PackageListViewItem(self._treeview, item)
            else:
                iter = PackageListViewItem(parent, item)
            #iter.setText(0, str(item))
            self._setNameVersion(iter, item)
            self._setPixmap(iter, item)
            return iter
    def _doubleClicked(self, item, c):
        """Emit packageActivated for the double-clicked package row(s)."""
        if not item:
            return
        value = item._pkg
        if not self._expandpackage and hasattr(value, "name"):
            pkgs = self.getSelectedPkgs()
            if len(pkgs) > 1:
                self.packageActivated.emit(pkgs)
            else:
                self.packageActivated.emit([value])
    def _pressed(self, item, c):
        """Route right-button presses to the popup handler."""
        btn = QtWidgets.QApplication.instance().mouseButtons()
        if bool(btn & QtCore.Qt.RightButton):
            pnt = QtCore.QPoint(item.treeWidget().pos())
            return self._rightButtonPressed(item, pnt, c)
    def _rightButtonPressed(self, item, pnt, c):
        """Emit packagePopup for the package row(s) under the right click."""
        if not item:
            return
        value = item._pkg
        if item and hasattr(value, "name"):
            pkgs = self.getSelectedPkgs()
            if len(pkgs) > 1:
                self.packagePopup.emit(self, pkgs, pnt)
            else:
                self.packagePopup.emit(self, [value], pnt)
    def _clicked(self, item, c):
        """Emit packageActivated when a package row's icon column is clicked."""
        if not item:
            return
        value = item._pkg
        if c == 0 and hasattr(value, "name"):
            self.packageActivated.emit([value])
    def _selectionChanged(self):
        """Emit packageSelected with the current package, or None for groups."""
        item = self._treeview.currentItem()
        if item and hasattr(item._pkg, "name"):
            self.packageSelected.emit(item._pkg)
        else:
            self.packageSelected.emit(None)
| {
"content_hash": "c913fec3f4b625685b81254753a6d0bb",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 108,
"avg_line_length": 36.09308510638298,
"alnum_prop": 0.5472699137867512,
"repo_name": "blackPantherOS/packagemanagement",
"id": "64aa2d73be137d249b60fca2014ea6bf44f40e0d",
"size": "13658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartpm/smart/interfaces/qt5/packageview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "605504"
},
{
"name": "C++",
"bytes": "65879"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "HTML",
"bytes": "17187"
},
{
"name": "M4",
"bytes": "170666"
},
{
"name": "Makefile",
"bytes": "5031"
},
{
"name": "Perl",
"bytes": "311801"
},
{
"name": "Prolog",
"bytes": "5458"
},
{
"name": "Python",
"bytes": "2250512"
},
{
"name": "Roff",
"bytes": "1805"
},
{
"name": "Shell",
"bytes": "283804"
},
{
"name": "XSLT",
"bytes": "312"
}
],
"symlink_target": ""
} |
import os
import datetime
import random
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from v2ex.babel import Member
from v2ex.babel import Counter
#from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
from v2ex.babel import Site
from v2ex.babel.security import *
from v2ex.babel.ua import *
from v2ex.babel.da import *
from v2ex.babel.l10n import *
from v2ex.babel.ext.cookies import Cookies
template.register_template_library('v2ex.templatetags.filters')
class AboutHandler(webapp.RequestHandler):
    """Serves the static "About" page."""

    def get(self):
        site = GetSite()
        values = {'site': site, 'rnd': random.randrange(1, 100)}
        # Prefer the site-specific note (127), falling back to the stock one (2).
        note = GetKindByNum('Note', 127)
        if note is False:
            note = GetKindByNum('Note', 2)
        values['note'] = note
        member = CheckAuth(self)
        if member:
            values['member'] = member
        values['page_title'] = site.title + u' › About'
        values['l10n'] = GetMessages(self, member, site)
        tpl = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'about.html')
        self.response.out.write(template.render(tpl, values))
class FAQHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
note = GetKindByNum('Note', 195)
if note is False:
note = GetKindByNum('Note', 4)
template_values['note'] = note
member = CheckAuth(self)
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › FAQ'
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'faq.html')
output = template.render(path, template_values)
self.response.out.write(output)
class MissionHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
note = GetKindByNum('Note', 240)
if note is False:
note = GetKindByNum('Note', 5)
template_values['note'] = note
member = CheckAuth(self)
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › Mission'
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'mission.html')
output = template.render(path, template_values)
self.response.out.write(output)
class AdvertiseHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
member = CheckAuth(self)
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › Advertise'
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'advertise.html')
output = template.render(path, template_values)
self.response.out.write(output)
class AdvertisersHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
member = CheckAuth(self)
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › Advertisers'
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'advertisers.html')
output = template.render(path, template_values)
self.response.out.write(output)
def main():
    """Wire up the static-page URL routes and start the WSGI application."""
    routes = [
        ('/about', AboutHandler),
        ('/faq', FAQHandler),
        ('/mission', MissionHandler),
        ('/advertise', AdvertiseHandler),
        ('/advertisers', AdvertisersHandler),
    ]
    util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
    main()
"content_hash": "5a1058b6a3fd114dc8ec301ac59a9501",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 92,
"avg_line_length": 36.128787878787875,
"alnum_prop": 0.6198364436988887,
"repo_name": "melice/akiraguru",
"id": "7dfc047300ad5d371eed9ca75944c088642f1f2f",
"size": "4817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "17553"
},
{
"name": "Python",
"bytes": "710378"
}
],
"symlink_target": ""
} |
"""Loads data into BigQuery from a local file.
For more information, see the README.md under /bigquery.
Example invocation:
$ python load_data_from_file.py example_dataset example_table \
example-data.csv
The dataset and table should already exist.
"""
import argparse
import time
from google.cloud import bigquery
def load_data_from_file(dataset_name, table_name, source_file_name):
    """Upload a local CSV file into an existing BigQuery table.

    :param dataset_name: name of an existing dataset.
    :param table_name: name of an existing table inside that dataset.
    :param source_file_name: path of the local CSV file to upload.
    """
    client = bigquery.Client()
    table = client.dataset(dataset_name).table(table_name)

    # Reload the table so the upload knows the existing schema.
    table.reload()

    # This example uses CSV, but other formats work too.
    # See https://cloud.google.com/bigquery/loading-data
    with open(source_file_name, 'rb') as source_file:
        job = table.upload_from_file(
            source_file, source_format='text/csv')

    wait_for_job(job)

    print('Loaded {} rows into {}:{}.'.format(
        job.output_rows, dataset_name, table_name))
def wait_for_job(job):
    """Poll *job* once a second until it finishes.

    :raises RuntimeError: with the job's error list if the job failed.
    """
    finished = False
    while not finished:
        job.reload()  # refresh job state from the service
        finished = (job.state == 'DONE')
        if not finished:
            time.sleep(1)
    if job.error_result:
        raise RuntimeError(job.errors)
if __name__ == '__main__':
    # Parse the three positional CLI arguments and kick off the upload.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('dataset_name')
    arg_parser.add_argument('table_name')
    arg_parser.add_argument(
        'source_file_name', help='Path to a .csv file to upload.')
    cli_args = arg_parser.parse_args()
    load_data_from_file(
        cli_args.dataset_name, cli_args.table_name, cli_args.source_file_name)
| {
"content_hash": "e63b111329e319f4a4d8fccb025fef9f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 68,
"avg_line_length": 26.951612903225808,
"alnum_prop": 0.6433273488928786,
"repo_name": "hashems/Mobile-Cloud-Development-Projects",
"id": "0bbdd7ba7d80f3fcfd38730ac5b7af1e4067559b",
"size": "2291",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "bigquery/cloud-client/load_data_from_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "23592"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "8810"
},
{
"name": "Python",
"bytes": "1056852"
},
{
"name": "Shell",
"bytes": "8344"
}
],
"symlink_target": ""
} |
def mergesort(list_to_sort):
    """Return a new list with the elements of *list_to_sort* in ascending order.

    Classic top-down merge sort, O(n log n).

    Bug fixes vs. the original:
    - the two halves were recursively mergesorted TWICE (once when slicing,
      then again on the already-sorted results), wasting an extra full pass
      at every level of the recursion;
    - the midpoint used ``/``, which produces a float under Python 3; ``//``
      is correct on both Python 2 and 3.
    """
    if len(list_to_sort) < 2:  # 0- or 1-element lists are sorted by definition
        return list_to_sort
    midpoint = len(list_to_sort) // 2
    left = mergesort(list_to_sort[:midpoint])
    right = mergesort(list_to_sort[midpoint:])
    return merge(left, right)


def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Uses index cursors instead of the original ``left = left[1:]`` slicing,
    which re-copied the list on every step and made the merge O(n^2).
    """
    res = []
    i = j = 0
    # Repeatedly take the smaller head element (ties go to the right list,
    # matching the original comparison `left[0] < right[0]`).
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            res.append(left[i])
            i += 1
        else:
            res.append(right[j])
            j += 1
    # At most one of the lists still has elements; they are all >= res[-1].
    res.extend(left[i:])
    res.extend(right[j:])
    return res
if __name__ == '__main__':
    # Quick smoke test when run directly.
    # NOTE: the print *statement* below makes this file Python 2 only.
    test_sort = [1, 0, 9, 2, 8, 3, 7, 4, 6, 5]
    print mergesort(test_sort)
"content_hash": "4e4322b4851ab4fea505873c5a1580a9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 26.894736842105264,
"alnum_prop": 0.6682974559686888,
"repo_name": "ross-t/python-ds",
"id": "a074e6c5d47511bf6d42d3ddbf960cf37919ebbf",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sorting/mergesort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24768"
}
],
"symlink_target": ""
} |
import os
import sys
import re
from google.protobuf.descriptor_pb2 import FileDescriptorSet, FieldOptions
# Add our cwd to the path so we can import generated python protobufs,
# and extend FieldOptions with our custom Neutron extensions.
sys.path.append(os.getcwd() + '/..')
from Neutron_pb2 import pointer, PointerType, array_size

# Register the custom options so descriptors parsed from the wire expose them
# via options.Extensions[...].
FieldOptions.RegisterExtension(pointer)
FieldOptions.RegisterExtension(array_size)

# Replace the protobuf enum wrapper with a plain {name: value} dict so the
# rest of the generator can do PointerType['RAW'] style lookups.
PointerType = dict(PointerType.items())
class Field:
    """One protobuf message field, plus the C++ type it maps to.

    Wraps a FieldDescriptorProto and precomputes the C++ type string used by
    the NUClear message generator (``cpp_type``) together with a sensible
    default value for the generated struct member.
    """

    # Static map_types field: maps a protobuf map-entry type name to its
    # (key Field, value Field) pair. Presumably populated by the message
    # generator before Field instances are built — confirm against caller.
    map_types = {}

    def __init__(self, f, context):
        """
        :param f: the FieldDescriptorProto describing this field.
        :param context: enclosing generation context (not used in this
            method — TODO confirm what callers expect it for).
        """
        self.name = f.name
        self.number = f.number

        # Some booleans to describe the type
        self.map_type = f.type_name in Field.map_types
        self.repeated = f.label == f.LABEL_REPEATED
        # Custom Neutron options: pointer kind (RAW/SHARED/UNIQUE) and a
        # fixed array size for repeated fields.
        self.pointer = f.options.Extensions[pointer]
        self.array_size = f.options.Extensions[array_size]
        self.bytes_type = f.type == f.TYPE_BYTES
        self.one_of = False

        # Basic types are treated as primitives by the library
        self.basic = f.type not in [f.TYPE_MESSAGE, f.TYPE_GROUP, f.TYPE_BYTES]

        # Map types are special and a little more difficult to spot:
        # their "type" is the (key, value) Field pair, not a name.
        if f.type_name in Field.map_types:
            self.type = Field.map_types[f.type_name]
        # Normal message types
        elif f.type in [f.TYPE_MESSAGE, f.TYPE_ENUM, f.TYPE_GROUP]:
            self.type = f.type_name
            self.default_value = f.default_value
        # Protobuf basic types
        else:
            # Work out what primitive type we have
            # and the default default for that field
            type_info = {
                f.TYPE_DOUBLE: ('double', '0.0'),
                f.TYPE_FLOAT: ('float', '0.0'),
                f.TYPE_INT64: ('int64', '0'),
                f.TYPE_UINT64: ('uint64', '0'),
                f.TYPE_INT32: ('int32', '0'),
                f.TYPE_FIXED64: ('fixed64', '0'),
                f.TYPE_FIXED32: ('fixed32', '0'),
                f.TYPE_BOOL: ('bool', 'false'),
                f.TYPE_STRING: ('string', '""'),
                f.TYPE_BYTES: ('bytes', ''),
                f.TYPE_UINT32: ('uint32', '0'),
                f.TYPE_SFIXED32: ('sfixed32', '0'),
                f.TYPE_SFIXED64: ('sfixed64', '0'),
                f.TYPE_SINT32: ('sint32', '0'),
                f.TYPE_SINT64: ('sint64', '0')
            }[f.type]
            self.type = type_info[0]
            # Respect an explicit proto default; otherwise use the type's zero.
            self.default_value = f.default_value if f.default_value else type_info[1]

        # Timestamps default to "now" in the generated C++.
        if self.type == '.google.protobuf.Timestamp':
            self.default_value = 'NUClear::clock::now()'

        # If we are repeated or a pointer our default is changed
        if self.repeated:
            self.default_value = ''
        elif self.pointer:
            self.default_value = 'nullptr'

        # Since our cpp_type is used a lot, precalculate it
        self.cpp_type, self.special_cpp_type = self.get_cpp_type_info()

    def get_cpp_type_info(self):
        """Return ``(cpp_type, special)`` for this field.

        ``cpp_type`` is the full C++ type string; ``special`` is True when the
        type needs conversion glue (maps, vec/mat, timestamps, bytes, ...)
        rather than being a plain primitive/message type.
        """
        t = self.type

        # We are special unless we are not
        special = True

        # Type names like ".fvec3" / ".mat44" map onto the project's math types.
        vector_regex = re.compile(r'^\.([fiuc]?)vec(\d*)$')
        matrix_regex = re.compile(r'^\.([fiuc]?)mat(\d*)$')

        # Check if it is a map field
        if self.map_type:
            t = '::std::map<{}, {}>'.format(t[0].cpp_type, t[1].cpp_type)
        # Check for matrix and vector types
        elif vector_regex.match(t):
            r = vector_regex.match(t)
            t = '::message::conversion::math::{}vec{}'.format(r.group(1), r.group(2))
        elif matrix_regex.match(t):
            r = matrix_regex.match(t)
            t = '::message::conversion::math::{}mat{}'.format(r.group(1), r.group(2))
        # Timestamps and durations map to real time/duration classes
        elif t == '.google.protobuf.Timestamp':
            t = '::NUClear::clock::time_point'
        elif t == '.google.protobuf.Duration':
            t = '::NUClear::clock::duration'
        # Standard types get mapped to their appropriate type
        elif t in ['double', 'float', 'bool']:
            # double and float and bool are fine as is
            special = False
        elif t in ['int64', 'sint64', 'sfixed64']:
            t = 'int64_t'
            special = False
        elif t in ['uint64', 'fixed64']:
            t = 'uint64_t'
            special = False
        elif t in ['int32', 'sint32', 'sfixed32']:
            t = 'int32_t'
            special = False
        elif t in ['uint32', 'fixed32']:
            t = 'uint32_t'
            special = False
        elif t in ['string']:
            t = '::std::string'
            special = False
        elif t in ['bytes']:
            t = '::std::vector<uint8_t>'
        # Otherwise we assume it's a normal type and let it work out its scoping
        else:
            t = '::'.join(t.split('.'))
            special = False

        # If we are using a pointer type do the manipulation here
        if self.pointer == PointerType['RAW']:
            t = '{}*'.format(t)
        elif self.pointer == PointerType['SHARED']:
            t = '::std::shared_ptr<{}>'.format(t)
        elif self.pointer == PointerType['UNIQUE']:
            t = '::std::unique_ptr<{}>'.format(t)

        # If it's a repeated field, and not a map, it's a vector
        if self.repeated and not self.map_type:
            # If we have a fixed size use std::array instead
            if self.array_size > 0:
                t = '::std::array<{}, {}>'.format(t, self.array_size)
            else:
                t = '::std::vector<{}>'.format(t)

        return t, special

    def generate_cpp_header(self):
        """Return the C++ member declaration for this field, e.g. ``int32_t id;``."""
        return '{} {};'.format(self.cpp_type, self.name)
| {
"content_hash": "41d612c866e7dd87a14efbc263ac8b9d",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 85,
"avg_line_length": 36.84415584415584,
"alnum_prop": 0.5384208671131477,
"repo_name": "Fastcode/NUClearExample",
"id": "b62e018fa5a06120ca53a99c537514d9cf17223d",
"size": "5698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuclear/message/generator/Field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "202371"
},
{
"name": "CMake",
"bytes": "71346"
},
{
"name": "Python",
"bytes": "105361"
}
],
"symlink_target": ""
} |
"""Setup script for admin-tools-zinnia"""
from setuptools import setup
from setuptools import find_packages
import admin_tools_zinnia
# Package metadata is sourced from the admin_tools_zinnia module itself so
# version/author information lives in exactly one place.
setup(
    name='admin-tools-zinnia',
    version=admin_tools_zinnia.__version__,

    description='Admin tools for django-blog-zinnia',
    long_description=open('README.rst').read(),
    keywords='django, blog, weblog, zinnia, admin, dashboard',

    author=admin_tools_zinnia.__author__,
    author_email=admin_tools_zinnia.__email__,
    url=admin_tools_zinnia.__url__,

    # Ship everything except the demo project.
    packages=find_packages(exclude=['demo_admin_tools_zinnia']),
    classifiers=[
        'Framework :: Django',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Python Modules'],

    license=admin_tools_zinnia.__license__,
    include_package_data=True,
    zip_safe=False
)
| {
"content_hash": "ca5c619a5ac1ad0a5066312ca16ec3d8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 32.02857142857143,
"alnum_prop": 0.6574487065120428,
"repo_name": "django-blog-zinnia/admin-tools-zinnia",
"id": "55d3901ef820b9c72ab290ed6986c2d4929cd766",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2186"
},
{
"name": "Python",
"bytes": "19070"
}
],
"symlink_target": ""
} |
"""
Abstract base class for all hardware types.
"""
import abc
import six
from ironic.common import exception
from ironic.drivers import base as driver_base
from ironic.drivers.modules.network import noop as noop_net
from ironic.drivers.modules import noop
from ironic.drivers.modules.storage import noop as noop_storage
@six.add_metaclass(abc.ABCMeta)
class AbstractHardwareType(object):
    """Abstract base class for all hardware types.

    Hardware type is a family of hardware supporting the same set of interfaces
    from the ironic standpoint. This can be as wide as all hardware supporting
    the IPMI protocol or as narrow as several hardware models supporting some
    specific interfaces.

    A hardware type defines an ordered list of supported implementations for
    each driver interface (power, deploy, etc).
    """

    supported = True
    """Whether hardware is supported by the community."""

    # Required hardware interfaces: concrete hardware types MUST override
    # these four abstract properties.

    @abc.abstractproperty
    def supported_boot_interfaces(self):
        """List of supported boot interfaces."""

    @abc.abstractproperty
    def supported_deploy_interfaces(self):
        """List of supported deploy interfaces."""

    @abc.abstractproperty
    def supported_management_interfaces(self):
        """List of supported management interfaces."""

    @abc.abstractproperty
    def supported_power_interfaces(self):
        """List of supported power interfaces."""

    # Optional hardware interfaces: default to the no-op implementations,
    # which subclasses may override.

    @property
    def supported_console_interfaces(self):
        """List of supported console interfaces."""
        return [noop.NoConsole]

    @property
    def supported_inspect_interfaces(self):
        """List of supported inspect interfaces."""
        return [noop.NoInspect]

    @property
    def supported_network_interfaces(self):
        """List of supported network interfaces."""
        return [noop_net.NoopNetwork]

    @property
    def supported_raid_interfaces(self):
        """List of supported raid interfaces."""
        return [noop.NoRAID]

    @property
    def supported_storage_interfaces(self):
        """List of supported storage interfaces."""
        return [noop_storage.NoopStorage]

    @property
    def supported_vendor_interfaces(self):
        """List of supported vendor interfaces."""
        return [noop.NoVendor]

    def get_properties(self):
        """Get the properties of the hardware type.

        Note that this returns properties for the default interface of each
        type, for this hardware type. Since this is not node-aware,
        interface overrides can't be detected.

        :returns: dictionary of <property name>:<property description> entries.
        """
        # NOTE(jroll) this avoids a circular import
        from ironic.common import driver_factory

        properties = {}
        for iface_type in driver_base.ALL_INTERFACES:
            try:
                default_iface = driver_factory.default_interface(self,
                                                                 iface_type)
            except (exception.InterfaceNotFoundInEntrypoint,
                    exception.NoValidDefaultForInterface):
                # Interface types with no usable default simply contribute
                # no properties.
                continue
            iface = driver_factory.get_interface(self, iface_type,
                                                 default_iface)
            properties.update(iface.get_properties())
        return properties
| {
"content_hash": "b3951b38b54574563a64407b2bf8e20a",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 32.18867924528302,
"alnum_prop": 0.6600234466588512,
"repo_name": "NaohiroTamura/ironic",
"id": "2d0713a5899a82621a4e94e7c45a83c972c04629",
"size": "3991",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ironic/drivers/hardware_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5077786"
},
{
"name": "Shell",
"bytes": "107935"
}
],
"symlink_target": ""
} |
import numpy as np
import random
import tensorflow as tf
import os

# Bug fix: "%matplotlib inline" is an IPython/Jupyter magic and a SyntaxError
# in a plain .py file. Importing pyplot directly also fixes the NameError on
# `plt` used for plotting at the bottom of this script.
import matplotlib.pyplot as plt

from gridworld import gameEnv

# 5x5 gridworld environment the agent is trained on.
env = gameEnv(size=5)
class Qnetwork():
    """Dueling Double-DQN graph: four conv layers feeding separate
    advantage/value streams that are recombined into Q-values, plus the
    TD-error training op."""

    def __init__(self, h_size):
        # The network receives a frame from the game, flattened into an array.
        # It then resizes it and processes it through four convolutional layers.
        self.scalarInput = tf.placeholder(shape=[None, 21168], dtype=tf.float32)
        # 21168 = 84 * 84 * 3 (RGB frame).
        self.imageIn = tf.reshape(self.scalarInput, shape=[-1, 84, 84, 3])
        self.conv1 = tf.contrib.layers.convolution2d(
            inputs=self.imageIn, num_outputs=32, kernel_size=[8, 8],
            stride=[4, 4], padding='VALID', biases_initializer=None)
        self.conv2 = tf.contrib.layers.convolution2d(
            inputs=self.conv1, num_outputs=64, kernel_size=[4, 4],
            stride=[2, 2], padding='VALID', biases_initializer=None)
        self.conv3 = tf.contrib.layers.convolution2d(
            inputs=self.conv2, num_outputs=64, kernel_size=[3, 3],
            stride=[1, 1], padding='VALID', biases_initializer=None)
        self.conv4 = tf.contrib.layers.convolution2d(
            inputs=self.conv3, num_outputs=512, kernel_size=[7, 7],
            stride=[1, 1], padding='VALID', biases_initializer=None)

        # We take the output from the final convolutional layer and split it
        # into separate advantage and value streams (dueling architecture).
        self.streamAC, self.streamVC = tf.split(self.conv4, 2, 3)
        self.streamA = tf.contrib.layers.flatten(self.streamAC)
        self.streamV = tf.contrib.layers.flatten(self.streamVC)
        self.AW = tf.Variable(tf.random_normal([h_size // 2, env.actions]))
        self.VW = tf.Variable(tf.random_normal([h_size // 2, 1]))
        self.Advantage = tf.matmul(self.streamA, self.AW)
        self.Value = tf.matmul(self.streamV, self.VW)

        # Then combine them together to get our final Q-values:
        # Q = V + (A - mean(A)), which keeps the decomposition identifiable.
        self.Qout = self.Value + tf.subtract(
            self.Advantage,
            tf.reduce_mean(self.Advantage, reduction_indices=1, keep_dims=True))
        self.predict = tf.argmax(self.Qout, 1)

        # Below we obtain the loss by taking the sum of squares difference
        # between the target and prediction Q values.
        self.targetQ = tf.placeholder(shape=[None], dtype=tf.float32)
        self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
        self.actions_onehot = tf.one_hot(self.actions, env.actions, dtype=tf.float32)

        # Q-value of the action actually taken in each sampled transition.
        self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot),
                               reduction_indices=1)

        self.td_error = tf.square(self.targetQ - self.Q)
        self.loss = tf.reduce_mean(self.td_error)
        self.trainer = tf.train.AdamOptimizer(learning_rate=0.0001)
        self.updateModel = self.trainer.minimize(self.loss)
class experience_buffer():
    """Fixed-capacity FIFO replay buffer of (s, a, r, s1, d) transitions.

    When adding would exceed ``buffer_size``, the oldest entries are dropped
    to make room for the new ones.
    """

    def __init__(self, buffer_size=50000):
        self.buffer = []
        self.buffer_size = buffer_size

    def add(self, experience):
        """Append the rows of *experience*, evicting the oldest on overflow."""
        overflow = len(self.buffer) + len(experience) - self.buffer_size
        if overflow >= 0:
            del self.buffer[:overflow]
        self.buffer.extend(experience)

    def sample(self, size):
        """Return *size* uniformly sampled transitions as a (size, 5) array."""
        picked = random.sample(self.buffer, size)
        return np.reshape(np.array(picked), [size, 5])
def processState(states):
    """Flatten an 84x84x3 game frame into a length-21168 vector."""
    return np.reshape(states, (21168,))
def updateTargetGraph(tfVars, tau):
    """Build soft-update ops for the target network.

    ``tfVars`` is assumed to hold the primary network's variables in its
    first half and the target network's in its second half (creation order).
    Each op moves a target variable a fraction ``tau`` toward its primary
    counterpart: target <- tau * primary + (1 - tau) * target.
    """
    half = len(tfVars) // 2
    op_holder = []
    for idx, main_var in enumerate(tfVars[:half]):
        target_var = tfVars[idx + half]
        op_holder.append(target_var.assign(
            (main_var.value() * tau) + ((1 - tau) * target_var.value())))
    return op_holder
def updateTarget(op_holder, sess):
    """Run every assign op in *op_holder* in the given session."""
    for assign_op in op_holder:
        sess.run(assign_op)
# --- Training hyper-parameters -------------------------------------------
batch_size = 32  # How many experiences to use for each training step.
update_freq = 4  # How often (in env steps) to perform a training step.
y = .99  # Discount factor on the target Q-values.
startE = 1  # Starting chance of random action (epsilon-greedy).
endE = 0.1  # Final chance of random action.
anneling_steps = 10000.  # Steps of training over which startE decays to endE.
# (NOTE(review): "anneling" is a typo for "annealing", kept for compatibility.)
num_episodes = 10000  # How many episodes of game environment to train network with.
pre_train_steps = 10000  # How many steps of random actions before training begins.
max_epLength = 50  # The max allowed length of our episode.
load_model = False  # Whether to load a saved model.
path = "./dqn"  # The path to save our model to.
h_size = 512  # Size of the final conv layer before the Advantage/Value split.
tau = 0.001  # Rate to update target network toward primary network.

# --- Graph construction ---------------------------------------------------
tf.reset_default_graph()
mainQN = Qnetwork(h_size)
targetQN = Qnetwork(h_size)

init = tf.global_variables_initializer()

trainables = tf.trainable_variables()

# Ops that softly copy the primary network's weights into the target network.
targetOps = updateTargetGraph(trainables, tau)

myBuffer = experience_buffer()

# Set the rate of random action decrease.
e = startE
stepDrop = (startE - endE) / anneling_steps

# Create lists to contain total rewards and steps per episode.
rList = []
total_steps = 0

# Make a path for our model to be saved in.
saver = tf.train.Saver()
if not os.path.exists(path):
    os.makedirs(path)
#%%  (notebook cell marker)
# Main training loop: epsilon-greedy rollouts feeding a replay buffer,
# with Double-DQN updates every `update_freq` steps.
with tf.Session() as sess:
    if load_model == True:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    sess.run(init)
    updateTarget(targetOps, sess)  # Set the target network to be equal to the primary network.
    for i in range(num_episodes + 1):
        episodeBuffer = experience_buffer()
        # Reset environment and get first new observation.
        s = env.reset()
        s = processState(s)
        d = False
        rAll = 0
        j = 0
        # The Q-Network
        while j < max_epLength:  # End the trial after max_epLength moves.
            j += 1
            # Choose an action greedily (with chance e of a random action)
            # from the Q-network; act purely randomly during pre-training.
            if np.random.rand(1) < e or total_steps < pre_train_steps:
                a = np.random.randint(0, 4)
            else:
                a = sess.run(mainQN.predict, feed_dict={mainQN.scalarInput: [s]})[0]
            s1, r, d = env.step(a)
            s1 = processState(s1)
            total_steps += 1
            # Save the experience to our episode buffer.
            episodeBuffer.add(np.reshape(np.array([s, a, r, s1, d]), [1, 5]))
            if total_steps > pre_train_steps:
                # Anneal epsilon toward endE.
                if e > endE:
                    e -= stepDrop
                if total_steps % (update_freq) == 0:
                    trainBatch = myBuffer.sample(batch_size)  # Get a random batch of experiences.
                    # Below we perform the Double-DQN update: the main network
                    # picks the action, the target network scores it.
                    A = sess.run(mainQN.predict, feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:, 3])})
                    Q = sess.run(targetQN.Qout, feed_dict={targetQN.scalarInput: np.vstack(trainBatch[:, 3])})
                    doubleQ = Q[range(batch_size), A]
                    # NOTE(review): terminal transitions are not masked here,
                    # so the bootstrap term is applied even when d is True.
                    targetQ = trainBatch[:, 2] + y * doubleQ
                    # Update the network with our target values.
                    _ = sess.run(mainQN.updateModel,
                                 feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:, 0]),
                                            mainQN.targetQ: targetQ,
                                            mainQN.actions: trainBatch[:, 1]})
                    updateTarget(targetOps, sess)  # Move the target network toward the primary network.
            rAll += r
            s = s1
            if d == True:
                break
        # Get all experiences from this episode and store them for replay.
        myBuffer.add(episodeBuffer.buffer)
        rList.append(rAll)
        # Periodically report progress and save the model.
        if i > 0 and i % 25 == 0:
            print('episode', i, ', average reward of last 25 episode', np.mean(rList[-25:]))
        if i > 0 and i % 1000 == 0:
            saver.save(sess, path + '/model-' + str(i) + '.cptk')
            print("Saved Model")
    saver.save(sess, path + '/model-' + str(i) + '.cptk')
#%%
# Average the episode rewards in blocks of 100 and plot the learning curve.
# NOTE(review): `plt` requires a matplotlib import earlier in the file.
rMat = np.resize(np.array(rList), [len(rList) // 100, 100])
rMean = np.average(rMat, 1)
plt.plot(rMean)
| {
"content_hash": "19ca444ae8e806c16cc09bbd4d547636",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 137,
"avg_line_length": 40.365,
"alnum_prop": 0.6330979809240679,
"repo_name": "fx2003/tensorflow-study",
"id": "20d0ce1f47be87b5c3e5313bcc07a888d5d1c606",
"size": "8766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TensorFlow实战/《TensorFlow实战》代码/8_3_Value_Network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5440"
},
{
"name": "C++",
"bytes": "1291114"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "70949"
},
{
"name": "Makefile",
"bytes": "5174"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5306866"
},
{
"name": "Shell",
"bytes": "96467"
}
],
"symlink_target": ""
} |
"""
User model for authentication.
"""
from sqlite3 import OperationalError
from aiorest_ws.auth.user.abstractions import User
from aiorest_ws.auth.user.utils import generate_password_hash
from aiorest_ws.auth.user.exceptions import RequiredModelFieldsNotDefined, \
SearchCriteriaRequired, NotEnoughArguments
from aiorest_ws.auth.user.utils import SQL_CREATE_USER_TABLE, \
SQL_CREATE_TOKEN_FOREIGN_KEY, SQL_USER_GET, SQL_USER_GET_BY_USERNAME, \
SQL_USER_ADD, USER_MODEL_FIELDS, USER_MODEL_FIELDS_WITHOUT_PK, \
SQL_USER_GET_WITH_ID, construct_update_sql, convert_user_raw_data_to_dict
from aiorest_ws.conf import settings
from aiorest_ws.db.backends.sqlite3.constants import IN_MEMORY
from aiorest_ws.db.backends.sqlite3.managers import SQLiteManager
from aiorest_ws.log import logger
__all__ = ('UserSQLiteModel', )
class UserSQLiteModel(User):
    """
    SQLite user model.

    Persists/loads :class:`User` objects through a SQLite manager, which is
    taken from ``settings.DATABASES`` when configured and falls back to an
    in-memory database otherwise.
    """
    # Default manager class; replaced by the configured manager (and then by
    # an *instance* of it) in __init__.
    db_manager = SQLiteManager

    def __init__(self):
        super(UserSQLiteModel, self).__init__()

        if settings.DATABASES:
            db_path = settings.DATABASES['default']['name']
            self.db_manager = settings.DATABASES['default']['manager']
        else:
            db_path = IN_MEMORY
        # Instantiate the manager class with the chosen database path.
        self.db_manager = self.db_manager(name=db_path)

        # An in-memory database starts empty, so create the schema now.
        if db_path == IN_MEMORY:
            self.__create_models()

    def __create_models(self):
        """
        Create user model and append foreign key into token table.
        """
        try:
            self.db_manager.execute_script(SQL_CREATE_USER_TABLE)
            self.db_manager.execute_script(SQL_CREATE_TOKEN_FOREIGN_KEY)
        # This exception taken only in the case, when `user_id` foreign
        # keys already created. We didn't have any opportunity to check
        # existing column via SQL, because SQL syntax of SQLite is reduced
        except OperationalError:
            pass

    def __user_defined_fields(self, init_data):
        """
        Define fields in which changed data by the user.

        :param init_data: data, taken from the user.
        :returns: the subset of *init_data* whose keys are model fields.
        """
        overridden_fields = set(self.fields) & set(init_data.keys())
        user_defined_fields = {
            key: value
            for key, value in init_data.items()
            if key in overridden_fields
        }
        return user_defined_fields

    @property
    def fields(self):
        """
        Get list of fields with primary key.
        """
        return USER_MODEL_FIELDS

    @property
    def fields_without_pk(self):
        """
        Get list of fields without primary key.
        """
        return USER_MODEL_FIELDS_WITHOUT_PK

    def create_user(self, *args, **kwargs):
        """
        Create user in the database.

        :param args: tuple of arguments.
        :param kwargs: dictionary, where key is filled field of user model;
            ``username`` and ``password`` are mandatory.
        :raises RequiredModelFieldsNotDefined: when username/password missing.
        """
        if 'username' not in kwargs or 'password' not in kwargs:
            raise RequiredModelFieldsNotDefined(
                "Username and password fields are required"
            )
        # Never store the raw password.
        kwargs['password'] = generate_password_hash(kwargs['password'])

        user_defined_fields = self.__user_defined_fields(kwargs)
        default_user_data = {
            'first_name': '',
            'last_name': '',
            'is_active': True,
            'is_superuser': False,
            'is_staff': False,
            'is_user': False,
        }
        # User-supplied values (including username/password) override defaults.
        default_user_data.update(user_defined_fields)

        # Order the values to match the column order of the INSERT statement.
        user_data = [default_user_data[key] for key in self.fields_without_pk]
        try:
            self.db_manager.execute_sql(SQL_USER_ADD, user_data)
        except OperationalError as exc:
            # Best-effort: log the failure instead of propagating.
            logger.error(exc)

    def update_user(self, *args, **kwargs):
        """
        Update user row in the database.

        :param args: tuple of arguments.
        :param kwargs: dictionary, where key is updated field of user model;
            ``username`` selects the row and at least one other field is
            required.
        :raises SearchCriteriaRequired: when ``username`` is not given.
        :raises NotEnoughArguments: when no fields to update were given.
        """
        username = kwargs.pop('username', None)
        if not username:
            raise SearchCriteriaRequired("Username for WHEN statement is required.")  # NOQA

        if len(kwargs) < 1:
            raise NotEnoughArguments()

        if 'password' in kwargs.keys():
            kwargs['password'] = generate_password_hash(kwargs['password'])

        updated_fields = self.__user_defined_fields(kwargs)
        update_query, query_args = construct_update_sql(**updated_fields)
        # Username is the final placeholder (the WHERE clause).
        query_args.append(username)
        try:
            self.db_manager.execute_sql(update_query, query_args)
        except OperationalError as exc:
            logger.error(exc)

    def get_user_by_username(self, username, with_id=False):
        """
        Get user by his username from the database.

        :param username: username as a string.
        :param with_id: boolean flag, which means necessity to append to the
            result object primary key of database row or not.
        :returns: a :class:`User` (empty when not found or on DB error).
        """
        try:
            if with_id:
                sql = SQL_USER_GET_WITH_ID
            else:
                sql = SQL_USER_GET_BY_USERNAME

            user_row = self.db_manager.execute_sql(sql, (username, )).fetchone()  # NOQA
            if user_row:
                user_data = convert_user_raw_data_to_dict(user_row, with_id)
            else:
                user_data = {}
        except OperationalError as exc:
            logger.error(exc)
            user_data = {}
        return User(**user_data)

    def get_user_by_token(self, token):
        """
        Get user object from the database, based on the his token.

        :param token: passed token as a dictionary object; must contain a
            ``user_id`` key.
        :returns: a :class:`User` (empty when not found or on DB error).
        """
        user_id = token['user_id']
        try:
            user_row = self.db_manager.execute_sql(
                SQL_USER_GET, (user_id, )
            ).fetchone()
            if user_row:
                user_data = convert_user_raw_data_to_dict(user_row)
            else:
                user_data = {}
        except OperationalError as exc:
            logger.error(exc)
            user_data = {}
        return User(**user_data)
| {
"content_hash": "7c32504a5706ea0e0a181ca41868d15c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 92,
"avg_line_length": 33.76111111111111,
"alnum_prop": 0.5897646865229554,
"repo_name": "Relrin/aiorest-ws",
"id": "31a905216ca9b35496b88827fd5926c96f4d972a",
"size": "6101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiorest_ws/auth/user/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "233"
},
{
"name": "Python",
"bytes": "908265"
}
],
"symlink_target": ""
} |
from threading import Lock
class ResponsibleGenerator(object):
    """Iterator wrapper that runs *cleanup* when the wrapper is destroyed.

    Lets a caller hand out a generator while guaranteeing a teardown hook
    fires once the consumer is finished with (or abandons) it.
    """

    __slots__ = ['cleanup', 'gen']

    def __init__(self, gen, cleanup):
        self.gen = gen
        self.cleanup = cleanup

    def __del__(self):
        # Invoked on garbage collection: release whatever the owner reserved.
        self.cleanup()

    def __iter__(self):
        return self

    def __next__(self):
        # Delegate to the wrapped generator; StopIteration propagates.
        return self.gen.__next__()
class ConcurrentStore(object):
    """Wrap a triple store so iteration and mutation can overlap safely.

    While any ``triples()`` generator is still being consumed, ``add`` and
    ``remove`` are queued in pending lists instead of mutating the store;
    the queued changes are applied once the last reader finishes (signalled
    via :class:`ResponsibleGenerator`'s cleanup hook).
    """

    def __init__(self, store):
        self.store = store

        # number of calls to visit still in progress
        self.__visit_count = 0

        # lock protecting the visit count and the deferred application of
        # pending changes
        self.__lock = Lock()

        # triples added/removed while readers were active, applied later
        self.__pending_removes = []
        self.__pending_adds = []

    def add(self, triple):
        """Add *triple*, deferring the write while any reader is active."""
        (s, p, o) = triple
        if self.__visit_count == 0:
            self.store.add((s, p, o))
        else:
            self.__pending_adds.append((s, p, o))

    def remove(self, triple):
        """Remove *triple*, deferring the write while any reader is active."""
        (s, p, o) = triple
        if self.__visit_count == 0:
            self.store.remove((s, p, o))
        else:
            self.__pending_removes.append((s, p, o))

    def triples(self, triple):
        """Yield matching triples from the store plus pending adds,
        skipping triples queued for removal."""
        (su, pr, ob) = triple
        gen = self.store.triples((su, pr, ob))
        pending_removes = self.__pending_removes
        self.__begin_read()
        for s, p, o in ResponsibleGenerator(gen, self.__end_read):
            if not (s, p, o) in pending_removes:
                yield s, p, o
        # Pending adds are not in the store yet, so match them manually.
        for (s, p, o) in self.__pending_adds:
            if (su is None or su == s) \
                    and (pr is None or pr == p) \
                    and (ob is None or ob == o):
                yield s, p, o

    def __len__(self):
        # NOTE: does not account for pending adds/removes, matching the
        # original behavior.
        return self.store.__len__()

    def __begin_read(self):
        # `with` guarantees release even if the increment ever raised;
        # the original acquire/release pair could leak the lock.
        with self.__lock:
            self.__visit_count = self.__visit_count + 1

    def __end_read(self):
        with self.__lock:
            self.__visit_count = self.__visit_count - 1
            if self.__visit_count == 0:
                # Last reader gone: flush queued removes, then queued adds.
                pending_removes = self.__pending_removes
                while pending_removes:
                    (s, p, o) = pending_removes.pop()
                    try:
                        self.store.remove((s, p, o))
                    # Bug fix: the original bare ``except:`` also swallowed
                    # KeyboardInterrupt/SystemExit; narrowed to Exception.
                    except Exception:
                        print(s, p, o, "Not in store to remove")

                pending_adds = self.__pending_adds
                while pending_adds:
                    (s, p, o) = pending_adds.pop()
                    self.store.add((s, p, o))
| {
"content_hash": "d66d8efff2b3b651ef4fcc3d6364df12",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 73,
"avg_line_length": 28.617021276595743,
"alnum_prop": 0.49516728624535317,
"repo_name": "hwroitzsch/DayLikeTodayClone",
"id": "40747fb11fa7625086749584a42fd8e71e46bcb9",
"size": "2690",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/rdflib/plugins/stores/concurrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "8860"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "402138"
},
{
"name": "PigLatin",
"bytes": "1401"
},
{
"name": "Python",
"bytes": "4694103"
},
{
"name": "Shell",
"bytes": "3823"
}
],
"symlink_target": ""
} |
import logging
from Top import Top
log = logging.getLogger(__name__)
if __name__ == "__main__":
    # When run as a script, make the parent directory importable.
    # NOTE(review): `from Top import Top` above already executed by this
    # point, so this only affects imports performed later — confirm intent.
    import sys
    sys.path.append('..')
class AtomicProps(Top):
    """
    This is a container for atomic properties.
    Expected functionality:
    1. Provide data structure to store atomic properties,
    2. Visualize properties in Jmol using labels and color gradients
    """

    def __init__(self, attr='partialCharge', data=None):
        # Property name; also used as the Jmol label/button caption.
        self.attrname = attr
        # Decimal places used for both __str__ and Jmol labels.
        self.precision = 6
        if data is None:
            self.data = []
        else:
            # Also expose the values under the property's own name
            # (e.g. self.partialCharge), in addition to self.data.
            setattr(self, attr, data)
            self.data = data

    def __str__(self):
        """
        Represents self.data as string with space-separated values
        :rtype: string
        """
        if len(self.data) == 0:
            return ''
        a0 = self.data[0]
        # Choose a printf-style format from the first element's type;
        # assumes the list is homogeneous (int, float, or str) — TODO confirm.
        type2format = {
            int: '%i ',
            float: '%.' + str(self.precision) + 'f ',
            str: '%s '
        }
        template = (type2format[a0.__class__]) * len(self.data)
        s = template % tuple(self.data)
        return s.rstrip()

    def webdata(self):
        """
        Create a button on the web page that will color and label each atom
        """
        h_1 = ""
        h_2 = ""
        col_min = -1.0
        col_max = 1.0
        if '_H' in self.attrname:
            # '_H' variants: labels are applied to non-hydrogen atoms only.
            h_1 = "color atoms cpk; label off ; select not Hydrogen ;"
            h_2 = "; select all"
        elif '_proton' in self.attrname:
            # Sample options for showing 1H NMR chemical shifts
            h_1 = "color atoms cpk; label off ; select Hydrogen ;"
            h_2 = "; select all"
            col_min = 0.0
            col_max = 9.0
        elif '_carbon' in self.attrname:
            # Sample options for showing 13C NMR chemical shifts
            h_1 = "color atoms cpk; label off ; select Carbon ;"
            h_2 = "; select all"
            col_min = 0.0
            col_max = 200.0
        # Jmol script: attach per-atom data, label atoms, and color them on a
        # red-white-blue gradient between col_min and col_max.
        script_on = "x='%(a)s'; DATA '%(p)s @x'; %(h_1)s label %%.%(precision)s[%(p)s]; color atoms %(p)s 'rwb' absolute %(col_min)f %(col_max)f %(h_2)s" % {
            'a': str(self),
            'p': 'property_' + self.attrname,
            'precision': str(self.precision),
            'col_min': col_min,
            'col_max': col_max,
            'h_1': h_1,
            'h_2': h_2
        }
        # NOTE(review): self.settings is inherited from Top — confirm it is
        # assigned before webdata() is called (as the __main__ demo does).
        we = self.settings.Engine3D()
        return we.html_button(script_on, self.attrname)
if __name__ == '__main__':
    # Minimal manual smoke test: build a container and print its Jmol button HTML.
    from Settings import Settings
    from Top import Top
    Top.settings = Settings()
    ap = AtomicProps(attr='partialCharge')
    ap.data = [-0.2, -0.2, -0.2, 0.3, 0.3, .5, -.5]
    # print ap
    print(ap.webdata())
| {
"content_hash": "e6d646bd7e0a9bae9fefaf1a39eb6a22",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 157,
"avg_line_length": 30.255555555555556,
"alnum_prop": 0.5045905251560778,
"repo_name": "talipovm/terse",
"id": "d704dc19e4fe7c59600706ad2de51353a4798381",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terse/Containers/AtomicProps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "22473"
},
{
"name": "HTML",
"bytes": "1203"
},
{
"name": "Python",
"bytes": "284642"
},
{
"name": "R",
"bytes": "6372"
}
],
"symlink_target": ""
} |
import sys
from rest_framework import serializers as ser
from api.base.serializers import (
JSONAPISerializer,
IDField,
TypeField,
Link,
LinksField,
RelationshipField,
DateByVersion,
)
from api.base.utils import absolute_reverse
from framework.auth.core import Auth
class WikiSerializer(JSONAPISerializer):
    """JSON-API serializer exposing a wiki page as a file-like resource."""

    filterable_fields = frozenset([
        'name',
        'date_modified'
    ])

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    name = ser.CharField(source='page_name')
    kind = ser.SerializerMethodField()
    size = ser.SerializerMethodField()
    path = ser.SerializerMethodField()
    # materialized_path reuses get_path, so it always equals `path`.
    materialized_path = ser.SerializerMethodField(method_name='get_path')
    date_modified = DateByVersion(source='date')
    content_type = ser.SerializerMethodField()
    current_user_can_comment = ser.SerializerMethodField(help_text='Whether the current user is allowed to post comments')
    extra = ser.SerializerMethodField(help_text='Additional metadata about this wiki')
    user = RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<user._id>'}
    )
    # LinksField.to_representation adds link to "self"
    links = LinksField({
        'info': Link('wikis:wiki-detail', kwargs={'wiki_id': '<_id>'}),
        'download': 'get_wiki_content'
    })

    class Meta:
        type_ = 'wikis'

    def get_absolute_url(self, obj):
        return obj.get_absolute_url()

    def get_path(self, obj):
        return '/{}'.format(obj._id)

    def get_kind(self, obj):
        # Wikis are always presented as files (never folders).
        return 'file'

    def get_size(self, obj):
        # NOTE(review): sys.getsizeof is the Python object size, not the byte
        # length of the wiki content — confirm this is the intended metric.
        return sys.getsizeof(obj.content)

    def get_current_user_can_comment(self, obj):
        # Anonymous users are represented as Auth(None).
        user = self.context['request'].user
        auth = Auth(user if not user.is_anonymous() else None)
        return obj.node.can_comment(auth)

    def get_content_type(self, obj):
        return 'text/markdown'

    def get_extra(self, obj):
        return {
            'version': obj.version
        }

    def get_wiki_content(self, obj):
        # Download URL for the specific version captured from the request kwargs.
        return absolute_reverse('wikis:wiki-content', kwargs={
            'wiki_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
class NodeWikiSerializer(WikiSerializer):
    """Wiki serializer variant whose node/comments relationships point at node views."""

    node = RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<node._id>'}
    )
    comments = RelationshipField(
        related_view='nodes:node-comments',
        related_view_kwargs={'node_id': '<node._id>'},
        related_meta={'unread': 'get_unread_comments_count'},
        filter={'target': '<_id>'}
    )
class RegistrationWikiSerializer(WikiSerializer):
    """Wiki serializer variant whose relationships point at registration views."""

    node = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<node._id>'}
    )
    comments = RelationshipField(
        related_view='registrations:registration-comments',
        related_view_kwargs={'node_id': '<node._id>'},
        related_meta={'unread': 'get_unread_comments_count'},
        filter={'target': '<_id>'}
    )
class NodeWikiDetailSerializer(NodeWikiSerializer):
    """
    Overrides NodeWikiSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
class RegistrationWikiDetailSerializer(RegistrationWikiSerializer):
    """
    Overrides RegistrationWikiSerializer to make id required.
    """
    # Docstring corrected: it previously claimed to override NodeWikiSerializer,
    # but the base class is RegistrationWikiSerializer.
    id = IDField(source='_id', required=True)
| {
"content_hash": "e9350c9a857dac08a018cfb7a5365386",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 122,
"avg_line_length": 27.912,
"alnum_prop": 0.6408713098308971,
"repo_name": "monikagrabowska/osf.io",
"id": "1a9acfef2f3c284cbaaa1afe752282306e22d755",
"size": "3489",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/wikis/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176566"
},
{
"name": "HTML",
"bytes": "183119"
},
{
"name": "JavaScript",
"bytes": "2017358"
},
{
"name": "Jupyter Notebook",
"bytes": "8510"
},
{
"name": "Makefile",
"bytes": "6905"
},
{
"name": "Mako",
"bytes": "755899"
},
{
"name": "PLpgSQL",
"bytes": "22144"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "9632033"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.pseasons
fs = 24  # title font size
fssml = 16  # axis-label font size
mild_s = fxn.gp_mild
mod_s = fxn.gp_mod
sev_s = fxn.gp_sev
s_lab = fxn.gp_seasonlabels
sevcol = fxn.gp_severitycolors
bw = fxn.gp_barwidth
sevlab = fxn.gp_severitylabels
### functions ###
### import data ###
# Use context managers so both CSV handles are closed once processing is done
# (the original left them open for the life of the process).
with open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv', 'r') as incidin, \
        open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r') as popin:
    incid = csv.reader(incidin, delimiter=',')
    pop = csv.reader(popin, delimiter=',')
    # d_wk[week] = seasonnum, dict_ageIncidAdjust53ls[(season, age)] = [adj incid per 100000 wk 40, ... wk 39]
    d_wk, d_ageIncidAdjust53ls = fxn.week_incidCA_processing(incid, pop)
# dict_attackCA_norm[seasonnum] = (% dev from baseline child attack rate, % dev from baseline adult attack rate)
d_attackCA_norm = fxn.normalize_attackCA(d_wk, d_ageIncidAdjust53ls)
# initialize figure
fig = plt.figure()
# bar chart of normalized adult attack rates (index 1 of the tuple)
ax = fig.add_subplot(2, 1, 1)
mild = ax.bar(mild_s, [d_attackCA_norm[k][1] for k in mild_s], bw, color=sevcol[0], align='center')
moderate = ax.bar(mod_s, [d_attackCA_norm[k][1] for k in mod_s], bw, color=sevcol[1], align='center')
severe = ax.bar(sev_s, [d_attackCA_norm[k][1] for k in sev_s], bw, color=sevcol[2], align='center')
plt.gca().xaxis.set_major_locator(plt.NullLocator())  # hide xticks and xlabels
ax.hlines(y=0, xmin=0, xmax=10)
ax.legend([mild, moderate, severe], sevlab, loc='upper left')
ax.set_title('Adult Attack Rate', fontsize=fs)
ax.set_xlim([1, 10])
ax.set_ylim([-40, 60])
# bar chart of normalized child attack rates (index 0 of the tuple)
ax = fig.add_subplot(2, 1, 2)
ax.bar(mild_s, [d_attackCA_norm[k][0] for k in mild_s], bw, color=sevcol[0], align='center')
ax.bar(mod_s, [d_attackCA_norm[k][0] for k in mod_s], bw, color=sevcol[1], align='center')
ax.bar(sev_s, [d_attackCA_norm[k][0] for k in sev_s], bw, color=sevcol[2], align='center')
ax.hlines(y=0, xmin=0, xmax=10)
ax.set_xticks(range(2, 10))
ax.set_xticklabels(s_lab)
ax.set_title('Child Attack Rate', fontsize=fs)
ax.set_xlim([1, 10])
ax.set_ylim([-40, 60])
# reduce space between subplots
plt.subplots_adjust(hspace=0.15)
# yaxis text, vertical alignment moves text towards center of two plots
plt.text(0.25, 15, 'Percent Deviation from Baseline', va='bottom', rotation='vertical', fontsize=fssml)
# save figure
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/Supp/attackCA_percdev_bipanel.png' , transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
"content_hash": "f675936c7f96ca6bf5768aa61853e3ca",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 188,
"avg_line_length": 38.605633802816904,
"alnum_prop": 0.7128785114921562,
"repo_name": "eclee25/flu-SDI-exploratory-age",
"id": "f2664164dcd0cc926678ddb62d152e47dc945d62",
"size": "3510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create_fluseverity_figs_v5/S_AR_season_CA_normalized_v5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1262523"
},
{
"name": "R",
"bytes": "301185"
}
],
"symlink_target": ""
} |
'''
Replaces all blank values in a column with the first non-blank value in a series
of columns, falling back on a default_value if all are blank.
```
tweaks:
coalesce:
# the first of the following list of columns is the one that is modified
columns: first_priority_column second_priority_column third_priority_column
default_value: N/A
table: sheet1 # optional
```
'''
def apply(wb, params):
    """Fill blank values in the first listed column from later columns.

    For every row of each matching table, the first column named in
    ``params['columns']`` is set to the first non-blank value found when
    scanning the listed columns left to right; if all are blank it is set
    to ``params['default']``.

    NOTE(review): the module docstring documents the key as ``default_value``
    but the code reads ``params['default']`` — confirm which one callers pass.

    Raises:
        KeyError: if no table contained all of the requested columns.
    """
    columns = params['columns']
    default_value = params['default']
    table = params.get('table')
    active = False
    for name, t in wb['tables'].items():
        if name == table or table is None:
            # Skip tables missing any of the requested columns.
            if len(set(columns) - set(t['columns'])) > 0:
                continue
            active = True
            for row in t['rows']:
                value = None
                for column in columns:
                    if value is not None and value != "":
                        break
                    value = row[column]
                if value is None or value == '':
                    value = default_value
                row[columns[0]] = value
    if not active:
        # Bug fix: the original raised KeyError(column), which is a NameError
        # when no table matched (the loop variable was never bound). Report
        # the requested columns instead.
        raise KeyError(', '.join(columns))
| {
"content_hash": "b07ddfdaa793325e0778eb3cf558058f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 32.588235294117645,
"alnum_prop": 0.5406137184115524,
"repo_name": "paulfitz/sheetsite",
"id": "84737210bd2272c8c94993b1e308914969f846f1",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sheetsite/tweaks/coalesce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "848"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Python",
"bytes": "120669"
}
],
"symlink_target": ""
} |
# Copyright 2012 Calvin Rien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A pbxproj file is an OpenStep format plist
# {} represents dictionary of key=value pairs delimited by ;
# () represents list of values delimited by ,
# file starts with a comment specifying the character type
# // !$*UTF8*$!
# when adding a file to a project, create the PBXFileReference
# add the PBXFileReference's guid to a group
# create a PBXBuildFile with the PBXFileReference's guid
# add the PBXBuildFile to the appropriate build phase
# when adding a header search path add
# HEADER_SEARCH_PATHS = "path/**";
# to each XCBuildConfiguration object
# Xcode4 will read either a OpenStep or XML plist.
# this script uses `plutil` to validate, read and write
# the pbxproj file. Plutil is available in OS X 10.2 and higher
# Plutil can't write OpenStep plists, so I save as XML
import datetime
import json
import ntpath
import os
import plistlib
import re
import shutil
import subprocess
import uuid
from UserDict import IterableUserDict
from UserList import UserList
# Module-level pattern of characters allowed in pbxproj path-like tokens.
regex = '[a-zA-Z0-9\\._/-]*'
class PBXEncoder(json.JSONEncoder):
    """JSON encoder that understands the PBX wrapper containers."""

    def default(self, obj):
        """Tests the input object, obj, to encode as JSON."""
        # PBXList/PBXDict are serialized via their underlying .data payload.
        if isinstance(obj, (PBXList, PBXDict)):
            return obj.data
        return super(PBXEncoder, self).default(obj)
class PBXDict(IterableUserDict):
    """Dict wrapper that coerces keys and values through PBXType.Convert."""

    def __init__(self, d=None):
        if d:
            d = dict((PBXType.Convert(k), PBXType.Convert(v))
                     for k, v in d.items())
        IterableUserDict.__init__(self, d)

    def __setitem__(self, key, value):
        IterableUserDict.__setitem__(self, PBXType.Convert(key),
                                     PBXType.Convert(value))

    def remove(self, key):
        """Delete the (converted) key if present; silent when absent."""
        self.data.pop(PBXType.Convert(key), None)
class PBXList(UserList):
    """List wrapper that converts entries via PBXType.Convert and de-duplicates adds."""

    def __init__(self, l=None):
        if isinstance(l, basestring):
            # A bare string becomes a single-element list.
            UserList.__init__(self)
            self.add(l)
            return
        elif l:
            l = [PBXType.Convert(item) for item in l]
        UserList.__init__(self, l)

    def add(self, value):
        """Append the converted value if absent; True when it was added."""
        converted = PBXType.Convert(value)
        if converted in self.data:
            return False
        self.data.append(converted)
        return True

    def remove(self, value):
        """Remove the converted value if present; True when it was removed."""
        converted = PBXType.Convert(value)
        try:
            self.data.remove(converted)
        except ValueError:
            return False
        return True

    def __setitem__(self, key, value):
        UserList.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value))
class PBXType(PBXDict):
    """Base class for typed pbxproj objects; the 'isa' key names the concrete type."""

    def __init__(self, d=None):
        PBXDict.__init__(self, d)
        if 'isa' not in self:
            # Default the isa tag to the concrete subclass name.
            self['isa'] = self.__class__.__name__
        # 24-hex-digit guid, assigned when the object joins a project.
        self.id = None

    @staticmethod
    def Convert(o):
        # Recursively wrap plain lists/dicts in PBX containers; dicts carrying
        # a recognised 'isa' are instantiated as the matching PBXType subclass
        # looked up in this module's globals().
        if isinstance(o, list):
            return PBXList(o)
        elif isinstance(o, dict):
            isa = o.get('isa')
            if not isa:
                return PBXDict(o)
            cls = globals().get(isa)
            if cls and issubclass(cls, PBXType):
                return cls(o)
            print 'warning: unknown PBX type: %s' % isa
            return PBXDict(o)
        else:
            return o

    @staticmethod
    def IsGuid(o):
        # Xcode object ids are exactly 24 uppercase hex digits.
        return re.match('^[A-F0-9]{24}$', str(o))

    @classmethod
    def GenerateId(cls):
        # Drop the first uuid4 group so the result is 24 hex characters.
        return ''.join(str(uuid.uuid4()).upper().split('-')[1:])

    @classmethod
    def Create(cls, *args, **kwargs):
        return cls(*args, **kwargs)
class PBXFileReference(PBXType):
    """Reference to a file on disk plus the build phase it belongs in."""

    def __init__(self, d=None):
        PBXType.__init__(self, d)
        # Name of the build-phase class this file should join (set by
        # guess_file_type); None for files that are not built.
        self.build_phase = None

    # extension -> (Xcode lastKnownFileType, build phase class name or None)
    types = {
        '.a': ('archive.ar', 'PBXFrameworksBuildPhase'),
        '.app': ('wrapper.application', None),
        '.s': ('sourcecode.asm', 'PBXSourcesBuildPhase'),
        '.c': ('sourcecode.c.c', 'PBXSourcesBuildPhase'),
        '.cpp': ('sourcecode.cpp.cpp', 'PBXSourcesBuildPhase'),
        '.framework': ('wrapper.framework', 'PBXFrameworksBuildPhase'),
        '.h': ('sourcecode.c.h', None),
        '.hpp': ('sourcecode.c.h', None),
        '.icns': ('image.icns', 'PBXResourcesBuildPhase'),
        '.m': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'),
        '.j': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'),
        '.mm': ('sourcecode.cpp.objcpp', 'PBXSourcesBuildPhase'),
        '.nib': ('wrapper.nib', 'PBXResourcesBuildPhase'),
        '.plist': ('text.plist.xml', 'PBXResourcesBuildPhase'),
        '.json': ('text.json', 'PBXResourcesBuildPhase'),
        '.png': ('image.png', 'PBXResourcesBuildPhase'),
        '.rtf': ('text.rtf', 'PBXResourcesBuildPhase'),
        '.tiff': ('image.tiff', 'PBXResourcesBuildPhase'),
        '.txt': ('text', 'PBXResourcesBuildPhase'),
        '.xcodeproj': ('wrapper.pb-project', None),
        '.xib': ('file.xib', 'PBXResourcesBuildPhase'),
        '.strings': ('text.plist.strings', 'PBXResourcesBuildPhase'),
        '.bundle': ('wrapper.plug-in', 'PBXResourcesBuildPhase'),
        '.dylib': ('compiled.mach-o.dylib', 'PBXFrameworksBuildPhase')
    }

    # Valid values for the 'sourceTree' attribute.
    trees = [
        '<absolute>',
        '<group>',
        'BUILT_PRODUCTS_DIR',
        'DEVELOPER_DIR',
        'SDKROOT',
        'SOURCE_ROOT',
    ]

    def guess_file_type(self, ignore_unknown_type=False):
        """Infer and store lastKnownFileType from the file extension."""
        self.remove('explicitFileType')
        self.remove('lastKnownFileType')
        ext = os.path.splitext(self.get('name', ''))[1]
        # NOTE(review): self.get('path') may be None here, which would make
        # os.path.isdir raise — confirm 'path' is always set before this call.
        if os.path.isdir(self.get('path')) and ext != '.framework' and ext != '.bundle':
            # Plain directories become folder references and are not built.
            f_type = 'folder'
            build_phase = None
            ext = ''
        else:
            # Unknown extensions default to a resource with type '?'.
            f_type, build_phase = PBXFileReference.types.get(ext, ('?', 'PBXResourcesBuildPhase'))
        self['lastKnownFileType'] = f_type
        self.build_phase = build_phase
        if f_type == '?' and not ignore_unknown_type:
            print 'unknown file extension: %s' % ext
            print 'please add extension and Xcode type to PBXFileReference.types'
        return f_type

    def set_file_type(self, ft):
        """Force an explicit file type, replacing any guessed one."""
        self.remove('explicitFileType')
        self.remove('lastKnownFileType')
        self['explicitFileType'] = ft

    @classmethod
    def Create(cls, os_path, tree='SOURCE_ROOT', ignore_unknown_type=False):
        """Build a file reference for os_path rooted at the given source tree."""
        if tree not in cls.trees:
            print 'Not a valid sourceTree type: %s' % tree
            return None
        fr = cls()
        fr.id = cls.GenerateId()
        fr['path'] = os_path
        fr['name'] = os.path.split(os_path)[1]
        # Absolute paths always use the '<absolute>' source tree.
        fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree
        fr.guess_file_type(ignore_unknown_type=ignore_unknown_type)
        return fr
class PBXBuildFile(PBXType):
    """Build-phase membership record pointing at a PBXFileReference."""

    def set_weak_link(self, weak=False):
        """Add or remove the 'Weak' linking attribute.

        Returns True when settings were updated, False when there was
        nothing to change.
        """
        k_settings = 'settings'
        k_attributes = 'ATTRIBUTES'
        s = self.get(k_settings)
        if not s:
            if weak:
                self[k_settings] = PBXDict({k_attributes: PBXList(['Weak'])})
                return True
            # Bug fix: nothing to remove when no settings dict exists. The
            # original fell through and crashed on s.get() when s was None.
            return False
        atr = s.get(k_attributes)
        if not atr:
            if weak:
                atr = PBXList()
            else:
                return False
        if weak:
            atr.add('Weak')
        else:
            atr.remove('Weak')
        self[k_settings][k_attributes] = atr
        return True

    def add_compiler_flag(self, flag):
        """Append a COMPILER_FLAGS entry.

        Returns True when the flag was added, False when it was already set.
        """
        k_settings = 'settings'
        k_attributes = 'COMPILER_FLAGS'
        if k_settings not in self:
            self[k_settings] = PBXDict()
        if k_attributes not in self[k_settings]:
            self[k_settings][k_attributes] = flag
            return True
        flags = self[k_settings][k_attributes].split(' ')
        if flag in flags:
            return False
        flags.append(flag)
        self[k_settings][k_attributes] = ' '.join(flags)
        # Bug fix: the original implicitly returned None on this path even
        # though the flag was appended; report the modification.
        return True

    @classmethod
    def Create(cls, file_ref, weak=False):
        """Build a PBXBuildFile for a file reference (object or guid)."""
        if isinstance(file_ref, PBXFileReference):
            file_ref = file_ref.id
        bf = cls()
        bf.id = cls.GenerateId()
        bf['fileRef'] = file_ref
        if weak:
            bf.set_weak_link(True)
        return bf
class PBXGroup(PBXType):
    """A group node in the project tree; 'children' holds child guids."""

    def add_child(self, ref):
        """Append a PBXFileReference/PBXGroup child; returns its guid or None."""
        if not isinstance(ref, PBXDict):
            return None
        if ref.get('isa') not in ('PBXFileReference', 'PBXGroup'):
            return None
        if 'children' not in self:
            self['children'] = PBXList()
        self['children'].add(ref.id)
        return ref.id

    def remove_child(self, id):
        """Remove a child by guid (or by an object carrying .id)."""
        if 'children' not in self:
            self['children'] = PBXList()
            return
        if not PBXType.IsGuid(id):
            id = id.id
        self['children'].remove(id)

    def has_child(self, id):
        """True when the guid (or object) is a direct child of this group."""
        if 'children' not in self:
            self['children'] = PBXList()
            return False
        if not PBXType.IsGuid(id):
            id = id.id
        return id in self['children']

    def get_name(self):
        """Display name: explicit 'name', else the basename of 'path'."""
        fallback = os.path.split(self.get('path', ''))[1]
        return self.get('name', fallback)

    @classmethod
    def Create(cls, name, path=None, tree='SOURCE_ROOT'):
        """Build a new group; providing a path makes it folder-backed under `tree`."""
        grp = cls()
        grp.id = cls.GenerateId()
        grp['name'] = name
        grp['children'] = PBXList()
        if path:
            grp['path'] = path
            grp['sourceTree'] = tree
        else:
            grp['sourceTree'] = '<group>'
        return grp
# The following classes add no behavior of their own; they exist so that
# PBXType.Convert can instantiate the correct 'isa' tag for each object kind.
class PBXNativeTarget(PBXType):
    pass


class PBXProject(PBXType):
    pass


class PBXContainerItemProxy(PBXType):
    pass


class PBXReferenceProxy(PBXType):
    pass


class PBXVariantGroup(PBXType):
    pass


class PBXTargetDependency(PBXType):
    pass


class PBXAggregateTarget(PBXType):
    pass


class PBXHeadersBuildPhase(PBXType):
    pass
class PBXBuildPhase(PBXType):
    """Common behavior for build phases: a 'files' list of PBXBuildFile guids."""

    def add_build_file(self, bf):
        """Register a PBXBuildFile in this phase; False when bf is not one."""
        if bf.get('isa') != 'PBXBuildFile':
            return False
        if 'files' not in self:
            self['files'] = PBXList()
        self['files'].add(bf.id)
        return True

    def remove_build_file(self, id):
        """Remove a build-file guid; just creates an empty list when absent."""
        if 'files' not in self:
            self['files'] = PBXList()
            return
        self['files'].remove(id)

    def has_build_file(self, id):
        """True when the guid (or object carrying .id) is in this phase."""
        if 'files' not in self:
            self['files'] = PBXList()
            return False
        if not PBXType.IsGuid(id):
            id = id.id
        return id in self['files']
# Marker subclasses: behavior comes entirely from PBXBuildPhase.
class PBXFrameworksBuildPhase(PBXBuildPhase):
    pass


class PBXResourcesBuildPhase(PBXBuildPhase):
    pass
class PBXShellScriptBuildPhase(PBXBuildPhase):
    """A run-script build phase wrapping an inline shell script."""

    @classmethod
    def Create(cls, script, shell="/bin/sh", files=None, input_paths=None, output_paths=None, show_in_log='0'):
        """Build a shell-script phase.

        Bug fix: the list parameters previously used mutable default
        arguments ([]), which are shared across calls; None sentinels are
        used instead (backward-compatible — explicit lists behave as before).
        """
        bf = cls()
        bf.id = cls.GenerateId()
        bf['files'] = files if files is not None else []
        bf['inputPaths'] = input_paths if input_paths is not None else []
        bf['outputPaths'] = output_paths if output_paths is not None else []
        bf['runOnlyForDeploymentPostprocessing'] = '0'
        bf['shellPath'] = shell
        bf['shellScript'] = script
        bf['showEnvVarsInLog'] = show_in_log
        return bf
# Marker subclasses: behavior comes entirely from PBXBuildPhase.
class PBXSourcesBuildPhase(PBXBuildPhase):
    pass


class PBXCopyFilesBuildPhase(PBXBuildPhase):
    pass
class XCBuildConfiguration(PBXType):
    """A build configuration; mutators return True when settings changed."""

    def add_search_paths(self, paths, base, key, recursive=True, escape=True):
        """Append search paths under self[base][key]; True if anything changed."""
        modified = False
        if not isinstance(paths, list):
            paths = [paths]
        if base not in self:
            self[base] = PBXDict()
        for path in paths:
            # Xcode's '/**' suffix means "search recursively".
            if recursive and not path.endswith('/**'):
                path = os.path.join(path, '**')
            if key not in self[base]:
                self[base][key] = PBXList()
            elif isinstance(self[base][key], basestring):
                # Promote a single-string setting to a list before appending.
                self[base][key] = PBXList(self[base][key])
            if escape:
                if self[base][key].add('"%s"' % path):  # '\\"%s\\"' % path
                    modified = True
            else:
                if self[base][key].add(path):  # '\\"%s\\"' % path
                    modified = True
        return modified

    def add_header_search_paths(self, paths, recursive=True):
        return self.add_search_paths(paths, 'buildSettings', 'HEADER_SEARCH_PATHS', recursive=recursive)

    def add_library_search_paths(self, paths, recursive=True):
        return self.add_search_paths(paths, 'buildSettings', 'LIBRARY_SEARCH_PATHS', recursive=recursive)

    def add_framework_search_paths(self, paths, recursive=True):
        return self.add_search_paths(paths, 'buildSettings', 'FRAMEWORK_SEARCH_PATHS', recursive=recursive)

    def _add_flags(self, key, flags):
        # Shared body of add_other_cflags/add_other_ldflags (the two methods
        # were previously duplicated line for line).
        modified = False
        base = 'buildSettings'
        if isinstance(flags, basestring):
            flags = PBXList(flags)
        if base not in self:
            self[base] = PBXDict()
        for flag in flags:
            if key not in self[base]:
                self[base][key] = PBXList()
            elif isinstance(self[base][key], basestring):
                self[base][key] = PBXList(self[base][key])
            if self[base][key].add(flag):
                # Drop empty entries introduced by string promotion.
                self[base][key] = [e for e in self[base][key] if e]
                modified = True
        return modified

    def add_other_cflags(self, flags):
        """Append OTHER_CFLAGS entries; True if anything was added."""
        return self._add_flags('OTHER_CFLAGS', flags)

    def add_other_ldflags(self, flags):
        """Append OTHER_LDFLAGS entries; True if anything was added."""
        return self._add_flags('OTHER_LDFLAGS', flags)

    def remove_other_ldflags(self, flags):
        """Remove OTHER_LDFLAGS entries; True if anything was removed."""
        modified = False
        base = 'buildSettings'
        key = 'OTHER_LDFLAGS'
        if isinstance(flags, basestring):
            flags = PBXList(flags)
        if base in self:  # there are flags, so we can "remove" something
            for flag in flags:
                if key not in self[base]:
                    return False
                elif isinstance(self[base][key], basestring):
                    self[base][key] = PBXList(self[base][key])
                if self[base][key].remove(flag):
                    self[base][key] = [e for e in self[base][key] if e]
                    modified = True
        return modified
class XCConfigurationList(PBXType):
    # Marker subclass: the configuration list's content lives in its dict data.
    pass
class XcodeProject(PBXDict):
    """In-memory representation of a project.pbxproj file."""

    # External tool used for plist validation/conversion.
    plutil_path = 'plutil'
    # Extensions treated as opaque bundles rather than folders to recurse into.
    special_folders = ['.bundle', '.framework', '.xcodeproj']

    def __init__(self, d=None, path=None):
        if not path:
            path = os.path.join(os.getcwd(), 'project.pbxproj')
        self.pbxproj_path = os.path.abspath(path)
        # Source root is the parent of the .xcodeproj directory.
        self.source_root = os.path.abspath(os.path.join(os.path.split(path)[0], '..'))
        IterableUserDict.__init__(self, d)
        self.data = PBXDict(self.data)
        self.objects = self.get('objects')
        # Set to True by any mutating operation.
        self.modified = False
        root_id = self.get('rootObject')
        if root_id:
            self.root_object = self.objects[root_id]
            root_group_id = self.root_object.get('mainGroup')
            self.root_group = self.objects[root_group_id]
        else:
            print "error: project has no root object"
            self.root_object = None
            self.root_group = None
        # Record each object's guid on the object itself.
        for k, v in self.objects.iteritems():
            v.id = k
def add_other_cflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_other_cflags(flags):
self.modified = True
def add_other_ldflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_other_ldflags(flags):
self.modified = True
def remove_other_ldflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.remove_other_ldflags(flags):
self.modified = True
def add_header_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_header_search_paths(paths, recursive):
self.modified = True
def add_framework_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_framework_search_paths(paths, recursive):
self.modified = True
def add_library_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_library_search_paths(paths, recursive):
self.modified = True
# TODO: need to return value if project has been modified
    def get_obj(self, id):
        """Look up an object by guid; None when absent."""
        return self.objects.get(id)

    def get_ids(self):
        """All object guids in the project."""
        return self.objects.keys()

    def get_files_by_os_path(self, os_path, tree='SOURCE_ROOT'):
        """File references whose path and sourceTree both match."""
        files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference'
                 and f.get('path') == os_path
                 and f.get('sourceTree') == tree]
        return files

    def get_files_by_name(self, name, parent=None):
        """File references by display name, optionally restricted to a parent group."""
        if parent:
            files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference'
                     and f.get('name') == name
                     and parent.has_child(f)]
        else:
            files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference'
                     and f.get('name') == name]
        return files

    def get_build_files(self, id):
        """PBXBuildFile objects whose fileRef points at the given guid."""
        files = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile'
                 and f.get('fileRef') == id]
        return files

    def get_groups_by_name(self, name, parent=None):
        """Groups by display name, optionally restricted to a parent group."""
        if parent:
            groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup'
                      and g.get_name() == name
                      and parent.has_child(g)]
        else:
            groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup'
                      and g.get_name() == name]
        return groups
    def get_or_create_group(self, name, path=None, parent=None):
        """Return an existing child group of `parent` named `name`, or create it."""
        if not name:
            return None
        if not parent:
            parent = self.root_group
        elif not isinstance(parent, PBXGroup):
            # assume it's an id
            parent = self.objects.get(parent, self.root_group)
        groups = self.get_groups_by_name(name)
        for grp in groups:
            if parent.has_child(grp.id):
                return grp
        grp = PBXGroup.Create(name, path)
        parent.add_child(grp)
        self.objects[grp.id] = grp
        self.modified = True
        return grp

    def get_groups_by_os_path(self, path):
        """Groups whose backing folder resolves to the given filesystem path."""
        path = os.path.abspath(path)
        groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup'
                  and os.path.abspath(g.get('path', '/dev/null')) == path]
        return groups

    def get_build_phases(self, phase_name):
        """All objects whose isa equals phase_name (also used to find targets)."""
        phases = [p for p in self.objects.values() if p.get('isa') == phase_name]
        return phases

    def get_relative_path(self, os_path):
        """Path relative to the project's source root."""
        return os.path.relpath(os_path, self.source_root)

    def verify_files(self, file_list, parent=None):
        # returns list of files not in the current project.
        if not file_list:
            return []
        if parent:
            exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list and parent.has_child(f)]
        else:
            exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list]
        return set(file_list).difference(exists_list)
def add_run_script(self, target, script=None):
result = []
targets = [t for t in self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget') if t.get('name') == target]
if len(targets) != 0 :
script_phase = PBXShellScriptBuildPhase.Create(script)
for t in targets:
skip = False
for buildPhase in t['buildPhases']:
if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[buildPhase].get('shellScript') == script:
skip = True
if not skip:
t['buildPhases'].add(script_phase.id)
self.objects[script_phase.id] = script_phase
result.append(script_phase)
return result
def add_run_script_all_targets(self, script=None):
result = []
targets = self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget')
if len(targets) != 0 :
script_phase = PBXShellScriptBuildPhase.Create(script)
for t in targets:
skip = False
for buildPhase in t['buildPhases']:
if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[buildPhase].get('shellScript') == script:
skip = True
if not skip:
t['buildPhases'].add(script_phase.id)
self.objects[script_phase.id] = script_phase
result.append(script_phase)
return result
def add_folder(self, os_path, parent=None, excludes=None, recursive=True, create_build_files=True):
if not os.path.isdir(os_path):
return []
if not excludes:
excludes = []
results = []
if not parent:
parent = self.root_group
elif not isinstance(parent, PBXGroup):
# assume it's an id
parent = self.objects.get(parent, self.root_group)
path_dict = {os.path.split(os_path)[0]: parent}
special_list = []
for (grp_path, subdirs, files) in os.walk(os_path):
parent_folder, folder_name = os.path.split(grp_path)
parent = path_dict.get(parent_folder, parent)
if [sp for sp in special_list if parent_folder.startswith(sp)]:
continue
if folder_name.startswith('.'):
special_list.append(grp_path)
continue
if os.path.splitext(grp_path)[1] in XcodeProject.special_folders:
# if this file has a special extension (bundle or framework mainly) treat it as a file
special_list.append(grp_path)
new_files = self.verify_files([folder_name], parent=parent)
if new_files:
results.extend(self.add_file(grp_path, parent, create_build_files=create_build_files))
continue
# create group
grp = self.get_or_create_group(folder_name, path=self.get_relative_path(grp_path), parent=parent)
path_dict[grp_path] = grp
results.append(grp)
file_dict = {}
for f in files:
if f[0] == '.' or [m for m in excludes if re.match(m, f)]:
continue
kwds = {
'create_build_files': create_build_files,
'parent': grp,
'name': f
}
f_path = os.path.join(grp_path, f)
file_dict[f_path] = kwds
new_files = self.verify_files([n.get('name') for n in file_dict.values()], parent=grp)
add_files = [(k, v) for k, v in file_dict.items() if v.get('name') in new_files]
for path, kwds in add_files:
kwds.pop('name', None)
self.add_file(path, **kwds)
if not recursive:
break
for r in results:
self.objects[r.id] = r
return results
def path_leaf(self, path):
    """Return the last component of *path*, even when it ends in a separator."""
    split_result = ntpath.split(path)
    if split_result[1]:
        return split_result[1]
    return ntpath.basename(split_result[0])
def add_file_if_doesnt_exist(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False):
    """Add *f_path* unless some project object already has the same file name."""
    for existing in self.objects.values():
        if 'path' not in existing:
            continue
        if self.path_leaf(f_path) == self.path_leaf(existing.get('path')):
            return []
    return self.add_file(f_path, parent, tree, create_build_files, weak, ignore_unknown_type=ignore_unknown_type)
def add_file(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False):
    """Add a single file reference (and, optionally, build files) to the project.

    Absolute paths are made relative to the source root when tree is
    'SOURCE_ROOT', otherwise the tree is switched to '<absolute>'.
    Returns the list of objects created (file ref plus any build files);
    an absolute path that does not exist yields an empty list.
    """
    results = []
    abs_path = ''
    if os.path.isabs(f_path):
        abs_path = f_path
        if not os.path.exists(f_path):
            return results
        elif tree == 'SOURCE_ROOT':
            f_path = os.path.relpath(f_path, self.source_root)
        else:
            tree = '<absolute>'
    if not parent:
        parent = self.root_group
    elif not isinstance(parent, PBXGroup):
        # assume it's an id
        parent = self.objects.get(parent, self.root_group)
    file_ref = PBXFileReference.Create(f_path, tree, ignore_unknown_type=ignore_unknown_type)
    parent.add_child(file_ref)
    results.append(file_ref)
    # create a build file for the file ref
    if file_ref.build_phase and create_build_files:
        phases = self.get_build_phases(file_ref.build_phase)
        for phase in phases:
            build_file = PBXBuildFile.Create(file_ref, weak=weak)
            phase.add_build_file(build_file)
            results.append(build_file)
        # For a framework/library added by absolute path, also register the
        # containing directory as a search path: a regular file is treated as
        # a library, a directory (e.g. Foo.framework) as a framework.
        if abs_path and tree == 'SOURCE_ROOT' \
                and os.path.isfile(abs_path) \
                and file_ref.build_phase == 'PBXFrameworksBuildPhase':
            library_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0])
            self.add_library_search_paths([library_path], recursive=False)
        if abs_path and tree == 'SOURCE_ROOT' \
                and not os.path.isfile(abs_path) \
                and file_ref.build_phase == 'PBXFrameworksBuildPhase':
            framework_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0])
            self.add_framework_search_paths([framework_path, '$(inherited)'], recursive=False)
    # Register created objects and mark the project dirty.
    for r in results:
        self.objects[r.id] = r
    if results:
        self.modified = True
    return results
def check_and_repair_framework(self, base):
    """Recreate the conventional top-level symlinks of a .framework bundle
    (Headers, Versions/Current and the binary) when they are missing."""
    name = os.path.basename(base)
    if ".framework" not in name:
        return
    stem = name[:-len(".framework")]
    # (link to create, relative target) pairs, in the original repair order.
    links = [
        (os.path.join(base, "Headers"), "Versions/A/Headers"),
        (os.path.join(base, "Versions/Current"), "A"),
        (os.path.join(base, stem), "Versions/A/" + stem),
    ]
    for link_path, link_target in links:
        if not os.path.exists(link_path):
            os.symlink(link_target, link_path)
def remove_group(self, grp):
    # Not implemented: removing a group is currently a no-op placeholder.
    pass
def remove_file(self, id, recursive=True):
    """Remove the object *id* (a GUID string, or an object exposing ``.id``)
    from the project; with *recursive* also detach it from every PBXGroup.
    """
    if not PBXType.IsGuid(id):
        id = id.id
    if id in self.objects:
        # Bug fix: the object table is a UserDict-style mapping, which has no
        # .remove() method — the old call raised AttributeError. Delete by key.
        del self.objects[id]
        if recursive:
            groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup']
            for group in groups:
                if id in group['children']:
                    group.remove_child(id)
        self.modified = True
def move_file(self, id, dest_grp=None):
    # Not implemented: moving a file between groups is a no-op placeholder.
    pass
def apply_patch(self, patch_path, xcode_path):
    # Apply a unified diff to the generated Xcode project directory using the
    # external `patch` tool. Returns patch's exit status, or None when the
    # inputs are invalid. (Python 2 print statements — file predates py3.)
    if not os.path.isfile(patch_path) or not os.path.isdir(xcode_path):
        print 'ERROR: couldn\'t apply "%s" to "%s"' % (patch_path, xcode_path)
        return
    print 'applying "%s" to "%s"' % (patch_path, xcode_path)
    # --forward ignores already-applied patches instead of failing.
    return subprocess.call(['patch', '-p1', '--forward', '--directory=%s' % xcode_path, '--input=%s' % patch_path])
def apply_mods(self, mod_dict, default_path=None):
    """Apply a modification description to the project.

    *mod_dict* keys (case-insensitive): 'group', 'excludes', 'compiler_flags',
    'patches', 'folders', 'headerpaths', 'librarypaths', 'other_cflags',
    'other_ldflags', 'libs', 'frameworks', 'files'. Relative paths are
    resolved against *default_path* (defaults to the current directory).
    """
    if not default_path:
        default_path = os.getcwd()
    # Normalize all keys to lower case. NOTE: relies on Python 2's
    # dict.keys() returning a list snapshot, making the pop/insert safe.
    keys = mod_dict.keys()
    for k in keys:
        v = mod_dict.pop(k)
        mod_dict[k.lower()] = v
    parent = mod_dict.pop('group', None)
    if parent:
        parent = self.get_or_create_group(parent)
    excludes = mod_dict.pop('excludes', [])
    if excludes:
        excludes = [re.compile(e) for e in excludes]
    compiler_flags = mod_dict.pop('compiler_flags', {})
    for k, v in mod_dict.items():
        if k == 'patches':
            for p in v:
                if not os.path.isabs(p):
                    p = os.path.join(default_path, p)
                self.apply_patch(p, self.source_root)
        elif k == 'folders':
            # get and compile excludes list
            # do each folder individually
            for folder in v:
                kwds = {}
                # if path contains ':' remove it and set recursive to False
                if ':' in folder:
                    args = folder.split(':')
                    kwds['recursive'] = False
                    folder = args.pop(0)
                if os.path.isabs(folder) and os.path.isdir(folder):
                    pass
                else:
                    folder = os.path.join(default_path, folder)
                    if not os.path.isdir(folder):
                        continue
                if parent:
                    kwds['parent'] = parent
                if excludes:
                    kwds['excludes'] = excludes
                self.add_folder(folder, **kwds)
        elif k == 'headerpaths' or k == 'librarypaths':
            # Resolve each path and register it as a recursive ('**') search path.
            paths = []
            for p in v:
                if p.endswith('/**'):
                    p = os.path.split(p)[0]
                if not os.path.isabs(p):
                    p = os.path.join(default_path, p)
                if not os.path.exists(p):
                    continue
                p = self.get_relative_path(p)
                paths.append(os.path.join('$(SRCROOT)', p, "**"))
            if k == 'headerpaths':
                self.add_header_search_paths(paths)
            else:
                self.add_library_search_paths(paths)
        elif k == 'other_cflags':
            self.add_other_cflags(v)
        elif k == 'other_ldflags':
            self.add_other_ldflags(v)
        elif k == 'libs' or k == 'frameworks' or k == 'files':
            paths = {}
            for p in v:
                kwds = {}
                # 'name:weak' marks the entry for weak linking.
                if ':' in p:
                    args = p.split(':')
                    p = args.pop(0)
                    if 'weak' in args:
                        kwds['weak'] = True
                file_path = os.path.join(default_path, p)
                search_path, file_name = os.path.split(file_path)
                if [m for m in excludes if re.match(m, file_name)]:
                    continue
                # The file name may itself be a regex matching several files.
                try:
                    expr = re.compile(file_name)
                except re.error:
                    expr = None
                if expr and os.path.isdir(search_path):
                    file_list = os.listdir(search_path)
                    for f in file_list:
                        if [m for m in excludes if re.match(m, f)]:
                            continue
                        if re.search(expr, f):
                            kwds['name'] = f
                            paths[os.path.join(search_path, f)] = kwds
                            p = None
                if k == 'libs':
                    kwds['parent'] = self.get_or_create_group('Libraries', parent=parent)
                elif k == 'frameworks':
                    kwds['parent'] = self.get_or_create_group('Frameworks', parent=parent)
                if p:
                    # Single (non-regex-matched) entry: system libs/frameworks
                    # are rooted in the SDK, plain files must exist on disk.
                    kwds['name'] = file_name
                    if k == 'libs':
                        p = os.path.join('usr', 'lib', p)
                        kwds['tree'] = 'SDKROOT'
                    elif k == 'frameworks':
                        p = os.path.join('System', 'Library', 'Frameworks', p)
                        kwds['tree'] = 'SDKROOT'
                    elif k == 'files' and not os.path.exists(file_path):
                        # don't add non-existent files to the project.
                        continue
                    paths[p] = kwds
            new_files = self.verify_files([n.get('name') for n in paths.values()])
            add_files = [(k, v) for k, v in paths.items() if v.get('name') in new_files]
            for path, kwds in add_files:
                kwds.pop('name', None)
                if 'parent' not in kwds and parent:
                    kwds['parent'] = parent
                self.add_file(path, **kwds)
    # Attach per-file compiler flags to the matching build files.
    if compiler_flags:
        for k, v in compiler_flags.items():
            filerefs = []
            for f in v:
                filerefs.extend([fr.id for fr in self.objects.values() if fr.get('isa') == 'PBXFileReference'
                                 and fr.get('name') == f])
            buildfiles = [bf for bf in self.objects.values() if bf.get('isa') == 'PBXBuildFile'
                          and bf.get('fileRef') in filerefs]
            for bf in buildfiles:
                if bf.add_compiler_flag(k):
                    self.modified = True
def backup(self, file_name=None, backup_name=None):
    """Copy the project file to a backup; the default backup name carries a
    ddmmyy-HHMMSS timestamp suffix."""
    source = file_name if file_name else self.pbxproj_path
    if backup_name:
        destination = backup_name
    else:
        timestamp = datetime.datetime.now().strftime('%d%m%y-%H%M%S')
        destination = "%s.%s.backup" % (source, timestamp)
    shutil.copy2(source, destination)
def save(self, file_name=None, old_format=False):
    """Write the project to disk: new (Xcode 3.2) format by default, legacy
    XML plist format when *old_format* is true.

    Bug fix: previously dispatched to ``self.saveFormatXML``, which is not
    defined (only ``save_format_xml`` exists), so saving in the old format
    raised AttributeError. Call the snake_case implementations directly.
    """
    if old_format:
        self.save_format_xml(file_name)
    else:
        self.save_new_format(file_name)
def saveFormat3_2(self, file_name=None):
    """Alias for backward compatibility"""
    # Legacy camelCase entry point kept for old callers; delegates directly.
    self.save_new_format(file_name)
def save_format_xml(self, file_name=None):
    """Saves in old (xml) format"""
    if not file_name:
        file_name = self.pbxproj_path
    # This code is adapted from plistlib.writePlist
    # NOTE: only the <plist> wrapper is written here; PBXWriter emits the
    # body (no XML declaration/doctype line is produced).
    with open(file_name, "w") as f:
        writer = PBXWriter(f)
        writer.writeln("<plist version=\"1.0\">")
        writer.writeValue(self.data)
        writer.writeln("</plist>")
def save_new_format(self, file_name=None):
    """Save in Xcode 3.2 compatible (new) format"""
    if not file_name:
        file_name = self.pbxproj_path
    # process to get the section's info and names
    # 'sections' groups objects by isa type; 'uuids' maps each GUID to the
    # human-readable comment Xcode prints next to it.
    objs = self.data.get('objects')
    sections = dict()
    uuids = dict()
    for key in objs:
        l = list()
        if objs.get(key).get('isa') in sections:
            l = sections.get(objs.get(key).get('isa'))
        l.append(tuple([key, objs.get(key)]))
        sections[objs.get(key).get('isa')] = l
        if 'name' in objs.get(key):
            uuids[key] = objs.get(key).get('name')
        elif 'path' in objs.get(key):
            uuids[key] = objs.get(key).get('path')
        else:
            if objs.get(key).get('isa') == 'PBXProject':
                # NOTE(review): the project name is hard-coded here — this
                # writer is specific to Unity's "Unity-iPhone" project.
                uuids[objs.get(key).get('buildConfigurationList')] = 'Build configuration list for PBXProject "Unity-iPhone"'
            elif objs.get(key).get('isa')[0:3] == 'PBX':
                # e.g. 'PBXSourcesBuildPhase' -> 'Sources' (strip PBX prefix
                # and the trailing 'BuildPhase').
                uuids[key] = objs.get(key).get('isa')[3:-10]
            else:
                uuids[key] = 'Build configuration list for PBXNativeTarget "TARGET_NAME"'
    ro = self.data.get('rootObject')
    uuids[ro] = 'Project Object'
    for key in objs:
        # transitive references (used in the BuildFile section)
        if 'fileRef' in objs.get(key) and objs.get(key).get('fileRef') in uuids:
            uuids[key] = uuids[objs.get(key).get('fileRef')]
        # transitive reference to the target name (used in the Native target section)
        if objs.get(key).get('isa') == 'PBXNativeTarget':
            uuids[objs.get(key).get('buildConfigurationList')] = uuids[objs.get(key).get('buildConfigurationList')].replace('TARGET_NAME', uuids[key])
    self.uuids = uuids
    self.sections = sections
    out = open(file_name, 'w')
    out.write('// !$*UTF8*$!\n')
    self._printNewXCodeFormat(out, self.data, '', enters=True)
    out.close()
@classmethod
def addslashes(cls, s):
    """Return *s* with plist-special characters backslash-escaped."""
    escape_map = {'"': '\\"', "'": "\\'", "\0": "\\\0", "\\": "\\\\", "\n": "\\n"}
    escaped_chars = []
    for ch in s:
        escaped_chars.append(escape_map.get(ch, ch))
    return ''.join(escaped_chars)
def _printNewXCodeFormat(self, out, root, deep, enters=True):
    """Recursively serialize *root* in Xcode's OpenStep-style plist syntax.

    *deep* is the current tab prefix; *enters* controls whether entries are
    written one per line or packed onto a single line (per-section choice).
    Python 2 only: relies on IterableUserDict/UserList, iterkeys and cmp.
    """
    if isinstance(root, IterableUserDict):
        out.write('{')
        if enters:
            out.write('\n')
        isa = root.pop('isa', '')
        if isa != '':  # keep the isa in the first spot
            if enters:
                out.write('\t' + deep)
            out.write('isa = ')
            self._printNewXCodeFormat(out, isa, '\t' + deep, enters=enters)
            out.write(';')
            if enters:
                out.write('\n')
            else:
                out.write(' ')
        for key in sorted(root.iterkeys()):  # keep the same order as Apple.
            if enters:
                out.write('\t' + deep)
            # Bare identifiers are written unquoted; anything else is quoted.
            if re.match(regex, key).group(0) == key:
                out.write(key.encode("utf-8") + ' = ')
            else:
                out.write('"' + key.encode("utf-8") + '" = ')
            if key == 'objects':
                out.write('{')  # open the objects section
                if enters:
                    out.write('\n')
                #root.remove('objects') # remove it to avoid problems
                # Section order mirrors Xcode's own output; the bool selects
                # multi-line (True) vs single-line (False) entry formatting.
                sections = [
                    ('PBXBuildFile', False),
                    ('PBXCopyFilesBuildPhase', True),
                    ('PBXFileReference', False),
                    ('PBXFrameworksBuildPhase', True),
                    ('PBXGroup', True),
                    ('PBXAggregateTarget', True),
                    ('PBXNativeTarget', True),
                    ('PBXProject', True),
                    ('PBXResourcesBuildPhase', True),
                    ('PBXShellScriptBuildPhase', True),
                    ('PBXSourcesBuildPhase', True),
                    ('XCBuildConfiguration', True),
                    ('XCConfigurationList', True),
                    ('PBXTargetDependency', True),
                    ('PBXVariantGroup', True),
                    ('PBXReferenceProxy', True),
                    ('PBXContainerItemProxy', True)]
                for section in sections:  # iterate over the sections
                    if self.sections.get(section[0]) is None:
                        continue
                    out.write('\n/* Begin %s section */' % section[0].encode("utf-8"))
                    self.sections.get(section[0]).sort(cmp=lambda x, y: cmp(x[0], y[0]))
                    for pair in self.sections.get(section[0]):
                        key = pair[0]
                        value = pair[1]
                        out.write('\n')
                        if enters:
                            out.write('\t\t' + deep)
                        out.write(key.encode("utf-8"))
                        if key in self.uuids:
                            out.write(" /* " + self.uuids[key].encode("utf-8") + " */")
                        out.write(" = ")
                        self._printNewXCodeFormat(out, value, '\t\t' + deep, enters=section[1])
                        out.write(';')
                    out.write('\n/* End %s section */\n' % section[0].encode("utf-8"))
                out.write(deep + '\t}')  # close of the objects section
            else:
                self._printNewXCodeFormat(out, root[key], '\t' + deep, enters=enters)
            out.write(';')
            if enters:
                out.write('\n')
            else:
                out.write(' ')
        root['isa'] = isa  # restore the isa for further calls
        if enters:
            out.write(deep)
        out.write('}')
    elif isinstance(root, UserList):
        out.write('(')
        if enters:
            out.write('\n')
        for value in root:
            if enters:
                out.write('\t' + deep)
            self._printNewXCodeFormat(out, value, '\t' + deep, enters=enters)
            out.write(',')
            if enters:
                out.write('\n')
        if enters:
            out.write(deep)
        out.write(')')
    else:
        # Scalar: unquoted when it is a bare identifier, quoted + escaped
        # otherwise; append the GUID comment when one is known.
        if len(root) > 0 and re.match(regex, root).group(0) == root:
            out.write(root.encode("utf-8"))
        else:
            out.write('"' + XcodeProject.addslashes(root.encode("utf-8")) + '"')
        if root in self.uuids:
            out.write(" /* " + self.uuids[root].encode("utf-8") + " */")
@classmethod
def Load(cls, path):
    """Load a pbxproj file by converting it to XML with plutil first.

    Prefers a bundled 'plutil' binary next to this module, falling back to
    the one on PATH. Returns None when plutil reports a malformed plist.
    """
    cls.plutil_path = os.path.join(os.path.split(__file__)[0], 'plutil')
    if not os.path.isfile(XcodeProject.plutil_path):
        cls.plutil_path = 'plutil'
    # load project by converting to xml and then convert that using plistlib
    p = subprocess.Popen([XcodeProject.plutil_path, '-convert', 'xml1', '-o', '-', path], stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # If the plist was malformed, returncode will be non-zero
    if p.returncode != 0:
        print stdout
        return None
    # NOTE: readPlistFromString is the legacy (pre-3.4/Python 2) plistlib API.
    tree = plistlib.readPlistFromString(stdout)
    return XcodeProject(tree, path)
@classmethod
def LoadFromXML(cls, path):
    # Load a project already in XML plist form (legacy plistlib API).
    tree = plistlib.readPlist(path)
    return XcodeProject(tree, path)
# The code below was adapted from plistlib.py.
class PBXWriter(plistlib.PlistWriter):
    """Plist writer that unwraps PBX containers and XML-escapes text.

    NOTE(review): subclasses plistlib.PlistWriter, which exists only in the
    Python 2 plistlib — this class cannot load on Python 3.
    """
    def writeValue(self, value):
        # PBXList/PBXDict wrap a plain container in .data; serialize that.
        if isinstance(value, (PBXList, PBXDict)):
            plistlib.PlistWriter.writeValue(self, value.data)
        else:
            plistlib.PlistWriter.writeValue(self, value)
    def simpleElement(self, element, value=None):
        """
        We have to override this method to deal with Unicode text correctly.
        Non-ascii characters have to get encoded as character references.
        """
        if value is not None:
            value = _escapeAndEncode(value)
            self.writeln("<%s>%s</%s>" % (element, value, element))
        else:
            self.writeln("<%s/>" % element)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("ascii", "xmlcharrefreplace") # encode as ascii with xml character references | {
"content_hash": "1c77544c0e941df6f32ac0f866653f8b",
"timestamp": "",
"source": "github",
"line_count": 1398,
"max_line_length": 165,
"avg_line_length": 33.05722460658083,
"alnum_prop": 0.5182195871380967,
"repo_name": "SmallPlanetUnity/UnityExternalXcodePlugin",
"id": "4c3252d59350738e1e5dfc17d226a2fa43c5d7d6",
"size": "46214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Editor/mod_pbxproj.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47310"
},
{
"name": "Shell",
"bytes": "1880"
}
],
"symlink_target": ""
} |
import locale
import os

locale.setlocale(locale.LC_ALL, '')

from flask import Flask, request

from nas.api import configure_api
from nas.models import configure_db
from nas.utils.converters import (
    ListConverter, RangeConverter, RangeListConverter
)
# Name used when the caller does not supply one.
DEFAULT_APPNAME = 'nobix-application-server'

def create_app(config=None, app_name=None):
    """Application factory: build the Flask app and wire up its subsystems."""
    resolved_name = app_name if app_name is not None else DEFAULT_APPNAME
    app = Flask(resolved_name, static_folder=None)
    configure_app(app, config)
    configure_db(app)
    # configure_auth(app)
    configure_api(app)
    return app
def configure_app(app, config=None):
    """Load configuration and register converters, CORS handling and a debug route.

    If *config* is given it is used directly; otherwise 'localconfig.LocalConfig'
    is tried, falling back to the Development/Test/Production config selected
    by the DEV / TEST environment variables.

    Bug fix: this function reads the environment via ``os.getenv`` but the
    module never imported ``os`` (NameError on the fallback path); ``os`` is
    now imported at the top of the module.
    """
    if config is not None:
        app.config.from_object(config)
    else:
        try:
            app.config.from_object('localconfig.LocalConfig')
        except ImportError:
            # No local override present: pick a config from environment flags.
            if os.getenv('DEV') == 'yes':
                app.config.from_object('nas.config.DevelopmentConfig')
                app.logger.info("Config: Development")
            elif os.getenv('TEST') == 'yes':
                app.config.from_object('nas.config.TestConfig')
                app.logger.info("Config: Test")
            else:
                app.config.from_object('nas.config.ProductionConfig')
                app.logger.info("Config: Production")

    # Add additional converters
    app.url_map.converters['list'] = ListConverter
    app.url_map.converters['range'] = RangeConverter
    app.url_map.converters['rangelist'] = RangeListConverter

    @app.after_request
    def add_cors_headers(response):
        # Reflect the request's Origin so credentialed CORS requests work.
        if 'Origin' in request.headers:
            a = response.headers.add
            a('Access-Control-Allow-Origin', request.headers['Origin'])
            a('Access-Control-Allow-Credentials', 'true')
            a('Access-Control-Allow-Headers', 'Content-Type,Authorization')
            a('Access-Control-Allow-Methods', 'GET,PUT,PATCH,POST,DELETE')
        return response

    # only for debug purpose
    @app.route('/urls')
    def show_urls():
        """Render a plain-text, column-aligned table of all URL rules."""
        column_headers = ('Rule', 'Endpoint', 'Methods')
        order = 'rule'
        rows = [('-'*4, '-'*8, '-'*9)]  # minimal values to take
        rules = sorted(app.url_map.iter_rules(),
                       key=lambda rule: getattr(rule, order))
        for rule in rules:
            rows.append((rule.rule, rule.endpoint, ', '.join(rule.methods)))
        # Column widths are sized to the longest entry in each column.
        rule_l = len(max(rows, key=lambda r: len(r[0]))[0])
        ep_l = len(max(rows, key=lambda r: len(r[1]))[1])
        meth_l = len(max(rows, key=lambda r: len(r[2]))[2])
        str_template = '%-' + str(rule_l) + 's' + \
                       ' %-' + str(ep_l) + 's' + \
                       ' %-' + str(meth_l) + 's'
        table_width = rule_l + 2 + ep_l + 2 + meth_l
        out = (str_template % column_headers) + '\n' + '-' * table_width
        for row in rows[1:]:
            out += '\n' + str_template % row
        return out+'\n', 200, {'Content-Type': 'text/table'}
| {
"content_hash": "ba72547411b2906aa9ff8c2d35b2078b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 76,
"avg_line_length": 33.10227272727273,
"alnum_prop": 0.5787847579814624,
"repo_name": "coyotevz/nas",
"id": "6470b498b46387cb47ee5131951d3c9b48c75a47",
"size": "2938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nas/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37166"
}
],
"symlink_target": ""
} |
import unittest
from gameplay.rock import Rock
from gameplay.player import Player
from gameplay.bullet import Bullet
from gameplay.field import Field
from gameplay.game import Game
from gui.user_interface import UserInterface
from gui.bullet_ui import BulletUI
from gui.powerup_ui import PowerupUI, PowerupType
class RockTest(unittest.TestCase):
    """Unit tests for the Rock entity."""

    INITIAL_SPEED = 50

    def setUp(self):
        self.rock = Rock()

    def test_getting_initial_rock_speed(self):
        self.assertEqual(self.INITIAL_SPEED, self.rock.rock_speed)

    def test_setting_new_rock_speed(self):
        self.rock.set_speed(60)
        self.assertEqual(60, self.rock.rock_speed)
class PlayerTest(unittest.TestCase):
    """Unit tests for the Player entity."""

    def setUp(self):
        self.player = Player()

    def test_check_initial_player_invinciblity(self):
        # Players start vulnerable.
        self.assertEqual(False, self.player.is_player_invincible)
        self.assertNotEqual(True, self.player.is_player_invincible)

    def test_getting_initial_player_speed(self):
        initial_speed = 10
        self.assertEqual(initial_speed, self.player.player_speed)

    def test_setting_player_invinciblity(self):
        # set_player_invinciblity() toggles: on after one call, off after two.
        self.player.set_player_invinciblity()
        self.assertEqual(True, self.player.is_player_invincible)
        self.assertNotEqual(False, self.player.is_player_invincible)
        self.player.set_player_invinciblity()
        self.assertEqual(False, self.player.is_player_invincible)
        self.assertNotEqual(True, self.player.is_player_invincible)
class BulletTest(unittest.TestCase):
    """Unit tests for the Bullet entity."""

    def setUp(self):
        self.bullet = Bullet()

    def test_getting_initial_bullet_speed(self):
        expected = 50
        self.assertEqual(expected, self.bullet.bullet_speed)

    def test_setting_new_bullet_speed(self):
        target_speed = 60
        self.bullet.set_speed(target_speed)
        self.assertEqual(target_speed, self.bullet.bullet_speed)
class GameTest(unittest.TestCase):
    """Unit tests for Game state transitions, speeds and entity accessors."""

    def setUp(self):
        self.field = Field(1080, 800)
        self.game = Game(self.field)

    def check_game_field_dimensions(self):
        # NOTE(review): missing the 'test_' prefix, so unittest never runs it.
        game_dimensions = (self.field.width, self.field.height)
        self.assertEqual(game_dimensions, self.game.dimensions)

    def test_checking_is_game_lost(self):
        self.assertEqual(False, self.game.is_lost)

    def test_checking_is_game_won(self):
        self.assertEqual(False, self.game.is_won)

    def test_checking_is_game_paused(self):
        self.assertEqual(False, self.game.is_paused)

    def test_checking_is_game_running(self):
        self.assertEqual(True, self.game.is_running)

    def test_checking_is_game_lost_after_losing(self):
        self.game.lose()
        self.assertEqual(True, self.game.is_lost)
        self.assertEqual(False, self.game.is_running)

    def test_checking_is_game_won_after_winning(self):
        self.game.win()
        self.assertEqual(True, self.game.is_won)
        self.assertEqual(False, self.game.is_lost)
        self.assertEqual(False, self.game.is_running)

    def test_checking_is_game_paused_after_pausing(self):
        self.game.pause()
        self.assertEqual(True, self.game.is_paused)
        self.assertEqual(False, self.game.is_running)

    def test_checking_is_game_running_after_resuming(self):
        self.game.resume()
        self.assertEqual(False, self.game.is_paused)
        self.assertEqual(True, self.game.is_running)

    def test_setting_new_game_speed(self):
        new_speed = 300
        self.game.set_speed(new_speed)
        self.assertEqual(new_speed, self.game.game_speed)

    def test_setting_new_rock_speed(self):
        new_speed = 30
        self.game.set_rock_speed(new_speed)
        self.assertEqual(new_speed, self.game.rock_speed)

    def test_leveling_up(self):
        current_level = self.game.level
        self.game.level_up()
        self.assertEqual(current_level + 1, self.game.level)

    def test_getting_initial_game_speed(self):
        initial_speed = 630
        self.assertEqual(initial_speed, self.game.game_speed)

    def test_getting_initial_bullet_speed(self):
        initial_speed = 50
        self.assertEqual(initial_speed, self.game.bullet_speed)

    def test_getting_initial_rock_speed(self):
        initial_speed = 50
        self.assertEqual(initial_speed, self.game.rock_speed)

    def test_getting_initial_level_speed(self):
        initial_speed = 30000
        self.assertEqual(initial_speed, self.game.level_speed)

    def test_getting_initial_level(self):
        initial_level = 1
        self.assertEqual(initial_level, self.game.level)

    # The following accessors must expose the field's entities unchanged.
    def test_get_rock(self):
        rock = self.field.rock
        self.assertEqual(rock, self.game.rock)

    def test_get_bullet(self):
        bullet = self.field.bullet
        self.assertEqual(bullet, self.game.bullet)

    def test_get_powerup(self):
        powerup = self.field.powerup
        self.assertEqual(powerup, self.game.powerup)

    def test_get_player(self):
        player = self.field.player
        self.assertEqual(player, self.game.player)

    def test_get_rocks(self):
        rocks = self.field.rocks
        self.assertEqual(rocks, self.game.rocks)

    def test_get_bullets(self):
        bullets = self.field.bullets
        self.assertEqual(bullets, self.game.bullets)

    def test_get_powerups(self):
        powerups = self.field.powerups
        self.assertEqual(powerups, self.game.powerups)

    def test_resetting_game(self):
        # Reset must restore all initial speed/level values.
        self.game.reset_game_values()
        self.assertEqual(630, self.game.game_speed)
        self.assertEqual(30000, self.game.level_speed)
        self.assertEqual(1, self.game.level)
        self.assertEqual(50, self.game.rock_speed)
class FieldTest(unittest.TestCase):
    """Checks the initial speeds of the entities a Field creates."""

    def setUp(self):
        self.field = Field(1080, 800)

    def test_getting_initial_bullet_speed(self):
        self.assertEqual(50, self.field.bullet.bullet_speed)

    def test_getting_initial_rock_speed(self):
        self.assertEqual(50, self.field.rock.rock_speed)

    def test_getting_initial_player_speed(self):
        self.assertEqual(10, self.field.player.player_speed)
class GUITest(unittest.TestCase):
    """GUI tests sharing one UserInterface instance across all test methods.

    NOTE(review): this state is built at class-definition (import) time and
    ui.main_loop() runs during import — tests mutate the shared game, so
    they are order-dependent.
    """
    field = Field(1080, 800)
    game = Game(field)
    ui = UserInterface(game)
    ui.main_loop()
    main_window = UserInterface.get_main_window()

    def test_check_main_window_dimensions(self):
        main_window_height = UserInterface.get_main_window().height
        main_window_width = UserInterface.get_main_window().width
        self.assertEqual(1080, main_window_height)
        self.assertEqual(800, main_window_width)

    def test_bullet_moving_to_target(self):
        # A bullet moves upward, i.e. its y coordinate decreases.
        self.field_ui = GUITest.main_window.field_ui
        self.player_ui = self.field_ui.player_ui
        self.bullet_ui = BulletUI(GUITest.main_window,
                                  GUITest.game, self.player_ui)
        initial_y = self.bullet_ui.y
        self.bullet_ui.move_to_target()
        self.assertGreater(initial_y, self.bullet_ui.y)

    def test_check_pausing_the_game(self):
        self.field_ui = GUITest.main_window.field_ui
        self.field_ui.pause_game()
        self.assertEqual(True, GUITest.game.is_paused)

    def test_check_resuming_the_game(self):
        self.field_ui = GUITest.main_window.field_ui
        self.field_ui.resume_game()
        self.assertEqual(True, GUITest.game.is_running)

    def test_check_leveling_up(self):
        self.game = GUITest.game
        current_level = self.game.level
        self.game.level_up()
        self.assertLess(current_level, self.game.level)

    def test_check_winning_the_game(self):
        GUITest.main_window.field_ui.win_the_game()
        self.assertEqual(True, GUITest.game.is_won)

    def test_check_powerup_dropping_down(self):
        # A powerup falls, i.e. its y coordinate increases.
        self.field_ui = GUITest.main_window.field_ui
        self.powerup_ui = PowerupUI(GUITest.main_window, GUITest.game,
                                    PowerupType.player_invinciblility)
        initial_y = self.powerup_ui.y
        self.powerup_ui.drop_down()
        self.assertLess(initial_y, self.powerup_ui.y)
if __name__ == '__main__':
    # Run the whole suite when the module is executed directly.
    unittest.main()
| {
"content_hash": "7a1f311fd475aac6bb6f03e0b98d820d",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 71,
"avg_line_length": 34.15289256198347,
"alnum_prop": 0.667513611615245,
"repo_name": "hristy93/FallingRocks",
"id": "d9d1a7472f40a83ebb5e270aa45a575765a2c106",
"size": "8265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gameplay/falling_rocks_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72991"
}
],
"symlink_target": ""
} |
"""
Support for ATLAS as toolchain linear algebra library.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.tools.toolchain.linalg import LinAlg
class Atlas(LinAlg):
    """
    Provides ATLAS BLAS/LAPACK support.
    LAPACK is a build dependency only
    """
    BLAS_MODULE_NAME = ['ATLAS']
    # Serial BLAS libraries provided by ATLAS.
    BLAS_LIB = ["cblas", "f77blas", "atlas"]
    # Threaded ('pt'-prefixed) BLAS variants.
    BLAS_LIB_MT = ["ptcblas", "ptf77blas", "atlas"]
    # LAPACK also comes from the ATLAS module.
    LAPACK_MODULE_NAME = ['ATLAS']
    LAPACK_LIB = ['lapack']
| {
"content_hash": "14f8adaeb444274ed42640a4a3f400a6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 54,
"avg_line_length": 24.476190476190474,
"alnum_prop": 0.6634241245136187,
"repo_name": "ULHPC/modules",
"id": "f1320600acb5b8a3f47d07f911b371d4a94967d1",
"size": "1572",
"binary": false,
"copies": "9",
"ref": "refs/heads/devel",
"path": "easybuild/easybuild-framework/easybuild/toolchains/linalg/atlas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "36174"
},
{
"name": "Perl",
"bytes": "34780"
},
{
"name": "Python",
"bytes": "2711250"
},
{
"name": "Ruby",
"bytes": "932"
},
{
"name": "Shell",
"bytes": "51560"
}
],
"symlink_target": ""
} |
"""Support for Baidu speech service."""
import logging
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Baidu TTS only supports Chinese here.
SUPPORTED_LANGUAGES = ["zh"]
DEFAULT_LANG = "zh"

# Configuration keys for this platform.
CONF_APP_ID = "app_id"
CONF_SECRET_KEY = "secret_key"
CONF_SPEED = "speed"
CONF_PITCH = "pitch"
CONF_VOLUME = "volume"
CONF_PERSON = "person"

# Validated configuration: credentials are required; voice parameters are
# integers constrained to the ranges accepted by the API.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
        vol.Required(CONF_APP_ID): cv.string,
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_SECRET_KEY): cv.string,
        vol.Optional(CONF_SPEED, default=5): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=9)
        ),
        vol.Optional(CONF_PITCH, default=5): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=9)
        ),
        vol.Optional(CONF_VOLUME, default=5): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=15)
        ),
        vol.Optional(CONF_PERSON, default=0): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=4)
        ),
    }
)

# Keys are options in the config file, and Values are options
# required by Baidu TTS API.
_OPTIONS = {
    CONF_PERSON: "per",
    CONF_PITCH: "pit",
    CONF_SPEED: "spd",
    CONF_VOLUME: "vol",
}
SUPPORTED_OPTIONS = [CONF_PERSON, CONF_PITCH, CONF_SPEED, CONF_VOLUME]
def get_engine(hass, config):
    """Set up Baidu TTS component."""
    provider = BaiduTTSProvider(hass, config)
    return provider
class BaiduTTSProvider(Provider):
    """Baidu TTS speech api provider."""

    def __init__(self, hass, conf):
        """Init Baidu TTS service."""
        self.hass = hass
        self._lang = conf.get(CONF_LANG)
        self._codec = "mp3"  # codec reported alongside the returned audio
        self.name = "BaiduTTS"

        # Credentials used to authenticate against the Baidu AIP service.
        self._app_data = {
            "appid": conf.get(CONF_APP_ID),
            "apikey": conf.get(CONF_API_KEY),
            "secretkey": conf.get(CONF_SECRET_KEY),
        }
        # Voice parameters, keyed by the short names the Baidu API expects.
        self._speech_conf_data = {
            _OPTIONS[CONF_PERSON]: conf.get(CONF_PERSON),
            _OPTIONS[CONF_PITCH]: conf.get(CONF_PITCH),
            _OPTIONS[CONF_SPEED]: conf.get(CONF_SPEED),
            _OPTIONS[CONF_VOLUME]: conf.get(CONF_VOLUME),
        }

    @property
    def default_language(self):
        """Return the default language."""
        return self._lang

    @property
    def supported_languages(self):
        """Return a list of supported languages."""
        return SUPPORTED_LANGUAGES

    @property
    def default_options(self):
        """Return a dict including default options."""
        return {
            CONF_PERSON: self._speech_conf_data[_OPTIONS[CONF_PERSON]],
            CONF_PITCH: self._speech_conf_data[_OPTIONS[CONF_PITCH]],
            CONF_SPEED: self._speech_conf_data[_OPTIONS[CONF_SPEED]],
            CONF_VOLUME: self._speech_conf_data[_OPTIONS[CONF_VOLUME]],
        }

    @property
    def supported_options(self):
        """Return a list of supported options."""
        return SUPPORTED_OPTIONS

    def get_tts_audio(self, message, language, options=None):
        """Load TTS from BaiduTTS."""
        from aip import AipSpeech
        aip_speech = AipSpeech(
            self._app_data["appid"],
            self._app_data["apikey"],
            self._app_data["secretkey"],
        )
        if options is None:
            result = aip_speech.synthesis(message, language, 1, self._speech_conf_data)
        else:
            # Overlay per-call options on top of the configured defaults.
            speech_data = self._speech_conf_data.copy()
            for key, value in options.items():
                speech_data[_OPTIONS[key]] = value
            result = aip_speech.synthesis(message, language, 1, speech_data)

        # On failure the client returns a dict describing the error.
        if isinstance(result, dict):
            _LOGGER.error(
                "Baidu TTS error-- err_no:%d; err_msg:%s; err_detail:%s",
                result["err_no"],
                result["err_msg"],
                result["err_detail"],
            )
            return None, None
        return self._codec, result
| {
"content_hash": "77c26cf4c12c0566ab80c613683855b4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 87,
"avg_line_length": 30.68888888888889,
"alnum_prop": 0.5879797248370746,
"repo_name": "Cinntax/home-assistant",
"id": "85737d1affdc00a7a5fb9e8e817758fcade076aa",
"size": "4143",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/baidu/tts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.models import ContentType
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin:
    """Creates one superuser, shared by every test in the class."""

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericAdminViewTest(TestDataMixin, TestCase):
    """Smoke tests for the admin add/change views with generic inlines."""

    def setUp(self):
        self.client.force_login(self.superuser)
        # One Episode with two Media objects attached via GenericForeignKey.
        e = Episode.objects.create(name='This Week in Django')
        self.episode_pk = e.pk
        m = Media(content_object=e, url='http://example.com/podcast.mp3')
        m.save()
        self.mp3_media_pk = m.pk
        m = Media(content_object=e, url='http://example.com/logo.png')
        m.save()
        self.png_media_pk = m.pk

    def test_basic_add_GET(self):
        """
        A smoke test to ensure GET on the add_view works.
        """
        response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
        self.assertEqual(response.status_code, 200)

    def test_basic_edit_GET(self):
        """
        A smoke test to ensure GET on the change_view works.
        """
        response = self.client.get(
            reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
        )
        self.assertEqual(response.status_code, 200)

    def test_basic_add_POST(self):
        """
        A smoke test to ensure POST on add_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
        }
        response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere

    def test_basic_edit_POST(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        # Management form plus the two existing inlines and one empty extra.
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-0-id": str(self.mp3_media_pk),
            "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
            "generic_inline_admin-media-content_type-object_id-1-id": str(self.png_media_pk),
            "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
            "generic_inline_admin-media-content_type-object_id-2-id": "",
            "generic_inline_admin-media-content_type-object_id-2-url": "",
        }
        url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
    """Form-count behaviour of generic inlines under extra/min_num/max_num
    (and their get_* hook equivalents)."""
    factory = RequestFactory()
    def setUp(self):
        self.client.force_login(self.superuser)
    def _create_object(self, model):
        """
        Create a model with an attached Media object via GFK. We can't
        load content via a fixture (since the GenericForeignKey relies on
        content type IDs, which will vary depending on what other tests
        have been run), thus we do it here.
        """
        e = model.objects.create(name='This Week in Django')
        Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
        return e
    def test_no_param(self):
        """
        With one initial form, extra (default) at 3, there should be 4 forms.
        """
        e = self._create_object(Episode)
        response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 4)
        self.assertEqual(formset.initial_form_count(), 1)
    def test_extra_param(self):
        """
        With extra=0, there should be one form.
        """
        class ExtraInline(GenericTabularInline):
            model = Media
            extra = 0
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [ExtraInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 1)
        self.assertEqual(formset.initial_form_count(), 1)
    def test_max_num_param(self):
        """
        With extra=5 and max_num=2, there should be only 2 forms.
        """
        class MaxNumInline(GenericTabularInline):
            model = Media
            extra = 5
            max_num = 2
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [MaxNumInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 2)
        self.assertEqual(formset.initial_form_count(), 1)
    def test_min_num_param(self):
        """
        With extra=3 and min_num=2, there should be five forms.
        """
        class MinNumInline(GenericTabularInline):
            model = Media
            extra = 3
            min_num = 2
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [MinNumInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 5)
        self.assertEqual(formset.initial_form_count(), 1)
    def test_get_extra(self):
        # get_extra() overrides the class-level extra attribute.
        class GetExtraInline(GenericTabularInline):
            model = Media
            extra = 4
            def get_extra(self, request, obj):
                return 2
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [GetExtraInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.extra, 2)
    def test_get_min_num(self):
        # get_min_num() overrides the class-level min_num attribute.
        class GetMinNumInline(GenericTabularInline):
            model = Media
            min_num = 5
            def get_min_num(self, request, obj):
                return 2
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [GetMinNumInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.min_num, 2)
    def test_get_max_num(self):
        # get_max_num() overrides the class-level max_num attribute.
        class GetMaxNumInline(GenericTabularInline):
            model = Media
            extra = 5
            def get_max_num(self, request, obj):
                return 2
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [GetMaxNumInline]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        formset = response.context_data['inline_admin_formsets'][0].formset
        self.assertEqual(formset.max_num, 2)
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
    """Add/delete through a generic inline whose model declares unique_together."""
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_add(self):
        category_id = Category.objects.create(name='male').pk
        post_data = {
            "name": "John Doe",
            # inline data
            "generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
            "generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
            "generic_inline_admin-phonenumber-content_type-object_id-0-category": str(category_id),
        }
        response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def test_delete(self):
        from .models import Contact
        c = Contact.objects.create(name='foo')
        # Attach the PhoneNumber explicitly via content_type/object_id.
        PhoneNumber.objects.create(
            object_id=c.id,
            content_type=ContentType.objects.get_for_model(Contact),
            phone_number="555-555-5555",
        )
        response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk]))
        self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class NoInlineDeletionTest(SimpleTestCase):
    """The MediaPermanentInline's formset must not allow deletion."""
    def test_no_deletion(self):
        inline = MediaPermanentInline(EpisodePermanent, admin_site)
        fake_request = object()
        formset = inline.get_formset(fake_request)
        self.assertFalse(formset.can_delete)
class MockRequest:
    """Bare-bones stand-in for an HttpRequest; carries only attributes set on it."""


class MockSuperUser:
    """Permission-check double: every has_perm() call succeeds."""

    def has_perm(self, perm, obj=None):
        return True


# Shared fake request (with an all-permissive user) used by the
# ModelAdmin unit tests below.
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineModelAdminTest(SimpleTestCase):
    """Unit tests for GenericInlineModelAdmin: get_formset() kwargs,
    Meta.exclude handling, get_fieldsets() and get_inlines() hooks."""
    def setUp(self):
        self.site = AdminSite()
    def test_get_formset_kwargs(self):
        media_inline = MediaInline(Media, AdminSite())
        # Create a formset with default arguments
        formset = media_inline.get_formset(request)
        self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
        self.assertIs(formset.can_order, False)
        # Create a formset with custom keyword arguments
        formset = media_inline.get_formset(request, max_num=100, can_order=True)
        self.assertEqual(formset.max_num, 100)
        self.assertIs(formset.can_order, True)
    def test_custom_form_meta_exclude_with_readonly(self):
        """
        The custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `GenericInlineModelAdmin.readonly_fields`
        and when no `ModelAdmin.exclude` is defined.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            readonly_fields = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['keywords', 'id', 'DELETE'])
    def test_custom_form_meta_exclude(self):
        """
        The custom ModelForm's `Meta.exclude` is respected by
        `GenericInlineModelAdmin.get_formset`, and overridden if
        `ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
        Refs #15907.
        """
        # First with `GenericInlineModelAdmin` -----------------
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            exclude = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # The inline's own exclude wins: 'url' stays, 'description' goes.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['url', 'keywords', 'id', 'DELETE'])
        # Then, only with `ModelForm` -----------------
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['description', 'keywords', 'id', 'DELETE'])
    def test_get_fieldsets(self):
        # get_fieldsets is called when figuring out form fields.
        # Refs #18681.
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                fields = '__all__'
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
            can_delete = False
            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['url', 'description']})]
        ma = MediaInline(Media, self.site)
        form = ma.get_formset(None).form
        self.assertEqual(form._meta.fields, ['url', 'description'])
    def test_get_formsets_with_inlines_returns_tuples(self):
        """
        get_formsets_with_inlines() returns the correct tuples.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class AlternateInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                AlternateInline, MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        inlines = ma.get_inline_instances(request)
        # Each yielded formset class must match the one its inline produces.
        for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
            self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
    def test_get_inline_instances_override_get_inlines(self):
        class MediaInline(GenericTabularInline):
            model = Media
        class AlternateInline(GenericTabularInline):
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = (AlternateInline, MediaInline)
            def get_inlines(self, request, obj):
                # Dispatch on a 'name' attribute stuck onto the request below.
                if hasattr(request, 'name'):
                    if request.name == 'alternate':
                        return self.inlines[:1]
                    elif request.name == 'media':
                        return self.inlines[1:2]
                return []
        ma = EpisodeAdmin(Episode, self.site)
        self.assertEqual(ma.get_inlines(request, None), [])
        self.assertEqual(ma.get_inline_instances(request), [])
        for name, inline_class in (('alternate', AlternateInline), ('media', MediaInline)):
            request.name = name
            # NOTE(review): stray trailing comma below makes this statement a
            # one-element tuple expression; harmless but worth cleaning up.
            self.assertEqual(ma.get_inlines(request, None), (inline_class,)),
            self.assertEqual(type(ma.get_inline_instances(request)[0]), inline_class)
| {
"content_hash": "ee6d6b4265fe6e69153b9ce1e5ce48d8",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 117,
"avg_line_length": 38.16411378555799,
"alnum_prop": 0.6226133822601915,
"repo_name": "ar4s/django",
"id": "fc17b9de7440821d31f1878446f10ec311b09b49",
"size": "17441",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "tests/generic_inline_admin/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from true import *
| {
"content_hash": "a21fc545a75488c170dc9ed4435085c3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "libtrue/libtrue",
"id": "02c21ee295b7ddd026eb08c87c3b9efbe70057e6",
"size": "19",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-libtrue/true/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3955"
},
{
"name": "C++",
"bytes": "1482"
},
{
"name": "Makefile",
"bytes": "340"
},
{
"name": "Python",
"bytes": "1040"
},
{
"name": "Ruby",
"bytes": "1342"
}
],
"symlink_target": ""
} |
import os
import sys
from json import dumps
import json
from flask import Flask, Response
from flask import request
from flask_cors import CORS, cross_origin
import logging
import utilities as _Utilities
import endpoints
import paybook
import paybook.sdk as paybook_sdk
# Parse command-line parameters; an 'api_key' entry is required (KeyError otherwise).
cmd_params = _Utilities.get_cmd_params(sys.argv[1:])
paybook_api_key = cmd_params['api_key']
# database_path = cmd_params['database']
# Configure the Paybook SDK globally with the supplied API key.
paybook_sdk.Paybook(paybook_api_key,print_calls=False)
print '\nSetting API Key to: ' + str(paybook_api_key)
print 'Server started successfully\n'
print 'Enjoy your Paybook SYNC experience V_0.1 \\0/\n\n'
# App: Flask application serving static assets from ./public, CORS enabled.
app = Flask(__name__,static_folder='public')
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# Random secret key: all sessions are invalidated on every restart.
app.secret_key = os.urandom(24)
logger = _Utilities.setup_logger('app')
# Route handlers: each prints a trace of the handler it is about to run and
# delegates all real work to the endpoints module. (Python 2 print statements.)
@app.route('/', methods=['GET'])
def index():
    print 'endpoints.index'
    return endpoints.index()
@app.route("/signup", methods=['POST'])
def signup():
    print 'endpoints.signup'
    return endpoints.signup()
@app.route("/login", methods=['POST'])
def login():
    print 'endpoints.login'
    return endpoints.login()
@app.route("/catalogues")
def catalogues():
    print 'endpoints.catalogues'
    return endpoints.catalogues()
# /credentials is registered three times with disjoint HTTP methods
# (POST / GET / DELETE), each mapped to its own view function.
@app.route("/credentials", methods=['POST'])
def credentials():
    print 'endpoints.credentials'
    return endpoints.credentials()
@app.route("/credentials")
def get_credentials():
    print 'endpoints.get_credentials'
    return endpoints.get_credentials()
@app.route("/credentials", methods=['DELETE'])
def delete_credentials():
    print 'endpoints.delete_credentials'
    return endpoints.delete_credentials()
@app.route("/accounts")
def accounts():
    print 'endpoints.accounts'
    return endpoints.accounts()
@app.route("/transactions")
def transactions():
    print 'endpoints.transactions'
    return endpoints.transactions()
@app.route("/twofa", methods=['POST'])
def twofa():
print 'enpoints.twofa'
return endpoints.twofa()
if __name__ == "__main__":
    # Development server: debug mode on, reachable from any interface.
    app.debug = True
    app.run(host="0.0.0.0")
| {
"content_hash": "f7bfc1d7b955a3f2ef6d27c083d0e2ba",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 57,
"avg_line_length": 23.964285714285715,
"alnum_prop": 0.7312468951813215,
"repo_name": "Paybook/lite-python",
"id": "0faea7037a2d73f89441124b1a5bb9a28b1d9bd1",
"size": "3433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159"
},
{
"name": "HTML",
"bytes": "1517"
},
{
"name": "Python",
"bytes": "18234"
}
],
"symlink_target": ""
} |
'''
This script was used to train the CNN for the symbol recognition.
'''
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras.utils import plot_model
from keras import backend as K
K.set_image_dim_ordering('th')
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
def load_data():
    """Load the symbol training set from a MATLAB .mat file and split it.

    Labels are shifted to 0-based (MATLAB labels start at 1). Samples are
    sorted by label, then the train split takes every 2nd sample and the
    test split takes every 20th sample starting from index 1 (disjoint from
    the train indices).

    Returns ((X_train, y_train), (X_test, y_test)).
    """
    # load symbol data set
    data = scipy.io.loadmat('../../data/extract/trainData.mat')
    Y = data['y'] - 1
    X = data['X']
    X = X[Y[:,0].argsort(),:]
    Y = Y[Y[:,0].argsort()]
    return (X[::2], Y[::2]), (X[1::20], Y[1::20])
# load data
(X_train, y_train), (X_test, y_test) = load_data()
# shuffle training data (load_data returns it sorted by label)
perm = np.random.permutation(len(X_train))
X_train = X_train[perm]
y_train = y_train[perm]
# reshape to be [samples][channels][width][height] ('th' ordering set above)
X_train = X_train.reshape(X_train.shape[0], 1, 32, 32).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 32, 32).astype('float32')
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def baseline_model():
    """Single-conv-layer CNN: 64x(5,5) conv -> 2x2 max-pool -> dropout ->
    512-unit dense -> softmax over `num_classes` symbols.

    Compiled with categorical cross-entropy and the Adam optimizer.
    """
    net = Sequential()
    for layer in (
        Conv2D(64, (5, 5), input_shape=(1, 32, 32), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ):
        net.add(layer)
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
def large_model():
    """Two-conv-layer CNN: 16x(5,5) conv -> pool -> 64x(3,3) conv -> pool ->
    dropout -> 512-unit dense -> softmax over `num_classes` symbols.

    Compiled with categorical cross-entropy and the Adam optimizer.
    """
    net = Sequential()
    for layer in (
        Conv2D(16, (5, 5), input_shape=(1, 32, 32), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ):
        net.add(layer)
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# build the model
model = large_model()
# checkpoint: keep only the weights that improve validation accuracy
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=1, callbacks=callbacks_list)
# Final evaluation of the model (error rate in percent)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
print('TODO do create test and validation using panda')
| {
"content_hash": "9a647d08dc5cd8e9448e44d1cc4b1b45",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 125,
"avg_line_length": 33.82222222222222,
"alnum_prop": 0.6941524310118266,
"repo_name": "DavidS3141/HandwrittenEquationRecognizer",
"id": "4657deb1715db7f4066be06121877228794c671a",
"size": "3044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python-keras/convNet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "352036"
},
{
"name": "Matlab",
"bytes": "29274"
},
{
"name": "Python",
"bytes": "7189"
},
{
"name": "TeX",
"bytes": "24425"
}
],
"symlink_target": ""
} |
def extractDoramtranslationsWordpressCom(item):
    '''
    Parser for 'doramtranslations.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # No chapter/volume information parsed -> not a release post.
    if not (chp or vol):
        return None
    # Preview posts are skipped as well.
    if "preview" in item['title'].lower():
        return None
    # tag -> (release group name, translation type)
    release_map = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (group, tl_type) in release_map.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, group, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "43890e9dee910d0447ed890819ebd8ca",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.643979057591623,
"repo_name": "fake-name/ReadableWebProxy",
"id": "4ab36183e38fc401e7900a5c061d5dcd3894bac8",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractDoramtranslationsWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class JobTerminateParameter(Model):
    """Options when terminating a job.
    :param terminate_reason: The text you want to appear as the job's
     TerminateReason. The default is 'UserTerminate'.
    :type terminate_reason: str
    """
    # Maps each Python attribute to its wire-format (camelCase) key and
    # serialization type, consumed by the msrest Model serializer.
    _attribute_map = {
        'terminate_reason': {'key': 'terminateReason', 'type': 'str'},
    }
    def __init__(self, terminate_reason=None):
        super(JobTerminateParameter, self).__init__()
        self.terminate_reason = terminate_reason
| {
"content_hash": "c36e1681fe5c46e7c4bdd12d2c261207",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 29.61111111111111,
"alnum_prop": 0.6641651031894934,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "1fe22775e9fd031fb8737487db15af1e11ebdd6f",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-batch/azure/batch/models/job_terminate_parameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
""" -*- coding: utf-8 -*-
A distutils based setup module.
"""
from distutils.core import setup
setup(
name='fileasobj',
packages=['fileasobj'],
version='2.0.0',
description='Manage a file as a Python list.',
author='John Hazelwood',
author_email='jhazelwo@users.noreply.github.com',
url='https://github.com/jhazelwo/python-fileasobj',
download_url='https://github.com/jhazelwo/python-fileasobj/tarball/2.0.0',
keywords=['python', 'file', 'fileasobj'],
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| {
"content_hash": "06a6d531fdb5ecb8ac2da6d1ccc7f7ad",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 36.75,
"alnum_prop": 0.6258503401360545,
"repo_name": "jhazelwo/python-fileasobj",
"id": "aaf6c9fd23320e548b3919daafce17de140c1407",
"size": "1029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36384"
}
],
"symlink_target": ""
} |
"""
#;+
#; NAME:
#; utils
#; Version 1.0
#;
#; PURPOSE:
#; Module for spectral utilities
#; Primarily overloads of Spectrum1D
#; 07-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
import astropy as apy
from astropy import units as u
from astropy import constants as const
from astropy.io import fits
from specutils import Spectrum1D
from specutils.wcs import BaseSpectrum1DWCS, Spectrum1DLookupWCS
from specutils.wcs.specwcs import Spectrum1DPolynomialWCS
from xastropy.xutils import xdebug as xdb
# Child Class of specutils/Spectrum1D
# Generated by JXP to add functionality before it gets ingested in the specutils distribution
class XSpectrum1D(Spectrum1D):
#### ###############################
# Instantiate from Spectrum1D [best to avoid!]
    @classmethod
    def from_spec1d(cls, spec1d):
        """Promote a plain Spectrum1D to an XSpectrum1D.

        Copies flux, wcs, unit, uncertainty, mask and meta verbatim.
        """
        # Giddy up
        return cls(flux=spec1d.flux, wcs=spec1d.wcs, unit=spec1d.unit,
            uncertainty=spec1d.uncertainty, mask=spec1d.mask, meta=spec1d.meta)
#### ###############################
# Normalize
    def normalize(self, conti, verbose=False, no_check=False):
        """
        Normalize the spectrum in place by dividing flux by a continuum.

        Parameters
        ----------
        conti: numpy array
            Continuum; normally the same length as self.flux
        verbose: bool (False)
            Print a confirmation message
        no_check: bool (False)
            If True and the continuum is LONGER than the flux, truncate the
            continuum and proceed (with a warning). A shorter continuum
            still raises ValueError. If False, any length mismatch raises.
        """
        # Sanity check
        if (len(conti) != len(self.flux)):
            if no_check:
                print('WARNING: Continuum length differs from flux')
                if len(conti) > len(self.flux):
                    # Use only the leading portion of the continuum.
                    self.flux = self.flux / conti[0:len(self.flux)]
                    return
                else:
                    raise ValueError('normalize: Continuum needs to be longer!')
            else:
                raise ValueError('normalize: Continuum needs to be same length as flux array')
        # Adjust the flux
        self.flux = self.flux / conti
        if verbose:
            print('spec.utils: Normalizing the spectrum')
#### ###############################
# Grabs spectrum pixels in a velocity window
    def pix_minmax(self, *args):
        """Pixels in a wavelength/velocity range.

        Also caches the pixel list on self.sub_pix as a side effect.

        Parameters
        ----------
        Option 1: wvmnx
          wvmnx: Tuple of 2 floats
            wvmin, wvmax in spectral units
        Option 2: zabs, wrest, vmnx  [not as a tuple or list!]
          zabs: Absorption redshift
          wrest: Rest wavelength  (with Units!)
          vmnx: Tuple of 2 floats
            vmin, vmax in km/s

        Returns
        -------
        (gdpix, wvmnx, (pixmin, pixmax)) where gdpix is the integer array
        of pixel indices spanning the range (inclusive).
        """
        if len(args) == 1: # Option 1
            wvmnx = args[0]
        elif len(args) == 3: # Option 2
            from astropy import constants as const
            # args = zabs, wrest, vmnx
            wvmnx = (args[0]+1) * (args[1] + (args[1] * args[2] / const.c.to('km/s')) )
            # NOTE(review): the converted value below is discarded --
            # Quantity.to() returns a new object; wvmnx keeps its units.
            wvmnx.to(u.AA)
        # Locate the values: nearest pixels to each edge of the range
        pixmin = np.argmin( np.fabs( self.dispersion-wvmnx[0] ) )
        pixmax = np.argmin( np.fabs( self.dispersion-wvmnx[1] ) )
        gdpix = np.arange(pixmin,pixmax+1)
        # Fill + Return
        self.sub_pix = gdpix
        return gdpix, wvmnx, (pixmin, pixmax)
#### ###############################
# Box car smooth
    def box_smooth(self, nbox, preserve=False):
        """ Box car smooth spectrum and return a new one
        Is a simple wrapper to the rebin routine

        Parameters
        ----------
        nbox: integer
            Number of pixels to smooth over
        preserve: bool (False)
            If True, convolve with a boxcar kernel and keep the original
            number of pixels; if False, rebin (mean) down by a factor of
            nbox, truncating any leftover trailing pixels.

        Returns
        -------
        XSpectrum1D of the smoothed spectrum
        """
        from xastropy.xutils import arrays as xxa
        if preserve:
            from astropy.convolution import convolve, Box1DKernel
            new_fx = convolve(self.flux, Box1DKernel(nbox))
            new_sig = convolve(self.sig, Box1DKernel(nbox))
            new_wv = self.dispersion
        else:
            # Truncate arrays as need be
            npix = len(self.flux)
            try:
                new_npix = npix // nbox # New division
            except ZeroDivisionError:
                # NOTE(review): nbox == 0 drops into the debugger instead of
                # raising; consider validating nbox up front.
                xdb.set_trace()
            orig_pix = np.arange( new_npix * nbox )
            # Rebin (mean); error scales down by sqrt(nbox)
            new_wv = xxa.scipy_rebin( self.dispersion[orig_pix], new_npix )
            new_fx = xxa.scipy_rebin( self.flux[orig_pix], new_npix )
            new_sig = xxa.scipy_rebin( self.sig[orig_pix], new_npix ) / np.sqrt(nbox)
        # Return
        return XSpectrum1D.from_array(new_wv, new_fx,
            uncertainty=apy.nddata.StdDevUncertainty(new_sig))
#### ###############################
# Rebin
    def rebin(self, new_wv):
        """ Rebin the existing spectrum to a new wavelength array

        Uses simple linear interpolation on the cumulative flux integral.
        The default (and only) option conserves counts (and flambda).

        WARNING: Do not trust either edge pixel of the new array

        Parameters
        ----------
        new_wv: Quantity array
            New wavelength array

        Returns
        -------
        XSpectrum1D of the rebinned spectrum (no uncertainty is propagated)
        """
        from scipy.interpolate import interp1d
        # Endpoints of original pixels (upper edge of each pixel)
        npix = len(self.dispersion)
        wvh = (self.dispersion + np.roll(self.dispersion, -1))/2.
        wvh[npix-1] = self.dispersion[npix-1] + (self.dispersion[npix-1] - self.dispersion[npix-2])/2.
        dwv = wvh - np.roll(wvh,1)
        dwv[0] = 2*(wvh[0]-self.dispersion[0])
        # Cumulative Sum of flux * pixel width
        cumsum = np.cumsum(self.flux * dwv)
        # Interpolate the cumulative integral (0 outside the original range)
        fcum = interp1d(wvh, cumsum, fill_value=0., bounds_error=False)
        # Endpoints of new pixels
        nnew = len(new_wv)
        nwvh = (new_wv + np.roll(new_wv, -1))/2.
        nwvh[nnew-1] = new_wv[nnew-1] + (new_wv[nnew-1] - new_wv[nnew-2])/2.
        # Pad starting point
        bwv = np.zeros(nnew+1) * new_wv.unit
        #xdb.set_trace()
        bwv[0] = new_wv[0] - (new_wv[1] - new_wv[0])/2.
        bwv[1:] = nwvh
        # Evaluate the cumulative integral at the new pixel edges
        newcum = fcum(bwv)
        # Endpoint: clamp to the total when the new grid extends past the old
        if (bwv[-1] > wvh[-1]):
            newcum[-1] = cumsum[-1]
        # Rebinned flux: difference of the cumulative integral across each pixel
        new_fx = (np.roll(newcum,-1)-newcum)[:-1]
        # Normalize (preserve counts and flambda)
        new_dwv = bwv - np.roll(bwv,1)
        new_fx = new_fx / new_dwv[1:]
        # Return new spectrum
        return XSpectrum1D.from_array(new_wv, new_fx)
# Quick plot
def plot(self):
''' Plot the spectrum
Parameters
----------
'''
if self.sig is not None:
xdb.xplot(self.dispersion, self.flux, self.sig)
else:
xdb.xplot(self.dispersion, self.flux)
# Velo array
def relative_vel(self, wv_obs):
''' Return a velocity array relative to an input wavelength
Should consider adding a velocity array to this Class,
i.e. self.velo
Parameters
----------
wv_obs : float
Wavelength to set the zero of the velocity array.
Often (1+z)*wrest
Returns:
---------
velo: Quantity array (km/s)
'''
return (self.dispersion-wv_obs) * const.c.to('km/s')/wv_obs
# Write to fits
def write_to_fits(self, outfil, clobber=True, add_wave=False):
''' Write to a FITS file
Should generate a separate code to make a Binary FITS table format
Parameters
----------
outfil: String
Name of the FITS file
clobber: bool (True)
Clobber existing file?
add_wave: bool (False)
Force writing of wavelength array
'''
# TODO
# 1. Add unit support for wavelength arrays
from specutils.io import write_fits as sui_wf
prihdu = sui_wf._make_hdu(self.data) # Not for binary table format
prihdu.name = 'FLUX'
multi = 0 # Multi-extension?
# Type
if type(self.wcs) is Spectrum1DPolynomialWCS: # CRVAL1, etc. WCS
# WCS
wcs = self.wcs
wcs.write_fits_header(prihdu.header)
# Error array?
if self.sig is not None:
sighdu = fits.ImageHDU(self.sig)
sighdu.name='ERROR'
#
if add_wave:
wvhdu = fits.ImageHDU(self.dispersion.value)
wvhdu.name = 'WAVELENGTH'
hdu = fits.HDUList([prihdu, sighdu, wvhdu])
else:
hdu = fits.HDUList([prihdu, sighdu])
multi=1
else:
hdu = prihdu
elif type(self.wcs) is Spectrum1DLookupWCS: # Wavelengths as an array (without units for now)
# Add sig, wavelength to HDU
sighdu = fits.ImageHDU(self.sig)
sighdu.name='ERROR'
wvhdu = fits.ImageHDU(self.dispersion.value)
wvhdu.name = 'WAVELENGTH'
hdu = fits.HDUList([prihdu, sighdu, wvhdu])
multi=1
else:
raise ValueError('write_to_fits: Not ready for this type of spectrum wavelengths')
# Deal with header
if hasattr(self,'head'):
hdukeys = prihdu.header.keys()
# Append ones to avoid
hdukeys = hdukeys +ZZ ['BUNIT','COMMENT','', 'NAXIS2', 'HISTORY']
for key in self.head.keys():
# Use new ones
if key in hdukeys:
continue
# Update unused ones
try:
prihdu.header[key] = self.head[key]
except ValueError:
xdb.set_trace()
# History
if 'HISTORY' in self.head.keys():
prihdu.header.add_history(str(self.head['HISTORY']))
# Write
hdu.writeto(outfil, clobber=clobber)
print('Wrote spectrum to {:s}'.format(outfil))
# Stacking (not yet implemented)
def bspline_stack(spectra):
    ''' "Stack" a set of spectra with a bspline algorithm
    Might be useful for coadding

    Parameters
    ----------
    spectra: List of Spectrum1D

    Raises
    ------
    NotImplementedError
        Always. This is an unimplemented stub; it previously returned
        None silently, which hid the missing implementation from callers.
    '''
    raise NotImplementedError('bspline_stack is not implemented yet')
# ################
if __name__ == "__main__":
    # Ad-hoc test driver. Each bit of flg_test enables one scenario.
    # NOTE(review): the gating uses (flg_test % 2**(k+1)) >= 2**k, so with
    # flg_test = 8 only the rebin branch below actually runs.
    flg_test = 0
    #flg_test += 2**0 # Test write (simple)
    #flg_test += 2**1 # Test write with 3 arrays
    #flg_test += 2**2 # Test boxcar
    flg_test += 2**3 # Test rebin
    from xastropy.spec import readwrite as xsr
    if (flg_test % 2**1) >= 2**0:
        # Standard log-linear read + write (MagE)
        fil = '~/PROGETTI/LLSZ3/data/normalize/UM669_nF.fits'
        myspec = xsr.readspec(fil)
        # Write
        myspec.write_to_fits('tmp.fits')
    if (flg_test % 2**2) >= 2**1:
        # Now 2D
        fil = '/Users/xavier/Dropbox/QSOPairs/data/LRIS_redux/SDSSJ231254.65-025403.1_b400_F.fits.gz'
        myspec = xsr.readspec(fil)
        myspec.write_to_fits('tmp.fits')
    if (flg_test % 2**3) >= 2**2: # Boxcar
        fil = '~/PROGETTI/LLSZ3/data/normalize/UM669_nF.fits'
        myspec = xsr.readspec(fil)
        newspec = myspec.box_smooth(3)
        # Smooth while preserving the pixel count, then compare visually
        newspec2 = myspec.box_smooth(3, preserve=True)
        xdb.xplot(myspec.dispersion, myspec.flux, newspec2.flux)
    if (flg_test % 2**4) >= 2**3: # Rebin array
        fil = '~/PROGETTI/LLSZ3/data/normalize/UM669_nF.fits'
        myspec = xsr.readspec(fil)
        new_wv = np.arange(3000., 9000., 5) * u.AA
        newspec = myspec.rebin(new_wv)
        #xdb.xplot(myspec.dispersion, myspec.flux,
        #          xtwo=new_wv, ytwo=newspec.flux)
        # Test EW: equivalent width should be conserved by the rebin
        wvmnx = np.array((4859., 4961.))*u.AA
        gd1 = np.where( (myspec.dispersion > wvmnx[0]) &
            (myspec.dispersion < wvmnx[1]))[0]
        dwv1 = myspec.dispersion - np.roll(myspec.dispersion,1)
        EW1 = np.sum(dwv1[gd1]*(1.-myspec.flux[gd1].value))
        gd2 = np.where( (newspec.dispersion > wvmnx[0]) &
            (newspec.dispersion < wvmnx[1]))[0]
        dwv2 = newspec.dispersion - np.roll(newspec.dispersion,1)
        EW2 = np.sum(dwv2[gd2]*(1.-newspec.flux[gd2].value))
        print('EW1={:g} and EW2={:g} for wvmnx={:g},{:g}'.format(
            EW1,EW2,wvmnx[0],wvmnx[1]))
        print('Percent diff = {:0.2f}%'.format(100*(EW2-EW1)/EW1))
| {
"content_hash": "417560b77ac1f59c6289fc7a63d4952a",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 102,
"avg_line_length": 32.8828125,
"alnum_prop": 0.5384493545576938,
"repo_name": "profxj/old_xastropy",
"id": "a2cef8a9f1497739924ca0c080202a3b3e3281ff",
"size": "12627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xastropy/spec/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "144038"
},
{
"name": "Python",
"bytes": "826525"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Profile
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Changelist configuration for Profile records in the Django admin."""

    # Columns shown in the admin changelist.
    list_display = ('id', 'user', 'active', 'nickname', 'phone_number')
    # Right-hand sidebar filters.
    list_filter = ('user', 'active')
| {
"content_hash": "1ff813185e119a6507199c541d24d1c6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.688212927756654,
"repo_name": "Sorjak/TurboSmartHome",
"id": "25d442765a1ec9e36d9db5021452b8bd21c6f2ac",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbosmarthome/main/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "597"
},
{
"name": "HTML",
"bytes": "7147"
},
{
"name": "JavaScript",
"bytes": "117"
},
{
"name": "Python",
"bytes": "14840"
},
{
"name": "Shell",
"bytes": "3130"
}
],
"symlink_target": ""
} |
from iptables import Iptables
# Author: Moises Gautier Gomez
# Proyecto fin de carrera - Ing. en Informatica
# Universidad de Granada
# Create an Iptables source object and start it.
# NOTE(review): source_info keys appear to mean T=type, M=module,
# P=log path, C=config path -- confirm against the Iptables class.
test = Iptables(args=(1,),
                source_info={'T': 'Firewall', 'M': 'iptables', 'P': '/var/log/iptables.log',
                             'C': '/kernel/conf/iptables-conf.conf'})
test.start()
# test_2 = Glances('bd_project', args=(1,), source={'T' : 'Watchdog', 'M' : 'glances', 'P' : './glances.csv'})
# test_2.start()
| {
"content_hash": "130d8bcd6a9fad5b1874bb63f39641f5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 110,
"avg_line_length": 34.53333333333333,
"alnum_prop": 0.611969111969112,
"repo_name": "MGautier/security-sensor",
"id": "13226794184530d1f8b2069a98ccceab8fd3b5dc",
"size": "566",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trunk/version-1-0/webapp/secproject/secapp/controller.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "154804"
},
{
"name": "Go",
"bytes": "3432"
},
{
"name": "HTML",
"bytes": "243673"
},
{
"name": "JavaScript",
"bytes": "345834"
},
{
"name": "Makefile",
"bytes": "942"
},
{
"name": "PHP",
"bytes": "1858"
},
{
"name": "Perl",
"bytes": "1928"
},
{
"name": "Python",
"bytes": "499597"
},
{
"name": "Ruby",
"bytes": "1510"
},
{
"name": "Shell",
"bytes": "3988"
},
{
"name": "TeX",
"bytes": "232922"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Directory containing this setup script; used to resolve the doc files.
here = os.path.abspath(os.path.dirname(__file__))


def _read(name):
    """Return the text of *name*, resolved relative to this file."""
    with open(os.path.join(here, name)) as handle:
        return handle.read()


README = _read('README.txt')
CHANGES = _read('CHANGES.txt')

# Runtime dependencies.
requires = [
    'automatron',
    'pyOpenSSL',
]

setup(
    name='automatron-notify',
    version='1.0.0',
    description='Automatron IRC bot - Notification framework',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        "Programming Language :: Python",
        "Framework :: Twisted",
        "Topic :: Internet :: IRC",
    ],
    author='Ingmar Steen',
    author_email='iksteen@gmail.com',
    url='',
    keywords='',
    packages=find_packages() + ['twisted.plugins'],
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
)
| {
"content_hash": "8b229c77891e53f4e4ec3a36c079a84b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 24.647058823529413,
"alnum_prop": 0.6252983293556086,
"repo_name": "automatron/automatron-notify",
"id": "f4be3f462ee51e63744685edf28898087fd154aa",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8253"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import BaseUserManager
from django.core.validators import RegexValidator
from django.db import models
# E.164-style validator: '+' followed by 2-15 digits, no leading zero.
phone_regex = RegexValidator(regex=r'^\+[1-9]\d{1,14}$',
                             message="Mobile number must be entered in the format:"
                                     " '+999999999'. Up to 15 digits allowed.")


class CustomUser(AbstractBaseUser):
    """Minimal custom user model for the test suite: email login, optional mobile."""

    # Both identifiers are nullable, so a user may exist with either one.
    email = models.EmailField(max_length=255, unique=True, blank=True, null=True)
    email_verified = models.BooleanField(default=False)
    # max_length=17 fits '+' plus up to 15 digits (with a little slack).
    mobile = models.CharField(validators=[phone_regex], max_length=17, unique=True, blank=True, null=True)
    mobile_verified = models.BooleanField(default=False)

    objects = BaseUserManager()

    USERNAME_FIELD = 'email'

    class Meta:
        app_label = 'tests'
| {
"content_hash": "0ae5068d0e3d22353bff528a63776c25",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 106,
"avg_line_length": 37.43478260869565,
"alnum_prop": 0.686411149825784,
"repo_name": "aaronn/django-rest-framework-passwordless",
"id": "8bf37954cd220b0706c8faac79c52ff137d80aa3",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "394"
},
{
"name": "Python",
"bytes": "89165"
}
],
"symlink_target": ""
} |
import os
import platform
import re
import subprocess
import sys
import tempfile
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# NOTE: `config`, `lit_config` and `llvm_config` are injected into this
# file's globals by lit before it is exec'd; they are not imported here.

# name: The name of this test suite.
config.name = 'debuginfo-tests'

# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)

# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.c', '.cpp', '.m']

# excludes: A list of directories to exclude from the testsuite. The 'Inputs'
# subdirectories contain auxiliary inputs for various tests in their parent
# directories.
config.excludes = ['Inputs']

# test_source_root: The root path where tests are located.
config.test_source_root = os.path.join(config.debuginfo_tests_src_root)

# test_exec_root: The root path where tests should be run.
config.test_exec_root = config.debuginfo_tests_obj_root

llvm_config.use_default_substitutions()

# Initial tool substitutions; more are appended below depending on the
# platform and on which debuggers are found.
tools = [
    ToolSubst('%test_debuginfo', command=os.path.join(
        config.debuginfo_tests_src_root, 'llgdb-tests', 'test_debuginfo.pl')),
    ToolSubst("%llvm_src_root", config.llvm_src_root),
    ToolSubst("%llvm_tools_dir", config.llvm_tools_dir),
]
def get_required_attr(config, attr_name):
    """Return ``getattr(config, attr_name)``, aborting the run if it is unset.

    Parameters
    ----------
    config : lit configuration object
        Object whose attribute is required.
    attr_name : str
        Name of the attribute that must be present and not None.

    Returns
    -------
    The attribute's value; this function only returns when the value is
    not None (a missing/None attribute aborts via ``lit_config.fatal``).
    """
    attr_value = getattr(config, attr_name, None)
    # `is None` (identity test) instead of `== None`: correct idiom and
    # immune to objects with custom __eq__.
    if attr_value is None:
        lit_config.fatal(
            "No attribute %r in test configuration! You may need to run "
            "tests from your build directory or add this attribute "
            "to lit.site.cfg " % attr_name)
    return attr_value
# If this is an MSVC environment, the tests at the root of the tree are
# unsupported. The local win_cdb test suite, however, is supported.
is_msvc = get_required_attr(config, "is_msvc")
if is_msvc:
    config.available_features.add('msvc')
    # FIXME: We should add some llvm lit utility code to find the Windows SDK
    # and set up the environment appopriately.
    win_sdk = 'C:/Program Files (x86)/Windows Kits/10/'
    arch = 'x64'
    llvm_config.with_system_environment(['LIB', 'LIBPATH', 'INCLUDE'])
    # Clear _NT_SYMBOL_PATH to prevent cdb from attempting to load symbols from
    # the network.
    llvm_config.with_environment('_NT_SYMBOL_PATH', '')
    tools.append(ToolSubst('%cdb', '"%s"' % os.path.join(win_sdk, 'Debuggers',
                                                         arch, 'cdb.exe')))

# clang_src_dir is not used by these tests, but is required by
# use_clang(), so set it to "".
if not hasattr(config, 'clang_src_dir'):
    config.clang_src_dir = ""
llvm_config.use_clang()

if config.llvm_use_sanitizer:
    # Propagate path to symbolizer for ASan/MSan.
    llvm_config.with_system_environment(
        ['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH'])

# Expose the compiler locations to the tests through the environment.
llvm_config.with_environment('PATHTOCLANG', llvm_config.config.clang)
llvm_config.with_environment('PATHTOCLANGPP', llvm_config.use_llvm_tool('clang++'))
llvm_config.with_environment('PATHTOCLANGCL', llvm_config.use_llvm_tool('clang-cl'))

# Check which debuggers are available:
# prefer an lldb built in this tree, then fall back to one on PATH.
built_lldb = llvm_config.use_llvm_tool('lldb', search_env='CLANG')
lldb_path = None
if built_lldb is not None:
    lldb_path = built_lldb
elif lit.util.which('lldb') is not None:
    lldb_path = lit.util.which('lldb')

if lldb_path is not None:
    config.available_features.add('lldb')

# Produce dexter path, lldb path, and combine into the %dexter substitution
# for running a test.
dexter_path = os.path.join(config.debuginfo_tests_src_root,
                           'dexter', 'dexter.py')
dexter_test_cmd = '"{}" "{}" test'.format(config.python3_executable, dexter_path)
if lldb_path is not None:
    dexter_test_cmd += ' --lldb-executable "{}"'.format(lldb_path)
tools.append(ToolSubst('%dexter', dexter_test_cmd))

# For testing other bits of dexter that aren't under the "test" subcommand,
# have a %dexter_base substitution.
dexter_base_cmd = '"{}" "{}"'.format(config.python3_executable, dexter_path)
tools.append(ToolSubst('%dexter_base', dexter_base_cmd))

# Set up commands for DexTer regression tests.
# Builder, debugger, optimisation level and several other flags differ
# depending on whether we're running a unix like or windows os.
if platform.system() == 'Windows':
    dexter_regression_test_builder = '--builder clang-cl_vs2015'
    dexter_regression_test_debugger = '--debugger dbgeng'
    dexter_regression_test_cflags = '--cflags "/Zi /Od"'
    dexter_regression_test_ldflags = '--ldflags "/Zi"'
else:
    dexter_regression_test_builder = '--builder clang'
    dexter_regression_test_debugger = "--debugger lldb"
    dexter_regression_test_cflags = '--cflags "-O0 -glldb"'
    dexter_regression_test_ldflags = ''

# Typical command would take the form:
# ./path_to_py/python.exe ./path_to_dex/dexter.py test --fail-lt 1.0 -w --builder clang --debugger lldb --cflags '-O0 -g'
dexter_regression_test_command = ' '.join(
    # "python3", "dexter.py", test, fail_mode, builder, debugger, cflags, ldflags
    ["{}".format(config.python3_executable),
     "{}".format(dexter_path),
     'test',
     '--fail-lt 1.0 -w',
     dexter_regression_test_builder,
     dexter_regression_test_debugger,
     dexter_regression_test_cflags,
     dexter_regression_test_ldflags])

tools.append(ToolSubst('%dexter_regression_test', dexter_regression_test_command))

tool_dirs = [config.llvm_tools_dir]

llvm_config.add_tool_substitutions(tools, tool_dirs)

lit.util.usePlatformSdkOnDarwin(config, lit_config)
# available_features: REQUIRES/UNSUPPORTED lit commands look at this list.
if platform.system() == 'Darwin':
    import subprocess
    # Ask the Xcode toolchain which lldb it ships so tests can gate on it.
    xcode_lldb_vers = subprocess.check_output(['xcrun', 'lldb', '--version']).decode("utf-8")
    # Raw string: '\d' must reach the regex engine as a digit class; in a
    # plain literal it is an invalid string escape (SyntaxWarning on
    # modern Python).
    match = re.search(r'lldb-(\d+)', xcode_lldb_vers)
    if match:
        apple_lldb_vers = int(match.group(1))
        if apple_lldb_vers < 1000:
            config.available_features.add('apple-lldb-pre-1000')
| {
"content_hash": "3213ffe9b6ec9c0978bd77dd350368dc",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 121,
"avg_line_length": 38.49044585987261,
"alnum_prop": 0.7049478735727288,
"repo_name": "endlessm/chromium-browser",
"id": "fb2f72357b263dfc383a6a7492dc015f13fc8298",
"size": "6061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/llvm/debuginfo-tests/lit.cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import unittest
from grako.codegen import CodeGenerator, ModelRenderer
from grako.model import Node
class Generator(CodeGenerator):
    """Code generator that resolves renderers from attributes named after
    the node's class."""

    def __init__(self):
        super(Generator, self).__init__()

    def _find_renderer_class(self, item):
        # Look up a renderer attribute with the same name as item's class;
        # None when no such attribute exists.
        renderer_name = item.__class__.__name__
        return getattr(self, renderer_name, None)
class Super(ModelRenderer):
    # Renderer for Super nodes; {sub} is filled from the node's `sub` field.
    template = 'OK {sub}'


class Sub(ModelRenderer):
    # Renderer for Sub nodes.
    template = 'and OK too'


# NOTE(review): `Sub` and `Super` are intentionally redefined -- the Node
# classes below shadow the renderer classes above at module scope. Verify
# against grako's CodeGenerator how renderers are resolved for these nodes.
class Sub(Node):
    pass


class Super(Node):
    def __init__(self, ctx):
        super(Super, self).__init__(ctx)
        # Attach a child node; the Super renderer template references `sub`.
        self.sub = Sub(self.ctx)
class TestCodegen(unittest.TestCase):
def test_basic_codegen(self):
model = Super(self)
gen = Generator()
result = gen.render(model)
self.assertEqual('OK and OK too', result)
def suite():
    """Build a TestSuite holding every TestCodegen test."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestCodegen)
| {
"content_hash": "808a9b904aed45d00986f777ff9f182c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 67,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.6279761904761905,
"repo_name": "frnknglrt/grako",
"id": "26d5a7579249a2a3e19bce16efea11b2c1d90c15",
"size": "1032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grako/test/codegen_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "20828"
},
{
"name": "GAP",
"bytes": "15180"
},
{
"name": "Makefile",
"bytes": "1895"
},
{
"name": "Python",
"bytes": "180148"
},
{
"name": "VimL",
"bytes": "2748"
}
],
"symlink_target": ""
} |
"""Tests and descriptive statistics with weights
Created on 2010-09-18
Author: josef-pktd
License: BSD (3-clause)
References
----------
SPSS manual
SAS manual
This follows in large parts the SPSS manual, which is largely the same as
the SAS manual with different, simpler notation.
Freq, Weight in SAS seems redundant since they always show up as product, SPSS
has only weights.
Notes
-----
This has potential problems with ddof, I started to follow numpy with ddof=0
by default and users can change it, but this might still mess up the t-tests,
since the estimates for the standard deviation will be based on the ddof that
the user chooses.
- fixed ddof for the meandiff ttest, now matches scipy.stats.ttest_ind
Note: scipy has now a separate, pooled variance option in ttest, but I have not
compared yet.
"""
import numpy as np
from scipy import stats
from statsmodels.tools.decorators import cache_readonly
class DescrStatsW(object):
    """
    Descriptive statistics and tests with weights for case weights

    Assumes that the data is 1d or 2d with (nobs, nvars) observations in rows,
    variables in columns, and that the same weight applies to each column.

    If degrees of freedom correction is used, then weights should add up to the
    number of observations. ttest also assumes that the sum of weights
    corresponds to the sample size.

    This is essentially the same as replicating each observations by its
    weight, if the weights are integers, often called case or frequency
    weights.

    Parameters
    ----------
    data : array_like, 1-D or 2-D
        dataset
    weights : None or 1-D ndarray
        weights for each observation, with same length as zero axis of data
    ddof : int
        default ddof=0, degrees of freedom correction used for second moments,
        var, std, cov, corrcoef.
        However, statistical tests are independent of `ddof`, based on the
        standard formulas.

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> x1_2d = 1.0 + np.random.randn(20, 3)
    >>> w1 = np.random.randint(1, 4, 20)
    >>> d1 = DescrStatsW(x1_2d, weights=w1)
    >>> d1.mean
    array([ 1.42739844,  1.23174284,  1.083753  ])
    >>> d1.var
    array([ 0.94855633,  0.52074626,  1.12309325])
    >>> d1.std_mean
    array([ 0.14682676,  0.10878944,  0.15976497])

    >>> tstat, pval, df = d1.ttest_mean(0)
    >>> tstat; pval; df
    array([  9.72165021,  11.32226471,   6.78342055])
    array([  1.58414212e-12,   1.26536887e-14,   2.37623126e-08])
    44.0

    >>> tstat, pval, df = d1.ttest_mean([0, 1, 1])
    >>> tstat; pval; df
    array([ 9.72165021,  2.13019609,  0.52422632])
    array([  1.58414212e-12,   3.87842808e-02,   6.02752170e-01])
    44.0

    # if weights are integers, then asrepeats can be used

    >>> x1r = d1.asrepeats()
    >>> x1r.shape
    ...
    >>> stats.ttest_1samp(x1r, [0, 1, 1])
    ...
    """

    def __init__(self, data, weights=None, ddof=0):
        self.data = np.asarray(data)
        if weights is None:
            # Unweighted case: every observation counts once.
            self.weights = np.ones(self.data.shape[0])
        else:
            self.weights = np.asarray(weights).astype(float)
            # TODO: why squeeze?
            if len(self.weights.shape) > 1 and len(self.weights) > 1:
                self.weights = self.weights.squeeze()
        self.ddof = ddof

    @cache_readonly
    def sum_weights(self):
        """Sum of weights"""
        return self.weights.sum(0)

    @cache_readonly
    def nobs(self):
        """alias for number of observations/cases, equal to sum of weights
        """
        return self.sum_weights

    @cache_readonly
    def sum(self):
        """weighted sum of data"""
        return np.dot(self.data.T, self.weights)

    @cache_readonly
    def mean(self):
        """weighted mean of data"""
        return self.sum / self.sum_weights

    @cache_readonly
    def demeaned(self):
        """data with weighted mean subtracted"""
        return self.data - self.mean

    @cache_readonly
    def sumsquares(self):
        """weighted sum of squares of demeaned data"""
        return np.dot((self.demeaned ** 2).T, self.weights)

    # need memoize instead of cache decorator
    def var_ddof(self, ddof=0):
        """variance of data given ddof

        Parameters
        ----------
        ddof : int, float
            degrees of freedom correction, independent of attribute ddof

        Returns
        -------
        var : float, ndarray
            variance with denominator ``sum_weights - ddof``
        """
        return self.sumsquares / (self.sum_weights - ddof)

    def std_ddof(self, ddof=0):
        """standard deviation of data with given ddof

        Parameters
        ----------
        ddof : int, float
            degrees of freedom correction, independent of attribute ddof

        Returns
        -------
        std : float, ndarray
            standard deviation with denominator ``sum_weights - ddof``
        """
        return np.sqrt(self.var_ddof(ddof=ddof))

    @cache_readonly
    def var(self):
        """variance with default degrees of freedom correction
        """
        return self.sumsquares / (self.sum_weights - self.ddof)

    @cache_readonly
    def _var(self):
        """variance without degrees of freedom correction

        used for statistical tests with controlled ddof
        """
        return self.sumsquares / self.sum_weights

    @cache_readonly
    def std(self):
        """standard deviation with default degrees of freedom correction
        """
        return np.sqrt(self.var)

    @cache_readonly
    def cov(self):
        """weighted covariance of data if data is 2 dimensional

        assumes variables in columns and observations in rows
        uses default ddof
        """
        cov_ = np.dot(self.weights * self.demeaned.T, self.demeaned)
        cov_ /= self.sum_weights - self.ddof
        return cov_

    @cache_readonly
    def corrcoef(self):
        """weighted correlation with default ddof

        assumes variables in columns and observations in rows
        """
        return self.cov / self.std / self.std[:, None]

    @cache_readonly
    def std_mean(self):
        """standard deviation of weighted mean
        """
        std = self.std
        if self.ddof != 0:
            # ddof correction, (need copy of std)
            std = std * np.sqrt(
                (self.sum_weights - self.ddof) / self.sum_weights
            )
        return std / np.sqrt(self.sum_weights - 1)

    def quantile(self, probs, return_pandas=True):
        """
        Compute quantiles for a weighted sample.

        Parameters
        ----------
        probs : array_like
            A vector of probability points at which to calculate the
            quantiles. Each element of `probs` should fall in [0, 1].
        return_pandas : bool
            If True, return value is a Pandas DataFrame or Series.
            Otherwise returns a ndarray.

        Returns
        -------
        quantiles : Series, DataFrame, or ndarray
            If `return_pandas` = True, returns one of the following:

            * data are 1d, `return_pandas` = True: a Series indexed by
              the probability points.
            * data are 2d, `return_pandas` = True: a DataFrame with
              the probability points as row index and the variables
              as column index.

            If `return_pandas` = False, returns an ndarray containing the
            same values as the Series/DataFrame.

        Notes
        -----
        To compute the quantiles, first, the weights are summed over
        exact ties yielding distinct data values y_1 < y_2 < ..., and
        corresponding weights w_1, w_2, ....  Let s_j denote the sum
        of the first j weights, and let W denote the sum of all the
        weights.  For a probability point p, if pW falls strictly
        between s_j and s_{j+1} then the estimated quantile is
        y_{j+1}.  If pW = s_j then the estimated quantile is (y_j +
        y_{j+1})/2.  If pW < p_1 then the estimated quantile is y_1.

        References
        ----------
        SAS documentation for weighted quantiles:

        https://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_univariate_sect028.htm
        """
        import pandas as pd

        probs = np.asarray(probs)
        probs = np.atleast_1d(probs)

        if self.data.ndim == 1:
            rslt = self._quantile(self.data, probs)
            if return_pandas:
                rslt = pd.Series(rslt, index=probs)
        else:
            rslt = []
            for vec in self.data.T:
                rslt.append(self._quantile(vec, probs))
            rslt = np.column_stack(rslt)
            if return_pandas:
                columns = ["col%d" % (j + 1) for j in range(rslt.shape[1])]
                rslt = pd.DataFrame(data=rslt, columns=columns, index=probs)

        if return_pandas:
            rslt.index.name = "p"

        return rslt

    def _quantile(self, vec, probs):
        # Helper function to calculate weighted quantiles for one column.
        # Follows definition from SAS documentation.
        # Returns ndarray
        import pandas as pd

        # Aggregate over ties
        df = pd.DataFrame(index=np.arange(len(self.weights)))
        df["weights"] = self.weights
        df["vec"] = vec
        dfg = df.groupby("vec").agg(np.sum)
        weights = dfg.values[:, 0]
        values = np.asarray(dfg.index)

        cweights = np.cumsum(weights)
        totwt = cweights[-1]
        targets = probs * totwt
        ii = np.searchsorted(cweights, targets)

        rslt = values[ii]

        # Exact hits: average the two neighboring distinct values.
        jj = np.flatnonzero(np.abs(targets - cweights[ii]) < 1e-10)
        jj = jj[ii[jj] < len(cweights) - 1]
        rslt[jj] = (values[ii[jj]] + values[ii[jj] + 1]) / 2

        return rslt

    def tconfint_mean(self, alpha=0.05, alternative="two-sided"):
        """two-sided confidence interval for weighted mean of data

        If the data is 2d, then these are separate confidence intervals
        for each column.

        Parameters
        ----------
        alpha : float
            significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            This specifies the alternative hypothesis for the test that
            corresponds to the confidence interval.
            The alternative hypothesis, H1, has to be one of the following

            'two-sided': H1: mean not equal to value (default)
            'larger' :   H1: mean larger than value
            'smaller' :  H1: mean smaller than value

        Returns
        -------
        lower, upper : floats or ndarrays
            lower and upper bound of confidence interval

        Notes
        -----
        In a previous version, statsmodels 0.4, alpha was the confidence
        level, e.g. 0.95
        """
        # TODO: add asymmetric
        dof = self.sum_weights - 1
        ci = _tconfint_generic(
            self.mean, self.std_mean, dof, alpha, alternative
        )
        return ci

    def zconfint_mean(self, alpha=0.05, alternative="two-sided"):
        """two-sided confidence interval for weighted mean of data

        Confidence interval is based on normal distribution.
        If the data is 2d, then these are separate confidence intervals
        for each column.

        Parameters
        ----------
        alpha : float
            significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            This specifies the alternative hypothesis for the test that
            corresponds to the confidence interval.
            The alternative hypothesis, H1, has to be one of the following

            'two-sided': H1: mean not equal to value (default)
            'larger' :   H1: mean larger than value
            'smaller' :  H1: mean smaller than value

        Returns
        -------
        lower, upper : floats or ndarrays
            lower and upper bound of confidence interval

        Notes
        -----
        In a previous version, statsmodels 0.4, alpha was the confidence
        level, e.g. 0.95
        """
        return _zconfint_generic(self.mean, self.std_mean, alpha, alternative)

    def ttest_mean(self, value=0, alternative="two-sided"):
        """ttest of Null hypothesis that mean is equal to value.

        The alternative hypothesis H1 is defined by the following

        - 'two-sided': H1: mean not equal to value
        - 'larger' :   H1: mean larger than value
        - 'smaller' :  H1: mean smaller than value

        Parameters
        ----------
        value : float or array
            the hypothesized value for the mean
        alternative : str
            The alternative hypothesis, H1, has to be one of the following:

            - 'two-sided': H1: mean not equal to value (default)
            - 'larger' :   H1: mean larger than value
            - 'smaller' :  H1: mean smaller than value

        Returns
        -------
        tstat : float
            test statistic
        pvalue : float
            pvalue of the t-test
        df : int or float
        """
        # TODO: check direction with R, smaller=less, larger=greater
        tstat = (self.mean - value) / self.std_mean
        dof = self.sum_weights - 1
        # TODO: use outsourced
        if alternative == "two-sided":
            pvalue = stats.t.sf(np.abs(tstat), dof) * 2
        elif alternative == "larger":
            pvalue = stats.t.sf(tstat, dof)
        elif alternative == "smaller":
            pvalue = stats.t.cdf(tstat, dof)
        else:
            # An unknown alternative previously fell through and raised an
            # UnboundLocalError on `pvalue`; raise the same explicit error
            # as _tstat_generic instead.
            raise ValueError("invalid alternative")
        return tstat, pvalue, dof

    def ttost_mean(self, low, upp):
        """test of (non-)equivalence of one sample

        TOST: two one-sided t tests

        null hypothesis:  m < low or m > upp
        alternative hypothesis:  low < m < upp

        where m is the expected value of the sample (mean of the population).

        If the pvalue is smaller than a threshold, say 0.05, then we reject the
        hypothesis that the expected value of the sample (mean of the
        population) is outside of the interval given by thresholds low and upp.

        Parameters
        ----------
        low, upp : float
            equivalence interval low < mean < upp

        Returns
        -------
        pvalue : float
            pvalue of the non-equivalence test
        t1, pv1, df1 : tuple
            test statistic, pvalue and degrees of freedom for lower threshold
            test
        t2, pv2, df2 : tuple
            test statistic, pvalue and degrees of freedom for upper threshold
            test
        """
        t1, pv1, df1 = self.ttest_mean(low, alternative="larger")
        t2, pv2, df2 = self.ttest_mean(upp, alternative="smaller")
        # TOST p-value is the larger of the two one-sided p-values.
        return np.maximum(pv1, pv2), (t1, pv1, df1), (t2, pv2, df2)

    def ztest_mean(self, value=0, alternative="two-sided"):
        """z-test of Null hypothesis that mean is equal to value.

        The alternative hypothesis H1 is defined by the following

        'two-sided': H1: mean not equal to value
        'larger' :   H1: mean larger than value
        'smaller' :  H1: mean smaller than value

        Parameters
        ----------
        value : float or array
            the hypothesized value for the mean
        alternative : str
            The alternative hypothesis, H1, has to be one of the following

            'two-sided': H1: mean not equal to value (default)
            'larger' :   H1: mean larger than value
            'smaller' :  H1: mean smaller than value

        Returns
        -------
        tstat : float
            test statistic
        pvalue : float
            pvalue of the t-test

        Notes
        -----
        This uses the same degrees of freedom correction as the t-test in the
        calculation of the standard error of the mean, i.e it uses
        `(sum_weights - 1)` instead of `sum_weights` in the denominator.
        See Examples below for the difference.

        Examples
        --------

        z-test on a proportion, with 20 observations, 15 of those are our event

        >>> import statsmodels.api as sm
        >>> x1 = [0, 1]
        >>> w1 = [5, 15]
        >>> d1 = sm.stats.DescrStatsW(x1, w1)
        >>> d1.ztest_mean(0.5)
        (2.5166114784235836, 0.011848940928347452)

        This differs from the proportions_ztest because of the degrees of
        freedom correction:
        >>> sm.stats.proportions_ztest(15, 20.0, value=0.5)
        (2.5819888974716112, 0.009823274507519247).

        We can replicate the results from ``proportions_ztest`` if we increase
        the weights to have artificially one more observation:

        >>> sm.stats.DescrStatsW(x1, np.array(w1)*21./20).ztest_mean(0.5)
        (2.5819888974716116, 0.0098232745075192366)
        """
        tstat = (self.mean - value) / self.std_mean
        # TODO: use outsourced
        if alternative == "two-sided":
            pvalue = stats.norm.sf(np.abs(tstat)) * 2
        elif alternative == "larger":
            pvalue = stats.norm.sf(tstat)
        elif alternative == "smaller":
            pvalue = stats.norm.cdf(tstat)
        else:
            # Same fix as ttest_mean: fail with a clear error rather than an
            # UnboundLocalError for an unknown alternative.
            raise ValueError("invalid alternative")
        return tstat, pvalue

    def ztost_mean(self, low, upp):
        """test of (non-)equivalence of one sample, based on z-test

        TOST: two one-sided z-tests

        null hypothesis:  m < low or m > upp
        alternative hypothesis:  low < m < upp

        where m is the expected value of the sample (mean of the population).

        If the pvalue is smaller than a threshold, say 0.05, then we reject the
        hypothesis that the expected value of the sample (mean of the
        population) is outside of the interval given by thresholds low and upp.

        Parameters
        ----------
        low, upp : float
            equivalence interval low < mean < upp

        Returns
        -------
        pvalue : float
            pvalue of the non-equivalence test
        t1, pv1 : tuple
            test statistic and p-value for lower threshold test
        t2, pv2 : tuple
            test statistic and p-value for upper threshold test
        """
        t1, pv1 = self.ztest_mean(low, alternative="larger")
        t2, pv2 = self.ztest_mean(upp, alternative="smaller")
        return np.maximum(pv1, pv2), (t1, pv1), (t2, pv2)

    def get_compare(self, other, weights=None):
        """return an instance of CompareMeans with self and other

        Parameters
        ----------
        other : array_like or instance of DescrStatsW
            If array_like then this creates an instance of DescrStatsW with
            the given weights.
        weights : None or array
            weights are only used if other is not an instance of DescrStatsW

        Returns
        -------
        cm : instance of CompareMeans
            the instance has self attached as d1 and other as d2.

        See Also
        --------
        CompareMeans
        """
        if not isinstance(other, self.__class__):
            d2 = DescrStatsW(other, weights)
        else:
            d2 = other
        return CompareMeans(self, d2)

    def asrepeats(self):
        """get array that has repeats given by floor(weights)

        observations with weight=0 are dropped
        """
        w_int = np.floor(self.weights).astype(int)
        return np.repeat(self.data, w_int, axis=0)
def _tstat_generic(value1, value2, std_diff, dof, alternative, diff=0):
"""generic ttest based on summary statistic
The test statistic is :
tstat = (value1 - value2 - diff) / std_diff
and is assumed to be t-distributed with ``dof`` degrees of freedom.
Parameters
----------
value1 : float or ndarray
Value, for example mean, of the first sample.
value2 : float or ndarray
Value, for example mean, of the second sample.
std_diff : float or ndarray
Standard error of the difference value1 - value2
dof : int or float
Degrees of freedom
alternative : str
The alternative hypothesis, H1, has to be one of the following
* 'two-sided' : H1: ``value1 - value2 - diff`` not equal to 0.
* 'larger' : H1: ``value1 - value2 - diff > 0``
* 'smaller' : H1: ``value1 - value2 - diff < 0``
diff : float
value of difference ``value1 - value2`` under the null hypothesis
Returns
-------
tstat : float or ndarray
Test statistic.
pvalue : float or ndarray
P-value of the hypothesis test assuming that the test statistic is
t-distributed with ``df`` degrees of freedom.
"""
tstat = (value1 - value2 - diff) / std_diff
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = stats.t.sf(np.abs(tstat), dof) * 2
elif alternative in ["larger", "l"]:
pvalue = stats.t.sf(tstat, dof)
elif alternative in ["smaller", "s"]:
pvalue = stats.t.cdf(tstat, dof)
else:
raise ValueError("invalid alternative")
return tstat, pvalue
def _tconfint_generic(mean, std_mean, dof, alpha, alternative):
"""generic t-confint based on summary statistic
Parameters
----------
mean : float or ndarray
Value, for example mean, of the first sample.
std_mean : float or ndarray
Standard error of the difference value1 - value2
dof : int or float
Degrees of freedom
alpha : float
Significance level for the confidence interval, coverage is
``1-alpha``.
alternative : str
The alternative hypothesis, H1, has to be one of the following
* 'two-sided' : H1: ``value1 - value2 - diff`` not equal to 0.
* 'larger' : H1: ``value1 - value2 - diff > 0``
* 'smaller' : H1: ``value1 - value2 - diff < 0``
Returns
-------
lower : float or ndarray
Lower confidence limit. This is -inf for the one-sided alternative
"smaller".
upper : float or ndarray
Upper confidence limit. This is inf for the one-sided alternative
"larger".
"""
if alternative in ["two-sided", "2-sided", "2s"]:
tcrit = stats.t.ppf(1 - alpha / 2.0, dof)
lower = mean - tcrit * std_mean
upper = mean + tcrit * std_mean
elif alternative in ["larger", "l"]:
tcrit = stats.t.ppf(alpha, dof)
lower = mean + tcrit * std_mean
upper = np.inf
elif alternative in ["smaller", "s"]:
tcrit = stats.t.ppf(1 - alpha, dof)
lower = -np.inf
upper = mean + tcrit * std_mean
else:
raise ValueError("invalid alternative")
return lower, upper
def _zstat_generic(value1, value2, std_diff, alternative, diff=0):
"""generic (normal) z-test based on summary statistic
The test statistic is :
tstat = (value1 - value2 - diff) / std_diff
and is assumed to be normally distributed.
Parameters
----------
value1 : float or ndarray
Value, for example mean, of the first sample.
value2 : float or ndarray
Value, for example mean, of the second sample.
std_diff : float or ndarray
Standard error of the difference value1 - value2
alternative : str
The alternative hypothesis, H1, has to be one of the following
* 'two-sided' : H1: ``value1 - value2 - diff`` not equal to 0.
* 'larger' : H1: ``value1 - value2 - diff > 0``
* 'smaller' : H1: ``value1 - value2 - diff < 0``
diff : float
value of difference ``value1 - value2`` under the null hypothesis
Returns
-------
tstat : float or ndarray
Test statistic.
pvalue : float or ndarray
P-value of the hypothesis test assuming that the test statistic is
t-distributed with ``df`` degrees of freedom.
"""
zstat = (value1 - value2 - diff) / std_diff
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = stats.norm.sf(np.abs(zstat)) * 2
elif alternative in ["larger", "l"]:
pvalue = stats.norm.sf(zstat)
elif alternative in ["smaller", "s"]:
pvalue = stats.norm.cdf(zstat)
else:
raise ValueError("invalid alternative")
return zstat, pvalue
def _zstat_generic2(value, std, alternative):
"""generic (normal) z-test based on summary statistic
The test statistic is :
zstat = value / std
and is assumed to be normally distributed with standard deviation ``std``.
Parameters
----------
value : float or ndarray
Value of a sample statistic, for example mean.
value2 : float or ndarray
Value, for example mean, of the second sample.
std : float or ndarray
Standard error of the sample statistic value.
alternative : str
The alternative hypothesis, H1, has to be one of the following
* 'two-sided' : H1: ``value1 - value2 - diff`` not equal to 0.
* 'larger' : H1: ``value1 - value2 - diff > 0``
* 'smaller' : H1: ``value1 - value2 - diff < 0``
Returns
-------
zstat : float or ndarray
Test statistic.
pvalue : float or ndarray
P-value of the hypothesis test assuming that the test statistic is
normally distributed.
"""
zstat = value / std
if alternative in ["two-sided", "2-sided", "2s"]:
pvalue = stats.norm.sf(np.abs(zstat)) * 2
elif alternative in ["larger", "l"]:
pvalue = stats.norm.sf(zstat)
elif alternative in ["smaller", "s"]:
pvalue = stats.norm.cdf(zstat)
else:
raise ValueError("invalid alternative")
return zstat, pvalue
def _zconfint_generic(mean, std_mean, alpha, alternative):
"""generic normal-confint based on summary statistic
Parameters
----------
mean : float or ndarray
Value, for example mean, of the first sample.
std_mean : float or ndarray
Standard error of the difference value1 - value2
alpha : float
Significance level for the confidence interval, coverage is
``1-alpha``
alternative : str
The alternative hypothesis, H1, has to be one of the following
* 'two-sided' : H1: ``value1 - value2 - diff`` not equal to 0.
* 'larger' : H1: ``value1 - value2 - diff > 0``
* 'smaller' : H1: ``value1 - value2 - diff < 0``
Returns
-------
lower : float or ndarray
Lower confidence limit. This is -inf for the one-sided alternative
"smaller".
upper : float or ndarray
Upper confidence limit. This is inf for the one-sided alternative
"larger".
"""
if alternative in ["two-sided", "2-sided", "2s"]:
zcrit = stats.norm.ppf(1 - alpha / 2.0)
lower = mean - zcrit * std_mean
upper = mean + zcrit * std_mean
elif alternative in ["larger", "l"]:
zcrit = stats.norm.ppf(alpha)
lower = mean + zcrit * std_mean
upper = np.inf
elif alternative in ["smaller", "s"]:
zcrit = stats.norm.ppf(1 - alpha)
lower = -np.inf
upper = mean + zcrit * std_mean
else:
raise ValueError("invalid alternative")
return lower, upper
class CompareMeans(object):
    """class for two sample comparison

    The tests and the confidence interval work for multi-endpoint comparison:
    If d1 and d2 have the same number of rows, then each column of the data
    in d1 is compared with the corresponding column in d2.

    Parameters
    ----------
    d1, d2 : instances of DescrStatsW

    Notes
    -----
    The result for the statistical tests and the confidence interval are
    independent of the user specified ddof.

    TODO: Extend to any number of groups or write a version that works in that
    case, like in SAS and SPSS.
    """

    def __init__(self, d1, d2):
        """assume d1, d2 hold the relevant attributes
        """
        self.d1 = d1
        self.d2 = d2
        # assume nobs is available on both d1 and d2
        # if not hasattr(self.d1, 'nobs'):
        #     d1.nobs1 = d1.sum_weights.astype(float)  # float just to make sure
        # self.nobs2 = d2.sum_weights.astype(float)

    @classmethod
    def from_data(
        cls, data1, data2, weights1=None, weights2=None, ddof1=0, ddof2=0
    ):
        """construct a CompareMeans object from data

        Parameters
        ----------
        data1, data2 : array_like, 1-D or 2-D
            compared datasets
        weights1, weights2 : None or 1-D ndarray
            weights for each observation of data1 and data2 respectively,
            with same length as zero axis of corresponding dataset.
        ddof1, ddof2 : int
            default ddof1=0, ddof2=0, degrees of freedom for data1,
            data2 respectively.

        Returns
        -------
        A CompareMeans instance.
        """
        return cls(
            DescrStatsW(data1, weights=weights1, ddof=ddof1),
            DescrStatsW(data2, weights=weights2, ddof=ddof2),
        )

    def summary(self, use_t=True, alpha=0.05, usevar="pooled", value=0):
        """summarize the results of the hypothesis test

        Parameters
        ----------
        use_t : bool, optional
            if use_t is True, then t test results are returned
            if use_t is False, then z test results are returned
        alpha : float
            significance level for the confidence interval, coverage is
            ``1-alpha``
        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then the variance of
            Welsh ttest will be used, and the degrees of freedom are those
            of Satterthwaite if ``use_t`` is True.
        value : float
            difference between the means under the Null hypothesis.

        Returns
        -------
        smry : SimpleTable
        """
        d1 = self.d1
        d2 = self.d2

        if use_t:
            tstat, pvalue, _ = self.ttest_ind(usevar=usevar, value=value)
            lower, upper = self.tconfint_diff(alpha=alpha, usevar=usevar)
        else:
            tstat, pvalue = self.ztest_ind(usevar=usevar, value=value)
            lower, upper = self.zconfint_diff(alpha=alpha, usevar=usevar)

        if usevar == "pooled":
            std_err = self.std_meandiff_pooledvar
        else:
            std_err = self.std_meandiff_separatevar

        # summary_params expects 1-D arrays even for the single-endpoint case
        std_err = np.atleast_1d(std_err)
        tstat = np.atleast_1d(tstat)
        pvalue = np.atleast_1d(pvalue)
        lower = np.atleast_1d(lower)
        upper = np.atleast_1d(upper)
        conf_int = np.column_stack((lower, upper))
        params = np.atleast_1d(d1.mean - d2.mean - value)

        title = "Test for equality of means"
        yname = "y"  # not used in params_frame
        xname = ["subset #%d" % (ii + 1) for ii in range(tstat.shape[0])]

        from statsmodels.iolib.summary import summary_params

        return summary_params(
            (None, params, std_err, tstat, pvalue, conf_int),
            alpha=alpha,
            use_t=use_t,
            yname=yname,
            xname=xname,
            title=title,
        )

    @cache_readonly
    def std_meandiff_separatevar(self):
        """standard error of the difference in means, unequal variances
        """
        # this uses ``_var`` to use ddof=0 for the formula
        d1 = self.d1
        d2 = self.d2
        return np.sqrt(d1._var / (d1.nobs - 1) + d2._var / (d2.nobs - 1))

    @cache_readonly
    def std_meandiff_pooledvar(self):
        """standard error of the difference in means, assuming equal
        variance in both data sets
        """
        # this uses ``_var`` to use ddof=0 for the formula
        d1 = self.d1
        d2 = self.d2
        # could make var_pooled into attribute
        var_pooled = (d1.sumsquares + d2.sumsquares) / (
            # (d1.nobs - d1.ddof + d2.nobs - d2.ddof)
            d1.nobs - 1 + d2.nobs - 1
        )
        return np.sqrt(var_pooled * (1.0 / d1.nobs + 1.0 / d2.nobs))

    def dof_satt(self):
        """degrees of freedom of Satterthwaite for unequal variance
        """
        d1 = self.d1
        d2 = self.d2
        # this follows blindly the SPSS manual
        # except I use ``_var`` which has ddof=0
        sem1 = d1._var / (d1.nobs - 1)
        sem2 = d2._var / (d2.nobs - 1)
        semsum = sem1 + sem2
        z1 = (sem1 / semsum) ** 2 / (d1.nobs - 1)
        z2 = (sem2 / semsum) ** 2 / (d2.nobs - 1)
        dof = 1.0 / (z1 + z2)
        return dof

    def ttest_ind(self, alternative="two-sided", usevar="pooled", value=0):
        """ttest for the null hypothesis of identical means

        this should also be the same as onewaygls, except for ddof differences

        Parameters
        ----------
        alternative : str
            The alternative hypothesis, H1, has to be one of the following

            'two-sided': H1: difference in means not equal to value (default)
            'larger' : H1: difference in means larger than value
            'smaller' : H1: difference in means smaller than value

        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then Welsh ttest with
            Satterthwait degrees of freedom is used
        value : float
            difference between the means under the Null hypothesis.

        Returns
        -------
        tstat : float
            test statistic
        pvalue : float
            pvalue of the t-test
        df : int or float
            degrees of freedom used in the t-test

        Notes
        -----
        The result is independent of the user specified ddof.
        """
        d1 = self.d1
        d2 = self.d2

        if usevar == "pooled":
            stdm = self.std_meandiff_pooledvar
            dof = d1.nobs - 1 + d2.nobs - 1
        elif usevar == "unequal":
            stdm = self.std_meandiff_separatevar
            dof = self.dof_satt()
        else:
            raise ValueError('usevar can only be "pooled" or "unequal"')

        tstat, pval = _tstat_generic(
            d1.mean, d2.mean, stdm, dof, alternative, diff=value
        )
        return tstat, pval, dof

    def ztest_ind(self, alternative="two-sided", usevar="pooled", value=0):
        """z-test for the null hypothesis of identical means

        Parameters
        ----------
        alternative : str
            The alternative hypothesis, H1, has to be one of the following

            'two-sided': H1: difference in means not equal to value (default)
            'larger' : H1: difference in means larger than value
            'smaller' : H1: difference in means smaller than value

        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then the standard
            deviations of the samples may be different.
        value : float
            difference between the means under the Null hypothesis.

        Returns
        -------
        tstat : float
            test statistic
        pvalue : float
            pvalue of the z-test
        """
        d1 = self.d1
        d2 = self.d2

        if usevar == "pooled":
            stdm = self.std_meandiff_pooledvar
        elif usevar == "unequal":
            stdm = self.std_meandiff_separatevar
        else:
            raise ValueError('usevar can only be "pooled" or "unequal"')

        tstat, pval = _zstat_generic(
            d1.mean, d2.mean, stdm, alternative, diff=value
        )
        return tstat, pval

    def tconfint_diff(
        self, alpha=0.05, alternative="two-sided", usevar="pooled"
    ):
        """confidence interval for the difference in means, t distribution

        Parameters
        ----------
        alpha : float
            significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            This specifies the alternative hypothesis for the test that
            corresponds to the confidence interval.
            The alternative hypothesis, H1, has to be one of the following :

            'two-sided': H1: difference in means not equal to value (default)
            'larger' : H1: difference in means larger than value
            'smaller' : H1: difference in means smaller than value

        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then Welsh ttest with
            Satterthwait degrees of freedom is used
        Returns
        -------
        lower, upper : floats
            lower and upper limits of the confidence interval

        Notes
        -----
        The result is independent of the user specified ddof.
        """
        d1 = self.d1
        d2 = self.d2
        diff = d1.mean - d2.mean
        if usevar == "pooled":
            std_diff = self.std_meandiff_pooledvar
            dof = d1.nobs - 1 + d2.nobs - 1
        elif usevar == "unequal":
            std_diff = self.std_meandiff_separatevar
            dof = self.dof_satt()
        else:
            raise ValueError('usevar can only be "pooled" or "unequal"')

        res = _tconfint_generic(
            diff, std_diff, dof, alpha=alpha, alternative=alternative
        )
        return res

    def zconfint_diff(
        self, alpha=0.05, alternative="two-sided", usevar="pooled"
    ):
        """confidence interval for the difference in means, normal
        approximation

        Parameters
        ----------
        alpha : float
            significance level for the confidence interval, coverage is
            ``1-alpha``
        alternative : str
            This specifies the alternative hypothesis for the test that
            corresponds to the confidence interval.
            The alternative hypothesis, H1, has to be one of the following :

            'two-sided': H1: difference in means not equal to value (default)
            'larger' : H1: difference in means larger than value
            'smaller' : H1: difference in means smaller than value

        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then the standard
            deviations of the samples may be different.
        Returns
        -------
        lower, upper : floats
            lower and upper limits of the confidence interval

        Notes
        -----
        The result is independent of the user specified ddof.
        """
        d1 = self.d1
        d2 = self.d2
        diff = d1.mean - d2.mean
        if usevar == "pooled":
            std_diff = self.std_meandiff_pooledvar
        elif usevar == "unequal":
            std_diff = self.std_meandiff_separatevar
        else:
            raise ValueError('usevar can only be "pooled" or "unequal"')

        res = _zconfint_generic(
            diff, std_diff, alpha=alpha, alternative=alternative
        )
        return res

    def ttost_ind(self, low, upp, usevar="pooled"):
        """
        test of equivalence for two independent samples, base on t-test

        Parameters
        ----------
        low, upp : float
            equivalence interval low < m1 - m2 < upp
        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then Welsh ttest with
            Satterthwait degrees of freedom is used

        Returns
        -------
        pvalue : float
            pvalue of the non-equivalence test
        (t1, pv1), (t2, pv2) : nested tuple of floats
            test statistic and pvalue for lower and upper threshold tests
            (each is the 3-tuple returned by ``ttest_ind``)
        """
        tt1 = self.ttest_ind(alternative="larger", usevar=usevar, value=low)
        tt2 = self.ttest_ind(alternative="smaller", usevar=usevar, value=upp)
        # TODO: remove tuple return, use same as for function tost_ind
        # NOTE: unlike ``ztost_ind`` below, the two sub-results are nested
        # in a single tuple here.
        return np.maximum(tt1[1], tt2[1]), (tt1, tt2)

    def ztost_ind(self, low, upp, usevar="pooled"):
        """
        test of equivalence for two independent samples, based on z-test

        Parameters
        ----------
        low, upp : float
            equivalence interval low < m1 - m2 < upp
        usevar : str, 'pooled' or 'unequal'
            If ``pooled``, then the standard deviation of the samples is
            assumed to be the same. If ``unequal``, then the standard
            deviations of the samples may be different.

        Returns
        -------
        pvalue : float
            pvalue of the non-equivalence test
        t1, pv1 : tuple of floats
            test statistic and pvalue for lower threshold test
        t2, pv2 : tuple of floats
            test statistic and pvalue for upper threshold test
        """
        tt1 = self.ztest_ind(alternative="larger", usevar=usevar, value=low)
        tt2 = self.ztest_ind(alternative="smaller", usevar=usevar, value=upp)
        # TODO: remove tuple return, use same as for function tost_ind
        return np.maximum(tt1[1], tt2[1]), tt1, tt2

    # tost.__doc__ = tost_ind.__doc__

    # does not work for 2d, does not take weights into account
    # def test_equal_var(self):
    #     """Levene test for independence
    #     """
    #     d1 = self.d1
    #     d2 = self.d2
    #     # rewrite this, for now just use scipy.stats
    #     return stats.levene(d1.data, d2.data)
def ttest_ind(
    x1,
    x2,
    alternative="two-sided",
    usevar="pooled",
    weights=(None, None),
    value=0,
):
    """t-test for two independent samples.

    Convenience wrapper around ``CompareMeans`` that discards the
    intermediate objects. Compared to scipy.stats it drops the axis option
    but adds ``alternative``, ``usevar`` and ``weights``.

    Parameters
    ----------
    x1 : array_like, 1-D or 2-D
        first of the two independent samples
    x2 : array_like, 1-D or 2-D
        second of the two independent samples
    alternative : str
        The alternative hypothesis, H1; one of

        * 'two-sided' (default): H1: difference in means not equal to value
        * 'larger' : H1: difference in means larger than value
        * 'smaller' : H1: difference in means smaller than value
    usevar : str, 'pooled' or 'unequal'
        If ``pooled``, the samples are assumed to share one standard
        deviation. If ``unequal``, a Welch t-test with Satterthwaite
        degrees of freedom is used.
    weights : tuple of None or ndarrays
        Case weights for the two samples; see ``DescrStatsW``.
    value : float
        difference between the means under the null hypothesis.

    Returns
    -------
    tstat : float
        test statistic
    pvalue : float
        pvalue of the t-test
    df : int or float
        degrees of freedom used in the t-test
    """
    sample1 = DescrStatsW(x1, weights=weights[0], ddof=0)
    sample2 = DescrStatsW(x2, weights=weights[1], ddof=0)
    # Delegate to the two-sample comparison class; it returns
    # (tstat, pvalue, dof) directly.
    return CompareMeans(sample1, sample2).ttest_ind(
        alternative=alternative, usevar=usevar, value=value
    )
def ttost_ind(
    x1, x2, low, upp, usevar="pooled", weights=(None, None), transform=None
):
    """Two one-sided t-tests (TOST) of equivalence for independent samples.

    Null hypothesis: m1 - m2 < low or m1 - m2 > upp
    Alternative:     low < m1 - m2 < upp

    where m1, m2 are the expected values of the two samples. A small
    p-value (e.g. below 0.05) rejects the hypothesis that the difference
    lies outside the ``(low, upp)`` interval.

    Parameters
    ----------
    x1 : array_like, 1-D or 2-D
        first of the two independent samples
    x2 : array_like, 1-D or 2-D
        second of the two independent samples
    low, upp : float
        equivalence interval low < m1 - m2 < upp
    usevar : str, 'pooled' or 'unequal'
        If ``pooled``, the samples are assumed to share one standard
        deviation. If ``unequal``, a Welch t-test with Satterthwaite
        degrees of freedom is used.
    weights : tuple of None or ndarrays
        Case weights for the two samples; see ``DescrStatsW``.
    transform : None or function
        If given, sample data and both thresholds are transformed before
        testing. With ``np.log`` the equivalence interval becomes a ratio:
        low < m1 / m2 < upp.

    Returns
    -------
    pvalue : float
        pvalue of the non-equivalence test
    t1, pv1 : tuple of floats
        test statistic and pvalue for lower threshold test
    t2, pv2 : tuple of floats
        test statistic and pvalue for upper threshold test

    Notes
    -----
    The test rejects if the 2*alpha confidence interval for the difference
    is contained in the ``(low, upp)`` interval. Works column-wise for
    multi-endpoint data; no multiple-comparison correction is applied.
    """
    if transform:
        if transform is np.log:
            # elementwise transform; avoid the concatenate round-trip
            x1 = transform(x1)
            x2 = transform(x2)
        else:
            # transforms like rankdata need both datasets at once;
            # concatenate works for stacking 1d and 2d arrays
            n1 = len(x1)
            combined = transform(np.concatenate((x1, x2), 0))
            x1 = combined[:n1]
            x2 = combined[n1:]
        low = transform(low)
        upp = transform(upp)
    cm = CompareMeans(
        DescrStatsW(x1, weights=weights[0], ddof=0),
        DescrStatsW(x2, weights=weights[1], ddof=0),
    )
    pval, (res_low, res_upp) = cm.ttost_ind(low, upp, usevar=usevar)
    return pval, res_low, res_upp
def ttost_paired(x1, x2, low, upp, transform=None, weights=None):
    """Two one-sided t-tests (TOST) of equivalence for paired samples.

    Null hypothesis: md < low or md > upp
    Alternative:     low < md < upp

    where md is the expected value of the difference x1 - x2. A small
    p-value (e.g. below 0.05) rejects the hypothesis that the mean
    difference lies outside the ``(low, upp)`` interval.

    Parameters
    ----------
    x1 : array_like
        first sample
    x2 : array_like
        second sample, paired with x1
    low, upp : float
        equivalence interval low < mean of difference < upp
    weights : None or ndarray
        case weights; see ``DescrStatsW``
    transform : None or function
        If given, sample data and both thresholds are transformed before
        testing. With ``np.log`` the equivalence interval becomes a ratio:
        low < x1 / x2 < upp.

    Returns
    -------
    pvalue : float
        pvalue of the non-equivalence test
    t1, pv1, df1 : tuple
        test statistic, pvalue and degrees of freedom for lower threshold
        test
    t2, pv2, df2 : tuple
        test statistic, pvalue and degrees of freedom for upper threshold
        test
    """
    if transform:
        if transform is np.log:
            # elementwise transform; avoid the concatenate round-trip
            x1 = transform(x1)
            x2 = transform(x2)
        else:
            # transforms like rankdata need both datasets at once;
            # concatenate works for stacking 1d and 2d arrays
            n1 = len(x1)
            combined = transform(np.concatenate((x1, x2), 0))
            x1 = combined[:n1]
            x2 = combined[n1:]
        low = transform(low)
        upp = transform(upp)
    diffs = DescrStatsW(x1 - x2, weights=weights, ddof=0)
    res_low = diffs.ttest_mean(low, alternative="larger")
    res_upp = diffs.ttest_mean(upp, alternative="smaller")
    # overall pvalue is the larger of the two one-sided pvalues
    return np.maximum(res_low[1], res_upp[1]), res_low, res_upp
def ztest(
    x1, x2=None, value=0, alternative="two-sided", usevar="pooled", ddof=1.0
):
    """Normal-theory test for one or two sample means.

    In the two sample case the samples are assumed to be independent.

    Parameters
    ----------
    x1 : array_like, 1-D or 2-D
        first sample
    x2 : array_like, 1-D or 2-D, or None
        optional second sample; if None a one-sample test is performed
    value : float
        One sample: the mean of x1 under the null hypothesis.
        Two samples: the difference of the means under the null; the
        statistic is ``x1_mean - x2_mean - value``.
    alternative : str
        The alternative hypothesis, H1; one of 'two-sided' (default),
        'larger' or 'smaller'.
    usevar : str, 'pooled'
        Only 'pooled' is implemented: the samples are assumed to share one
        standard deviation. See CompareMeans.ztest_ind for alternatives.
    ddof : int
        degrees of freedom used when estimating the variance of the mean;
        one for means, adjustable for other statistics.

    Returns
    -------
    tstat : float
        test statistic
    pvalue : float
        pvalue of the z-test

    Notes
    -----
    ``usevar`` is always pooled in the two sample case; use CompareMeans
    for other options.
    """
    # TODO: this should delegate to CompareMeans like ttest_ind,
    # but CompareMeans does not implement ddof.
    if usevar != "pooled":
        raise NotImplementedError('only usevar="pooled" is implemented')

    x1 = np.asarray(x1)
    nobs1 = x1.shape[0]
    x1_mean = x1.mean(0)
    x1_var = x1.var(0)

    if x2 is None:
        # one-sample test against ``value``
        x2_mean = 0
        var_pooled = x1_var / (nobs1 - ddof)
    else:
        x2 = np.asarray(x2)
        nobs2 = x2.shape[0]
        x2_mean = x2.mean(0)
        var_pooled = (nobs1 * x1_var + nobs2 * x2.var(0)) / (
            nobs1 + nobs2 - 2 * ddof
        )
        var_pooled = var_pooled * (1.0 / nobs1 + 1.0 / nobs2)

    return _zstat_generic(
        x1_mean, x2_mean, np.sqrt(var_pooled), alternative, diff=value
    )
def zconfint(
    x1,
    x2=None,
    value=0,
    alpha=0.05,
    alternative="two-sided",
    usevar="pooled",
    ddof=1.0,
):
    """Confidence interval based on the normal distribution (z-test).

    Parameters
    ----------
    x1 : array_like, 1-D or 2-D
        first sample
    x2 : array_like, 1-D or 2-D, or None
        optional second sample; if None a one-sample interval is computed
    value : float
        One sample: the mean of x1 under the null hypothesis.
        Two samples: the difference of the means under the null; the
        interval is centered at ``x1_mean - x2_mean - value``.
    alpha : float
        significance level; the interval has coverage ``1 - alpha``
    alternative : str
        One of 'two-sided' (default), 'larger' or 'smaller'.
    usevar : str, 'pooled'
        Only 'pooled' is implemented: the samples are assumed to share one
        standard deviation. See CompareMeans.ztest_ind for alternatives.
    ddof : int
        degrees of freedom used when estimating the variance of the mean;
        one for means, adjustable for other statistics.

    Returns
    -------
    lower, upper : floats or ndarrays
        limits of the confidence interval

    Notes
    -----
    checked only for the one sample case; ``usevar`` is always pooled in
    the two sample case.

    See Also
    --------
    ztest
    CompareMeans
    """
    # mostly duplicate code from ztest; usevar is not used, always pooled
    if usevar != "pooled":
        raise NotImplementedError('only usevar="pooled" is implemented')

    x1 = np.asarray(x1)
    nobs1 = x1.shape[0]
    x1_mean = x1.mean(0)
    x1_var = x1.var(0)

    if x2 is None:
        x2_mean = 0
        var_pooled = x1_var / (nobs1 - ddof)
    else:
        x2 = np.asarray(x2)
        nobs2 = x2.shape[0]
        x2_mean = x2.mean(0)
        var_pooled = (nobs1 * x1_var + nobs2 * x2.var(0)) / (
            nobs1 + nobs2 - 2 * ddof
        )
        var_pooled = var_pooled * (1.0 / nobs1 + 1.0 / nobs2)

    return _zconfint_generic(
        x1_mean - x2_mean - value, np.sqrt(var_pooled), alpha, alternative
    )
def ztost(x1, low, upp, x2=None, usevar="pooled", ddof=1.0):
    """Two one-sided z-tests (TOST) of equivalence, normal distribution.

    Parameters
    ----------
    x1 : array_like
        one sample, or first of two independent samples
    low, upp : float
        equivalence interval low < m1 - m2 < upp
    x2 : array_like or None
        second sample for a two-sample test; if None, a one-sample test is
        performed.
    usevar : str, 'pooled'
        Only 'pooled' is implemented: the samples are assumed to share one
        standard deviation.
    ddof : float
        degrees-of-freedom correction passed through to ``ztest``.

    Returns
    -------
    pvalue : float
        pvalue of the non-equivalence test
    t1, pv1 : tuple of floats
        test statistic and pvalue for lower threshold test
    t2, pv2 : tuple of floats
        test statistic and pvalue for upper threshold test

    Notes
    -----
    checked only for the one sample case
    """
    lower_test = ztest(
        x1, x2, alternative="larger", usevar=usevar, value=low, ddof=ddof
    )
    upper_test = ztest(
        x1, x2, alternative="smaller", usevar=usevar, value=upp, ddof=ddof
    )
    # overall pvalue is the larger of the two one-sided pvalues
    return np.maximum(lower_test[1], upper_test[1]), lower_test, upper_test
| {
"content_hash": "5026f8e75d9e0b17ae458e133bfaf56d",
"timestamp": "",
"source": "github",
"line_count": 1677,
"max_line_length": 123,
"avg_line_length": 32.932617769827075,
"alnum_prop": 0.5911494169624104,
"repo_name": "jseabold/statsmodels",
"id": "3f469f4745c6c35a183f0a0707c83b3261801789",
"size": "55228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/stats/weightstats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
import abc
from concurrent import futures
import six
from taskflow.utils import async_utils
from taskflow.utils import misc
from taskflow.utils import threading_utils
# Execution and reversion events.
# Outcome tags used as the second element of the (task, event, result)
# tuples returned by the _execute_task / _revert_task helpers below.
EXECUTED = 'executed'
REVERTED = 'reverted'
def _execute_task(task, arguments, progress_callback):
    """Execute a task and capture its outcome.

    Runs ``task.execute(**arguments)`` with ``progress_callback`` bound to
    the task's 'update_progress' notification for the duration of the call.
    Any exception is captured into a ``misc.Failure`` instead of being
    raised, so the outcome can be transported back to the caller.

    Returns a ``(task, EXECUTED, result)`` tuple.
    """
    with task.autobind('update_progress', progress_callback):
        try:
            outcome = task.execute(**arguments)
        except Exception:
            # Wrap the active exception in a Failure object and return it
            # rather than letting it propagate.
            outcome = misc.Failure()
    return (task, EXECUTED, outcome)
def _revert_task(task, arguments, result, failures, progress_callback):
    """Revert a task and capture its outcome.

    Calls ``task.revert`` with the original arguments plus the prior
    ``result`` and the flow's ``failures``, binding ``progress_callback``
    to the task's 'update_progress' notification for the duration of the
    call. Any exception is captured into a ``misc.Failure`` instead of
    being raised.

    Returns a ``(task, REVERTED, result)`` tuple.
    """
    revert_kwargs = dict(arguments)
    revert_kwargs['result'] = result
    revert_kwargs['flow_failures'] = failures
    with task.autobind('update_progress', progress_callback):
        try:
            outcome = task.revert(**revert_kwargs)
        except Exception:
            # Wrap the active exception in a Failure object and return it
            # rather than letting it propagate.
            outcome = misc.Failure()
    return (task, REVERTED, outcome)
# ``six.add_metaclass`` applies ABCMeta in a Python 2/3 compatible way.
@six.add_metaclass(abc.ABCMeta)
class TaskExecutorBase(object):
    """Executes and reverts tasks.

    This class takes task and its arguments and executes or reverts it.
    It encapsulates knowledge on how task should be executed or reverted:
    right now, on separate thread, on another machine, etc.

    Concrete implementations return future-like objects from
    ``execute_task``/``revert_task`` whose results are the
    ``(task, event, result)`` tuples produced by the module-level helpers.
    """
    @abc.abstractmethod
    def execute_task(self, task, task_uuid, arguments, progress_callback=None):
        """Schedules task execution."""
    @abc.abstractmethod
    def revert_task(self, task, task_uuid, arguments, result, failures,
                    progress_callback=None):
        """Schedules task reversion."""
    @abc.abstractmethod
    def wait_for_any(self, fs, timeout=None):
        """Wait for futures returned by this executor to complete."""
    def start(self):
        """Prepare to execute tasks."""
        # Default is a no-op; subclasses may allocate resources here.
        pass
    def stop(self):
        """Finalize task executor."""
        # Default is a no-op; subclasses may release resources here.
        pass
class SerialTaskExecutor(TaskExecutorBase):
    """Execute tasks one after another, inline in the calling thread."""
    def execute_task(self, task, task_uuid, arguments, progress_callback=None):
        """Run the task immediately; return an already-completed future."""
        outcome = _execute_task(task, arguments, progress_callback)
        return async_utils.make_completed_future(outcome)
    def revert_task(self, task, task_uuid, arguments, result, failures,
                    progress_callback=None):
        """Revert the task immediately; return an already-completed future."""
        outcome = _revert_task(task, arguments, result, failures,
                               progress_callback)
        return async_utils.make_completed_future(outcome)
    def wait_for_any(self, fs, timeout=None):
        """Return all futures as done; this executor never leaves any pending."""
        return fs, []
class ParallelTaskExecutor(TaskExecutorBase):
    """Executes tasks in parallel.

    Submits tasks to an executor that provides an interface similar to
    ``concurrent.futures.Executor``. If no executor is supplied, a
    thread pool is created on ``start`` and torn down on ``stop``.
    """
    def __init__(self, executor=None):
        self._executor = executor
        # Remember whether we own the executor's lifecycle; only an
        # executor we created ourselves is shut down in stop().
        self._own_executor = executor is None
    def execute_task(self, task, task_uuid, arguments, progress_callback=None):
        """Submit the task for execution; return the resulting future."""
        return self._executor.submit(_execute_task, task, arguments,
                                     progress_callback)
    def revert_task(self, task, task_uuid, arguments, result, failures,
                    progress_callback=None):
        """Submit the task for reversion; return the resulting future."""
        return self._executor.submit(_revert_task, task, arguments, result,
                                     failures, progress_callback)
    def wait_for_any(self, fs, timeout=None):
        """Block until at least one of the given futures completes."""
        return async_utils.wait_for_any(fs, timeout)
    def start(self):
        """Create the owned thread pool (no-op for a provided executor)."""
        if self._own_executor:
            workers = threading_utils.get_optimal_thread_count()
            self._executor = futures.ThreadPoolExecutor(workers)
    def stop(self):
        """Shut down the owned thread pool (no-op for a provided executor)."""
        if self._own_executor:
            self._executor.shutdown(wait=True)
            self._executor = None
| {
"content_hash": "db97209bb68dedf1b53c7982bbfb30e6",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.6493871297242084,
"repo_name": "varunarya10/taskflow",
"id": "846cc56844eb69ca9aa0c89de32e91a657135ebb",
"size": "4573",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taskflow/engines/action_engine/executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "734087"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from future.utils import with_metaclass
from future.builtins import zip
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
from skbio.stats._misc import _pprint_strs
from skbio.util._decorator import experimental
class SkbioObject(with_metaclass(ABCMeta, object)):
    """Abstract base class defining core API common to all scikit-bio objects.
    Public scikit-bio classes should subclass this class to ensure a common,
    core API is present. All abstract methods and properties defined here must
    be implemented in subclasses, otherwise they will not be instantiable.
    """
    # Subclasses must provide a human-readable summary of themselves.
    @abstractmethod
    def __str__(self):
        pass
class OrdinationResults(SkbioObject):
    """Store ordination results, providing serialization and plotting support.
    Stores various components of ordination results. Provides methods for
    serializing/deserializing results, as well as generation of basic
    matplotlib 3-D scatterplots. Will automatically display PNG/SVG
    representations of itself within the IPython Notebook.
    Attributes
    ----------
    short_method_name : str
        Abbreviated ordination method name.
    long_method_name : str
        Ordination method name.
    eigvals : pd.Series
        The resulting eigenvalues. The index corresponds to the ordination
        axis labels
    samples : pd.DataFrame
        The position of the samples in the ordination space, row-indexed by the
        sample id.
    features : pd.DataFrame
        The position of the features in the ordination space, row-indexed by
        the feature id.
    biplot_scores : pd.DataFrame
        Correlation coefficients of the samples with respect to the features.
    sample_constraints : pd.DataFrame
        Site constraints (linear combinations of constraining variables):
        coordinates of the sites in the space of the explanatory variables X.
        These are the fitted site scores
    proportion_explained : pd.Series
        Proportion explained by each of the dimensions in the ordination space.
        The index corresponds to the ordination axis labels
    png
    svg
    See Also
    --------
    ca
    cca
    pcoa
    rda
    """
    # Format name used by default when serializing this object.
    default_write_format = 'ordination'
    @experimental(as_of="0.4.0")
    def __init__(self, short_method_name, long_method_name, eigvals,
                 samples, features=None, biplot_scores=None,
                 sample_constraints=None, proportion_explained=None):
        # All optional components default to None and render as 'N/A' in
        # the string representation (see _format_attribute).
        self.short_method_name = short_method_name
        self.long_method_name = long_method_name
        self.eigvals = eigvals
        self.samples = samples
        self.features = features
        self.biplot_scores = biplot_scores
        self.sample_constraints = sample_constraints
        self.proportion_explained = proportion_explained
    @experimental(as_of="0.4.0")
    def __str__(self):
        """Return a string representation of the ordination results.
        String representation lists ordination results attributes and indicates
        whether or not they are present. If an attribute is present, its
        dimensions are listed. A truncated list of features and sample IDs are
        included (if they are present).
        Returns
        -------
        str
            String representation of the ordination results.
        .. shownumpydoc
        """
        lines = ['Ordination results:']
        method = '%s (%s)' % (self.long_method_name, self.short_method_name)
        lines.append(self._format_attribute(method, 'Method', str))
        attrs = [(self.eigvals, 'Eigvals'),
                 (self.proportion_explained, 'Proportion explained'),
                 (self.features, 'Features'),
                 (self.samples, 'Samples'),
                 (self.biplot_scores, 'Biplot Scores'),
                 (self.sample_constraints, 'Sample constraints')]
        for attr, attr_label in attrs:
            # Present array-likes as e.g. '4x3' using their .shape.
            def formatter(e):
                return 'x'.join(['%d' % s for s in e.shape])
            lines.append(self._format_attribute(attr, attr_label, formatter))
        lines.append(self._format_attribute(
            self.features, 'Feature IDs',
            lambda e: _pprint_strs(e.index.tolist())))
        lines.append(self._format_attribute(
            self.samples, 'Sample IDs',
            lambda e: _pprint_strs(e.index.tolist())))
        return '\n'.join(lines)
    @experimental(as_of="0.4.0")
    def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
             title='', cmap=None, s=20):
        """Create a 3-D scatterplot of ordination results colored by metadata.
        Creates a 3-D scatterplot of the ordination results, where each point
        represents a sample. Optionally, these points can be colored by
        metadata (see `df` and `column` below).
        Parameters
        ----------
        df : pd.DataFrame, optional
            ``DataFrame`` containing sample metadata. Must be indexed by sample
            ID, and all sample IDs in the ordination results must exist in the
            ``DataFrame``. If ``None``, samples (i.e., points) will not be
            colored by metadata.
        column : str, optional
            Column name in `df` to color samples (i.e., points in the plot) by.
            Cannot have missing data (i.e., ``np.nan``). `column` can be
            numeric or categorical. If numeric, all values in the column will
            be cast to ``float`` and mapped to colors using `cmap`. A colorbar
            will be included to serve as a legend. If categorical (i.e., not
            all values in `column` could be cast to ``float``), colors will be
            chosen for each category using evenly-spaced points along `cmap`. A
            legend will be included. If ``None``, samples (i.e., points) will
            not be colored by metadata.
        axes : iterable of int, optional
            Indices of sample coordinates to plot on the x-, y-, and z-axes.
            For example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
            PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
            Must contain exactly three elements.
        axis_labels : iterable of str, optional
            Labels for the x-, y-, and z-axes. If ``None``, labels will be the
            values of `axes` cast as strings.
        title : str, optional
            Plot title.
        cmap : str or matplotlib.colors.Colormap, optional
            Name or instance of matplotlib colormap to use for mapping `column`
            values to colors. If ``None``, defaults to the colormap specified
            in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
            are recommended for categorical data, while sequential colormaps
            (e.g., ``Greys``) are recommended for numeric data. See [1]_ for
            these colormap classifications.
        s : scalar or iterable of scalars, optional
            Size of points. See matplotlib's ``Axes3D.scatter`` documentation
            for more details.
        Returns
        -------
        matplotlib.figure.Figure
            Figure containing the scatterplot and legend/colorbar if metadata
            were provided.
        Raises
        ------
        ValueError
            Raised on invalid input, including the following situations:
            - there are not at least three dimensions to plot
            - there are not exactly three values in `axes`, they are not
              unique, or are out of range
            - there are not exactly three values in `axis_labels`
            - either `df` or `column` is provided without the other
            - `column` is not in the ``DataFrame``
            - sample IDs in the ordination results are not in `df` or have
              missing data in `column`
        See Also
        --------
        mpl_toolkits.mplot3d.Axes3D.scatter
        Notes
        -----
        This method creates basic plots of ordination results, and is intended
        to provide a quick look at the results in the context of metadata
        (e.g., from within the IPython Notebook). For more customization and to
        generate publication-quality figures, we recommend EMPeror [2]_.
        References
        ----------
        .. [1] http://matplotlib.org/examples/color/colormaps_reference.html
        .. [2] EMPeror: a tool for visualizing high-throughput microbial
           community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
           Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
        Examples
        --------
        .. plot::
            Define a distance matrix with four samples labelled A-D:
            >>> from skbio import DistanceMatrix
            >>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
            ...                      [0.21712454, 0., 0.45995501, 0.80332382],
            ...                      [0.5007512, 0.45995501, 0., 0.65463348],
            ...                      [0.91769271, 0.80332382, 0.65463348, 0.]],
            ...                     ['A', 'B', 'C', 'D'])
            Define metadata for each sample in a ``pandas.DataFrame``:
            >>> import pandas as pd
            >>> metadata = {
            ...     'A': {'body_site': 'skin'},
            ...     'B': {'body_site': 'gut'},
            ...     'C': {'body_site': 'gut'},
            ...     'D': {'body_site': 'skin'}}
            >>> df = pd.DataFrame.from_dict(metadata, orient='index')
            Run principal coordinate analysis (PCoA) on the distance matrix:
            >>> from skbio.stats.ordination import pcoa
            >>> pcoa_results = pcoa(dm)
            Plot the ordination results, where each sample is colored by body
            site (a categorical variable):
            >>> fig = pcoa_results.plot(df=df, column='body_site',
            ...                         title='Samples colored by body site',
            ...                         cmap='Set1', s=50)
        """
        # Note: New features should not be added to this method and should
        # instead be added to EMPeror (http://biocore.github.io/emperor/).
        # Only bug fixes and minor updates should be made to this method.
        # Coordinates transposed so each row is one ordination axis.
        coord_matrix = self.samples.values.T
        self._validate_plot_axes(coord_matrix, axes)
        # derived from
        # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        xs = coord_matrix[axes[0]]
        ys = coord_matrix[axes[1]]
        zs = coord_matrix[axes[2]]
        point_colors, category_to_color = self._get_plot_point_colors(
            df, column, self.samples.index, cmap)
        scatter_fn = partial(ax.scatter, xs, ys, zs, s=s)
        if point_colors is None:
            plot = scatter_fn()
        else:
            plot = scatter_fn(c=point_colors, cmap=cmap)
        if axis_labels is None:
            axis_labels = ['%d' % axis for axis in axes]
        elif len(axis_labels) != 3:
            raise ValueError("axis_labels must contain exactly three elements "
                             "(found %d elements)." % len(axis_labels))
        ax.set_xlabel(axis_labels[0])
        ax.set_ylabel(axis_labels[1])
        ax.set_zlabel(axis_labels[2])
        # Tick labels are suppressed; only the axis labels are shown.
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.set_title(title)
        # create legend/colorbar
        if point_colors is not None:
            if category_to_color is None:
                # Numeric metadata: continuous colorbar serves as legend.
                fig.colorbar(plot)
            else:
                self._plot_categorical_legend(ax, category_to_color)
        return fig
    def _validate_plot_axes(self, coord_matrix, axes):
        """Validate `axes` against coordinates matrix."""
        num_dims = coord_matrix.shape[0]
        if num_dims < 3:
            raise ValueError("At least three dimensions are required to plot "
                             "ordination results. There are only %d "
                             "dimension(s)." % num_dims)
        if len(axes) != 3:
            raise ValueError("`axes` must contain exactly three elements "
                             "(found %d elements)." % len(axes))
        if len(set(axes)) != 3:
            raise ValueError("The values provided for `axes` must be unique.")
        for idx, axis in enumerate(axes):
            if axis < 0 or axis >= num_dims:
                raise ValueError("`axes[%d]` must be >= 0 and < %d." %
                                 (idx, num_dims))
    def _get_plot_point_colors(self, df, column, ids, cmap):
        """Return a list of colors for each plot point given a metadata column.
        If `column` is categorical, additionally returns a dictionary mapping
        each category (str) to color (used for legend creation).
        """
        # df and column must be supplied together (or both omitted).
        if ((df is None and column is not None) or (df is not None and
                                                    column is None)):
            raise ValueError("Both df and column must be provided, or both "
                            "must be None.")
        elif df is None and column is None:
            point_colors, category_to_color = None, None
        else:
            if column not in df:
                raise ValueError("Column '%s' not in data frame." % column)
            col_vals = df.loc[ids, column]
            # .loc produces NaN rows for IDs missing from df, so this also
            # catches samples absent from the metadata.
            if col_vals.isnull().any():
                raise ValueError("One or more IDs in the ordination results "
                                 "are not in the data frame, or there is "
                                 "missing data in the data frame's '%s' "
                                 "column." % column)
            category_to_color = None
            try:
                point_colors = col_vals.astype(float)
            except ValueError:
                # we have categorical data, so choose a color for each
                # category, where colors are evenly spaced across the
                # colormap.
                # derived from http://stackoverflow.com/a/14887119
                categories = col_vals.unique()
                cmap = plt.get_cmap(cmap)
                category_colors = cmap(np.linspace(0, 1, len(categories)))
                category_to_color = dict(zip(categories, category_colors))
                point_colors = col_vals.apply(lambda x: category_to_color[x])
            point_colors = point_colors.tolist()
        return point_colors, category_to_color
    def _plot_categorical_legend(self, ax, color_dict):
        """Add legend to plot using specified mapping of category to color."""
        # derived from http://stackoverflow.com/a/20505720
        # Invisible proxy artists stand in for the scatter points so the
        # legend can show one marker per category.
        proxies = []
        labels = []
        for category in color_dict:
            proxy = mpl.lines.Line2D([0], [0], linestyle='none',
                                     c=color_dict[category], marker='o')
            proxies.append(proxy)
            labels.append(category)
        # place legend outside of the axes (centered)
        # derived from http://matplotlib.org/users/legend_guide.html
        # loc=6 is 'center left' in matplotlib's location codes.
        ax.legend(proxies, labels, numpoints=1, loc=6,
                  bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
    # Here we define the special repr methods that provide the IPython display
    # protocol. Code derived from:
    #     https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
    #         Custom%20Display%20Logic.ipynb
    # See licenses/ipython.txt for more details.
    def _repr_png_(self):
        return self._figure_data('png')
    def _repr_svg_(self):
        return self._figure_data('svg')
    # We expose the above reprs as properties, so that the user can see them
    # directly (since otherwise the client dictates which one it shows by
    # default)
    @property
    @experimental(as_of="0.4.0")
    def png(self):
        """Display basic 3-D scatterplot in IPython Notebook as PNG."""
        return Image(self._repr_png_(), embed=True)
    @property
    @experimental(as_of="0.4.0")
    def svg(self):
        """Display basic 3-D scatterplot in IPython Notebook as SVG."""
        return SVG(self._repr_svg_())
    def _figure_data(self, format):
        # Render the default plot and serialize it in the requested format.
        fig = self.plot()
        data = print_figure(fig, format)
        # We MUST close the figure, otherwise IPython's display machinery
        # will pick it up and send it as output, resulting in a double display
        plt.close(fig)
        return data
    def _format_attribute(self, attr, attr_label, formatter):
        """Return a tab-indented 'label: value' line; 'N/A' when absent."""
        if attr is None:
            formatted_attr = 'N/A'
        else:
            formatted_attr = formatter(attr)
        return '\t%s: %s' % (attr_label, formatted_attr)
| {
"content_hash": "cdf11512b956f02c7e757b9ddbc3d466",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 79,
"avg_line_length": 40.54761904761905,
"alnum_prop": 0.5904286553141515,
"repo_name": "jairideout/scikit-bio",
"id": "09cc3fbf7a4619b3d13fb700fa88e24697396df3",
"size": "17384",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "skbio/_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "1054"
},
{
"name": "Python",
"bytes": "2262551"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from genericfile.models import FileStore
# Expose FileStore in the Django admin with the default ModelAdmin options.
admin.site.register(FileStore) | {
"content_hash": "5dba0ec38b927c6b74c6e3fd5c1a0ca9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 26.25,
"alnum_prop": 0.8571428571428571,
"repo_name": "djangothon/django-generic-file",
"id": "42299dec203fff4a5e7f7ae56fca4a0b5b54eb30",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genericfile/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2711"
},
{
"name": "HTML",
"bytes": "2128"
},
{
"name": "JavaScript",
"bytes": "35807"
},
{
"name": "Python",
"bytes": "7771"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
from ..utils import build_absolute_uri
from .. import app_settings as allauth_app_settings
from . import app_settings
from . import signals
from .utils import user_email
from .managers import EmailAddressManager, EmailConfirmationManager
from .adapter import get_adapter
@python_2_unicode_compatible
class EmailAddress(models.Model):
    """An e-mail address attached to a user, with verified/primary flags."""
    user = models.ForeignKey(allauth_app_settings.USER_MODEL,
                             verbose_name=_('user'))
    # Uniqueness is global or per-user depending on app settings; see Meta.
    email = models.EmailField(unique=app_settings.UNIQUE_EMAIL,
                              verbose_name=_('e-mail address'))
    verified = models.BooleanField(verbose_name=_('verified'), default=False)
    primary = models.BooleanField(verbose_name=_('primary'), default=False)
    objects = EmailAddressManager()
    class Meta:
        verbose_name = _("email address")
        verbose_name_plural = _("email addresses")
        # When addresses are not globally unique, enforce uniqueness
        # per (user, email) pair instead.
        if not app_settings.UNIQUE_EMAIL:
            unique_together = [("user", "email")]
    def __str__(self):
        return "%s (%s)" % (self.email, self.user)
    def set_as_primary(self, conditional=False):
        """Make this address the user's primary one.

        When ``conditional`` is True and another primary address already
        exists, do nothing and return False; otherwise demote the old
        primary, promote this one, sync the user's e-mail field, and
        return True.
        """
        old_primary = EmailAddress.objects.get_primary(self.user)
        if old_primary:
            if conditional:
                return False
            old_primary.primary = False
            old_primary.save()
        self.primary = True
        self.save()
        # Mirror the primary address onto the user model itself.
        user_email(self.user, self.email)
        self.user.save()
        return True
    def send_confirmation(self, request, signup=False):
        """Create and send a confirmation e-mail; returns the confirmation."""
        confirmation = EmailConfirmation.create(self)
        confirmation.send(request, signup=signup)
        return confirmation
    def change(self, request, new_email, confirm=True):
        """
        Given a new email address, change self and re-confirm.
        """
        # NOTE(review): transaction.commit_on_success was deprecated in
        # Django 1.6 and removed in 1.8 (replaced by transaction.atomic) —
        # this code targets an older Django; confirm before upgrading.
        with transaction.commit_on_success():
            user_email(self.user, new_email)
            self.user.save()
            self.email = new_email
            # Changing the address invalidates any prior verification.
            self.verified = False
            self.save()
            if confirm:
                self.send_confirmation(request)
@python_2_unicode_compatible
class EmailConfirmation(models.Model):
    """A one-off confirmation key sent to verify an EmailAddress."""
    email_address = models.ForeignKey(EmailAddress,
                                      verbose_name=_('e-mail address'))
    created = models.DateTimeField(verbose_name=_('created'),
                                   default=timezone.now)
    # Set when the confirmation e-mail is actually dispatched (see send()).
    sent = models.DateTimeField(verbose_name=_('sent'), null=True)
    key = models.CharField(verbose_name=_('key'), max_length=64, unique=True)
    objects = EmailConfirmationManager()
    class Meta:
        verbose_name = _("email confirmation")
        verbose_name_plural = _("email confirmations")
    def __str__(self):
        return "confirmation for %s" % self.email_address
    @classmethod
    def create(cls, email_address):
        """Create a confirmation with a fresh random lowercase key."""
        key = get_random_string(64).lower()
        return cls._default_manager.create(email_address=email_address,
                                           key=key)
    def key_expired(self):
        """Return True when the expiry window since sending has elapsed.

        NOTE(review): ``sent`` is nullable; calling this before send()
        would raise TypeError on the None arithmetic — confirm callers
        only use it after dispatch.
        """
        expiration_date = self.sent \
            + datetime.timedelta(days=app_settings
                                 .EMAIL_CONFIRMATION_EXPIRE_DAYS)
        return expiration_date <= timezone.now()
    # Render as a boolean icon in the Django admin changelist.
    key_expired.boolean = True
    def confirm(self, request):
        """Mark the address verified if the key is valid and still needed.

        Returns the confirmed EmailAddress, or None (implicitly) when the
        key has expired or the address was already verified.
        """
        if not self.key_expired() and not self.email_address.verified:
            email_address = self.email_address
            get_adapter().confirm_email(request, email_address)
            signals.email_confirmed.send(sender=self.__class__,
                                         request=request,
                                         email_address=email_address)
            return email_address
    def send(self, request, signup=False, **kwargs):
        """E-mail the confirmation link and record the send time.

        ``site`` may be passed via kwargs to override the current Site;
        ``signup`` selects the signup-specific e-mail template.
        """
        current_site = kwargs["site"] if "site" in kwargs \
            else Site.objects.get_current()
        activate_url = reverse("account_confirm_email", args=[self.key])
        activate_url = build_absolute_uri(request,
                                          activate_url,
                                          protocol=app_settings.DEFAULT_HTTP_PROTOCOL)
        ctx = {
            "user": self.email_address.user,
            "activate_url": activate_url,
            "current_site": current_site,
            "key": self.key,
        }
        if signup:
            email_template = 'account/email/email_confirmation_signup'
        else:
            email_template = 'account/email/email_confirmation'
        get_adapter().send_mail(email_template,
                                self.email_address.email,
                                ctx)
        self.sent = timezone.now()
        self.save()
        signals.email_confirmation_sent.send(sender=self.__class__,
                                             confirmation=self)
| {
"content_hash": "dfc045eae30b6ced608e9e068d2ad13a",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 86,
"avg_line_length": 37.00709219858156,
"alnum_prop": 0.596780375622844,
"repo_name": "tejesh95/Zubio.in",
"id": "4a13d47c6fec9ebee186c3e5852d6b35683d4c50",
"size": "5218",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "zubio/allauth/account/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28486"
},
{
"name": "HTML",
"bytes": "76386"
},
{
"name": "JavaScript",
"bytes": "27346"
},
{
"name": "Python",
"bytes": "584967"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from ebird.api.observations import OBSERVATIONS_URL, get_observations
from tests import mixins
class GetObservationsTests(
    TestCase,
    mixins.AreaTestsMixin,
    mixins.BackTestsMixin,
    mixins.CategoryTestsMixin,
    mixins.DetailTestsMixin,
    mixins.HeaderTestsMixin,
    mixins.HotspotTestsMixin,
    mixins.MaxObservationsTestsMixin,
    mixins.ProvisionalTestsMixin,
    mixins.SpeciesLocaleTestsMixin,
):
    """Tests for the get_observations() API call."""

    def get_max_results_default(self):
        """The call has no default cap on the number of results."""
        return None

    def get_callable(self):
        """Return the API function under test."""
        return get_observations

    def get_params(self, **kwargs):
        """Return the default call parameters, overridden by *kwargs*."""
        defaults = {
            "token": "12345",
            "area": "US-NV",
        }
        defaults.update(kwargs)
        return defaults

    def test_request_url(self):
        """A single area code is interpolated into the request URL."""
        url = self.api_call()[0]
        expected = OBSERVATIONS_URL % "US-NV"
        self.assertEqual(expected, url)

    def test_request_for_multiple_areas_url(self):
        """With several areas, only the first appears in the URL path."""
        url = self.api_call(area="US-NV,US-WY,US-AZ")[0]
        expected = OBSERVATIONS_URL % "US-NV"
        self.assertEqual(expected, url)

    def test_request_for_multiple_areas_parameter(self):
        """The full comma-separated area list is sent as the 'r' parameter."""
        query = self.api_call(area="US-NV,US-WY,US-AZ")[1]
        self.assertEqual(query["r"], "US-NV,US-WY,US-AZ")
| {
"content_hash": "7a3de0ac302d47fcc74ebcce7c4588d9",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 69,
"avg_line_length": 28.31111111111111,
"alnum_prop": 0.6530612244897959,
"repo_name": "ProjectBabbler/ebird-api",
"id": "93e816e7657eeb3cc9a95fbe885e955031abae44",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/observations/test_get_observations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5063"
},
{
"name": "Python",
"bytes": "127932"
}
],
"symlink_target": ""
} |
''' Best buy or sell '''
# Copyright (c) 2012, 2013, 2014 Toomore Chiang, http://toomore.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class BestFourPoint(object):
    """ 四大買點組合 — composite "four best buy/sell points" indicator.

        :param grs.Stock data: 個股資料 — per-stock data object. Must expose
            ``value`` (volume series), ``price``, ``openprice``,
            ``moving_average(days)`` and the two bias-ratio helpers used
            below; only the last two entries of each series are read.
    """
    def __init__(self, data):
        self.data = data

    def bias_ratio(self, positive_or_negative=False):
        """ 判斷乖離 — check the 3/6-day moving-average bias ratio.

            :param bool positive_or_negative: 正乖離 為 True,負乖離 為 False
            :rtype: bool
        """
        return self.data.check_moving_average_bias_ratio(
                self.data.moving_average_bias_ratio(3, 6)[0],
                positive_or_negative=positive_or_negative)[0]

    def check_plus_bias_ratio(self):
        """ 正乖離扣至最大 — positive bias fully retraced. """
        return self.bias_ratio(True)

    def check_mins_bias_ratio(self):
        """ 負乖離扣至最大 — negative bias fully retraced. """
        return self.bias_ratio()

    ##### 四大買點 #####
    # NOTE: the docstrings of the eight signal methods below are runtime
    # data — they are returned verbatim to callers by
    # best_four_point_to_buy()/..._to_sell() — so they must not be edited.
    def best_buy_1(self):
        """ 量大收紅
        :rtype: bool
        """
        return (self.data.value[-1] > self.data.value[-2] and
                self.data.price[-1] > self.data.openprice[-1])

    def best_buy_2(self):
        """ 量縮價不跌
        :rtype: bool
        """
        return (self.data.value[-1] < self.data.value[-2] and
                self.data.price[-1] > self.data.price[-2])

    def best_buy_3(self):
        """ 三日均價由下往上
        :rtype: bool
        """
        return self.data.moving_average(3)[1] == 1

    def best_buy_4(self):
        """ 三日均價大於六日均價
        :rtype: bool
        """
        return (self.data.moving_average(3)[0][-1] >
                self.data.moving_average(6)[0][-1])

    ##### 四大賣點 #####
    def best_sell_1(self):
        """ 量大收黑
        :rtype: bool
        """
        return (self.data.value[-1] > self.data.value[-2] and
                self.data.price[-1] < self.data.openprice[-1])

    def best_sell_2(self):
        """ 量縮價跌
        :rtype: bool
        """
        return (self.data.value[-1] < self.data.value[-2] and
                self.data.price[-1] < self.data.price[-2])

    def best_sell_3(self):
        """ 三日均價由上往下
        :rtype: bool
        """
        return self.data.moving_average(3)[1] == -1

    def best_sell_4(self):
        """ 三日均價小於六日均價
        :rtype: bool
        """
        return (self.data.moving_average(3)[0][-1] <
                self.data.moving_average(6)[0][-1])

    def _matched_signal_docs(self, signal_methods):
        """Return the docstrings of the signal methods that fire.

        Each matched method contributes its stripped docstring. On
        Python 2, ``__doc__`` is a UTF-8 byte string and is decoded to
        unicode (matching the original behavior); on Python 3 it is
        already text, where the old unconditional ``.decode('utf-8')``
        would have raised AttributeError.

        :param signal_methods: iterable of bound zero-argument predicates
        :rtype: list of unicode/str
        """
        matched = []
        for method in signal_methods:
            if method():
                doc = method.__doc__.strip()
                if isinstance(doc, bytes):  # Python 2 str docstrings
                    doc = doc.decode('utf-8')
                matched.append(doc)
        return matched

    def best_four_point_to_buy(self):
        """ 判斷是否為四大買點 — detect buy signals.

            :rtype: str or False
            :returns: comma-joined descriptions of matched buy signals,
                or False when the bias precondition fails or no signal fires
        """
        if self.check_mins_bias_ratio():
            matched = self._matched_signal_docs(
                    (self.best_buy_1, self.best_buy_2,
                     self.best_buy_3, self.best_buy_4))
            if matched:
                return ', '.join(matched)
        return False

    def best_four_point_to_sell(self):
        """ 判斷是否為四大賣點 — detect sell signals.

            :rtype: str or False
            :returns: comma-joined descriptions of matched sell signals,
                or False when the bias precondition fails or no signal fires
        """
        if self.check_plus_bias_ratio():
            matched = self._matched_signal_docs(
                    (self.best_sell_1, self.best_sell_2,
                     self.best_sell_3, self.best_sell_4))
            if matched:
                return ', '.join(matched)
        return False

    def best_four_point(self):
        """ 判斷買點或賣點 — classify as buy point or sell point.

            :rtype: tuple or None
            :returns: (True, buy signals) / (False, sell signals), or
                None when neither side fires
        """
        buy = self.best_four_point_to_buy()
        if buy:
            return True, buy
        sell = self.best_four_point_to_sell()
        if sell:
            return False, sell
        return None
| {
"content_hash": "2c191d462f1cf986616bec1d2b7e3f49",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 31.632183908045977,
"alnum_prop": 0.5421511627906976,
"repo_name": "toomore/grs",
"id": "5a550219475a9d06a3495a175ca350789c3d81b0",
"size": "5774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grs/best_buy_or_sell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62260"
}
],
"symlink_target": ""
} |
"""Test class for common methods used by iLO modules."""
import tempfile
import mock
from oslo.config import cfg
from oslo.utils import importutils
from ironic.common import exception
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
CONF = cfg.CONF
class IloValidateParametersTestCase(db_base.DbTestCase):
    """Validation checks for ilo_common.parse_driver_info()."""

    def setUp(self):
        super(IloValidateParametersTestCase, self).setUp()
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ilo',
            driver_info=db_utils.get_test_ilo_info())

    def _assert_parse_raises(self, exc):
        """Assert that parsing the node's current driver info raises exc."""
        self.assertRaises(exc, ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info(self):
        """A complete driver_info parses with every expected field set."""
        info = ilo_common.parse_driver_info(self.node)
        for field in ('ilo_address', 'ilo_username', 'ilo_password',
                      'client_timeout', 'client_port'):
            self.assertIsNotNone(info.get(field))

    def test_parse_driver_info_missing_address(self):
        del self.node.driver_info['ilo_address']
        self._assert_parse_raises(exception.MissingParameterValue)

    def test_parse_driver_info_missing_username(self):
        del self.node.driver_info['ilo_username']
        self._assert_parse_raises(exception.MissingParameterValue)

    def test_parse_driver_info_missing_password(self):
        del self.node.driver_info['ilo_password']
        self._assert_parse_raises(exception.MissingParameterValue)

    def test_parse_driver_info_invalid_timeout(self):
        self.node.driver_info['client_timeout'] = 'qwe'
        self._assert_parse_raises(exception.InvalidParameterValue)

    def test_parse_driver_info_invalid_port(self):
        self.node.driver_info['client_port'] = 'qwe'
        self._assert_parse_raises(exception.InvalidParameterValue)

    def test_parse_driver_info_missing_multiple_params(self):
        """Every missing field is named in the single raised exception."""
        del self.node.driver_info['ilo_password']
        del self.node.driver_info['ilo_address']
        try:
            ilo_common.parse_driver_info(self.node)
            self.fail("parse_driver_info did not throw exception.")
        except exception.MissingParameterValue as exc:
            self.assertIn('ilo_password', str(exc))
            self.assertIn('ilo_address', str(exc))
class IloCommonMethodsTestCase(db_base.DbTestCase):
    def setUp(self):
        """Register the fake_ilo driver and create a test node using it."""
        super(IloCommonMethodsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.info = db_utils.get_test_ilo_info()
        self.node = obj_utils.create_test_node(self.context,
                driver='fake_ilo', driver_info=self.info)
    @mock.patch.object(ilo_common, 'ilo_client')
    def test_get_ilo_object(self, ilo_client_mock):
        """get_ilo_object() builds an IloClient from the node's driver info."""
        self.info['client_timeout'] = 60
        self.info['client_port'] = 443
        ilo_client_mock.IloClient.return_value = 'ilo_object'
        returned_ilo_object = ilo_common.get_ilo_object(self.node)
        # Positional argument order matters: address, user, password,
        # timeout, port.
        ilo_client_mock.IloClient.assert_called_with(
            self.info['ilo_address'],
            self.info['ilo_username'],
            self.info['ilo_password'],
            self.info['client_timeout'],
            self.info['client_port'])
        self.assertEqual('ilo_object', returned_ilo_object)
    @mock.patch.object(ilo_common, 'ilo_client')
    def test_get_ilo_license(self, ilo_client_mock):
        """License strings map to the ADVANCED/STANDARD constants."""
        ilo_advanced_license = {'LICENSE_TYPE': 'iLO 3 Advanced'}
        ilo_standard_license = {'LICENSE_TYPE': 'iLO 3'}
        ilo_mock_object = ilo_client_mock.IloClient.return_value
        ilo_mock_object.get_all_licenses.return_value = ilo_advanced_license
        # NOTE: the local deliberately reuses the test data's naming but
        # shadows the builtin `license`.
        license = ilo_common.get_ilo_license(self.node)
        self.assertEqual(ilo_common.ADVANCED_LICENSE, license)
        ilo_mock_object.get_all_licenses.return_value = ilo_standard_license
        license = ilo_common.get_ilo_license(self.node)
        self.assertEqual(ilo_common.STANDARD_LICENSE, license)
    @mock.patch.object(ilo_common, 'ilo_client')
    def test_get_ilo_license_fail(self, ilo_client_mock):
        """An IloError from the client surfaces as IloOperationError."""
        # The mocked module's IloError must be a real exception type for
        # the code under test to catch it.
        ilo_client_mock.IloError = Exception
        ilo_mock_object = ilo_client_mock.IloClient.return_value
        ilo_mock_object.get_all_licenses.side_effect = [Exception()]
        self.assertRaises(exception.IloOperationError,
                          ilo_common.get_ilo_license,
                          self.node)
    def test_update_ipmi_properties(self):
        """IPMI credentials are derived from the iLO info and merged in."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Expected IPMI fields mirrored from the iLO driver info.
            ipmi_info = {
                "ipmi_address": "1.2.3.4",
                "ipmi_username": "admin",
                "ipmi_password": "fake",
                "ipmi_terminal_port": 60
            }
            self.info['console_port'] = 60
            task.node.driver_info = self.info
            ilo_common.update_ipmi_properties(task)
            actual_info = task.node.driver_info
            # Existing driver_info must be preserved, IPMI keys added.
            expected_info = dict(self.info, **ipmi_info)
            self.assertEqual(expected_info, actual_info)
    def test__get_floppy_image_name(self):
        """The floppy image name is 'image-' plus the node UUID."""
        image_name_expected = 'image-' + self.node.uuid
        image_name_actual = ilo_common._get_floppy_image_name(self.node)
        self.assertEqual(image_name_expected, image_name_actual)
    @mock.patch.object(swift, 'SwiftAPI')
    @mock.patch.object(images, 'create_vfat_image')
    @mock.patch.object(utils, 'write_to_file')
    @mock.patch.object(tempfile, 'NamedTemporaryFile')
    def test__prepare_floppy_image(self, tempfile_mock, write_mock,
                                   fatimage_mock, swift_api_mock):
        """The floppy image is built, uploaded to Swift, and a temp URL returned."""
        mock_token_file_obj = mock.MagicMock()
        mock_token_file_obj.name = 'token-tmp-file'
        # NOTE(review): spec=file relies on the Python 2 `file` builtin —
        # this test module predates Python 3 support.
        mock_image_file_handle = mock.MagicMock(spec=file)
        mock_image_file_obj = mock.MagicMock()
        mock_image_file_obj.name = 'image-tmp-file'
        mock_image_file_handle.__enter__.return_value = mock_image_file_obj
        # First NamedTemporaryFile call yields the image file, second the
        # auth-token file — order matters to the code under test.
        tempfile_mock.side_effect = [mock_image_file_handle,
                                     mock_token_file_obj]
        swift_obj_mock = swift_api_mock.return_value
        self.config(swift_ilo_container='ilo_cont', group='ilo')
        self.config(swift_object_expiry_timeout=1, group='ilo')
        deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
        swift_obj_mock.get_temp_url.return_value = 'temp-url'
        timeout = CONF.ilo.swift_object_expiry_timeout
        # The uploaded object auto-expires after the configured timeout.
        object_headers = {'X-Delete-After': timeout}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.context.auth_token = 'token'
            temp_url = ilo_common._prepare_floppy_image(task, deploy_args)
            node_uuid = task.node.uuid
            object_name = 'image-' + node_uuid
            files_info = {'token-tmp-file': 'token'}
            # Token written to file, packed into the vFAT image along with
            # the deploy arguments, then uploaded and temp-URL'd.
            write_mock.assert_called_once_with('token-tmp-file', 'token')
            mock_token_file_obj.close.assert_called_once_with()
            fatimage_mock.assert_called_once_with('image-tmp-file',
                                                  files_info=files_info,
                                                  parameters=deploy_args)
            swift_obj_mock.create_object.assert_called_once_with('ilo_cont',
                object_name, 'image-tmp-file', object_headers=object_headers)
            swift_obj_mock.get_temp_url.assert_called_once_with('ilo_cont',
                object_name, timeout)
            self.assertEqual('temp-url', temp_url)
@mock.patch.object(swift, 'SwiftAPI')
@mock.patch.object(images, 'create_vfat_image')
@mock.patch.object(tempfile, 'NamedTemporaryFile')
def test__prepare_floppy_image_noauth(self, tempfile_mock, fatimage_mock,
                                      swift_api_mock):
    """Without an auth token, no token file is packed into the image."""
    mock_token_file_obj = mock.MagicMock()
    mock_token_file_obj.name = 'token-tmp-file'
    # ``file`` is the Python 2 built-in file type (this is py2-era code).
    mock_image_file_handle = mock.MagicMock(spec=file)
    mock_image_file_obj = mock.MagicMock()
    mock_image_file_obj.name = 'image-tmp-file'
    mock_image_file_handle.__enter__.return_value = mock_image_file_obj
    # Only the image temp file is created; no token file is needed.
    tempfile_mock.side_effect = [mock_image_file_handle]
    self.config(swift_ilo_container='ilo_cont', group='ilo')
    self.config(swift_object_expiry_timeout=1, group='ilo')
    deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        task.context.auth_token = None
        ilo_common._prepare_floppy_image(task, deploy_args)
        # The vFAT image is built with an empty files_info mapping.
        files_info = {}
        fatimage_mock.assert_called_once_with('image-tmp-file',
                                              files_info=files_info,
                                              parameters=deploy_args)
@mock.patch.object(ilo_common, 'ilo_client')
def test_attach_vmedia(self, ilo_client_mock):
    """attach_vmedia() inserts the media, connects it write-protected, and
    wraps client errors in IloOperationError."""
    ilo_client_mock.IloError = Exception
    ilo_mock_object = ilo_client_mock.IloClient.return_value
    insert_media_mock = ilo_mock_object.insert_virtual_media
    set_status_mock = ilo_mock_object.set_vm_status
    ilo_common.attach_vmedia(self.node, 'FLOPPY', 'url')
    insert_media_mock.assert_called_once_with('url', device='FLOPPY')
    set_status_mock.assert_called_once_with(device='FLOPPY',
        boot_option='CONNECT', write_protect='YES')
    # A failure while setting the VM status must surface as
    # IloOperationError.
    set_status_mock.side_effect = Exception()
    self.assertRaises(exception.IloOperationError,
                      ilo_common.attach_vmedia, self.node, 'FLOPPY', 'url')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_set_boot_mode(self, get_ilo_object_mock):
    """set_boot_mode() sets the pending mode (upper-cased) when it differs
    from the current pending mode."""
    ilo_object_mock = get_ilo_object_mock.return_value
    get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
    set_pending_boot_mode_mock = ilo_object_mock.set_pending_boot_mode
    get_pending_boot_mode_mock.return_value = 'LEGACY'
    ilo_common.set_boot_mode(self.node, 'uefi')
    get_ilo_object_mock.assert_called_once_with(self.node)
    get_pending_boot_mode_mock.assert_called_once_with()
    # 'uefi' is passed through to iLO as 'UEFI'.
    set_pending_boot_mode_mock.assert_called_once_with('UEFI')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_set_boot_mode_without_set_pending_boot_mode(self,
                                                     get_ilo_object_mock):
    """If the requested mode matches the pending one ('bios' == 'LEGACY'),
    no set_pending_boot_mode call is made."""
    ilo_object_mock = get_ilo_object_mock.return_value
    get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
    get_pending_boot_mode_mock.return_value = 'LEGACY'
    ilo_common.set_boot_mode(self.node, 'bios')
    get_ilo_object_mock.assert_called_once_with(self.node)
    get_pending_boot_mode_mock.assert_called_once_with()
    self.assertFalse(ilo_object_mock.set_pending_boot_mode.called)
@mock.patch.object(ilo_common, 'ilo_client')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_set_boot_mode_with_IloOperationError(self,
                                              get_ilo_object_mock,
                                              ilo_client_mock):
    """An IloError while setting the mode is re-raised as
    IloOperationError."""
    ilo_object_mock = get_ilo_object_mock.return_value
    get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
    get_pending_boot_mode_mock.return_value = 'UEFI'
    set_pending_boot_mode_mock = ilo_object_mock.set_pending_boot_mode
    ilo_client_mock.IloError = Exception
    set_pending_boot_mode_mock.side_effect = Exception
    self.assertRaises(exception.IloOperationError,
                      ilo_common.set_boot_mode, self.node, 'bios')
    get_ilo_object_mock.assert_called_once_with(self.node)
    get_pending_boot_mode_mock.assert_called_once_with()
@mock.patch.object(driver_utils, 'rm_node_capability')
@mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
@mock.patch.object(ilo_common, 'ilo_client')
def test_update_boot_mode_capability(self, ilo_client_mock,
                                     get_ilo_object_mock,
                                     add_node_capability_mock,
                                     rm_node_capability_mock):
    """A pending boot mode of 'legacy' replaces any existing boot_mode
    capability with 'bios'."""
    ilo_client_mock.IloCommandNotSupportedError = Exception
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.return_value = 'legacy'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode_capability(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        # The stale capability is removed before the fresh one is added.
        rm_node_capability_mock.assert_called_once_with(task, 'boot_mode')
        add_node_capability_mock.assert_called_once_with(task,
                                                        'boot_mode',
                                                        'bios')
@mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
@mock.patch.object(ilo_common, 'ilo_client')
def test_update_boot_mode_capability_unknown(self, ilo_client_mock,
                                             get_ilo_object_mock,
                                             add_node_capability_mock):
    """'UNKNOWN' pending mode: force the pending mode to UEFI and record
    the 'uefi' capability."""
    ilo_client_mock.IloCommandNotSupportedError = Exception
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.return_value = 'UNKNOWN'
    set_pending_boot_mode_mock = ilo_mock_obj.set_pending_boot_mode
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode_capability(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        set_pending_boot_mode_mock.assert_called_once_with('UEFI')
        add_node_capability_mock.assert_called_once_with(task,
                                                        'boot_mode',
                                                        'uefi')
@mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
@mock.patch.object(ilo_common, 'ilo_client')
def test_update_boot_mode_capability_legacy(self, ilo_client_mock,
                                            get_ilo_object_mock,
                                            add_node_capability_mock):
    """If iLO does not support querying the boot mode (raises
    IloCommandNotSupportedError), default the capability to 'bios'."""
    ilo_client_mock.IloCommandNotSupportedError = Exception
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.side_effect = Exception
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode_capability(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        add_node_capability_mock.assert_called_once_with(task,
                                                        'boot_mode',
                                                        'bios')
@mock.patch.object(images, 'get_temp_url_for_glance_image')
@mock.patch.object(ilo_common, 'attach_vmedia')
@mock.patch.object(ilo_common, '_prepare_floppy_image')
def test_setup_vmedia_for_boot_with_parameters(self, prepare_image_mock,
                                               attach_vmedia_mock,
                                               temp_url_mock):
    """With deploy parameters, a floppy is prepared and attached, and the
    glance-referenced ISO is attached as CDROM via a temp URL."""
    parameters = {'a': 'b'}
    # 'glance:' prefix means the image UUID is resolved through glance.
    boot_iso = 'glance:image-uuid'
    prepare_image_mock.return_value = 'floppy_url'
    temp_url_mock.return_value = 'image_url'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.setup_vmedia_for_boot(task, boot_iso, parameters)
        prepare_image_mock.assert_called_once_with(task, parameters)
        attach_vmedia_mock.assert_any_call(task.node, 'FLOPPY',
                                           'floppy_url')
        temp_url_mock.assert_called_once_with(task.context, 'image-uuid')
        attach_vmedia_mock.assert_any_call(task.node, 'CDROM', 'image_url')
@mock.patch.object(swift, 'SwiftAPI')
@mock.patch.object(ilo_common, 'attach_vmedia')
def test_setup_vmedia_for_boot_with_swift(self, attach_vmedia_mock,
                                          swift_api_mock):
    """A 'swift:' boot ISO is resolved to a Swift temp URL and attached
    as CDROM; no floppy is involved."""
    swift_obj_mock = swift_api_mock.return_value
    boot_iso = 'swift:object-name'
    swift_obj_mock.get_temp_url.return_value = 'image_url'
    CONF.keystone_authtoken.auth_uri = 'http://authurl'
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    CONF.ilo.swift_object_expiry_timeout = 1
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.setup_vmedia_for_boot(task, boot_iso)
        swift_obj_mock.get_temp_url.assert_called_once_with('ilo_cont',
            'object-name', 1)
        attach_vmedia_mock.assert_called_once_with(task.node, 'CDROM',
                                                   'image_url')
@mock.patch.object(ilo_common, 'get_ilo_object')
@mock.patch.object(swift, 'SwiftAPI')
@mock.patch.object(ilo_common, '_get_floppy_image_name')
def test_cleanup_vmedia_boot(self, get_name_mock, swift_api_mock,
                             get_ilo_object_mock):
    """cleanup_vmedia_boot() deletes the Swift floppy object and ejects
    both virtual media devices."""
    swift_obj_mock = swift_api_mock.return_value
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    ilo_object_mock = mock.MagicMock()
    get_ilo_object_mock.return_value = ilo_object_mock
    get_name_mock.return_value = 'image-node-uuid'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.cleanup_vmedia_boot(task)
        swift_obj_mock.delete_object.assert_called_once_with('ilo_cont',
                                                             'image-node-uuid')
        ilo_object_mock.eject_virtual_media.assert_any_call('CDROM')
        ilo_object_mock.eject_virtual_media.assert_any_call('FLOPPY')
| {
"content_hash": "73636673b47d30a725c4b244267a3ac1",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 48.57289002557545,
"alnum_prop": 0.6087826453243471,
"repo_name": "rackerlabs/ironic",
"id": "7e19760cff1c72a29433593594b9bde2562be970",
"size": "19649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/drivers/ilo/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2267329"
}
],
"symlink_target": ""
} |
from __future__ import division,print_function,unicode_literals,with_statement
import sys
from django import VERSION
from django.db import transaction
from django.utils import encoding
PY2=sys.version_info[0]==2
# commit_on_success was removed in 1.8, use atomic
if hasattr(transaction,'atomic'):
atomic_decorator=getattr(transaction,'atomic')
else:
atomic_decorator=getattr(transaction,'commit_on_success')
# ugly hack required for Python 2/3 compat
if hasattr(encoding,'force_unicode'):
force_unicode=encoding.force_unicode
elif hasattr(encoding,'force_text'):
force_unicode=encoding.force_text
else:
force_unicode=lambda x:x
if VERSION[1]>=8:
pass
else:
pass
if not PY2:
string_types=(str,)
else:
string_types=(str,unicode)
| {
"content_hash": "01a73988626749b9031bda71e376f965",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 22.78787878787879,
"alnum_prop": 0.7632978723404256,
"repo_name": "hikelee/launcher",
"id": "bb885eec874577b63b6809a225d4176ec7e919a5",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "launcher/utils/common/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "58461"
},
{
"name": "JavaScript",
"bytes": "14117"
},
{
"name": "Python",
"bytes": "212230"
}
],
"symlink_target": ""
} |
"""Tests"""
from unittest import TestCase
class TestDummy(TestCase):
    """Placeholder test case so that ``pytest`` always has something to run.

    When zero tests are collected ``pytest`` exits with a non-zero status;
    this trivially passing test keeps the exit code at zero.
    """

    def test_one_equals_one(self):
        """Trivial always-passing check."""
        self.assertEqual(1, 1)
| {
"content_hash": "c889a4440a6995e04582b550179b1ee6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.625,
"repo_name": "jricardo27/holiday_planner",
"id": "1ba721e508650788da7e4c60663a6a581d161d5b",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holiday_planner/tests/test_something.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1518"
},
{
"name": "Gherkin",
"bytes": "839"
},
{
"name": "HTML",
"bytes": "36302"
},
{
"name": "JavaScript",
"bytes": "19616"
},
{
"name": "Python",
"bytes": "98712"
},
{
"name": "Shell",
"bytes": "4362"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from rest_framework import status
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from rest_framework.generics import (
ListAPIView, RetrieveAPIView, CreateAPIView, RetrieveUpdateAPIView, DestroyAPIView)
from rest_framework.permissions import (
AllowAny,
)
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.container.models import Container, Trip
from core.utils.read_exif import get_exif_dumps, get_exif_path_url
from .pagination import ContainerPageNumberPagination, TripPageNumberPagination
from .serializers import (
ContainerListSerializer,
ContainerCreateUpdateSerializer,
ContainerDetailSerializer,
TripListSerializer,
TripDetailSerializer,
create_trip_serializer,
TripCreateUpdateSerializer)
# ---------------API CONTAINER --------------------------
class ContainerCreateAPIView(CreateAPIView):
    """POST endpoint that registers a new ``Container``."""

    serializer_class = ContainerCreateUpdateSerializer
    queryset = Container.objects.all()
class ContainerDetailAPIView(RetrieveAPIView):
    """GET endpoint returning one container, addressed by its MAC
    identifier (``identifier_mac`` URL kwarg)."""

    serializer_class = ContainerDetailSerializer
    queryset = Container.objects.all()
    lookup_field = 'identifier_mac'
    lookup_url_kwarg = 'identifier_mac'
class ContainerUpdateAPIView(RetrieveUpdateAPIView):
    """GET/PUT/PATCH endpoint for a container, addressed by its MAC
    identifier (``identifier_mac`` URL kwarg)."""

    serializer_class = ContainerCreateUpdateSerializer
    queryset = Container.objects.all()
    lookup_field = 'identifier_mac'
    lookup_url_kwarg = 'identifier_mac'
class ContainerDeleteAPIView(DestroyAPIView):
    """DELETE endpoint for a container, addressed by its MAC identifier."""

    queryset = Container.objects.all()
    serializer_class = ContainerDetailSerializer
    lookup_field = 'identifier_mac'
    # Added for consistency with the sibling detail/update views. DRF
    # defaults lookup_url_kwarg to lookup_field, so behavior is unchanged.
    lookup_url_kwarg = 'identifier_mac'
class ContainerListAPIView(ListAPIView):
    """Public, paginated container listing with optional ``?q=`` search
    across the MAC identifier, name, and number fields."""

    serializer_class = ContainerListSerializer
    filter_backends = [SearchFilter, OrderingFilter]
    permission_classes = [AllowAny]
    search_fields = ['identifier_mac', 'name_mac', 'number_mac']
    pagination_class = ContainerPageNumberPagination

    def get_queryset(self, *args, **kwargs):
        """Apply the free-text ``q`` filter when present."""
        containers = Container.objects.all()
        term = self.request.GET.get("q")
        if not term:
            return containers
        matches = (
            Q(identifier_mac__icontains=term)
            | Q(name_mac__icontains=term)
            | Q(number_mac__icontains=term)
        )
        return containers.filter(matches).distinct()
# ---------------API TRIP --------------------------
class TripCreateAPIView(CreateAPIView):
    """POST endpoint that records a new ``Trip``."""

    queryset = Trip.objects.all()

    def get_serializer_class(self):
        # The serializer class is produced by a factory rather than
        # declared statically.
        return create_trip_serializer()
class TripDetailAPIView(RetrieveAPIView):
    """GET endpoint returning a single trip by primary key."""

    serializer_class = TripDetailSerializer
    queryset = Trip.objects.all()
class TripGroupByMacAPIView(ListAPIView):
    """Paginated list of all trips recorded for the container identified
    by the ``identifier_mac`` URL kwarg."""

    serializer_class = TripListSerializer
    lookup_url_kwarg = 'identifier_mac'
    pagination_class = TripPageNumberPagination

    def get_queryset(self):
        identifier_mac = self.kwargs['identifier_mac']
        trips = Trip.objects.filter(container__identifier_mac=identifier_mac)
        if not trips.exists():
            # NOTE(review): returning a Response from get_queryset() is not
            # supported by DRF (the view expects a queryset); kept for
            # backward compatibility, but this should probably raise
            # rest_framework.exceptions.NotFound instead -- confirm.
            content = {'please move along': 'nothing to see here'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        # Fix: reuse the already-built queryset instead of running the
        # identical filter() a second time.
        return trips
class TripGroupByDateAPIView(APIView):
    """Return the raw ``datetime_image`` values for every trip of the
    container identified by the ``identifier_mac`` URL kwarg."""

    def get(self, request, *args, **kwargs):
        identifier_mac = kwargs['identifier_mac']
        trips = Trip.objects.filter(container__identifier_mac=identifier_mac)
        if not trips.exists():
            # NOTE(review): this is a *set* literal, not a dict; kept
            # byte-for-byte to preserve the response payload, but verify
            # DRF serializes it as intended.
            content = {'It has not found any record associated with this mac'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        # Fix: reuse the filtered queryset instead of issuing the same
        # query again; the leftover debug print() was removed.
        dates = trips.values_list('datetime_image', flat=True)
        return Response(dates)
class TripUpdateAPIView(RetrieveUpdateAPIView):
    """Update a trip. A new picture replaces the stored file, and EXIF
    metadata is re-extracted after every update."""

    queryset = Trip.objects.all()
    serializer_class = TripCreateUpdateSerializer

    def put(self, request, *args, **kwargs):
        trip = Trip.objects.get(pk=kwargs['pk'])
        # Fix: .get() instead of ['picture'] so a PUT without a new picture
        # no longer crashes with KeyError.
        picture = request.data.get('picture')
        if picture:
            # Delete the old file first so the storage backend does not
            # accumulate orphaned images.
            trip.picture.delete()
        return super(TripUpdateAPIView, self).update(request, *args, **kwargs)

    def perform_update(self, serializer):
        instance = serializer.save()
        # Refresh the stored EXIF metadata from the (possibly new) picture.
        url = get_exif_path_url(instance.picture)
        instance.metadata = get_exif_dumps(url)
        instance.save()
class TripDeleteAPIView(DestroyAPIView):
    """DELETE endpoint for a trip, addressed by primary key."""

    serializer_class = TripDetailSerializer
    queryset = Trip.objects.all()
class TripListAPIView(ListAPIView):
    """Public, paginated trip listing with optional ``?q=`` search.

    NOTE(review): pagination_class is ContainerPageNumberPagination, not a
    Trip-specific one -- confirm this is intended.
    """

    serializer_class = TripListSerializer
    filter_backends = [SearchFilter, OrderingFilter]
    permission_classes = [AllowAny]
    search_fields = ['container', 'datetime_image']
    pagination_class = ContainerPageNumberPagination

    def get_queryset(self, *args, **kwargs):
        """Apply the free-text ``q`` filter when present."""
        trips = Trip.objects.all()
        term = self.request.GET.get("q")
        if not term:
            return trips
        matches = (
            Q(container__icontains=term)
            | Q(datetime_image__icontains=term)
        )
        return trips.filter(matches).distinct()
| {
"content_hash": "cac2fd342f8dd218fde4caae4076e93a",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 110,
"avg_line_length": 34.458064516129035,
"alnum_prop": 0.6759033888784872,
"repo_name": "jonaqp/heroku",
"id": "7ebfac4772697f0ef7472e65c34f88f06de07a3f",
"size": "5341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/container/api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40949"
},
{
"name": "HTML",
"bytes": "329134"
},
{
"name": "JavaScript",
"bytes": "78825"
},
{
"name": "Python",
"bytes": "182554"
}
],
"symlink_target": ""
} |
# ``enabled`` records whether the pxpcap C extension could be imported.
# The rest of this module still imports cleanly without it, so callers can
# probe availability via this flag.
enabled = False
try:
    import pxpcap as pcapc
    enabled = True
except Exception:
    # Fix: narrowed from a bare ``except:`` so that SystemExit and
    # KeyboardInterrupt are no longer swallowed during import.
    # We can at least import the rest.
    pass

from pox.lib.addresses import IPAddr, EthAddr
import parser
from threading import Thread
import pox.lib.packet as pkt
import copy
class PCap (object):
    """Wrapper around the pxpcap C extension.

    Opens a capture device, runs a background dispatch thread, and hands
    each captured packet to a callback.  Also supports packet injection
    and BPF filter installation.
    """

    @staticmethod
    def get_devices ():
        """Return a dict of capture devices reported by libpcap.

        Shape: {device_name: {'desc': str, 'addrs': {family: info_dict}}}.
        """
        def ip (addr):
            # pcapc hands back packed IPv4 addresses in network byte order.
            if addr is None: return None
            return IPAddr(addr, networkOrder=True)
        def link (addr):
            # Only 6-byte (MAC-style) link-layer addresses are converted.
            if addr is None: return None
            if len(addr) != 6: return None
            return EthAddr(addr)
        devs = pcapc.findalldevs()
        out = {}
        for d in devs:
            # Each device tuple is (name, description, address_list).
            addrs = {}
            n = {'desc':d[1],'addrs':addrs}
            out[d[0]] = n
            for a in d[2]:
                # Each address entry is (family, addr, netmask, broadaddr,
                # dstaddr) -- families differ by platform.
                if a[0] == 'AF_INET':
                    na = {}
                    addrs[a[0]] = na
                    na['addr'] = ip(a[1])
                    na['netmask'] = ip(a[2])
                    na['broadaddr'] = ip(a[3])
                    na['dstaddr'] = ip(a[4])
                elif a[0] == 'AF_LINK':
                    na = {}
                    addrs[a[0]] = na
                    na['addr'] = link(a[1])
                    na['netmask'] = link(a[2])
                    na['broadaddr'] = link(a[3])
                    na['dstaddr'] = link(a[4])
                elif a[0] == 'AF_PACKET':
                    addrs[a[0]] = {'addr':link(a[1])}
                elif a[0] == 'ethernet':
                    addrs[a[0]] = {'addr':link(a[1])}
        return out

    @staticmethod
    def get_device_names ():
        """Return just the device names from findalldevs()."""
        return [d[0] for d in pcapc.findalldevs()]

    def __init__ (self, device = None, promiscuous = True, period = 10,
                  start = True, callback = None, filter = None):
        """Optionally open ``device`` and start capturing immediately.

        callback: called as callback(self, data, sec, usec, length) per
        packet; defaults to the (no-op) _handle_rx method.
        filter: a BPF filter string or Filter object; stored and applied
        once the device is actually opened.
        """
        if filter is not None:
            # Can't compile the filter until a pcap handle exists.
            self.deferred_filter = (filter,)
        else:
            self.deferred_filter = None
        self.packets_received = 0
        self.packets_dropped = 0
        self._thread = None
        self.pcap = None
        self.promiscuous = promiscuous
        self.device = None
        self.period = period          # dispatch timeout in ms
        self.netmask = IPAddr("0.0.0.0")
        self._quitting = False
        self.addresses = {}
        if callback is None:
            self.callback = self.__class__._handle_rx
        else:
            self.callback = callback
        if device is not None:
            self.open(device)
        if self.pcap is not None:
            if start:
                self.start()

    def _handle_rx (self, data, sec, usec, length):
        """Default per-packet callback: does nothing (override or pass
        callback= to the constructor)."""
        pass

    def open (self, device, promiscuous = None, period = None,
              incoming = True, outgoing = False):
        """Open a live capture on ``device``; may only be called once."""
        assert self.device is None
        self.addresses = self.get_devices()[device]['addrs']
        if 'AF_INET' in self.addresses:
            # Netmask is needed later to compile BPF filters.
            self.netmask = self.addresses['AF_INET'].get('netmask')
            if self.netmask is None: self.netmask = IPAddr("0.0.0.0")
        #print "NM:",self.netmask
        #print self.addresses['AF_LINK']['addr']
        self.device = device
        if period is not None:
            self.period = period
        if promiscuous is not None:
            self.promiscuous = promiscuous
        self.pcap = pcapc.open_live(device, 65535,
                                    1 if self.promiscuous else 0, self.period)
        pcapc.setdirection(self.pcap, incoming, outgoing)
        self.packets_received = 0
        self.packets_dropped = 0
        if self.deferred_filter is not None:
            # Apply the filter that was handed to the constructor.
            self.set_filter(*self.deferred_filter)
            self.deferred_filter = None

    def set_direction (self, incoming, outgoing):
        """Select whether incoming and/or outgoing packets are captured."""
        pcapc.setdirection(self.pcap, incoming, outgoing)

    def _thread_func (self):
        """Capture loop run on the background thread until stop()."""
        while not self._quitting:
            # Dispatch up to 100 packets per iteration to self.callback.
            pcapc.dispatch(self.pcap,100,self.callback,self)
            self.packets_received,self.packets_dropped = pcapc.stats(self.pcap)
        self._quitting = False
        self._thread = None

    def _handle_GoingDownEvent (self, event):
        # POX core shutdown hook (registered in start()).
        self.close()

    def start (self):
        """Start the background capture thread (at most one at a time)."""
        assert self._thread is None
        from pox.core import core
        core.addListeners(self, weak=True)
        self._thread = Thread(target=self._thread_func)
        #self._thread.daemon = True
        self._thread.start()

    def stop (self):
        """Signal the capture thread to exit and wait for it."""
        t = self._thread
        if t is not None:
            self._quitting = True
            # breakloop() makes a blocked dispatch() return promptly.
            pcapc.breakloop(self.pcap)
            t.join()

    def close (self):
        """Stop capturing and release the underlying pcap handle."""
        if self.pcap is None: return
        self.stop()
        pcapc.close(self.pcap)
        self.pcap = None

    def __del__ (self):
        self.close()

    def inject (self, data):
        """Transmit a raw packet (or a pox ethernet object) on the device."""
        if isinstance(data, pkt.ethernet):
            data = data.pack()
        if not isinstance(data, bytes):
            data = bytes(data) # Give it a try...
        return pcapc.inject(self.pcap, data)

    def set_filter (self, filter, optimize = True):
        """Install a BPF filter (string or Filter); deferred if the device
        is not open yet."""
        if self.pcap is None:
            self.deferred_filter = (filter, optimize)
            return
        if isinstance(filter, str):
            filter = Filter(filter, optimize, self.netmask.toSignedN(),
                            pcap_obj=self)
        elif isinstance(filter, Filter):
            pass
        else:
            raise RuntimeError("Filter must be string or Filter object")
        pcapc.setfilter(self.pcap, filter._pprogram)
class Filter (object):
    """A compiled BPF filter program.

    Compiles the filter text via pcapc.compile(); if no pcap handle is
    supplied, a throwaway "dead" handle is created just for compilation.
    """

    def __init__ (self, filter, optimize = True, netmask = None,
                  pcap_obj = None, link_type = 1, snaplen = 65535):
        self._pprogram = None
        if netmask is None:
            netmask = 0
        elif isinstance(netmask, IPAddr):
            netmask = netmask.toSignedN()
        delpc = False
        if pcap_obj is None:
            # No live handle available: compile against a dead one and
            # remember to close it afterwards.
            delpc = True
            pcap_obj = pcapc.open_dead(link_type, snaplen)
        if isinstance(pcap_obj, PCap):
            # Unwrap our own wrapper to the raw pcapc handle.
            pcap_obj = pcap_obj.pcap
        self._pprogram = pcapc.compile(pcap_obj, filter,
                                       1 if optimize else 0, netmask)
        if delpc:
            pcapc.close(pcap_obj)

    def __del__ (self):
        # Free the compiled program allocated by the C extension.
        if self._pprogram:
            pcapc.freecode(self._pprogram)
# Map the numeric DLT_* link-layer constants exported by pcapc back to
# their symbolic names (used by get_link_type_name()).
# Fix 1: the dict is created *outside* the try, so it always exists even
#        when pcapc failed to import (the original left the name undefined).
# Fix 2: .items() instead of the Python-2-only .iteritems(), which silently
#        left the table empty on Python 3.
# Fix 3: narrowed the bare ``except:`` to ``except Exception:``.
_link_type_names = {}
try:
    for k, v in copy.copy(pcapc.__dict__).items():
        if k.startswith("DLT_"):
            _link_type_names[v] = k
except Exception:
    pass
def get_link_type_name (dlt):
    """Return the symbolic DLT_* name for a numeric link-layer type, or a
    placeholder string when the value is unknown."""
    try:
        return _link_type_names[dlt]
    except KeyError:
        return "<Unknown %s>" % (str(dlt),)
def launch (interface = "en1"):
    """ Test function.

    Opens ``interface`` (or "#N" for the Nth device) with an ICMP filter,
    prints capture statistics per packet, and drops into an interactive
    Python shell with a ping() helper in scope.  Python 2 only (print
    statements).
    """
    global drop,total,bytes_got,bytes_real,bytes_diff
    drop = 0
    total = 0
    bytes_got = 0
    bytes_real = 0
    bytes_diff = 0
    def cb (obj, data, sec, usec, length):
        # Per-packet callback: track loss/drop counters and print IPv4
        # source/destination for each captured packet.
        global drop,total,bytes_got,bytes_real,bytes_diff
        #print ">>>",data
        t,d = pcapc.stats(obj.pcap)
        bytes_got += len(data)
        bytes_real += length
        # Difference between on-the-wire length and captured length.
        nbd = bytes_real - bytes_got
        if nbd != bytes_diff:
            bytes_diff = nbd
            print "lost bytes:",nbd
        if t > total:
            # Only report every 500 packets to limit output.
            total = t + 500
            print t,"total"
        if d > drop:
            drop = d
            print d, "dropped"
        p = pkt.ethernet(data)
        ip = p.find('ipv4')
        if ip:
            print ip.srcip,"\t",ip.dstip, p
    print "\n".join(["%i. %s" % x for x in
                     enumerate(PCap.get_device_names())])
    if interface.startswith("#"):
        # "#N" selects the Nth device from the enumerated list above.
        interface = int(interface[1:])
        interface = PCap.get_device_names()[interface]
    print "Interface:",interface
    p = PCap(interface, callback = cb,
             filter = "icmp")#[icmptype] != icmp-echoreply")
             #filter = "ip host 74.125.224.148")
    def ping (eth='00:18:02:6e:ce:55', ip='192.168.0.1'):
        # Inject a hand-built ICMP echo request out the opened interface.
        e = pkt.ethernet()
        e.src = p.addresses['ethernet']['addr']
        e.dst = EthAddr(eth)
        e.type = e.IP_TYPE
        ipp = pkt.ipv4()
        ipp.protocol = ipp.ICMP_PROTOCOL
        ipp.srcip = p.addresses['AF_INET']['addr']
        ipp.dstip = IPAddr(ip)
        icmp = pkt.icmp()
        icmp.type = pkt.ICMP.TYPE_ECHO_REQUEST
        icmp.payload = "PingPing" * 6
        ipp.payload = icmp
        e.payload = ipp
        p.inject(e)
    # Drop into an interactive shell with cb/p/ping available.
    import code
    code.interact(local=locals())
| {
"content_hash": "f5cbd112e5837fc4c50b8c919a4695da",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 74,
"avg_line_length": 27.60754716981132,
"alnum_prop": 0.5908966648441771,
"repo_name": "kavitshah8/SDNDeveloper",
"id": "52a6d918dccd854878527cfaf63a4082880c02fe",
"size": "7896",
"binary": false,
"copies": "1",
"ref": "refs/heads/SDN_Developer",
"path": "pox/lib/pxpcap/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15160"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Python",
"bytes": "1113186"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class BankTransferMetadata(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # Free-form metadata: at most 50 key/value pairs allowed.
    validations = {
        ('value',): {
            'max_properties': 50,
        },
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any extra property must be a string.
        return (str,)  # noqa: E501

    # The whole object may be null in API payloads.
    _nullable = True

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # No declared (named) properties -- everything lives in
        # additional properties.
        return {
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that setattr must never treat as
    # model properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """BankTransferMetadata - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # This model has no positional parameters in the spec.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
"content_hash": "8cde599fcd656084155ec2a468517edf",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 110,
"avg_line_length": 38.66470588235294,
"alnum_prop": 0.5545413053400274,
"repo_name": "plaid/plaid-python",
"id": "b28b07b48d6b7d7b53c46770b3d61538af090831",
"size": "6573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/bank_transfer_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
from flask import request, g
def jinja_get_translation_engine():
    """Return the active i18n engine.

    Prefers the engine attached to the current request; falls back to the
    one stored on the application context ``g`` (or None if neither is set).
    """
    try:
        return getattr(request, 'i18n_engine')
    except (AttributeError, RuntimeError):
        # Fix: narrowed from a bare ``except:``.
        # AttributeError: the request has no engine attached.
        # RuntimeError: Flask's proxies raise it outside a request context.
        return getattr(g, 'i18n_engine', None)
| {
"content_hash": "d80f240a9298dd560c795339c664c111",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 23.25,
"alnum_prop": 0.6344086021505376,
"repo_name": "0xbaadf00d/phial",
"id": "5d3a92ab973d34a5b804b424a9d4c555a50ede37",
"size": "210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phial/jinja_tools/get_translation_engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6279"
},
{
"name": "Python",
"bytes": "43000"
}
],
"symlink_target": ""
} |
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.7"
__versionTime__ = "3 August 2012 05:00"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
# Public API exported by "from pyparsing import *".
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
else:
_MAX_INT = sys.maxint
range = xrange
set = lambda s : dict( [(c,0) for c in s] )
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    """Empty class used as a simple namespace for grouped constants."""
    pass
# Character-class strings used throughout the module to build Word
# expressions and other character-set based matchers.
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)  # backslash, via chr(92) to sidestep quoting/escaping issues
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: the argument is the message itself and
            # there is no source string to report against
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # lineno/col/line are computed lazily from (loc, pstr) on access,
        # keeping the constructor cheap (see note above)
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        # advertise the lazily-computed attributes handled by __getattr__
        return "loc msg pstr parserElement lineno col line " \
               "markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
    """
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately"""
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{L{ParseFatalException}}, but thrown internally when an
    C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
    an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # wrap an existing ParseException, preserving its location/message
        super(ParseSyntaxException, self).__init__(
                pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Exception thrown by C{validate()} if the grammar could be improperly recursive."""
    def __init__(self, parseElementList):
        # record the chain of elements that forms the recursive cycle
        self.parseElementTrace = parseElementList

    def __str__(self):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})
    """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        if isinstance(toklist, cls):
            # already a ParseResults: return it unchanged (no re-wrap);
            # __doinit stays False so __init__ won't reset its state
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        if name is not None and name:
            if not modal:
                # non-modal (cumulative) names accumulate every match
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist
    def __getitem__( self, i ):
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: only the most recent value is reported
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weak back-reference so parent/child cycles don't leak
            sub.__parent = wkref(self)
    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]
    def __contains__( self, k ):
        return k in self.__tokdict
    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()
    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
        Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret
    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue
    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]
    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]
    def __getattr__( self, name ):
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                # unknown attribute names read as "" rather than raising,
                # so grammars can probe optional results safely
                return ""
        return None
    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret
    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # shift the other results' stored offsets past our own tokens
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self
    def __radd__(self, other):
        if isinstance(other,int) and other == 0:
            # support sum(list_of_results), which starts from integer 0
            return self.copy()
        # NOTE(review): any other left operand falls through and returns
        # None -- longstanding behavior in this version; do not rely on it
    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
    def __str__( self ):
        out = []
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out.append(_ustr(i))
            else:
                out.append(repr(i))
        return '[' + ', '.join(out) + ']'
    def _asStringList( self, sep='' ):
        # flatten nested results into a flat list of strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out
    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out
    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )
    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map stored offset -> results name, so tokens can be tagged by name
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [ nl, indent, "<", selfTag, ">" ]
        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                xmlBodyText,
                                                "</", resTag, ">" ]
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
    def __lookup(self,sub):
        # reverse lookup: find the results name under which `sub` is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None
    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               self.__tokdict.values()[0][0][1] in (0,-1)):
            # NOTE(review): .values()[0]/.keys()[0] assume Python 2 dict
            # views (lists); not valid on Python 3 iterators
            return self.__tokdict.keys()[0]
        else:
            return None
    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()
        # NOTE(review): list.sort() on items() assumes Python 2 (items()
        # returns a list there)
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)
    # add support for pickle protocol
    def __getstate__(self):
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )
    def __setstate__(self,state):
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # re-establish the weak parent reference dropped by pickling
            self.__parent = wkref(par)
        else:
            self.__parent = None
    def __dir__(self):
        return dir(super(ParseResults,self)) + list(self.keys())
def col(loc, strg):
    """Returns current column within a string, counting newlines as line separators.
    The first column is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
    on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.
    """
    # a location sitting exactly on a newline reports as column 1 of the
    # following line (historical behavior)
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Returns current line number within a string, counting newlines as line separators.
    The first line is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
    on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.
    """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line(loc, strg):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
    """
    # slice from just past the previous newline up to (not including) the
    # next newline; either bound may be absent at the string's edges
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
def _defaultStartDebugAction( instring, loc, expr ):
    # default "about to match" debug hook: report the expression and location
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # default "matched" debug hook: report the expression and its tokens
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # default "match failed" debug hook: report the raised parse exception
    print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    return None
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    # Wrap func so that it can be used as a parse action whether it accepts
    # (s, loc, toks), (loc, toks), (toks), or no arguments at all.
    if func in singleArgBuiltins:
        # builtins can't be introspected for arity; they all take just the tokens
        return lambda s,l,t: func(t)
    limit = [0]
    def wrapper(*args):
        # progressively drop leading arguments until the call succeeds
        while 1:
            try:
                return func(*args[limit[0]:])
            except TypeError:
                # NOTE(review): a TypeError raised *inside* func is
                # indistinguishable from an arity mismatch here, so it may
                # trigger a spurious retry -- known limitation of this version
                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise
    return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars
        """
        # class-wide setting: affects all expressions constructed afterwards
        ParserElement.DEFAULT_WHITE_CHARS = chars
    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        """
        # used when bare strings appear in expressions, e.g. expr + ","
        ParserElement.literalStringClass = cls
    inlineLiteralsUsing = staticmethod(inlineLiteralsUsing)
    def __init__( self, savelist=False ):
        """Initialize common ParserElement state; savelist controls whether
        results are kept as a list in the returned ParseResults."""
        self.parseAction = list()
        self.failAction = None
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False
    def copy( self ):
        """Make a copy of this C{ParserElement}.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element."""
        # shallow copy, then unshare the mutable per-instance lists
        cpy = copy.copy( self )
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy
    def setName( self, name ):
        """Define name for this expression, for use in debugging."""
        self.name = name
        self.errmsg = "Expected " + self.name
        if hasattr(self,"exception"):
            # keep any pre-built exception's message in sync with the new name
            self.exception.msg = self.errmsg
        return self
    def setResultsName( self, name, listAllMatches=False ):
        """Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original C{ParserElement} object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.

        You can also set results names using the abbreviated syntax,
        C{expr("name")} in place of C{expr.setResultsName("name")} -
        see L{I{__call__}<__call__>}.
        """
        newself = self.copy()
        if name.endswith("*"):
            # trailing "*" is shorthand for listAllMatches=True
            name = name[:-1]
            listAllMatches=True
        newself.resultsName = name
        newself.modalResults = not listAllMatches
        return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
        about to be parsed. Set C{breakFlag} to True to enable, False to
        disable.
        """
        if breakFlag:
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the wrapped method so setBreak(False) can restore it
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def setParseAction( self, *fns, **kwargs ):
        """Define action to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
         - s   = the original string being parsed (see note below)
         - loc = the location of the matching substring
         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.

        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
        consistent view of the parsed string, the parse location, and line and column
        positions within the parsed string.
        """
        # _trim_arity adapts each fn to whatever argument count it accepts
        self.parseAction = list(map(_trim_arity, list(fns)))
        self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
        return self
    def addParseAction( self, *fns, **kwargs ):
        """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
        self.parseAction += list(map(_trim_arity, list(fns)))
        # once any action requests callDuringTry, it stays enabled
        self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
        return self
    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
        C{fn(s,loc,expr,err)} where:
         - s = string being parsed
         - loc = location where expression match was attempted and failed
         - expr = the parse expression that failed
         - err = the exception thrown
        The function returns no value.  It may throw C{L{ParseFatalException}}
        if it is desired to stop parsing immediately."""
        self.failAction = fn
        return self
    def _skipIgnorables( self, instring, loc ):
        # repeatedly consume ignorable expressions (comments etc.) until no
        # ignore expression matches at the current location
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    # this ignore expression no longer matches; try the next
                    pass
        return loc
    def preParse( self, instring, loc ):
        # advance loc past ignorable expressions and then leading whitespace
        if self.ignoreExprs:
            loc = self._skipIgnorables( instring, loc )
        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while loc < instrlen and instring[loc] in wt:
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # default match implementation: succeed without consuming input;
        # subclasses override with their real matching logic
        return loc, []
    def postParse( self, instring, loc, tokenlist ):
        # hook for subclasses to post-process tokens after a match; default no-op
        return tokenlist
    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        # Core parse driver: optional preparse (skip ignorables/whitespace),
        # parseImpl, postParse, then parse actions and debug callbacks.
        # The debug and non-debug paths are deliberately duplicated so the
        # common (non-debug) case stays as fast as possible.
        debugging = ( self.debug ) #and doActions )
        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # treat running off the end of input as a normal mismatch
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException:
                #~ print ("Exception raised:", err)
                err = None
                if self.debugActions[2]:
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    if err is None:
                        err = sys.exc_info()[1]
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                # subclass guarantees no IndexError; skip the try overhead
                loc,tokens = self.parseImpl( instring, preloc, doActions )
        tokens = self.postParse( instring, loc, tokens )
        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # parse action returned replacement tokens
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        err = sys.exc_info()[1]
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
        return loc, retTokens
    def tryParse( self, instring, loc ):
        # lookahead parse: return only the end location, never run parse actions
        try:
            return self._parse( instring, loc, doActions=False )[0]
        except ParseFatalException:
            # downgrade fatal errors so alternatives can continue backtracking
            raise ParseException( instring, loc, self.errmsg, self)
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        lookup = (self,instring,loc,callPreParse,doActions)
        if lookup in ParserElement._exprArgCache:
            value = ParserElement._exprArgCache[ lookup ]
            if isinstance(value, Exception):
                # cached failure: re-raise the same exception instance
                raise value
            # return a copy so callers can't mutate the cached results
            return (value[0],value[1].copy())
        else:
            try:
                value = self._parseNoCache( instring, loc, doActions, callPreParse )
                # likewise cache a copy, insulated from later mutation
                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
                return value
            except ParseBaseException:
                pe = sys.exc_info()[1]
                ParserElement._exprArgCache[ lookup ] = pe
                raise
    # _parse is the dispatch point; enablePackrat() rebinds it to _parseCache
    _parse = _parseNoCache
    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    _exprArgCache = {}
    def resetCache():
        # clear the shared (class-wide) packrat cache
        ParserElement._exprArgCache.clear()
    resetCache = staticmethod(resetCache)
    _packratEnabled = False
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
        Repeated parse attempts at the same string location (which happens
        often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code.  Memoizing is done of
        both valid results and parsing exceptions.

        This speedup may break existing programs that use parse actions that
        have side-effects.  For this reason, packrat parsing is disabled when
        you first import pyparsing.  To activate the packrat feature, your
        program must call the class method C{ParserElement.enablePackrat()}.  If
        your program uses C{psyco} to "compile as you go", you must call
        C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
        Python will crash.  For best results, call C{enablePackrat()} immediately
        after importing pyparsing.
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # one-way switch: reroute all parsing through the memoizing path
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
    def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explictly expand the tabs in your input string before calling
           C{parseString}
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require that nothing but ignorables/whitespace remains
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
        else:
            return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
    """Scan the input string for expression matches.  Each match will return the
    matching tokens, start location, and end location.  May be called with optional
    C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
    C{overlap} is specified, then overlapping matches will be reported.
    Note that the start and end locations are reported relative to the string
    being parsed.  See L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs."""
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    # bind bound-methods to locals; this generator's inner loop is hot
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn( instring, loc )
                nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
            except ParseException:
                # no match here; advance one character past the preparse point
                loc = preloc+1
            else:
                if nextLoc > loc:
                    matches += 1
                    yield tokens, preloc, nextLoc
                    if overlap:
                        # NOTE(review): mixes `nextloc` (fresh preparse) and
                        # `nextLoc` (end of match) -- this is the historical
                        # pyparsing code; confirm before "fixing".
                        nextloc = preparseFn( instring, loc )
                        if nextloc > loc:
                            loc = nextLoc
                        else:
                            loc += 1
                    else:
                        loc = nextLoc
                else:
                    # zero-width match: step forward to guarantee progress
                    loc = preloc+1
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def transformString( self, instring ):
    """Extension to C{L{scanString}}, to modify matching text with modified tokens that may
    be returned from a parse action.  To use C{transformString}, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking C{transformString()} on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action.  C{transformString()} returns the resulting transformed string."""
    out = []
    lastE = 0
    # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
    # keep string locs straight between transformString and scanString
    self.keepTabs = True
    try:
        for t,s,e in self.scanString( instring ):
            # copy the unmatched text between the previous match and this one
            out.append( instring[lastE:s] )
            if t:
                if isinstance(t,ParseResults):
                    out += t.asList()
                elif isinstance(t,list):
                    out += t
                else:
                    out.append(t)
            lastE = e
        out.append(instring[lastE:])
        # drop empty fragments before joining
        out = [o for o in out if o]
        return "".join(map(_ustr,_flatten(out)))
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
    """Another extension to C{L{scanString}}, simplifying the access to the tokens found
    to match the given parse expression.  May be called with optional
    C{maxMatches} argument, to clip searching after 'n' matches are found.
    """
    try:
        hits = [tokens for tokens, start, end in self.scanString(instring, maxMatches)]
        return ParseResults(hits)
    except ParseBaseException:
        if not ParserElement.verbose_stacktrace:
            # re-raise from here to hide pyparsing's internal stack frames
            raise sys.exc_info()[1]
        raise
def __add__(self, other):
    """Implementation of + operator - returns C{L{And}}"""
    if isinstance(other, basestring):
        # bare strings are promoted to parser elements automatically
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return And([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __radd__(self, other):
    """Implementation of + operator when left operand is not a C{L{ParserElement}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # delegate to the promoted element's __add__
        return other + self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __sub__(self, other):
    """Implementation of - operator, returns C{L{And}} with error stop"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # the _ErrorStop marker makes failures after this point non-backtrackable
        return And([self, And._ErrorStop(), other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rsub__(self, other):
    """Implementation of - operator when left operand is not a C{L{ParserElement}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self,other):
    """Implementation of * operator, allows use of C{expr * 3} in place of
    C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
    tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
    may also include C{None} as in:
    - C{expr*(n,None)} or C{expr*(n,)} is equivalent
    to C{expr*n + L{ZeroOrMore}(expr)}
    (read as "at least n instances of C{expr}")
    - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
    (read as "0 to n instances of C{expr}")
    - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
    - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
    Note that C{expr*(None,n)} does not raise an exception if
    more than n exprs exist in the input stream; that is,
    C{expr*(None,n)} does not enforce a maximum number of expr
    occurrences.  If this behavior is desired, then write
    C{expr*(None,n) + ~expr}
    """
    if isinstance(other,int):
        minElements, optElements = other,0
    elif isinstance(other,tuple):
        # normalize to an (min, max) pair, padding missing members with None
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0],int) and other[1] is None:
            # open-ended repetition: (n, None)
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self*other[0] + ZeroOrMore(self)
        elif isinstance(other[0],int) and isinstance(other[1],int):
            minElements, optElements = other
            # optElements counts the *optional* repetitions beyond the minimum
            optElements -= minElements
        else:
            # BUGFIX: the original passed format args as extra TypeError args
            # (comma instead of %), so the message was never interpolated.
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]),type(other[1])))
    else:
        # BUGFIX: same un-interpolated message defect as above.
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError("second tuple value must be greater or equal to first tuple value")
    if minElements == optElements == 0:
        raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
    if (optElements):
        # build nested Optionals for the tail: Optional(e + Optional(e + ...))
        def makeOptionalList(n):
            if n>1:
                return Optional(self + makeOptionalList(n-1))
            else:
                return Optional(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self]*minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self]*minElements)
    return ret
def __rmul__(self, other):
    # Multiplication is commutative here; reuse __mul__ directly.
    return self.__mul__(other)
def __or__(self, other):
    """Implementation of | operator - returns C{L{MatchFirst}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __ror__(self, other):
    """Implementation of | operator when left operand is not a C{L{ParserElement}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __xor__(self, other):
    """Implementation of ^ operator - returns C{L{Or}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __and__(self, other):
    """Implementation of & operator - returns C{L{Each}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other):
    """Implementation of & operator when left operand is not a C{L{ParserElement}}"""
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
    """Implementation of ~ operator - returns C{L{NotAny}} (negative lookahead)."""
    return NotAny( self )
def __call__(self, name):
    """Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
    userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    could be written as::
    userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
    passed as C{True}.
    """
    # setResultsName itself interprets a trailing '*' in name
    return self.setResultsName(name)
def suppress( self ):
    """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
    cluttering up returned output.
    """
    # wrap, rather than mutate, so the unsuppressed element stays usable
    return Suppress( self )
def leaveWhitespace(self):
    """Disables the skipping of whitespace before matching the characters in the
    C{ParserElement}'s defined pattern.  This is normally only used internally by
    the pyparsing module, but may be needed in some whitespace-sensitive grammars.
    """
    # mark this element whitespace-sensitive; return self to allow chaining
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Overrides the default whitespace chars for this element only."""
    # record the custom set and stop inheriting the class-wide default
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    self.skipWhitespace = True
    return self
def parseWithTabs(self):
    """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
    Must be called before C{parseString} when the input grammar contains elements that
    match C{<TAB>} characters."""
    # keepTabs=True suppresses the implicit expandtabs() in parseString/scanString
    self.keepTabs = True
    return self
def ignore(self, other):
    """Define expression to be ignored (e.g., comments) while doing pattern
    matching; may be called repeatedly, to define multiple comment or other
    ignorable patterns.
    """
    if not isinstance(other, Suppress):
        # wrap plain expressions so ignored text never appears in results
        self.ignoreExprs.append(Suppress(other.copy()))
    elif other not in self.ignoreExprs:
        # already-suppressed expressions are registered at most once
        self.ignoreExprs.append(other.copy())
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while doing pattern matching.
    Any action passed as None falls back to the module default."""
    self.debugActions = (
        startAction or _defaultStartDebugAction,
        successAction or _defaultSuccessDebugAction,
        exceptionAction or _defaultExceptionDebugAction,
    )
    self.debug = True
    return self
def setDebug(self, flag=True):
    """Enable display of debugging messages while doing pattern matching.
    Set C{flag} to True to enable, False to disable."""
    if not flag:
        self.debug = False
    else:
        # install the module-default debug callbacks
        self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
    return self
def __str__( self ):
    # Subclasses set self.name (often via _ustr(self) / strRepr machinery).
    return self.name
def __repr__( self ):
    # repr and str are intentionally identical for parser elements
    return _ustr(self)
def streamline(self):
    # One-time optimization hook; compound subclasses override this to
    # flatten nested expressions.  Clearing strRepr forces __str__ rebuild.
    self.strRepr = None
    self.streamlined = True
    return self
def checkRecursion( self, parseElementList ):
    # Default no-op: atomic elements cannot be left-recursive.  Compound
    # subclasses override this to walk their contained expressions.
    pass
def validate( self, validateTrace=None ):
    """Check defined expressions for valid structure, check for infinite recursive definitions.

    C{validateTrace} is accepted for signature compatibility with compound
    subclasses; it is unused here.  The default was changed from a mutable
    C{[]} (shared across calls -- a classic Python pitfall) to C{None}.
    """
    if validateTrace is None:
        validateTrace = []
    self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.
    If a filename is specified (instead of a file object),
    the entire file is opened, read, and closed before parsing.
    """
    try:
        # duck-type: anything with .read() is treated as an open file
        file_contents = file_or_filename.read()
    except AttributeError:
        # a path was given; the context manager guarantees the handle is
        # closed even if read() raises (the original leaked it in that case)
        with open(file_or_filename, "r") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException:
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        exc = sys.exc_info()[1]
        raise exc
def getException(self):
    # Build the reusable failure exception cached as "myException"
    # (see __getattr__); location and string are filled in at raise time.
    return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
    # Lazily create and cache the per-element ParseException on first
    # access; any other missing attribute is a genuine AttributeError.
    if aname == "myException":
        self.myException = ret = self.getException();
        return ret;
    else:
        raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
    if isinstance(other, ParserElement):
        # identity first, then structural comparison of all attributes
        return self is other or self.__dict__ == other.__dict__
    elif isinstance(other, basestring):
        # comparing to a string asks "does this grammar fully match it?"
        try:
            self.parseString(_ustr(other), parseAll=True)
            return True
        except ParseBaseException:
            return False
    else:
        # NOTE(review): this compares the super() proxy object itself to
        # `other` (not a cooperative super().__eq__ call); looks suspect but
        # is the historical pyparsing code -- confirm before changing.
        return super(ParserElement,self)==other
def __ne__(self,other):
    # defined explicitly for Python 2, which does not derive != from ==
    return not (self == other)
def __hash__(self):
    # hash by object identity, independent of the value-based __eq__
    return hash(id(self))
def __req__(self,other):
    # delegates to __eq__
    return self == other
def __rne__(self,other):
    # delegates to __eq__, negated
    return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__( self ):
        # tokens are leaves: they never accumulate sub-expression result lists
        super(Token,self).__init__( savelist=False )
    def setName(self, name):
        s = super(Token,self).setName(name)
        # keep the failure message in sync with the updated name
        self.errmsg = "Expected " + self.name
        return s
class Empty(Token):
    """An empty token, will always match."""
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        # consumes no input: may return empty, can never index past the end
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that will never match."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
    def parseImpl( self, instring, loc, doActions=True ):
        # always fail: reuse the cached exception (ParserElement.__getattr__)
        # and point it at the current location
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # degrade an empty Literal to Empty rather than failing outright
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # failure path reuses the cached exception for speed
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
# Shorthand alias, and register Literal as the class used by ParserElement's
# operators to auto-convert bare strings into parser elements.
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}::
    Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
    Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is C{False}.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # compare in upper case; self.match keeps the original casing
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # a keyword matches only if NOT bordered by identifier characters
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        c = super(Keyword,self).copy()
        # copies revert to the class-wide default identifier characters
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # store the match in upper case so parseImpl can compare case-free
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[ loc:loc+self.matchLen ].upper() == self.match:
            # return the original-case string supplied at construction
            return loc+self.matchLen, self.returnString
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class CaselessKeyword(Keyword):
    """Caseless variant of C{Keyword}; match casing follows the defining string."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # like Keyword.parseImpl (caseless branch), but only checks the
        # trailing boundary, not the leading one
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{exclude} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        if excludeChars:
            # filter excluded characters out of both character sets up front
            initChars = ''.join([c for c in initChars if c not in excludeChars])
            if bodyChars:
                bodyChars = ''.join([c for c in bodyChars if c not in excludeChars])
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body defaults to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # fast path: simple unbounded words can be matched with a compiled regex
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # deliberate broad catch (historical): fall back to char-set scan
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # regex fast path, when one was compiled in __init__
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc, result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # character-set scan: consume body chars up to maxLen
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # an explicit max was given but more body chars follow: fail
            throwException = True
        if self.asKeyword:
            # keyword mode: reject matches bordered by body characters
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            # deliberate broad catch (historical): name may not be set yet
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # type of a compiled pattern, for isinstance checks below
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # pre-compiled pattern: use as-is; flags are recorded but unused
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        # expose named groups as named results alongside the full match
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            # deliberate broad catch (historical)
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=None)
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # Build a single regex for the whole quoted string.  The pattern is
        # "<open>(?:<any-non-terminating-char> | <partial-end-quote> |
        #         <escaped-quote> | <escaped-char>)*<close>".
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow prefixes of a multi-char end quote that do not complete it
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
            self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-char test before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            # deliberate broad catch (historical)
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # the forbidden set itself often includes whitespace, so do not skip it
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # consume characters until a forbidden one or maxLen is reached
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
              (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            # deliberate broad catch (historical)
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class."""
    # display names used to build a readable element name
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # whatever we are matching must not also be auto-skipped
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # consume matching whitespace characters up to maxLen
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    # Shared base for zero-width position assertions (GoToColumn, LineStart,
    # LineEnd, StringStart, ...): they match no text, so they may return
    # empty and can never index past the end of the string.
    def __init__( self ):
        super(_PositionToken,self).__init__()
        self.name=self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # 1-based target column (compared against the col() helper)
        self.col = colno
    def preParse( self, instring, loc ):
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            # advance over whitespace until the target column (or EOS)
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # already past the target column: cannot go backwards
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # the skipped-over text is returned as the match
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # skip all default whitespace except newline, which we must see
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): checks the char at preloc but advances loc, not
        # preloc - looks intentional for this version, but verify.
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # a line start is position 0, leading-whitespace-only prefix,
        # or a position immediately after a newline
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # skip all default whitespace except newline, which we must see
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of input also counts as end of line; advancing past the
            # end keeps a second LineEnd from matching at the same spot
            return loc+1, []
        else:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                #~ raise ParseException( instring, loc, "Expected start of text" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            #~ raise ParseException( instring, loc, "Expected end of text" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # advance past the end so a repeated StringEnd cannot re-match
            return loc+1, []
        elif loc > len(instring):
            # already past the end (e.g. after LineEnd at EOF) - still a match
            return loc, []
        else:
            # unreachable given the comparisons above; kept for safety
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # loc == 0 is always a word start; otherwise require a non-word
        # character before loc and a word character at loc
        if loc != 0:
            if (instring[loc-1] in self.wordChars or
                instring[loc] not in self.wordChars):
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = set(wordChars)
        # do not skip whitespace: the character right at loc matters
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # end of string is always a word end; otherwise require a word
        # character before loc and a non-word character at loc
        if instrlen>0 and loc<instrlen:
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                #~ raise ParseException( instring, loc, "Expected end of word" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.

    Holds the list of contained expressions in C{self.exprs}; concrete
    subclasses (C{And}, C{Or}, C{MatchFirst}, C{Each}) define how those
    expressions combine during parsing.
    """
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # Normalize the argument to a list of expressions: accept a list,
        # a plain string (wrapped as a Literal), any other iterable, or a
        # single expression.
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        """Add another contained expression; invalidates the cached repr."""
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy first so shared sub-expressions elsewhere are unaffected
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Suppress-wrapped ignorables are only added once; either way the
        # new ignore expression is propagated to all contained expressions.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # are not silently swallowed while building the repr
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        """Optimize the expression tree in place and return self."""
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # default [] is safe here: the list is only copied, never mutated
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
    def copy(self):
        # deep-copy contained expressions so the copy is independent
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
class And(ParseExpression):
    """Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    """
    class _ErrorStop(Empty):
        # Marker element: once seen, subsequent failures in this And are
        # raised as non-backtrackable ParseSyntaxExceptions.
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # And can match empty only if every element can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # inherit whitespace behavior from the first element
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the error stop: failures become syntax errors that
                # alternatives may not backtrack over
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot be
            # reached at the same position - stop the recursion check
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # Or can match empty if any alternative can
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        # Try every alternative, keep the one with the longest match, and
        # remember the deepest failure for error reporting.
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative, this time running parse actions
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        # MatchFirst can match empty if any alternative can
        if exprs:
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # Return the first alternative that matches; track the deepest
        # failure so the eventual error message points at the best attempt.
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException:
                # use sys.exc_info() (as in Or.parseImpl) instead of the
                # Python-2-only "except ParseException, err:" syntax
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # Each can match empty only if every element can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # defer grouping of elements until the first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        # Lazily partition the contained expressions into optional,
        # repeatable, and required groups on the first call.
        if self.initExprGroups:
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs  if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # Repeatedly sweep the remaining candidates, recording the order in
        # which they match, until a full sweep matches nothing new.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # Now re-parse (with actions) in the discovered match order.
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # Merge named results, accumulating values for duplicated keys.
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.

    Wraps a single contained expression in C{self.expr}; concrete
    subclasses (C{Optional}, C{ZeroOrMore}, C{NotAny}, ...) modify how
    that expression is applied.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        # expr may be None (e.g. an undefined Forward); in that case the
        # attributes below keep their base-class defaults
        if expr is not None:
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # guard before copying: previously self.expr.copy() ran outside the
        # None check and crashed on an undefined Forward
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # mirror ParseExpression.ignore: add once for Suppress, then
        # propagate the new ignorable to the contained expression
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        # default [] is safe here: the list is only copied, never mutated
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # are not silently swallowed while building the repr
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression. C{FollowedBy}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position. C{FollowedBy} always returns a null token list."""
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # tryParse raises on failure; on success we deliberately discard
        # its result and stay at the original location
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression. C{NotAny}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
    always returns a null token list. May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed only when the contained expression fails to match here
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException,IndexError):
            pass
        else:
            #~ raise ParseException(instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # Repeatedly match expr, accumulating tokens, until it fails;
        # zero matches is still a success.
        tokens = []
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # only keep iterations that produced tokens or named results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # failure terminates the repetition, never the parse
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are always collected as a list
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            # subsequent matches are optional, as in ZeroOrMore
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # only keep iterations that produced tokens or named results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # failure terminates the repetition, never the parse
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are always collected as a list
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        # NOTE: the parameter is named 'exprs' for historical reasons but
        # holds a single expression
        super(Optional,self).__init__( exprs, savelist=False )
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # no match: substitute the default (if one was supplied),
            # honoring any results name on the contained expression
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    If C{include} is set to true, the matched expression is also parsed (the skipped text
    and matched expression are returned as a 2-element list).  The C{ignore}
    argument is used to define grammars (typically quoted strings and comments) that
    might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # failOn: an expression that, if seen while scanning, aborts the skip
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # Scan forward one position at a time until the target expression
        # matches, a failOn expression matches (error), or input ends.
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        while loc <= instrlen:
            try:
                if self.failOn:
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        # failParse distinguishes this error from a simple
                        # "target not found here" in the handler below
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                    failParse = False
                if self.ignoreExpr is not None:
                    # skip over ignorable regions (quoted strings, comments)
                    # that could contain false matches
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print "found ignoreExpr, advance to", loc
                        except ParseBaseException:
                            break
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # consume the target too and return [skipped + match]
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    raise
                else:
                    loc += 1
        # scanned to the end without finding the target expression
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        """Define the deferred expression; returns self."""
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        # adopt the contained expression's parsing characteristics
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        # Return self, not None: '<<=' dispatches to __ilshift__ below, and
        # returning None there would rebind the caller's variable to None,
        # silently destroying the Forward.
        return self
    __ilshift__ = __lshift__
    def leaveWhitespace( self ):
        # do not touch self.expr - it may not be defined yet
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily switch to _ForwardNoRecurse so a recursive grammar's
        # repr terminates instead of recursing forever
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # undefined Forward: create a new one that defers to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    # Temporary stand-in class used by Forward.__str__ to break infinite
    # recursion when printing self-referential grammars.
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of C{ParseElementEnhance}, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        super(TokenConverter,self).__init__( expr )#, savelist )
        # converters post-process tokens; they do not nest them as a list
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated - use the upcaseTokens parse action instead.
    """
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # NOTE(review): string.upper is a Python-2-only function; this would
        # need str.upper under Python 3 - consistent with this module's py2 idioms.
        return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        # in adjacent mode, ignorables apply only to Combine itself, not to
        # the contained expression (which must match contiguously)
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # join all matched tokens into a single string, preserving any
        # named results attached to the token list
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # nest the matched tokens one level deeper
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # Use each sub-token's first element as a dictionary key into the
        # overall ParseResults.
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # numeric keys are stored by their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard all matched tokens
        return []
    def suppress( self ):
        # already suppressed - suppressing again is a no-op
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        # first invocation runs the wrapped action; later ones fail the parse
        if not self.called:
            results = self.callable(s,l,t)
            self.called = True
            return results
        raise ParseException(s,l,"")
    def reset(self):
        # re-arm the wrapper so the action may fire once more
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Wraps parse action f so that every invocation reports its entry
    arguments and its return value (or raised exception) on stderr.
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # __name__ works on both Python 2 and 3; the original used the
        # Python-2-only func_name attribute
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    """
    # a readable default name, e.g. W:(abcd...) [, W:(abcd...)]...
    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    if combine:
        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    else:
        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # once the count n is parsed, rebind the Forward to exactly n
        # copies of expr (or to empty when n == 0)
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # copy so the caller's expression is not mutated below
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr )
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # rebind rep to literal(s) matching exactly what expr just matched
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # expr matched nothing, so rep must also match nothing
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # The repeat parses with a copy of the same grammar as 'expr' ...
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            # ... and then the repeat's tokens must equal the first match's tokens.
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        # setParseAction (not add): replace any action installed by a prior match.
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
    character class (backslash, ^, -, ]) and rewrite literal newline and
    tab characters as their \\n / \\t escape sequences."""
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
    - strs - a string of space-delimited literals, or a list of string literals
    - caseless - (default=False) - treat all literals as caseless
    - useRegex - (default=True) - as an optimization, will generate a Regex
      object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True},
      or if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # Bug fix: 'symbols' was left unbound here, so the warning was
        # immediately followed by a NameError below. Fall back to an empty
        # alternative set (a MatchFirst that never matches).
        symbols = []

    # Remove duplicates and reorder so that no symbol is shadowed by an
    # earlier symbol that is a prefix of it (longest-first testing).
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # All symbols are single characters - a character class is fastest.
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # Bug fix: was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; only re.error (an Exception
            # subclass) is expected when building the Regex.
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)

    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Build a C{L{Dict}} expression from separate *key* and *value* patterns.

    Takes care of wrapping each key/value pair in a C{L{Group}} and repeating
    it with C{L{ZeroOrMore}}, in the order C{Dict} requires. The key pattern
    may include delimiters or punctuation as long as they are suppressed, so
    only the significant key text remains; the value pattern may define named
    results, which then appear in the C{Dict} results.
    """
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
    require the inspect module to chase up the call stack. By default, returns a
    string containing the original parsed text.

    If the optional C{asString} argument is passed as C{False}, then the return value is a
    C{L{ParseResults}} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string. So if
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values."""
    # Empty() matches at the current location without consuming input; its
    # parse action records that location, bracketing 'expr' with start/end markers.
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    # Skip preparse (whitespace skipping) so the end marker records the
    # position immediately after the matched text.
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # Replace all tokens with the raw slice of the input string.
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # Keep a ParseResults: clear positional tokens, insert the raw
            # slice, and drop the internal marker names.
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
def ungroup(expr):
    """Undo pyparsing's default grouping of And expressions, even if all
    but one of the grouped expressions are non-empty, by returning only
    the first token of the match."""
    unwrap_first = lambda toks: toks[0]
    return TokenConverter(expr).setParseAction(unwrap_first)
# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

# Internal building blocks for srange(): parse the body of a regex-style
# "[...]" character class into single chars and char ranges.
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in  r"\]" ])
# Accepts both \xNN and the legacy \0xNN hex escape forms.
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# Expand a (lo, hi) char-range pair into the full run of characters; pass
# single characters through unchanged.
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction. Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\x' (\x21, which is a '!' character)
            (\0x## is also supported for backwards compatibility)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # Bug fix: was a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit. Only a parse failure of the bracket
        # expression is expected here; invalid input yields "".
        return ""
def matchOnlyAtCol(n):
    """Build a parse action that only accepts a match starting at input
    column *n*, raising ParseException otherwise."""
    def verifyCol(strg,locn,toks):
        actual = col(locn,strg)
        if actual == n:
            return
        raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Build a parse action that replaces whatever matched with the single
    literal value *replStr*. Especially useful when used with
    C{L{transformString<ParserElement.transformString>}()}."""
    replacement = [replStr]
    def _replFunc(*args):
        return replacement[:]
    return _replFunc
def removeQuotes(s,l,t):
    """Parse action that strips the surrounding quotation marks from a
    parsed quoted string. To use, add this parse action to a quoted string
    expression::
        quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Parse action that converts every matched token to upper case."""
    return [_ustr(tok).upper() for tok in t]
def downcaseTokens(s,l,t):
    """Parse action that converts every matched token to lower case."""
    return [_ustr(tok).lower() for tok in t]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{L{originalTextFor}}.
    Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        # Relies on stack inspection (see getTokensEndLoc), so this only
        # works when called directly as a parse action.
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # Replace all tokens with the raw matched slice of the input.
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # Read the parser's local 'loc' variable - the position just
                # past the tokens being processed.
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # Break the reference cycle created by holding frame objects.
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    When xml is True, attribute values must be double-quoted and the tag name is
    matched case-sensitively; otherwise (HTML mode) unquoted attribute values are
    accepted, attribute names are lower-cased, and matching is caseless.
    """
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")

    # Results names like "startDiv"/"endDiv" (tag name title-cased, ':' removed).
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Construct matching open/close tag expressions for an HTML tag:
    caseless tag matching, unquoted attribute values accepted."""
    return _makeTags( tagStr, xml=False )
def makeXMLTags(tagStr):
    """Construct matching open/close tag expressions for an XML tag:
    case-sensitive tag matching, attribute values must be quoted."""
    return _makeTags( tagStr, xml=True )
def withAttribute(*args,**attrDict):
    """Build a validating parse action for start tags created with
    C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify
    a starting tag with required attribute values, avoiding false matches on
    common tags such as C{<TD>} or C{<DIV>}.

    The required attributes may be given as:
     - keyword arguments, as in C{(align="right")}, or
     - an explicit dict with the C{**} operator, when an attribute name is a
       Python reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in
       C{( ("ns1:class", "Customer"), ("ns2:align","right") )}

    Attribute names with a namespace prefix require the tuple form. Attribute
    names are matched insensitive to upper/lower case. To verify that an
    attribute merely exists, regardless of value, pass
    C{withAttribute.ANY_VALUE} as the value.
    """
    if args:
        required = list(args)
    else:
        required = list(attrDict.items())
    required = [(name, value) for name, value in required]
    def pa(s,l,tokens):
        for name, expected in required:
            if name not in tokens:
                raise ParseException(s,l,"no matching attribute " + name)
            if expected != withAttribute.ANY_VALUE and tokens[name] != expected:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (name, tokens[name], expected))
    return pa
# Sentinel for withAttribute(): require the attribute to exist, any value.
withAttribute.ANY_VALUE = object()

# Associativity constants for operatorPrecedence(); unique sentinels compared
# by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar, ordered from tightest- to loosest-binding; each
       tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
        - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted)
    """
    ret = Forward()
    # The tightest-binding operand: the base expression or a parenthesized ret.
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    for i,operDef in enumerate(opList):
        # Pad with None so a 3-tuple (no parse action) unpacks cleanly.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # Postfix unary: operand followed by one or more operators.
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # opExpr None: juxtaposition (implicit operator).
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                # Prefix unary, right-recursive through thisExpr.
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # This level matches its operator form, or falls through to the
        # tighter-binding level below it.
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Pre-built quoted-string expressions; all allow doubled-quote and backslash
# escapes (including \xNN) but no embedded newlines.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# Python-style u'...' / u"..." unicode string literal.
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    NOTE(review): the default ignoreExpr is evaluated once at import time, so
    all calls using the default share one expression object.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Build a default content expression from the delimiter characters.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-char delimiters: exclude them via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-char delimiters: use negative lookahead on each literal.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    ret = Forward()
    # Recursive definition: a group is opener, then any mix of ignored text,
    # nested groups, or content, then closer.
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)

    A valid block must contain at least one C{blockStatement}.
    """
    def checkPeerIndent(s,l,t):
        # At end of input there is nothing to check.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # Deeper indent: push a new level.
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # A valid unindent returns to some enclosing level (at most the parent).
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    # Consume line ends without treating indentation whitespace as skippable.
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER   = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow backslash line-continuations inside statements.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 letter and punctuation ranges (given as \0xNN escapes for srange).
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Generic HTML tag matchers and a small HTML-entity replacer.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")

htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")

javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")

# Comma-separated list items: unquoted runs (internal whitespace allowed) or
# quoted strings; empty items default to "".
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":

    # Smoke-test driver: parse a string with the simpleSQL grammar below and
    # print either the tokens or the parse-error location.
    def test( teststring ):
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->"   + str(tokenlist))
            print ("tokens = "         + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = "  + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            err = sys.exc_info()[1]
            # Show the offending line with a caret under the error column.
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()

    # Minimal "SELECT columns FROM tables" grammar used for the demo runs.
    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )

    ident          = Word( alphas, alphanums + "_$" )
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL      = ( selectToken + \
                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
                     fromToken + \
                     tableNameList.setResultsName( "tables" ) )

    # Valid queries, then deliberately malformed ones to exercise error output.
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2   " )
| {
"content_hash": "c52c593e84dc69d79da4bc9efa4b4c23",
"timestamp": "",
"source": "github",
"line_count": 3716,
"max_line_length": 196,
"avg_line_length": 41.60844994617869,
"alnum_prop": 0.5653259344056605,
"repo_name": "5monkeys/pyparsing",
"id": "bbe38b8b99399462e5b1bbe91f77abbc4329a61b",
"size": "155804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyparsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "563828"
}
],
"symlink_target": ""
} |
import pytest
import types
import sys
import asyncio
import collections.abc
from functools import wraps
import gc
from . import (
async_generator, yield_, yield_from_, isasyncgen, isasyncgenfunction,
)
# like list(it) but works on async iterators
async def collect(ait):
    """Exhaust the async iterator *ait* and return its values as a list."""
    result = []
    async for item in ait:
        result.append(item)
    return result
################################################################
#
# Basic test
#
################################################################

@async_generator
async def async_range(count):
    """Asynchronously yield the integers 0 .. count-1, logging each yield."""
    for idx in range(count):
        print("Calling yield_({})".format(idx))
        await yield_(idx)
@async_generator
async def double(ait):
    """Yield twice each value produced by *ait*, sleeping briefly between items."""
    async for item in ait:
        doubled = item * 2
        await yield_(doubled)
        await asyncio.sleep(0.001)
class HasAsyncGenMethod:
    """Fixture: holds a multiplier and exposes an async-generator method."""

    def __init__(self, factor):
        # Multiplier applied to every value yielded by async_multiplied.
        self._factor = factor

    @async_generator
    async def async_multiplied(self, ait):
        """Yield each value from *ait* multiplied by this instance's factor."""
        async for item in ait:
            await yield_(item * self._factor)
@pytest.mark.asyncio
async def test_async_generator():
    """Basic driving of @async_generator functions: plain, chained, and as a method."""
    assert await collect(async_range(10)) == list(range(10))
    assert (await collect(double(async_range(5)))
            == [0, 2, 4, 6, 8])
    tripler = HasAsyncGenMethod(3)
    assert (await collect(tripler.async_multiplied(async_range(5)))
            == [0, 3, 6, 9, 12])
@async_generator
async def agen_yield_no_arg():
    """Fixture: a single yield_() call with no argument (should yield None)."""
    await yield_()
@pytest.mark.asyncio
async def test_yield_no_arg():
    """yield_() without an argument yields None."""
    assert await collect(agen_yield_no_arg()) == [None]
################################################################
#
# async_generators return value
#
################################################################

@async_generator
async def async_gen_with_non_None_return():
    """Fixture: yields two values then returns a non-None value."""
    await yield_(1)
    await yield_(2)
    return "hi"
@pytest.mark.asyncio
async def test_bad_return_value():
    """A non-None return value must surface as StopAsyncIteration.args[0]."""
    gen = async_gen_with_non_None_return()
    async for item in gen:  # pragma: no branch
        assert item == 1
        break
    async for item in gen:  # pragma: no branch
        assert item == 2
        break
    # Bug fix: the original 'try: ... except StopAsyncIteration:' passed
    # silently when NO exception was raised at all; pytest.raises fails the
    # test in that case.
    with pytest.raises(StopAsyncIteration) as excinfo:
        await gen.__anext__()
    assert excinfo.value.args[0] == "hi"
################################################################
#
# Exhaustive tests of the different ways to re-enter a coroutine.
#
# It used to be that re-entering via send/__next__ would work, but throw()
# immediately followed by an await yield_(...) wouldn't work, and the
# YieldWrapper object would propagate back out to the coroutine runner.
#
# Before I fixed this, the 'assert value is None' check below would fail
# (because of the YieldWrapper leaking out), and if you removed that
# assertion, then the code would appear to run successfully but the final list
# would just be [1, 3] instead of [1, 2, 3].
#
################################################################

class MyTestError(Exception):
    """Marker exception thrown into the coroutine by the hostile runner."""
    pass
# This unconditionally raises a MyTestError exception, so from the outside
# it's equivalent to a simple 'raise MyTestError'. But, for this test to check
# the thing we want it to check, the point is that the exception must be
# thrown in from the coroutine runner -- this simulates something like an
# 'await sock.recv(...) -> TimeoutError'.
@types.coroutine
def hit_me():
    yield "hit me"
# Suspends and expects the runner to resume it via send(1).
@types.coroutine
def number_me():
    assert (yield "number me") == 1
# Suspends and expects the runner to resume it via __next__() (i.e. send(None)).
@types.coroutine
def next_me():
    assert (yield "next me") is None
@async_generator
async def yield_after_different_entries():
    """Fixture: perform a yield_ immediately after each re-entry style
    (throw, send, __next__) to verify all resume paths work."""
    await yield_(1)
    try:
        await hit_me()
    except MyTestError:
        # Re-entered via throw(): the following yield_ must still work.
        await yield_(2)
    await number_me()
    await yield_(3)
    await next_me()
    await yield_(4)
def hostile_coroutine_runner(coro):
    """Drive *coro* by hand, choosing throw()/send()/__next__() based on the
    sentinel string it yields, so every re-entry path is exercised.
    Returns the coroutine's return value."""
    coro_iter = coro.__await__()
    value = None
    while True:
        try:
            if value == "hit me":
                value = coro_iter.throw(MyTestError())
            elif value == "number me":
                value = coro_iter.send(1)
            else:
                # Anything else must be a plain suspension point.
                assert value in (None, "next me")
                value = coro_iter.__next__()
        except StopIteration as exc:
            return exc.value
def test_yield_different_entries():
    """All four yields must survive the mixed throw/send/next re-entries."""
    coro = collect(yield_after_different_entries())
    yielded = hostile_coroutine_runner(coro)
    assert yielded == [1, 2, 3, 4]
@pytest.mark.asyncio
async def test_reentrance_forbidden():
    """Iterating an async generator from inside itself must raise ValueError."""
    @async_generator
    async def recurse():
        async for obj in agen:  # pragma: no branch
            await yield_(obj)  # pragma: no cover
    agen = recurse()
    with pytest.raises(ValueError):
        async for _ in agen:  # pragma: no branch
            pass  # pragma: no cover
# https://bugs.python.org/issue32526
@pytest.mark.asyncio
async def test_reentrance_forbidden_while_suspended_in_coroutine_runner():
    """ag_running must be True (and asend refused) while an asend() call is
    suspended in the event loop, even though the generator frame itself is idle."""
    @async_generator
    async def f():
        await asyncio.sleep(1)
        await yield_()

    ag = f()
    asend_coro = ag.asend(None)
    fut = asend_coro.send(None)
    # Now the async generator's frame is not executing, but a call to asend()
    # *is* executing. Make sure that in this case, ag_running is True, and we
    # can't start up another call to asend().
    assert ag.ag_running
    with pytest.raises(ValueError):
        await ag.asend(None)
################################################################
#
# asend
#
################################################################

@async_generator
async def asend_me():
    """Fixture: each yield_ returns the value the caller passed to asend()."""
    assert (await yield_(1)) == 2
    assert (await yield_(3)) == 4
@pytest.mark.asyncio
async def test_asend():
    """asend() delivers its argument as the value of the paused yield_."""
    aiter = asend_me()
    assert (await aiter.__anext__()) == 1
    assert (await aiter.asend(2)) == 3
    with pytest.raises(StopAsyncIteration):
        await aiter.asend(4)
################################################################
#
# athrow
#
################################################################

@async_generator
async def athrow_me():
    """Fixture: each paused yield_ expects a specific exception thrown in."""
    with pytest.raises(KeyError):
        await yield_(1)
    with pytest.raises(ValueError):
        await yield_(2)
    await yield_(3)
@pytest.mark.asyncio
async def test_athrow():
    """athrow() raises at the paused yield_; unhandled exceptions propagate out."""
    aiter = athrow_me()
    assert (await aiter.__anext__()) == 1
    assert (await aiter.athrow(KeyError("oops"))) == 2
    assert (await aiter.athrow(ValueError("oops"))) == 3
    with pytest.raises(OSError):
        await aiter.athrow(OSError("oops"))
################################################################
#
# aclose
#
################################################################

@async_generator
async def close_me_aiter(track):
    """Fixture: records in track[0] whether GeneratorExit was delivered."""
    try:
        await yield_(1)
    except GeneratorExit:
        track[0] = "closed"
        raise
    else:  # pragma: no cover
        track[0] = "wtf"
@pytest.mark.asyncio
async def test_aclose():
    """aclose() on a suspended generator delivers GeneratorExit at the yield_."""
    track = [None]
    aiter = close_me_aiter(track)
    async for obj in aiter:  # pragma: no branch
        assert obj == 1
        break
    assert track[0] is None
    await aiter.aclose()
    assert track[0] == "closed"
@pytest.mark.asyncio
async def test_aclose_on_unstarted_generator():
    """Closing before the first __anext__ leaves nothing to iterate."""
    aiter = close_me_aiter([None])
    await aiter.aclose()
    async for obj in aiter:
        assert False  # pragma: no cover
@pytest.mark.asyncio
async def test_aclose_on_finished_generator():
    """aclose() on an already-exhausted generator is a no-op."""
    aiter = async_range(3)
    async for obj in aiter:
        pass  # pragma: no cover
    await aiter.aclose()
@async_generator
async def sync_yield_during_aclose():
    """Fixture: awaits (but does not yield_) in its finally block."""
    try:
        await yield_(1)
    finally:
        await asyncio.sleep(0)
@async_generator
async def async_yield_during_aclose():
    """Fixture: illegally performs a yield_ while being closed."""
    try:
        await yield_(1)
    finally:
        await yield_(2)
@pytest.mark.asyncio
async def test_aclose_yielding():
    """Awaiting in finally is fine during aclose(); yielding there raises RuntimeError."""
    aiter = sync_yield_during_aclose()
    assert (await aiter.__anext__()) == 1
    # Doesn't raise:
    await aiter.aclose()

    aiter = async_yield_during_aclose()
    assert (await aiter.__anext__()) == 1
    with pytest.raises(RuntimeError):
        await aiter.aclose()
################################################################
#
# yield from
#
################################################################

@async_generator
async def async_range_twice(count):
    """Fixture: delegate to async_range twice, with a None marker between."""
    await yield_from_(async_range(count))
    await yield_(None)
    await yield_from_(async_range(count))
if sys.version_info >= (3, 6):
    # Native 'async def ... yield' syntax is a SyntaxError before 3.6, so the
    # native-generator fixture is compiled lazily via exec().
    exec("""
async def native_async_range(count):
    for i in range(count):
        yield i

# XX uncomment if/when we re-enable the ctypes hacks:
# async def native_async_range_twice(count):
#     # make sure yield_from_ works inside a native async generator
#     await yield_from_(async_range(count))
#     yield None
#     # make sure we can yield_from_ a native async generator
#     await yield_from_(native_async_range(count))
""")
@pytest.mark.asyncio
async def test_async_yield_from_():
    """yield_from_ delegates to wrapped generators (and native ones on 3.6+)."""
    assert await collect(async_range_twice(3)) == [
        0, 1, 2, None, 0, 1, 2,
    ]

    if sys.version_info >= (3, 6):
        # Make sure we can yield_from_ a native generator
        @async_generator
        async def yield_from_native():
            await yield_from_(native_async_range(3))

        assert await collect(yield_from_native()) == [0, 1, 2]

    # XX uncomment if/when we re-enable the ctypes hacks:
    # if sys.version_info >= (3, 6):
    #     assert await collect(native_async_range_twice(3)) == [
    #         0, 1, 2, None, 0, 1, 2,
    #     ]
@async_generator
async def doubles_sends(value):
    """Fixture: yields double of whatever is sent in, forever."""
    while True:
        value = await yield_(2 * value)
@async_generator
async def wraps_doubles_sends(value):
    """Fixture: pure delegation wrapper around doubles_sends."""
    await yield_from_(doubles_sends(value))
@pytest.mark.asyncio
async def test_async_yield_from_asend():
    """Values sent via asend() must be forwarded through yield_from_ to the
    inner generator."""
    gen = wraps_doubles_sends(10)
    # Bug fix: the original line read 'await gen.__anext__() == 20', which
    # evaluates the comparison and discards it — the first yield was never
    # actually checked. Assert it.
    assert (await gen.__anext__()) == 20
    assert (await gen.asend(2)) == 4
    assert (await gen.asend(5)) == 10
    assert (await gen.asend(0)) == 0
    await gen.aclose()
@pytest.mark.asyncio
async def test_async_yield_from_athrow():
    # An exception thrown while suspended inside yield_from_ should
    # propagate out of the delegating generator.
    gen = async_range_twice(2)
    assert (await gen.__anext__()) == 0
    with pytest.raises(ValueError):
        await gen.athrow(ValueError)
@async_generator
async def returns_1():
    # Yields 0, then finishes with return value 1 — the value that
    # yield_from_ should hand back to the delegating generator.
    await yield_(0)
    return 1
@async_generator
async def yields_from_returns_1():
    # Re-yields the inner generator's return value (1) after its yield (0).
    await yield_(await yield_from_(returns_1()))
@pytest.mark.asyncio
async def test_async_yield_from_return_value():
    # 0 comes from returns_1's yield; 1 is its return value, re-yielded.
    assert await collect(yields_from_returns_1()) == [0, 1]
# Special cases to get coverage
@pytest.mark.asyncio
async def test_yield_from_empty():
    # Delegating to a generator that never yields: the delegating generator
    # receives the inner return value, and collect() sees no items at all.
    @async_generator
    async def empty():
        return "done"
    @async_generator
    async def yield_from_empty():
        assert (await yield_from_(empty())) == "done"
    assert await collect(yield_from_empty()) == []
@pytest.mark.asyncio
async def test_yield_from_non_generator():
    # Any object implementing the async-iterator protocol (plus aclose)
    # can be the target of yield_from_, not just async generators.
    class Countdown:
        def __init__(self, count):
            self.count = count
            self.closed = False
        # Pick the __aiter__ flavor the running interpreter expects:
        # pre-3.5.2 wanted a coroutine, later versions a plain method.
        if sys.version_info < (3, 5, 2):
            async def __aiter__(self):
                return self
        else:
            def __aiter__(self):
                return self
        async def __anext__(self):
            self.count -= 1
            if self.count < 0:
                raise StopAsyncIteration("boom")
            return self.count
        async def aclose(self):
            self.closed = True
    @async_generator
    async def yield_from_countdown(count, happenings):
        # Records close/raise events into `happenings` so each scenario
        # below can assert on their order.
        try:
            c = Countdown(count)
            assert (await yield_from_(c)) == "boom"
        except BaseException as e:
            if c.closed:
                happenings.append("countdown closed")
            happenings.append("raise")
            return e
    h = []
    assert await collect(yield_from_countdown(3, h)) == [2, 1, 0]
    assert h == []
    # Throwing into a yield_from_(object with no athrow) just raises the
    # exception in the generator.
    h = []
    agen = yield_from_countdown(3, h)
    assert await agen.__anext__() == 2
    exc = ValueError("x")
    try:
        await agen.athrow(exc)
    except StopAsyncIteration as e:
        assert e.args[0] is exc
    assert h == ["raise"]
    # Calling aclose on the generator calls aclose on the iterator
    h = []
    agen = yield_from_countdown(3, h)
    assert await agen.__anext__() == 2
    await agen.aclose()
    assert h == ["countdown closed", "raise"]
    # Throwing GeneratorExit into the generator calls *aclose* on the iterator
    # (!)
    h = []
    agen = yield_from_countdown(3, h)
    assert await agen.__anext__() == 2
    exc = GeneratorExit()
    with pytest.raises(StopAsyncIteration):
        await agen.athrow(exc)
    assert h == ["countdown closed", "raise"]
@pytest.mark.asyncio
async def test_yield_from_non_generator_with_no_aclose():
    # Like the test above, but the target has no aclose() at all;
    # closing the delegating generator must still succeed.
    class Countdown:
        def __init__(self, count):
            self.count = count
            self.closed = False
        if sys.version_info < (3, 5, 2):
            async def __aiter__(self):
                return self
        else:
            def __aiter__(self):
                return self
        async def __anext__(self):
            self.count -= 1
            if self.count < 0:
                raise StopAsyncIteration("boom")
            return self.count
    @async_generator
    async def yield_from_countdown(count):
        return await yield_from_(Countdown(count))
    assert await collect(yield_from_countdown(3)) == [2, 1, 0]
    agen = yield_from_countdown(3)
    assert await agen.__anext__() == 2
    assert await agen.__anext__() == 1
    # It's OK that Countdown has no aclose
    await agen.aclose()
@pytest.mark.asyncio
async def test_yield_from_with_old_style_aiter():
    # old-style 'async def __aiter__' should still work even on newer pythons
    class Countdown:
        def __init__(self, count):
            self.count = count
            self.closed = False
        # This is wrong, that's the point
        async def __aiter__(self):
            return self
        async def __anext__(self):
            self.count -= 1
            if self.count < 0:
                raise StopAsyncIteration("boom")
            return self.count
    # yield_from_ must tolerate the deprecated coroutine-style __aiter__
    # (awaiting it instead of calling it) without breaking iteration.
    @async_generator
    async def yield_from_countdown(count):
        return await yield_from_(Countdown(count))
    assert await collect(yield_from_countdown(3)) == [2, 1, 0]
@pytest.mark.asyncio
async def test_yield_from_athrow_raises_StopAsyncIteration():
    # If athrow() makes the inner generator *return*, the delegating
    # generator finishes too, so athrow() raises StopAsyncIteration
    # carrying the return value in args.
    @async_generator
    async def catch():
        try:
            while True:
                await yield_("hi")
        except Exception as exc:
            return ("bye", exc)
    @async_generator
    async def yield_from_catch():
        return await yield_from_(catch())
    agen = yield_from_catch()
    assert await agen.__anext__() == "hi"
    assert await agen.__anext__() == "hi"
    thrown = ValueError("oops")
    try:
        print(await agen.athrow(thrown))
    except StopAsyncIteration as caught:
        assert caught.args == (("bye", thrown),)
    else:
        raise AssertionError # pragma: no cover
################################################################
# __del__
################################################################
@pytest.mark.asyncio
async def test___del__():
    # __del__ must be silent for never-started, exhausted, or closed
    # generators, and raise for one abandoned mid-run.
    gen = async_range(10)
    # Hasn't started yet, so no problem
    gen.__del__()
    gen = async_range(10)
    await collect(gen)
    # Exhausted, so no problem
    gen.__del__()
    gen = async_range(10)
    await gen.aclose()
    # Closed, so no problem
    gen.__del__()
    gen = async_range(10)
    await gen.__anext__()
    await gen.aclose()
    # Closed, so no problem
    gen.__del__()
    gen = async_range(10)
    await gen.__anext__()
    # Started, but not exhausted or closed -- big problem
    with pytest.raises(RuntimeError):
        gen.__del__()
################################################################
# introspection
################################################################
def test_isasyncgen():
    # isasyncgen is true for generator *instances*, never for the
    # factory functions that produce them.
    assert not isasyncgen(async_range)
    assert isasyncgen(async_range(10))
    if sys.version_info >= (3, 6):
        assert not isasyncgen(native_async_range)
        assert isasyncgen(native_async_range(10))
def test_isasyncgenfunction():
    # isasyncgenfunction is the mirror image: true for the factory
    # functions, false for instances and unrelated callables.
    assert isasyncgenfunction(async_range)
    assert not isasyncgenfunction(list)
    assert not isasyncgenfunction(async_range(10))
    if sys.version_info >= (3, 6):
        assert isasyncgenfunction(native_async_range)
        assert not isasyncgenfunction(native_async_range(10))
# Very subtle bug: functools.wraps copies across the entire contents of the
# wrapped function's __dict__. We used to use a simple _is_async_gen=True
# attribute to mark async generators. But if we do that, then simple wrappers
# like async_range_wrapper *do* return True for isasyncgenfunction. But that's
# not how inspect.isasyncgenfunction works, and it also caused problems for
# sphinxcontrib-trio, because given a function like:
#
# @acontextmanager
# @async_generator
# async def async_cm():
# ...
#
# then we end up with async_cm introspecting as both an async context manager
# and an async generator, and it doesn't know who to believe. With the
# correct, inspect.isasyncgenfunction-compliant behavior, we have async_cm
# introspecting as an async context manager, and async_cm.__wrapped__
# introspecting as an async generator.
def test_isasyncgenfunction_is_not_inherited_by_wrappers():
    # functools.wraps must not make a plain wrapper introspect as an
    # async generator function (see the long comment above).
    @wraps(async_range)
    def async_range_wrapper(*args, **kwargs): # pragma: no cover
        return async_range(*args, **kwargs)
    assert not isasyncgenfunction(async_range_wrapper)
    assert isasyncgenfunction(async_range_wrapper.__wrapped__)
def test_collections_abc_AsyncGenerator():
    # The AsyncGenerator ABC doesn't exist on older Pythons, so only
    # check registration when it is available.
    abc_cls = getattr(collections.abc, "AsyncGenerator", None)
    if abc_cls is not None:
        assert isinstance(async_range(10), abc_cls)
@pytest.mark.asyncio
async def test_ag_attributes():
    # ag_code / ag_frame should mirror native async generators'
    # introspection attributes.
    @async_generator
    async def f():
        x = 1
        await yield_()
    agen = f()
    assert agen.ag_code.co_name == "f"
    async for _ in agen: # pragma: no branch
        assert agen.ag_frame.f_locals["x"] == 1
        break
################################################################
# Finicky tests to check that the overly clever ctype stuff has plausible
# refcounting
from . import impl
@pytest.mark.skipif(not hasattr(sys, "getrefcount"), reason="CPython only")
def test_refcnt():
    # NOTE(review): the bare print(sys.getrefcount(x)) calls look like
    # leftover debugging output; kept as-is — confirm they are not needed
    # for refcount stabilization before removing.
    x = object()
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    base_count = sys.getrefcount(x)
    # Each _wrap() box should add a strong reference to x...
    l = [impl._wrap(x) for _ in range(100)]
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    assert sys.getrefcount(x) >= base_count + 100
    # ...and each _unwrap() should hand out another strong reference.
    l2 = [impl._unwrap(box) for box in l]
    assert sys.getrefcount(x) >= base_count + 200
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    del l
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    del l2
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    print(sys.getrefcount(x))
    # Dropping both lists must return the count to baseline (no leaks).
    assert sys.getrefcount(x) == base_count
    print(sys.getrefcount(x))
################################################################
#
# Edge cases
#
################################################################
# PEP 479: StopIteration or StopAsyncIteration exiting from inside an async
# generator should produce a RuntimeError with the __cause__ set to the
# original exception. Note that contextlib.asynccontextmanager depends on this
# behavior.
@async_generator
async def lets_exception_out():
    # A single bare yield point: anything thrown in via athrow()
    # propagates straight out (no try/except around the yield).
    await yield_()
@pytest.mark.asyncio
async def test_throw_StopIteration_or_StopAsyncIteration():
    # PEP 479 behavior (see comment above): these exceptions must come
    # out wrapped in a RuntimeError with __cause__ preserved.
    for cls in [StopIteration, StopAsyncIteration]:
        agen = lets_exception_out()
        await agen.asend(None)
        exc = cls()
        with pytest.raises(RuntimeError) as excinfo:
            await agen.athrow(exc)
        assert excinfo.type is RuntimeError
        assert excinfo.value.__cause__ is exc
# No "coroutine was never awaited" warnings for async generators that are not
# iterated
@pytest.mark.asyncio
async def test_no_spurious_unawaited_coroutine_warning(recwarn):
    # Creating a generator and dropping it without ever iterating must
    # not produce a "coroutine was never awaited" RuntimeWarning.
    agen = async_range(10)
    del agen
    # Run collection a few times to make sure any
    # loops/resurrection/etc. stuff gets fully handled (necessary on pypy)
    for _ in range(4):
        gc.collect()
    # I've seen DeprecationWarnings here triggered by pytest-asyncio, so let's
    # filter for RuntimeWarning. But if there are no warnings at all, then
    # that's OK too, so tell coverage not to worry about it.
    for msg in recwarn: # pragma: no cover
        print(msg)
        assert not issubclass(msg.category, RuntimeWarning)
| {
"content_hash": "e211475aa0f58404066963bfdcf59043",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 78,
"avg_line_length": 29.058659217877096,
"alnum_prop": 0.5918004421801404,
"repo_name": "njsmith/async_generator",
"id": "9dca401b04dd7085e1e2d743dfb8d1d94713dd43",
"size": "20806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "async_generator/test_async_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35235"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions: the stock sphinx.ext.* modules first, then the
# ecto-specific directives and lexers used by this project's docs.
# NOTE: 'sphinx.ext.pngmath' is only available on older Sphinx releases.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.pngmath',
    'sphinx.ext.viewcode',
    'ecto.sphinx.programoutput',
    'ecto.sphinx.EctoShLexer',
    'ecto.sphinx.EctoPlotDirective',
    'ecto.sphinx.EctoCellDirective',
    'ecto.sphinx.EctoModuleDirective',
    'ecto.sphinx.ToggleDirective',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'capture'
copyright = u'2011, Ethan Rublee, Vincent Rabaud'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'capturedoc'
# -- Options for LaTeX output --------------------------------------------------
# Options passed straight through to the LaTeX builder; the commented-out
# keys document the available knobs at their defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('genidx', 'capture.tex', u'capture Documentation',
   u'Ethan Rublee, Vincent Rabaud', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    # 'genidx' is the start document; manual section 1 = user commands.
    ('genidx', 'capture', u'capture Documentation',
     [u'Ethan Rublee, Vincent Rabaud'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
  # TODO(review): 'One line description of project.' is sphinx-quickstart
  # placeholder text — consider replacing it with a real description.
  ('genidx', 'capture', u'capture Documentation',
   u'Ethan Rublee, Vincent Rabaud', 'capture', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "4baed0957dfabd69fa593bb9eac48974",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 82,
"avg_line_length": 32.25819672131148,
"alnum_prop": 0.6883496379113201,
"repo_name": "WalkingMachine/sara_commun",
"id": "6dfb916cc0417583c3a87147e557261b82563cb2",
"size": "8289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wm_ork/capture/doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "6113"
}
],
"symlink_target": ""
} |
from boussole.conf.base_backend import SettingsBackendBase
def test_ok_001(settings):
    """
    The base backend's no-op parser must turn dummy content into an
    empty settings dict.
    """
    backend = SettingsBackendBase(basedir=settings.fixtures_path)
    directory, name = backend.parse_filepath()
    resolved = backend.check_filepath(directory, name)
    raw_content = backend.open(resolved)
    parsed = backend.parse(resolved, raw_content)
    assert parsed == {}
| {
"content_hash": "62db50ae134ad8f1e1aa2ddb4c22e75e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 25.733333333333334,
"alnum_prop": 0.7124352331606217,
"repo_name": "sveetch/boussole",
"id": "802b7136be5cf2efa2ebd4b0c29f447e68a4ce49",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/050_conf/002_base_backend/004_parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15109"
},
{
"name": "Makefile",
"bytes": "1424"
},
{
"name": "Python",
"bytes": "240310"
}
],
"symlink_target": ""
} |
from airflow.contrib.hooks.wasb_hook import WasbHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class WasbDeleteBlobOperator(BaseOperator):
    """
    Deletes blob(s) on Azure Blob Storage.

    :param container_name: Name of the container. (templated)
    :type container_name: str
    :param blob_name: Name of the blob. (templated)
    :type blob_name: str
    :param wasb_conn_id: Reference to the wasb connection.
    :type wasb_conn_id: str
    :param check_options: Optional keyword arguments that
        `WasbHook.check_for_blob()` takes.
    :param is_prefix: If blob_name is a prefix, delete all files matching prefix.
    :type is_prefix: bool
    :param ignore_if_missing: if True, then return success even if the
        blob does not exist.
    :type ignore_if_missing: bool
    """
    template_fields = ('container_name', 'blob_name')

    @apply_defaults
    def __init__(self, container_name, blob_name,
                 wasb_conn_id='wasb_default', check_options=None,
                 is_prefix=False, ignore_if_missing=False,
                 *args,
                 **kwargs):
        super(WasbDeleteBlobOperator, self).__init__(*args, **kwargs)
        # Avoid a mutable default argument: materialize the dict per instance.
        if check_options is None:
            check_options = {}
        self.wasb_conn_id = wasb_conn_id
        self.container_name = container_name
        self.blob_name = blob_name
        self.check_options = check_options
        self.is_prefix = is_prefix
        self.ignore_if_missing = ignore_if_missing

    def execute(self, context):
        # Fix: use lazy %-style logging args instead of
        # str.format(**locals()) — locals()-based formatting is fragile
        # and formats eagerly even when the log level is disabled.
        self.log.info('Deleting blob: %s\nin wasb://%s',
                      self.blob_name, self.container_name)
        hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
        hook.delete_file(self.container_name, self.blob_name,
                         self.is_prefix, self.ignore_if_missing,
                         **self.check_options)
| {
"content_hash": "114c4bae33aadee49fab579050e46b25",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 37.46153846153846,
"alnum_prop": 0.6247433264887063,
"repo_name": "artwr/airflow",
"id": "4173d7e7a8a5d2a6b15e5bf80beface313638dbc",
"size": "2761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/wasb_delete_blob_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879976"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
"""
Загрузчик справки с сайта
Microsoft's Old API Help File Reborn
http://laurencejackson.com/win32
"""
from urllib.request import urlopen
#
# read_chunk(url, max_chunk)
#
def read_chunk(url, max_chunk=8192*8):
    """Generator over the contents of a web file, chunk by chunk,
    for use in a for statement."""
    with urlopen(url) as response:
        chunk = response.read(max_chunk)
        while chunk:
            yield chunk
            chunk = response.read(max_chunk)
#
# download(url, name)
#
def download(url, name):
    """Download a web file to disk under `name`, printing progress to stdout."""
    # Determine the size of the file being downloaded. Servers may omit
    # the Content-Length header, so guard against int(None) here and
    # against division by zero in the progress calculation below.
    file_size = 0
    with urlopen(url) as response:
        header = response.getheader('Content-Length')
        if header is not None:
            file_size = int(header)
    print('File size: %dKB' % (file_size / 1024))
    # Stream the file to disk chunk by chunk.
    with open(name, 'wb') as file:
        downloaded_size = 0
        for chunk in read_chunk(url):
            downloaded_size += len(chunk)
            file.write(chunk)
            if file_size > 0:
                # Show download progress (skipped when the size is unknown).
                status = 'Downloading... %3.2f%% complete.' % (downloaded_size * 100. / file_size)
                print(status, end='\r')
    print()
#
# main()
#
def main():
    """Program entry point: fetch the Win32 help file to the working dir."""
    download('http://laurencejackson.com/win32/Win32.chm', 'Win32.chm')
#
# start script
#
if __name__ == '__main__':
    main()
| {
"content_hash": "b470d7ecff8e0030349cc4975e6c6088",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 22.126984126984127,
"alnum_prop": 0.5882352941176471,
"repo_name": "maltsevda/University",
"id": "222aaacd8d372ee1ec2b57fca042575b727ba049",
"size": "1608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "000_Tools/HelpDownloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2963"
},
{
"name": "C++",
"bytes": "40935"
},
{
"name": "Makefile",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "1608"
}
],
"symlink_target": ""
} |
import pygame
import sys
import math
import random
print("\n\n# # # # # INSTRUCTIONS FOR USE # # # # #")
print("This was made to facilitate development of my\n"
"Asteroids! clone's programmatically-defined\n"
"art assets.")
print("You can use it by left clicking once inside\n"
"the green square on the screen that pops up\n"
"to start drawing a line, then left clicking\n"
"again somewhere else to finish drawing it.")
print("The green square is the extent of the object's\n"
"hitbox in-game, and you can actually draw\n"
"outside it, too.")
print("\nWhen saved, the design will be written\n"
"to a text file in the same directory as\n"
"this program with a name indicative of\n"
"the number of lines used.")
print("In order to incorporate your design into the\n"
"Asteroids! game, simply copy and paste the\n"
"contents of the file over the similarly-formatted\n"
"data inside the game's draw_programmatic_object()\n"
"function, or comment out the appropriate line\n"
"and simply paste this beneath it.")
print("\nKEY COMMANDS:")
print(" -- s saves your design and quits the program")
print(" -- r cancels the current line")
print(" -- g will remove a line you've just drawn")
print(" -- q has the same function as left clicking")
print(" -- esc quits without saving")
# # # # Goal statement # # # #
# When the user clicks, a point is added to points_array.
# If the user has clicked, a line is draw from the first point in
# points_array to the cursor.
# If len(points_array) > 1, lines are drawn between the most recently
# added point and the next most recently added point.
# ((edit: start and finish each line separately now))
# ...
# When the user hits the Save key, points_array will be exported to
# a file for use in other programs as a programmatically drawn object.
# # # # Notes for future improvement # # # #
# I think this program might be using the wrong kind of event/keypress
# monitoring. See http://www.pygame.org/docs/tut/newbieguide.html
# for details, specifically the event subsystem section.
# Update: This problem has something to do with why I put in
# user_recently_clicked and tied it to the game clock via a ticker variable.
# As a result of that there's a touch of unresponsiveness if you're
# drawing very quickly. This is to prevent unwanted oversensitivity.
# The way the program is handling clicks makes it too likely to interpret
# what the user thought was a single click as multiple clicks in succession.
# The solution was to put duct tape over it and be overjoyed that the
# result actually worked.
# I am told this constitutes valuable work experience.
# # # # Constants # # # #
# Window geometry and palette (RGB lists, as pygame accepts).
SCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = 300, 300
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
GREEN = [0, 255, 0]
# NOTE(review): true division — under Python 3 these centers are floats
# (150.0); pygame draw calls accept floats, but confirm no int is assumed.
SCREEN_CENTER_X, SCREEN_CENTER_Y = (SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2)
# # # # Functions # # # #
def add_previous_point_and_current_point_to_lines_array_as_a_line():
    """Commit the in-progress line (previous_point -> cursor) to lines_array
    and leave line-adding mode."""
    # Todo: Parameterize state
    global user_is_currently_adding_a_line
    start_point = [previous_point[0], previous_point[1]]
    end_point = [cursor_position[0], cursor_position[1]]
    lines_array.append([start_point, end_point])
    user_is_currently_adding_a_line = False
def add_point_to_points_array():
    '''
    Places the x, y values of the cursor's current position into points_array.
    '''
    # This fails at [0, 0], but fixing that opens up another unknown.
    # What placeholder value should the array be initialized with that
    # the user could never click... that itself wouldn't change some
    # hidden property of the array? Negative numbers? Strings??
    # Edit: Refactor to "None" when I do the second pass
    if points_array[0] == [0, 0]:
        # First real point overwrites the [0, 0] placeholder in slot 0
        # instead of appending, so the placeholder never renders.
        points_array[0][0] = cursor_position[0]
        points_array[0][1] = cursor_position[1]
    else:
        points_array.append([cursor_position[0], cursor_position[1]])
def write_something_to_a_text_file(filename, string_to_write):
    '''
    Write a supplied string to a text file with the supplied name.
    Generate such a file if none exists (existing content is overwritten).
    '''
    # Refactored to a context manager (as the original TODO suggested):
    # the file is closed even if write() raises.
    with open(filename, "w") as text_file:
        text_file.write(string_to_write)
def render_all():
    '''
    Draw all lines in points_array on the screen.
    Also draw the tentative next line connecting the last placed point
    to the cursor, if the user is currently drawing, and draws UI elements.
    '''
    screen.fill(BLACK)
    # Legacy point-chain mode: consecutive entries of points_array form a
    # connected polyline (mostly superseded by lines_array below).
    if len(points_array) > 1:
        for each_line_index_number in range(1, (len(points_array))):
            pygame.draw.line(screen, WHITE, [points_array[(each_line_index_number - 1)][0], points_array[(each_line_index_number - 1)][1]], [points_array[(each_line_index_number)][0], points_array[(each_line_index_number)][1]], 1)
    # Completed point-pair lines: each entry is [[x1, y1], [x2, y2]].
    if len(lines_array) >= 1:
        for each_line in range(0, (len(lines_array))):
            pygame.draw.line(screen, WHITE, [lines_array[each_line][0][0], lines_array[each_line][0][1]], [lines_array[each_line][1][0], lines_array[each_line][1][1]], 1)
    if user_is_drawing is True:
        # If the user is currently drawing,
        # connect their cursor to the last placed point.
        # NOTE(review): previous_point is indexed [0][0] in one branch and
        # [0] in the other — it is sometimes a point and sometimes a
        # one-element list holding a point; confirm both shapes occur.
        if len(points_array) > 1:
            pygame.draw.line(screen, WHITE, [previous_point[0][0], previous_point[0][1]], [cursor_position[0], cursor_position[1]], 1)
        elif len(lines_array) >= 0:
            pygame.draw.line(screen, WHITE, [previous_point[0], previous_point[1]], [cursor_position[0], cursor_position[1]], 1)
    # Draws a tiny green dot in the center of the screen.
    # This dot is NOT included in the saved programmatic object file.
    # This is for measuring purposes only.
    pygame.draw.rect(screen, GREEN, [(SCREEN_CENTER_X - 1), (SCREEN_CENTER_Y - 1), 2, 2])
    # Draws a rectangle around the center 200x200 pixels
    # for measuring purposes.
    # Doing it this way because I want it to be here at the end,
    # drawn on top of user inputted things, alongside the center dot.
    pygame.draw.line(screen, GREEN, [(SCREEN_CENTER_X - 100), (SCREEN_CENTER_Y - 100)], [(SCREEN_CENTER_X + 100), (SCREEN_CENTER_Y - 100)], 1)
    pygame.draw.line(screen, GREEN, [(SCREEN_CENTER_X + 100), (SCREEN_CENTER_Y - 100)], [(SCREEN_CENTER_X + 100), (SCREEN_CENTER_Y + 100)], 1)
    pygame.draw.line(screen, GREEN, [(SCREEN_CENTER_X + 100), (SCREEN_CENTER_Y + 100)], [(SCREEN_CENTER_X - 100), (SCREEN_CENTER_Y + 100)], 1)
    pygame.draw.line(screen, GREEN, [(SCREEN_CENTER_X - 100), (SCREEN_CENTER_Y + 100)], [(SCREEN_CENTER_X - 100), (SCREEN_CENTER_Y - 100)], 1)
    pygame.display.flip()
def handle_keys():
    '''
    Interpret pressed keys as input commands
    and execute them, mostly via state changes.

    Handled keys:
      ESC -- close the window.
      r   -- cancel line-drawing mode.
      q   -- begin a line (first press) or end it (second press).
      g   -- remove the last completed line.
      s   -- rescale and snap lines_array, save it to a text file, then quit.
    '''
    # Ow. More refactoring to do later.
    # Maybe a GameState singleton?
    # Or is the better design pattern something involving
    # multiple GameState-esque subdivision classes?
    # At the very least it should accept and hand off parameters,
    # perhaps in a simple dict (basically just a singleton then)
    global previous_point
    global keep_window_open
    global user_is_drawing
    global user_is_currently_adding_a_line
    global lines_array
    for event in pygame.event.get():  # NOTE: This does not seem to allow for continuously-held keys being re-read if another key is pressed and released during the first key's held period.
        if event.type == pygame.QUIT:
            # Bug fix: `sys.exit` without parentheses only references the
            # function object and never calls it, so closing the window via
            # the window manager silently did nothing.
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            # events and KEYDOWN prevent multiple firings from holding down the button.
            if event.key == pygame.K_ESCAPE:
                keep_window_open = False
            # Note: Previous program functionality has been disabled. Point-pair lines only now.
            #if event.key == pygame.K_q:
            #    # Then the user is placing a point at the cursor's position.
            #    user_is_drawing = True
            #    add_point_to_points_array()
            #    previous_point = [cursor_position]
            if event.key == pygame.K_r:
                # Cancels drawing mode.
                user_is_currently_adding_a_line = False
                user_is_drawing = False
                previous_point = [0, 0]
            if event.key == pygame.K_q:
                # Then the user is beginning or ending a line.
                if user_is_currently_adding_a_line is True:
                    # Ending a line
                    add_previous_point_and_current_point_to_lines_array_as_a_line()
                    previous_point = [0, 0]
                    # Note: The next line is also checked in add_..._a_line() function. Redundancy. Also safety!
                    user_is_currently_adding_a_line = False
                    user_is_drawing = False
                else:
                    # Beginning a line
                    user_is_currently_adding_a_line = True
                    user_is_drawing = True
                    previous_point[0] = cursor_position[0]
                    previous_point[1] = cursor_position[1]
            if event.key == pygame.K_g:
                # Then the user is removing the last completed line.
                if len(lines_array) > 0:
                    lines_array.pop()
            if event.key == pygame.K_s:
                # Then the user is saving the array to a file.
                random_code = random.randint(0, 1000000)
                generated_filename = str(len(lines_array)) + '-line programmatic object -- randcode ' + str(random_code) + '.txt'
                if len(lines_array) >= 1:
                    for each_line_index in range(0, (len(lines_array))):
                        # IMPORTANT! This is is only for the scaling system used in my Asteroids! test game.
                        # Please consider changing this if you're using it in the future; it's better not to divide them at all and use pixels as the yardstick, I'd guess.
                        # But maybe not?! There might be something to be said for having an independent scale.
                        # Note that the Asteroids! test game uses (object_size / 20) and here dividing the numbers by 10 as seen will fit them to that (foo / 20) metric.
                        # Imagine a grid, 20x20, with scaling from -10 to +10 on both axes...
                        # That system is conceptually useful when centerpoints are important for things like radius-based collision detection.
                        # start X
                        lines_array[each_line_index][0][0] = ((SCREEN_CENTER_X - lines_array[each_line_index][0][0]) / 10)
                        # start Y
                        lines_array[each_line_index][0][1] = ((SCREEN_CENTER_Y - lines_array[each_line_index][0][1]) / 10)
                        # end X
                        lines_array[each_line_index][1][0] = ((SCREEN_CENTER_X - lines_array[each_line_index][1][0]) / 10)
                        # end Y
                        lines_array[each_line_index][1][1] = ((SCREEN_CENTER_Y - lines_array[each_line_index][1][1]) / 10)
                    # If the end point of one line are close to the start point of the next, this code splits the difference. Note this assumes you care about exactly matching endpoints.
                    for each_line_index in range(0, (len(lines_array))):
                        # Special case of the first and last points:
                        if each_line_index == 0:
                            start_x_of_current_line = lines_array[each_line_index][0][0]
                            end_x_of_previous_line = lines_array[(len(lines_array) - 1)][1][0]
                            start_y_of_current_line = lines_array[each_line_index][0][1]
                            end_y_of_previous_line = lines_array[(len(lines_array) - 1)][1][1]
                        else:
                            start_x_of_current_line = lines_array[each_line_index][0][0]
                            end_x_of_previous_line = lines_array[(each_line_index - 1)][1][0]
                            start_y_of_current_line = lines_array[each_line_index][0][1]
                            end_y_of_previous_line = lines_array[(each_line_index - 1)][1][1]
                        # X
                        if ( (abs(start_x_of_current_line - end_x_of_previous_line)) <= 0.4 ):
                            # If abs(difference between the end points) <= 0.4, split the difference and set it to that.
                            difference_between_them = (abs(start_x_of_current_line - end_x_of_previous_line))
                            half_of_the_difference = (difference_between_them / 2)
                            # NOTE(review): because abs() discards the sign, the two points are
                            # always nudged in fixed directions, which can move them APART when
                            # start < end; confirm this is the intended snapping behavior.
                            start_x_of_current_line += half_of_the_difference
                            end_x_of_previous_line -= half_of_the_difference
                            # NOTE(review): the original comment said "round to the nearest tenth",
                            # but (x * 10) // 10 actually floors x to a whole number. Kept as-is so
                            # saved files are byte-identical -- confirm the intended precision.
                            start_x_of_current_line *= 10
                            start_x_of_current_line = start_x_of_current_line // 10
                            end_x_of_previous_line *= 10
                            end_x_of_previous_line = end_x_of_previous_line // 10
                        # Y
                        if ( (abs(start_y_of_current_line - end_y_of_previous_line)) <= 0.4 ):
                            # If abs(difference between the end points) <= 0.4, split the difference and set it to that.
                            difference_between_them = (abs(start_y_of_current_line - end_y_of_previous_line))
                            half_of_the_difference = (difference_between_them / 2)
                            start_y_of_current_line += half_of_the_difference
                            end_y_of_previous_line -= half_of_the_difference
                            # NOTE(review): same floor-vs-round discrepancy as the X branch above.
                            start_y_of_current_line *= 10
                            start_y_of_current_line = start_y_of_current_line // 10
                            end_y_of_previous_line *= 10
                            end_y_of_previous_line = end_y_of_previous_line // 10
                        # This part actually does the setting. I feel like some kind of list comprehension would have helped with the index numbers. To-do list: Learn everything about list comprehensions.
                        if each_line_index == 0:
                            lines_array[each_line_index][0][0] = start_x_of_current_line
                            lines_array[(len(lines_array) - 1)][1][0] = end_x_of_previous_line
                            lines_array[each_line_index][0][1] = start_y_of_current_line
                            lines_array[(len(lines_array) - 1)][1][1] = end_y_of_previous_line
                        else:
                            lines_array[each_line_index][0][0] = start_x_of_current_line
                            lines_array[(each_line_index - 1)][1][0] = end_x_of_previous_line
                            lines_array[each_line_index][0][1] = start_y_of_current_line
                            lines_array[(each_line_index - 1)][1][1] = end_y_of_previous_line
                write_something_to_a_text_file(generated_filename, str(lines_array))
                keep_window_open = False
# # # # Initializations # # # #
screen = pygame.display.set_mode(SCREEN_SIZE)
user_is_drawing = False
user_is_currently_adding_a_line = False
user_recently_clicked = False
points_array = [[0, 0]]
lines_array = []
previous_point = [0, 0]
# To keep the game running
keep_window_open = True
# Create a clock object to make the game run at a specified speed in the main loop
clock = pygame.time.Clock()
# Using the game_ticker model is currently necessary to decouple program running speed from pygame's Clock function. There's probably a better way to do this somewhere... This is fairly simple, though.
game_ticker = 0
# # # # Main Loop # # # #
while keep_window_open is True:
    cursor_position = cursor_x, cursor_y = pygame.mouse.get_pos()
    button1_pressed, button2_pressed, button3_pressed = pygame.mouse.get_pressed()
    # Process keyboard input
    handle_keys()
    # Event progression metering: cap the loop at 40 frames per second.
    clock.tick(40)
    if game_ticker < 80:
        game_ticker += 1
    elif game_ticker >= 80:
        game_ticker = 0
    # Note: Previous program functionality has been disabled. Point-pair lines only now.
    #if button1_pressed is True:
    #    # Left mouse click enables drawing mode and places a point in points_array.
    #    user_is_drawing = True
    #    add_point_to_points_array()
    #    previous_point = [cursor_position]
    # Clear the click-debounce flag periodically so a new click can register.
    if ( (user_recently_clicked is True) and ((game_ticker % 30) == 1) ):
        user_recently_clicked = False
    # NOTE(review): (game_ticker % 1) == 0 is always true for integers, so this
    # guard is a no-op; kept for fidelity with the original tick-gating idea.
    if ((game_ticker % 1) == 0):
        if ((button1_pressed is True) and (user_recently_clicked is False)):
            if user_is_currently_adding_a_line is True:
                # Ending a line
                add_previous_point_and_current_point_to_lines_array_as_a_line()
                previous_point = [0, 0]
                # Note: The next line is also checked in add_..._a_line() function. Redundancy. Also safety!
                user_is_currently_adding_a_line = False
                user_is_drawing = False
                user_recently_clicked = True
            else:
                # Beginning a line
                user_is_currently_adding_a_line = True
                user_is_drawing = True
                previous_point[0] = cursor_position[0]
                previous_point[1] = cursor_position[1]
                user_recently_clicked = True
    if button3_pressed is True:
        # Right mouse click cancels drawing mode.
        user_is_currently_adding_a_line = False
        user_is_drawing = False
        previous_point = [0, 0]
    # Debugging section ---v
    # Note: Previous program functionality has been disabled. Point-pair lines only now.
    #print("\npoints_array == " + str(points_array))
    #print("\nprevious_point == " + str(previous_point))
    # print("\nlines_array == " + str(lines_array))
    # print("\nprevious_point == " + str(previous_point))
    # Display everything that needs to be displayed
    render_all()
# "Be IDLE friendly," they said.
# Bug fix: `pygame.quit` without parentheses only references the function and
# never calls it, so pygame was not shut down cleanly on exit.
pygame.quit()
| {
"content_hash": "9d1e3f18f6099f3b7d7e34ea9cd44571",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 230,
"avg_line_length": 45.076388888888886,
"alnum_prop": 0.5608278128691008,
"repo_name": "BFriedland/AsteroidsTest",
"id": "8a10e2bb45397381a497ebb29c0b4675bd078be1",
"size": "19473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AsteroidsTest/programmatic_object_creator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "86"
},
{
"name": "Python",
"bytes": "187303"
}
],
"symlink_target": ""
} |
import os
import signal
import contextlib
# Used to build usages
AUTHOR = "qs-devel@yunify.com"
COPYRIGHT = "Copyright (C) 2016-2017 Yunify, Inc"
COMMANDS = (
'qsctl', 'qsctl-ls', 'qsctl-cp', 'qsctl-mb', 'qsctl-mv', 'qsctl-rb',
'qsctl-rm', 'qsctl-sync', 'qsctl-presign'
)
def to_rst_style_title(title):
    """Return *title* wrapped in matching '=' over- and underline rules (reST style)."""
    rule = "=" * len(title)
    return "\n".join((rule, title, rule))
def gen_see_also(command):
    """Build the "See Also" bullet list: one entry per qsctl command except *command*."""
    return "".join(
        "* ``%s help``\n\n" % other.replace("-", " ")
        for other in COMMANDS
        if other != command
    )
class RstDocument(object):
    """Accumulates reST source text and appends the standard trailing sections."""

    def __init__(self, rst_source=""):
        self.rst_source = rst_source

    def from_file(self, filepath):
        """Append the entire contents of *filepath* to the document."""
        with open(filepath) as handle:
            self.rst_source += handle.read()

    def _append_section(self, title, content):
        # All trailing sections share the same layout: blank line, ruled
        # title, blank line, content, trailing newline.
        self.rst_source += "\n%s\n\n%s\n" % (to_rst_style_title(title), content)

    def add_reporting_bug(self):
        """Append the "Reporting Bug" section pointing at the maintainer address."""
        self._append_section("Reporting Bug", "Report bugs to email <%s>." % AUTHOR)

    def add_see_also(self, command):
        """Append the "See Also" section listing the sibling commands."""
        self._append_section("See Also", gen_see_also(command))

    def add_copyright(self):
        """Append the "Copyright" section."""
        self._append_section("Copyright", COPYRIGHT)

    def getvalue(self):
        """Return the accumulated reST source."""
        return self.rst_source
def gen_rst_doc(command):
    """Return the full man-page-style reST text for *command*.

    Reads the command's description/examples from the 'source' directory
    next to this file, then appends the Reporting Bug, See Also and
    Copyright sections.
    """
    here = os.path.split(os.path.realpath(__file__))[0]
    document = RstDocument()
    document.from_file(os.path.join(here, "source/%s.rst" % command))
    document.add_reporting_bug()
    document.add_see_also(command)
    document.add_copyright()
    return document.getvalue()
def gen_sphinx_doc(command):
    """Return the bare reST source for *command* (no trailing sections), for sphinx.

    Reads the command's description/examples from the 'source' directory
    next to this file.
    """
    here = os.path.split(os.path.realpath(__file__))[0]
    document = RstDocument()
    document.from_file(os.path.join(here, "source/%s.rst" % command))
    return document.getvalue()
@contextlib.contextmanager
def ignore_ctrl_c():
    """Temporarily ignore SIGINT (Ctrl-C), e.g. while paging usage through 'less -r'.

    The previous handler is restored on exit, even if the body raises.
    """
    previous_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, previous_handler)
| {
"content_hash": "1869ec360c94d2ea3a40b8dfc39029dd",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 72,
"avg_line_length": 27.458333333333332,
"alnum_prop": 0.6172230652503794,
"repo_name": "Fiile/qsctl",
"id": "3a628b4f6e4793773693438f705b9d2665dd34d3",
"size": "2661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingstor/qsctl/helper/qsdocutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1707"
},
{
"name": "Gherkin",
"bytes": "7515"
},
{
"name": "Makefile",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "125160"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from difflib import ndiff
from sys import argv
from zipfile import ZipFile, ZIP_DEFLATED
def diff(name, content, updated_content):
    """Print an ndiff of the two texts under a *name* heading.

    Prints nothing when both texts are empty (ndiff of two empty line
    lists yields no output lines).
    """
    delta = "".join(ndiff(content.splitlines(1), updated_content.splitlines(1)))
    if delta:
        print(f"{name}\n{delta}")
def replace(content, replacements):
    """Return *content* with each old->new pair in *replacements* applied in dict order.

    Later replacements see the output of earlier ones.
    """
    for old_text, new_text in replacements.items():
        content = content.replace(old_text, new_text)
    return content
def zip_read(zip_file, encoding="utf-8"):
    """Yield (member_name, decoded_text) for every member of *zip_file*."""
    with ZipFile(zip_file, "r") as archive:
        for member in archive.namelist():
            yield member, archive.read(member).decode(encoding)
def update_zip(
    original_zip, updated_zip, replacements, encoding="utf-8", verbose=False
):
    """Copy *original_zip* to *updated_zip*, applying *replacements* to each member.

    :param replacements: dict mapping old substrings to their replacements
    :param verbose: when True, print an ndiff of every changed member
    """
    with ZipFile(updated_zip, "w", compression=ZIP_DEFLATED) as target:
        for member, text in zip_read(original_zip, encoding=encoding):
            new_text = replace(text, replacements)
            if verbose:
                diff(member, text, new_text)
            target.writestr(member, new_text)
def replace_in_zip(
    original_zip, updated_zip, replacements, encoding="utf-8", verbose=False
):
    """Like update_zip, but *replacements* is a flat [old1, new1, old2, new2, ...] list."""
    # Pair up even-indexed (old) and odd-indexed (new) entries into a dict.
    pairs = dict(zip(replacements[::2], replacements[1::2]))
    update_zip(original_zip, updated_zip, pairs, encoding, verbose)
def parse_args(args):
    """Build the command-line parser and parse *args*.

    :param args: list of raw argument strings (typically argv[1:])
    :return: argparse.Namespace with original_zip, updated_zip,
        replacements, encoding and verbose attributes
    """
    # Fixed: the description previously had an unbalanced parenthesis.
    parser = ArgumentParser(description="Replace in zip (e.g. zip, jar or war)")
    parser.add_argument("original_zip", help="original zip path")
    parser.add_argument("updated_zip", help="updated zip path")
    parser.add_argument(
        "replacements",
        nargs="+",
        help="A list of replacements to be done in the zip. Index 0 will be replaced with index 1 and so on.",
    )
    parser.add_argument(
        "-e",
        "--encoding",
        default="utf-8",
        # Fixed: the previous help text ("TFS projects, e.g organisation/project")
        # was copy-pasted from an unrelated tool and described the wrong option.
        help="text encoding used to read and write zip members, e.g. utf-8",
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    return parser.parse_args(args)
def main() -> None:  # pragma: no cover
    """
    Example:
    ./replace_in_zip.py original.zip updated.zip old1 new1 old2 new2 oldN newN
    """
    # vars(ns) is ns.__dict__: forward every parsed option as a keyword argument.
    replace_in_zip(**vars(parse_args(argv[1:])))


if __name__ == "__main__":  # pragma: no cover
    main()
| {
"content_hash": "2fdf96b514b3bd4310a78e86a5e5efae",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 110,
"avg_line_length": 31.175675675675677,
"alnum_prop": 0.6454269614217598,
"repo_name": "bjuvensjo/scripts",
"id": "9afbd853c624555efe7f9e1adbde9e76349520fe",
"size": "2330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vang/pio/replace_in_zip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "502278"
}
],
"symlink_target": ""
} |
"""
Created on Tue Dec 16 15:58:38 2014
@author: egendron
"""
import math
import numpy as np
import gral_zer
import pyfits
def rotateCoupleOfCoeffs(a, b, m, angledeg):
    """
    Rotate a paired couple of Zernike coefficients of azimutal order m.

    Inputs:
    <a> : scalar, floating point. Coefficient of 1st zernike mode.
    <b> : scalar, floating point. Coefficient of 2nd zernike mode.
    <m> : scalar, integer (azimutal order of the 2 zernike)
    <angledeg> : scalar, floating point (rotation angle in degrees)

    Output:
    Tuple of the 2 output coefficients of the rotated wavefront.

    The pair transforms like a 2D rotation by m * angle, because the two
    modes are the cos(m*theta) / sin(m*theta) partners of the same order.
    """
    anglerad = angledeg * math.pi / 180.0
    cos_m = np.cos(m * anglerad)
    sin_m = np.sin(m * anglerad)
    return (a * cos_m - b * sin_m, a * sin_m + b * cos_m)
def checkConsistencyOfNumberOfElementsOfZernikeArray( imax ):
    """
    Input:
    <imax> : scalar, integer. Index of the last Zernike mode of an array.

    Output:
    Boolean error code: True = error (the cos/sin partner of mode <imax>
    is missing from the array), False = no error.
    """
    # Radial and azimutal orders of the last mode.
    n, m = gral_zer.nm(imax)
    # m == 0 modes have no rotation partner, so any imax is acceptable.
    if m == 0:
        return False
    half_n = n // 2  # integer division on purpose: 5 // 2 == 2, not 2.5
    # Equal parity of imax and n//2 means the paired mode is missing.
    return (imax & 1) == (half_n & 1)
def rotateZernike(z, i0, angledeg):
    """
    Inputs:
    <z> : 1D array of floating point values (zernike coefficients)
    <i0> : scalar, integer. It is the zernike index of the 1st coeff contained in <z>.
    <angledeg> : scalar, floating point (rotation angle in degrees)

    Output:
    1D array of Zernike coefficients describing the input wavefront rotated
    by <angledeg> degrees. Modes with azimutal order m == 0 are unchanged;
    each same-order (cos, sin) pair is rotated together via
    rotateCoupleOfCoeffs.

    NOTE(review): when the input does not end on a complete radial order,
    the last coefficient is dropped and the returned array is one element
    SHORTER than the input -- confirm callers expect that.
    """
    # number of coefficients contained in the vector z
    nzer = z.shape[0]
    # index of the last Zernike coefficient
    imax = nzer+i0-1
    # check whether <imax> is ok or not
    errorImax = checkConsistencyOfNumberOfElementsOfZernikeArray( nzer+i0-1 )
    # produces an error if not consistent
    if( errorImax ):
        # Python 2 print statements: this module predates Python 3.
        print "The number of Zernike modes (%d to %d) does not entierely fill a radial order." % (i0, imax)
        print "A coefficient is missing : it is impossible to rotate the wavefront properly."
        print "I will ignore the last Zernike coefficient."
        nzer = nzer-1
    # allocates memory for the result: an array of <nzer> coefficient, same length as <z>, floats.
    z_output = np.zeros(nzer)
    k = 0
    while(k<nzer):
        i = k+i0 # index of the Zernike mode, as described in the literature (Z_1 is piston)
        # determine radial and azimutal orders, called <n> and <m>
        n,m = gral_zer.nm(i)
        if( m==0 ):
            # do nothing, coefficient is unchanged
            z_output[k] = z[k]
        else:
            # Paired modes: even index i carries the leading coefficient of
            # the couple, odd index i carries it swapped.
            if( (i&1)==0 ): # equivalent to "i modulo 2"
                # if i is an even number (2,4,6,..)
                tmp = rotateCoupleOfCoeffs(z[k], z[k+1], m, angledeg)
                z_output[k] = tmp[0]
                z_output[k+1] = tmp[1]
            else:
                # if i is an odd number (1,3,5,7,...)
                tmp = rotateCoupleOfCoeffs(z[k+1], z[k], m, angledeg)
                # warning: SWAP coefficients !!!!
                z_output[k] = tmp[1]
                z_output[k+1] = tmp[0]
            # skip the next coefficient z[k+1], that has already been processed
            k = k+1
        k = k+1
    return z_output
def flipZernike(z, i0):
    """
    Input parameters:
    <z> : 1D array of zernike coefficients
    <i0> : Zernike index of the first element of the array

    Output parameters:
    New 1D array of zernike coefficients representing the input wavefront
    flipped around the X axis (equation y=0, the "horizontal" axis).

    Rule: m == 0 modes are invariant under the flip; among m != 0 modes,
    those with an odd Zernike index (the sine modes) change sign while the
    even-index (cosine) modes are unchanged.

    Note: this returns a new array. A C implementation could do the
    transformation in place, since only signs are touched.
    """
    nzer = z.shape[0]
    flipped = np.zeros(nzer)
    for k in range(nzer):
        mode_index = k + i0  # literature numbering (Z_1 is piston)
        n, m = gral_zer.nm(mode_index)
        if m != 0 and (mode_index & 1) == 1:
            # odd-index paired mode: sign change under the flip
            flipped[k] = -z[k]
        else:
            # m == 0 mode or even-index paired mode: unchanged
            flipped[k] = z[k]
    return flipped
def transformZernikeIntoSlopes(z, i0, flip, angledeg, scalingFactor, pixelSizeArcsec, DiamVLT):
    """
    Convert a Zernike wavefront (optionally flipped, rotated and scaled)
    into WFS slopes expressed in WFS pixels.

    Input parameters:
    <z> : 1D array of floating-point zernike coefficients.
          Zernike coefficients are expected to be expressed in "nanometers rms".
    <i0> : scalar integer, Zernike index of the first element of the array
    <flip> : boolean, flip the wavefront around the X axis before rotating
    <angledeg> : scalar, floating point. Angle of rotation in degrees.
    <scalingFactor> : scalar floating point, nominally equal to 1.000
    <pixelSizeArcsec> : scalar floating point = pixel size of WFS in arcsec = 0.51 (Pixel size of WFS is 0.51'')
    <DiamVLT> : scalar floating point = VLT diameter in meters = 8.0 (VLT is 8m diameter)

    Output parameters:
    1D array of slopes (in WFS pixel units).
    """
    # Reading the Z2S somewhere .. (the path needs to be changed according to your config)
    # <Z2S> : transformation matrix from zernike to slopes provided by E. Gendron as a FITS
    # file, that should be part of the system configuration
    # NOTE(review): hard-coded absolute path to a user's home directory -- this
    # must be moved into the system configuration before deployment.
    Z2S = pyfits.getdata("/home/egendron/Projets/GRAVITY/Z2S_136s_119z.fits").T
    # number of coefficients contained in the vector z
    nzer = z.shape[0]
    # Zernike index of the last coefficient of z
    imax = nzer-1+i0
    # number of zernike coefficients contained in the transformation matrix
    nzermat = Z2S.shape[1]
    # Maximum Zernike index in the transformation matrix
    imaxmat = nzermat-1+i0
    if( imax>imaxmat ):
        # hopefully, this should never happen ..
        # (Python 2 print statements: this module predates Python 3.)
        print "The list of Zernike given in input is too large (%d modes), the transformation matrix only contains %d modes.\n" % (imax,imaxmat)
        print "I will skip all the zernike coeffs greater than %d." % imaxmat
    # allocate memory, 1D array of floating points filled with 0.00
    z_out = np.zeros(nzermat)
    # Copies zernike coefficients in another array that will match the dimension of the Z2S
    # with coeffs at the right place
    # NOTE(review): the "-2+i0" offset implies the Z2S matrix starts at Zernike
    # index 2 (tip) -- confirm against the FITS file.
    for k in range(nzermat):
        # Filling z_out with proper values
        j = k-2+i0 # j is an index parsing the array z
        if( (j>=0) and (j<nzer) ):
            z_out[k] = z[j] * scalingFactor
        else:
            z_out[k] = 0.00
    # flipping wavefront, if required
    if( flip==True ):
        z_out = flipZernike(z_out, 2)
    # rotating wavefront by an angle <angledeg>
    if( angledeg!=0 ):
        z_out = rotateZernike(z_out, 2, angledeg)
    # transforming zernike coeffs into slopes by a matrix product
    slopes = np.dot(Z2S, z_out)
    # scaling factor to transform "nanometers rms" to "WFS pixels"
    RASC = 180*3600/3.14159265358979324 # number of arcsec in 1 radian
    unitfactor = 1e-9 * 2.0/ DiamVLT / (pixelSizeArcsec/RASC)
    slopes = slopes * unitfactor
    return slopes
def moveDerotatorToPosition( angle ):
    """
    :-)
    Super-dummy function, supposed to turn the de-rotator ...
    Of course this function needs to be replaced by something *actually* rotating the de-rotator

    <angle> : position angle in degrees; only printed by this stub.
    """
    # Python 2 print statement: this module predates Python 3.
    print "Moving de-rotator to position %g degrees\n" % float(angle)
def acquireSPARTAData( nframes ):
    """
    Simulate the acquisition of <nframes> frames of slopes made by SPARTA.

    Returns a (nframes, 136) array of uniform random values in [-0.5, 0.5),
    standing in for real measurements. Obviously, this has to be replaced by
    a real SPARTA acquisition, or by reading a FITS file of slopes from SPARTA.
    """
    NSLOPES = 136  # number of slopes returned by sparta ...
    # Dummy data: uniform noise centred on zero, just to test procedures.
    return np.random.rand(nframes, NSLOPES) - 0.5
def getCurrentReferenceSlopes( void ):
    """
    Simulate the query of the currently applied reference slopes.

    <void> : unused placeholder argument (Yorick-style); callers pass 0.
    Obviously, this has to be replaced by a real SPARTA procedure.
    """
    NSLOPES = 136  # number of slopes returned by sparta ...
    # Dummy result: all-zero reference slopes, just to test procedures.
    return np.zeros(NSLOPES)
def calibrateRefSlopesSimple(nframes):
    """
    Input parameters:
    <nframes> : scalar integer, number of frames to grab at each acquisition.
                Recommended default value (if ever required for whatever reason) : 200

    Output:
    A 1D array of floating point numbers, corresponding to the reference slopes.
    """
    # Bug fix: getCurrentReferenceSlopes() takes one (unused) positional
    # argument; calling it with none raised a TypeError. Pass 0, exactly as
    # the other caller (calibrateRefSlopesUsingDerotator) does.
    curRefSlopes = getCurrentReferenceSlopes(0)
    # acquire slopes data from SPARTA
    slopes = acquireSPARTAData(nframes)
    # time-average the frames and add the currently applied reference slopes back
    newRefSlopes = np.average(slopes, axis=0) + curRefSlopes
    return newRefSlopes
def calibrateRefSlopesUsingDerotator(nframes, nsteps):
    """
    Input parameters:
    <nframes> : scalar integer, number of frames to grab at each acquisition.
                Recommended default value (if ever required for whatever reason) : 200
    <nsteps> : number of position angles the rotator needs to do
               Recommended default value (if ever required for whatever reason) : 12

    Output:
    A 2D array (NSLOPE, 361) of floating point numbers, corresponding to the
    reference slopes for every degree from 0 to 360.

    How it works:
    The procedure will grab <nframes> frames of slopes, for a given
    set of de-rotator angles. It will time-average them and add the current
    reference slopes back to it. Then, it will interpolate all these
    data between the measurement angles (by Fourier series in the angle),
    for another set of <nsamp> angles spanning the range 0-360 degrees.

    What to do with the output:
    This interpolated data set will have to be saved somewhere in the system
    configuration as reference slopes. Then, while on-sky observing, for any
    angle A of the derotator, the reference slopes should be taken in the
    calibrated+interpolated dataset at the closest angle to A.
    """
    # number of slopes of the Shack-Hartmann
    NSLOPE = 136
    # allocate memory for result
    calibref = np.zeros((NSLOPE, nsteps))
    # Create a list of angles for the derotator
    # from 0 (included) to 360 (excluded !!) degrees.
    angle = np.arange(nsteps)*360.0/nsteps # if nsteps=12, then angle=[0,30,60,90,120,150,180,210,240,270,300,330]
    # ................ MEASUREMENTS .......................
    #
    # calls a function/procedure from sparta to get the cuently applied reference slopes
    curRefSlopes = getCurrentReferenceSlopes( 0 )
    # loop over de-rotator positions
    for k in range(nsteps):
        # Bug fix: the original line was Yorick call syntax
        # "moveDerotatorToPosition, angle[k]", which in Python is a no-op
        # tuple expression -- the de-rotator was never actually moved.
        moveDerotatorToPosition(angle[k])
        # acquire slopes data from SPARTA
        slopes = acquireSPARTAData( nframes )
        # time-average slopes, and store result in an array
        calibref[:,k] = np.average(slopes, axis=0) + curRefSlopes
    # ................ INTERPOLATION .......................
    #
    # Now the array of slopes will be interpolated for any angle from 0
    # to 360 degree, on <nsamp> steps.
    nsamp = 361 # this number could possibly change after some tests during AITs.
    nfourier = nsteps//2 # integer division
    # allocate memory for the interpolated result
    calibinterp = np.zeros((NSLOPE, nsamp))
    # list of the angles where the interpolation will be calculated.
    # these angles range from 0 (included) to 360 (included) degrees.
    theta = np.arange(nsamp)*360.0/(nsamp-1)
    # loop over slopes
    for i in range(NSLOPE):
        # loop on Fourier modes, from 0 to nfourier (included, i.e. nfourier+1 modes)
        for j in range(nfourier+1): # warning: nfourier+1 iterations here !!
            cosinus = np.cos(angle * np.pi/180.0 * j) * 2.0 / nsteps
            sinus = np.sin(angle * np.pi/180.0 * j) * 2.0 / nsteps
            acc = np.sum(calibref[i,:] * cosinus)
            ass = np.sum(calibref[i,:] * sinus)
            # DC and Nyquist terms are counted once, hence halved.
            if (j==0) or (j==nfourier):
                acc /= 2.0
                ass /= 2.0
            calibinterp[i,:] += acc * np.cos(theta * np.pi/180.0 * j) + ass * np.sin(theta * np.pi/180.0 * j)
    # .................. CENTERING ...........................
    #
    # We now need to re-center all the reference slopes, in order to
    # set them at the center of the circle they've made when the
    # rotator turns. One has to subtract the tilt in X, and the tilt in
    # Y, and replace them by the average value in X and Y.
    #
    # First, computation of tilt in x and y for each theta, by
    # averaging all the slopes in X, and all slopes in Y.
    # Slopes 1 to NSLOPE/2=68 correspond to X, 69 to 136 correspond to Y.
    # (Integer division // keeps these valid as indices under Python 3 too.)
    tx = np.average(calibinterp[0:NSLOPE//2,:], axis=0)
    ty = np.average(calibinterp[NSLOPE//2:NSLOPE,:], axis=0)
    avg_tx = np.average(tx) # this is the average (over theta) of the x-tilt
    avg_ty = np.average(ty) # this is the average (over theta) of the y-tilt
    tx -= avg_tx
    ty -= avg_ty
    # Then subtraction of the tilt for any angle
    for k in range(NSLOPE//2):
        j = k+NSLOPE//2 # index of the matching slope in Y
        calibinterp[k,:] -= tx
        calibinterp[j,:] -= ty
    return calibinterp
| {
"content_hash": "95a1b99a75ebb24fc0cf55e97b96ce2c",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 144,
"avg_line_length": 34.283720930232555,
"alnum_prop": 0.6361416361416361,
"repo_name": "soylentdeen/CIAO-commissioning-tools",
"id": "25f78411d17f102dc4d2d5ae07e4f66ea4bc44df",
"size": "14766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fromEric/gral_referenceSlopes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251894"
},
{
"name": "Tcl",
"bytes": "584"
}
],
"symlink_target": ""
} |
"""
Provides a subclass of Disk Cache which saves in a simple z/x/y.extension, with
Y=0 being the top of the map, and heading down. This tile layout makes for
generation of tiles that is friendly to Google/OSM, and the opposite of TMS.
This is useful for pre-generating tiles for Google Maps which are going to be
used offline. This allows one to use TileCache in a gdal2tiles-like setup,
using the cache to write out a directory which can be used in other places.
Note that ext3 (a common Linux filesystem) will not support more than 32000
files in a directory, so if you plan to store a whole world at z15 or greater,
you should not use this cache class. (The Disk.py file is designed for this use
case.)
>>> from TileCache.Layer import Layer, Tile
>>> l = Layer("test")
>>> t = Tile(l, 14, 18, 12)
>>> c = GoogleDisk("/tmp/tilecache")
>>> c.getKey(t)
'/tmp/tilecache/test/12/14/4077.png'
"""
from TileCache.Cache import Cache
from TileCache.Caches.Disk import Disk
import os
class GoogleDisk(Disk):
    """Disk cache variant storing tiles as z/x/y.ext with y=0 at the TOP of the map."""

    def getKey (self, tile):
        """Return the on-disk path for *tile*, flipping the TMS y index."""
        grid = tile.layer.grid(tile.z)
        # Google/OSM convention counts rows downward from the top, so
        # invert the TMS row index within the grid height at this zoom.
        flipped_y = int(grid[1] - 1 - tile.y)
        parts = (
            self.basedir,
            tile.layer.name,
            str(int(tile.z)),
            str(int(tile.x)),
            "%s.%s" % (flipped_y, tile.layer.extension),
        )
        return os.path.join(*parts)
if __name__ == "__main__":
    # Run the module doctests (see the usage example in the module docstring).
    import doctest
    doctest.testmod()
| {
"content_hash": "e1b4252709ac881a1e23ee788ca8eb13",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 36.26829268292683,
"alnum_prop": 0.6435776731674513,
"repo_name": "DOE-NEPA/mapwarper",
"id": "78cf1b9c06430e6a4f1cc0c4ed17037ddb2c6d54",
"size": "1540",
"binary": false,
"copies": "7",
"ref": "refs/heads/ruby1.9.1",
"path": "publicoldtestsite/cgi/tilecache/TileCache/Caches/GoogleDisk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "684811"
},
{
"name": "Groff",
"bytes": "8"
},
{
"name": "HTML",
"bytes": "1750094"
},
{
"name": "JavaScript",
"bytes": "513548"
},
{
"name": "Makefile",
"bytes": "4734"
},
{
"name": "Python",
"bytes": "304410"
},
{
"name": "Ruby",
"bytes": "1108095"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
# PostgreSQL connection settings.
postgres_owner = "owner"
postgres_user = "user"
postgres_host = "localhost"
postgres_passwd = "pass"
postgres_db = "db"

# SQLAlchemy connection URIs: one for the regular user, one for the owner role.
_URI_TEMPLATE = "postgresql://%s:%s@%s/%s"
SQLALCHEMY_DATABASE_URI = _URI_TEMPLATE % (postgres_user, postgres_passwd, postgres_host, postgres_db)
SQLALCHEMY_DATABASE_URI_OWNER = _URI_TEMPLATE % (postgres_owner, postgres_passwd, postgres_host, postgres_db)
| {
"content_hash": "52d841d5e237be1fda4be6804d014030",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 130,
"avg_line_length": 47,
"alnum_prop": 0.6622340425531915,
"repo_name": "niclabs/NetworkInformation-Server",
"id": "5edddf8d79359a9a677280b13ae31fe7502d5b90",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11183"
}
],
"symlink_target": ""
} |
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfserving
from kfserving.models.v1beta1_explainer_config import V1beta1ExplainerConfig # noqa: E501
from kfserving.rest import ApiException
class TestV1beta1ExplainerConfig(unittest.TestCase):
"""V1beta1ExplainerConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1ExplainerConfig
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfserving.models.v1beta1_explainer_config.V1beta1ExplainerConfig() # noqa: E501
if include_optional :
return V1beta1ExplainerConfig(
default_image_version = '0',
image = '0'
)
else :
return V1beta1ExplainerConfig(
default_image_version = '0',
image = '0',
)
def testV1beta1ExplainerConfig(self):
"""Test V1beta1ExplainerConfig"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
# Allow this test module to be executed directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "2646eb961c3bfbbe5edf458fe3357b01",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 98,
"avg_line_length": 28.245283018867923,
"alnum_prop": 0.6412825651302605,
"repo_name": "kubeflow/kfserving-lts",
"id": "01587be6ce9875a65a6ec7051c7d58a4b7d33824",
"size": "2091",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-0.6",
"path": "python/kfserving/test/test_v1beta1_explainer_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
} |
import scrapy
class QiubaiItem(scrapy.Item):
    """Scraped item for one qiubai post: author details plus post content."""

    # Post identifier and author information.
    _id = scrapy.Field()
    avatar = scrapy.Field()
    profile_link = scrapy.Field()
    name = scrapy.Field()
    gender = scrapy.Field()
    age = scrapy.Field()

    # Post body, link and engagement counters.
    content = scrapy.Field()
    content_link = scrapy.Field()
    up = scrapy.Field()
    comment_num = scrapy.Field()
| {
"content_hash": "b377708ce19a6c048c35a5be6dcec23e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 25.625,
"alnum_prop": 0.6292682926829268,
"repo_name": "ychenracing/Spiders",
"id": "792091588798652fc74a75c6e942925554fc4793",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiubai/qiubai/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34779"
}
],
"symlink_target": ""
} |
"""DAG run APIs."""
from datetime import datetime
from typing import Dict
from deprecated import deprecated
from airflow.api.common.experimental import check_and_get_dag, check_and_get_dagrun
@deprecated(reason="Use DagRun().get_state() instead", version="2.2.4")
def get_dag_run_state(dag_id: str, execution_date: datetime) -> Dict[str, str]:
"""Return the Dag Run state identified by the given dag_id and execution_date.
:param dag_id: DAG id
:param execution_date: execution date
:return: Dictionary storing state of the object
"""
dag = check_and_get_dag(dag_id=dag_id)
dagrun = check_and_get_dagrun(dag, execution_date)
return {'state': dagrun.get_state()}
| {
"content_hash": "ef1d451ed7771f1ac22e4ea67cc4d600",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 83,
"avg_line_length": 31.863636363636363,
"alnum_prop": 0.7104136947218259,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "7201186ea9331c630b76966ecb7f6d4c037a0576",
"size": "1488",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "airflow/api/common/experimental/get_dag_run_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.