repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
MatthewWilkes/mw4068-packaging | refs/heads/master | src/melange/src/soc/models/base.py | 1 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing enhanced db.Model classes.
The classes in this module are intended to serve as base classes for all
Melange Datastore Models.
"""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
from soc.logic import dicts
from soc.views.helper import forms as forms_helper
class ModelWithFieldAttributes(db.Model):
    """A db.Model extension that provides access to Model properties attributes.

    Due to the way the Property class in Google App Engine implements __get__()
    and __set__(), it is not possible to access attributes of Model properties,
    such as verbose_name, from within a Django template. This class works
    around that limitation by creating an inner Form class per Model class,
    since an unbound Form object contains (most of?) the property attributes
    attached to each corresponding Form field.

    Some attributes are renamed during the conversion from a Model Property
    to a Form field; for example, verbose_name becomes label. This is tolerable
    because any actual Form code refers to these new names, so they should
    be familiar to view creators.
    """

    # Lazily-built, per-Model-class cache of the unbound FieldsProxy form.
    _fields_cache = None

    @classmethod
    def fields(cls):
        """Called by the Django template engine during template instantiation.

        Since the attribute names use the Form fields naming instead of the
        Property attribute naming, accessing, for example:
          {{ entity.property.verbose_name }}
        is accomplished using:
          {{ entity.fields.property.label }}

        Args:
          cls: Model class, so that each Model class can create its own
            unbound Form the first time fields() is called by the Django
            template engine.

        Returns:
          A (created-on-first-use) unbound Form object that can be used to
          access Property attributes that are not accessible from the
          Property itself via the Model entity.
        """
        # Rebuild when no cache exists yet, or when the cached proxy was
        # inherited from a different Model class: class attributes are shared
        # by subclasses until the subclass assigns its own, so the Meta.model
        # comparison detects a stale, inherited cache.
        if not cls._fields_cache or (cls != cls._fields_cache.__class__.Meta.model):
            class FieldsProxy(forms_helper.BaseForm):
                """Form used as a proxy to access User model properties attributes.
                """

                class Meta:
                    """Inner Meta class that pairs the User Model with this "form".
                    """
                    #: db.Model subclass for which to access model properties attributes
                    model = cls

            cls._fields_cache = FieldsProxy()

        return cls._fields_cache

    # Expose dicts.toDict as an entity method (serializes an entity to a dict).
    toDict = dicts.toDict
|
hms-dbmi/fourfront | refs/heads/master | src/encoded/visualization.py | 2 | from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPBadRequest
from snovault import CONNECTION
from snovault.util import debug_log
from copy import (
copy,
deepcopy
)
import json
from urllib.parse import (
parse_qs,
urlencode,
)
from datetime import datetime
import uuid
from .search import (
DEFAULT_BROWSE_PARAM_LISTS,
make_search_subreq,
search as perform_search_request
)
from .types.base import Item
from .types.workflow import (
trace_workflows,
DEFAULT_TRACING_OPTIONS,
WorkflowRunTracingException,
item_model_to_object
)
from .types.base import get_item_or_none
def includeme(config):
    """Register this module's routes and scan it for @view_config views."""
    config.add_route(
        'trace_workflow_runs',
        '/trace_workflow_run_steps/{file_uuid}/',
        traverse='/{file_uuid}',
    )
    # Simple name -> pattern routes with no traversal.
    simple_routes = (
        ('bar_plot_chart', '/bar_plot_aggregations'),
        ('date_histogram_aggregations', '/date_histogram_aggregations/'),
        ('add_files_to_higlass_viewconf', '/add_files_to_higlass_viewconf/'),
    )
    for route_name, pattern in simple_routes:
        config.add_route(route_name, pattern)
    config.scan(__name__)
# TODO: figure out how to make one of those cool /file/ACCESSION/@@download/-like URLs for this.
@view_config(route_name='trace_workflow_runs', request_method='GET', permission='view', context=Item)
@debug_log
def trace_workflow_runs(context, request):
    '''
    Traces workflow runs from context (an Item instance), which may be one of the following @types:
    `ExperimentSet`, `File`, or `Experiment`.

    Gets @@object representation of files from which to trace, then passes them to `trace_workflows`.
    @@object representation is needed currently because trace_workflows grabs `output_of_workflow_runs` from
    the files and requires them in UUID form. THIS SHOULD BE IMPROVED UPON AT EARLIEST CONVENIENCE.

    Requires that all files and workflow runs which are part of trace be indexed in ElasticSearch, else a
    WorkflowRunTracingException will be thrown.

    URI Paramaters:
        all_runs            If true, will not group similar workflow_runs
        track_performance   If true, will record time it takes for execution

    Returns:
        List of steps (JSON objects) with inputs and outputs representing IO nodes / files.
    '''
    # Default opts += overrides
    options = copy(DEFAULT_TRACING_OPTIONS)
    if request.params.get('all_runs'):
        options['group_similar_workflow_runs'] = False
    if request.params.get('track_performance'):
        options['track_performance'] = True

    item_types = context.jsonld_type()
    item_model_obj = item_model_to_object(context.model, request)

    files_objs_to_trace = []

    if 'File' in item_types:
        # Trace directly from this single file.
        files_objs_to_trace.append(item_model_obj)
    elif 'Experiment' in item_types:
        # Trace from each of the experiment's processed files.
        for file_uuid in item_model_obj.get('processed_files', []):
            file_model = request.registry[CONNECTION].storage.get_by_uuid(file_uuid)
            file_obj = item_model_to_object(file_model, request)
            files_objs_to_trace.append(file_obj)
        # Reversed relative to the listed order -- presumably so the most
        # downstream files are traced first. TODO(review): confirm intent.
        files_objs_to_trace.reverse()
    elif 'ExperimentSet' in item_types:
        # Trace from the set's own processed files plus those of each
        # experiment in the set.
        file_uuids_to_trace_from_experiment_set = item_model_obj.get('processed_files', [])
        file_uuids_to_trace_from_experiments = []
        for exp_uuid in item_model_obj.get('experiments_in_set', []):
            experiment_model = request.registry[CONNECTION].storage.get_by_uuid(exp_uuid)
            experiment_obj = item_model_to_object(experiment_model, request)
            file_uuids_to_trace_from_experiments.extend(experiment_obj.get('processed_files', []))
        for file_uuid in file_uuids_to_trace_from_experiments + file_uuids_to_trace_from_experiment_set:
            file_model = request.registry[CONNECTION].storage.get_by_uuid(file_uuid)
            file_obj = item_model_to_object(file_model, request)
            files_objs_to_trace.append(file_obj)
        files_objs_to_trace.reverse()
    else:
        raise HTTPBadRequest(detail="This type of Item is not traceable: " + ', '.join(item_types))

    try:
        return trace_workflows(files_objs_to_trace, request, options)
    except WorkflowRunTracingException as e:
        # Surface tracing failures (e.g. un-indexed items) as a 400 response.
        raise HTTPBadRequest(detail=e.args[0])
# This must be same as can be used for search query, e.g. &?experiments_in_set.digestion_enzyme.name=No%20value, so that clicking on bar section to filter by this value works.
TERM_NAME_FOR_NO_VALUE = "No value"

# Common definition for aggregating all files, exps, and set **counts**.
# This works for our ElasticSearch mapping though has some non-ideal-ities.
# For example, we use "cardinality" instead of "value_count" agg (which would (more correctly) count duplicate files, etc.)
# because without a more complex "type" : "nested" it will uniq file accessions within a hit (ExpSetReplicate).
SUM_FILES_EXPS_AGGREGATION_DEFINITION = {
    # Returns count of _unique_ raw file accessions encountered along the search.
    "total_exp_raw_files" : {
        "cardinality" : {
            "field" : "embedded.experiments_in_set.files.accession.raw",
            "precision_threshold" : 10000
        }
    },
    # Alternate approaches -- saved for record / potential future usage:
    #
    # (a) Needs to have "type" : "nested" mapping, but then faceting & filtering needs to be changed (lots of effort)
    #     Without "type" : "nested", "value_count" agg will not account for nested arrays and _unique_ on file accessions within a hit (exp set).
    #
    #"total_exp_raw_files_new2" : {
    #    "nested" : {
    #        "path" : "embedded.experiments_in_set"
    #    },
    #    "aggs" : {
    #        "total" : {
    #            "value_count" : {
    #                "field" : "embedded.experiments_in_set.files.accession.raw",
    #                #"script" : "doc['embedded.experiments_in_set.accession.raw'].value + '~' + doc['embedded.experiments_in_set.files.accession.raw'].value",
    #                #"precision_threshold" : 10000
    #            }
    #        }
    #    }
    #},
    #
    # (b) Returns only 1 value per exp-set
    #     When using a script without "type" : "nested". If "type" : "nested" exists, need to loop over the array (2nd example -ish).
    #
    #"total_exp_raw_files_new" : {
    #    "terms" : {
    #        "script" : "doc['embedded.experiments_in_set.accession.raw'].value + '~' + doc['embedded.experiments_in_set.files.accession.raw'].value"
    #        #"script" : "int total = 0; for (int i = 0; i < doc['embedded.experiments_in_set.accession.raw'].length; ++i) { total += doc['links.experiments_in_set'][i]['embedded.files.accession.raw'].length; } return total;",
    #        #"precision_threshold" : 10000
    #    }
    #},
    #
    # (c) Same as (b)
    #
    #"test" : {
    #    "terms" : {
    #        "script" : "return doc['embedded.experiments_in_set.accession.raw'].getValue().concat('~').concat(doc['embedded.experiments_in_set.accession.raw'].getValue()).concat('~').concat(doc['embedded.experiments_in_set.files.accession.raw'].getValue());",
    #        #"precision_threshold" : 10000
    #    }
    #},
    # Unique processed-file accessions attached to experiments in the set.
    "total_exp_processed_files" : {
        "cardinality" : {
            "field" : "embedded.experiments_in_set.processed_files.accession.raw",
            "precision_threshold" : 10000
        }
    },
    # Unique processed-file accessions attached directly to the set itself.
    "total_expset_processed_files" : {
        "cardinality" : {
            "field" : "embedded.processed_files.accession.raw",
            "precision_threshold" : 10000
        }
    },
    # Sums the three file counts above. NOTE: "bucket_script" only works
    # inside a parent bucket agg, so callers strip this key at root level.
    "total_files" : {
        "bucket_script" : {
            "buckets_path": {
                "expSetProcessedFiles": "total_expset_processed_files",
                "expProcessedFiles": "total_exp_processed_files",
                "expRawFiles": "total_exp_raw_files"
            },
            "script" : "params.expSetProcessedFiles + params.expProcessedFiles + params.expRawFiles"
        }
    },
    # Plain value_count of experiment accessions (counts per-hit occurrences).
    "total_experiments" : {
        "value_count" : {
            "field" : "embedded.experiments_in_set.accession.raw"
        }
    }
}
@view_config(route_name='bar_plot_chart', request_method=['GET', 'POST'])
@debug_log
def bar_plot_chart(context, request):
    """Aggregate browse results into nested terms for the bar plot chart.

    Accepts either a JSON POST body with `search_query_params` and
    `fields_to_aggregate_for`, or GET `field` parameters. Builds nested
    ElasticSearch "terms" aggregations (one level per requested field), runs
    them through a /browse/ sub-request, and reshapes the resulting buckets
    into a recursive {field, terms, total} dictionary for the front-end.
    """
    MAX_BUCKET_COUNT = 30  # Max amount of bars or bar sections to return, excluding 'other'.

    try:
        json_body = request.json_body
        search_param_lists = json_body.get('search_query_params', deepcopy(DEFAULT_BROWSE_PARAM_LISTS))
        fields_to_aggregate_for = json_body.get('fields_to_aggregate_for', request.params.getall('field'))
    except json.decoder.JSONDecodeError:
        # No JSON body (e.g. a GET request): fall back to defaults + URL params.
        search_param_lists = deepcopy(DEFAULT_BROWSE_PARAM_LISTS)
        del search_param_lists['award.project']
        fields_to_aggregate_for = request.params.getall('field')

    if len(fields_to_aggregate_for) == 0:
        raise HTTPBadRequest(detail="No fields supplied to aggregate for.")

    # First (outermost) terms aggregation, keyed "field_0", with the common
    # file/experiment count sub-aggs attached.
    primary_agg = {
        "field_0" : {
            "terms" : {
                "field" : "embedded." + fields_to_aggregate_for[0] + '.raw',
                "missing" : TERM_NAME_FOR_NO_VALUE,
                "size" : MAX_BUCKET_COUNT
            },
            "aggs" : deepcopy(SUM_FILES_EXPS_AGGREGATION_DEFINITION)
        }
    }

    # Also collect the common counts at the root level (for grand totals).
    primary_agg.update(deepcopy(SUM_FILES_EXPS_AGGREGATION_DEFINITION))
    del primary_agg['total_files']  # "bucket_script" not supported on root-level aggs

    # Nest in additional fields, if any: field_1 inside field_0's aggs, etc.
    curr_field_aggs = primary_agg['field_0']['aggs']
    for field_index, field in enumerate(fields_to_aggregate_for):
        if field_index == 0:
            continue
        curr_field_aggs['field_' + str(field_index)] = {
            'terms' : {
                "field" : "embedded." + field + '.raw',
                "missing" : TERM_NAME_FOR_NO_VALUE,
                "size" : MAX_BUCKET_COUNT
            },
            "aggs" : deepcopy(SUM_FILES_EXPS_AGGREGATION_DEFINITION)
        }
        curr_field_aggs = curr_field_aggs['field_' + str(field_index)]['aggs']

    # We only need aggregations, not hits -- request zero results.
    search_param_lists['limit'] = search_param_lists['from'] = [0]
    subreq = make_search_subreq(request, '{}?{}'.format('/browse/', urlencode(search_param_lists, True)) )
    search_result = perform_search_request(None, subreq, custom_aggregations=primary_agg)

    # Strip search-response envelope fields we don't need to return.
    for field_to_delete in ['@context', '@id', '@type', '@graph', 'title', 'filters', 'facets', 'sort', 'clear_filters', 'actions', 'columns']:
        if search_result.get(field_to_delete) is None:
            continue
        del search_result[field_to_delete]

    ret_result = {  # We will fill up the "terms" here from our search_result buckets and then return this dictionary.
        "field" : fields_to_aggregate_for[0],
        "terms" : {},
        "total" : {
            "experiment_sets" : search_result['total'],
            "experiments" : search_result['aggregations']['total_experiments']['value'],
            "files" : (
                search_result['aggregations']['total_expset_processed_files']['value'] +
                search_result['aggregations']['total_exp_raw_files']['value'] +
                search_result['aggregations']['total_exp_processed_files']['value']
            )
        },
        "other_doc_count": search_result['aggregations']['field_0'].get('sum_other_doc_count', 0),
        "time_generated" : str(datetime.utcnow())
    }

    def format_bucket_result(bucket_result, returned_buckets, curr_field_depth=0):
        # Recursively converts one ES terms bucket (and its nested field_N
        # buckets) into the {term, field, total, terms} shape used by the UI.
        curr_bucket_totals = {
            'experiment_sets' : int(bucket_result['doc_count']),
            'experiments' : int(bucket_result['total_experiments']['value']),
            'files' : int(bucket_result['total_files']['value'])
        }

        next_field_name = None
        if len(fields_to_aggregate_for) > curr_field_depth + 1:  # More fields agg results to add
            next_field_name = fields_to_aggregate_for[curr_field_depth + 1]
            returned_buckets[bucket_result['key']] = {
                "term" : bucket_result['key'],
                "field" : next_field_name,
                "total" : curr_bucket_totals,
                "terms" : {},
                "other_doc_count" : bucket_result['field_' + str(curr_field_depth + 1)].get('sum_other_doc_count', 0),
            }
            for bucket in bucket_result['field_' + str(curr_field_depth + 1)]['buckets']:
                format_bucket_result(bucket, returned_buckets[bucket_result['key']]['terms'], curr_field_depth + 1)
        else:
            # Terminal field aggregation -- return just totals, nothing else.
            returned_buckets[bucket_result['key']] = curr_bucket_totals

    for bucket in search_result['aggregations']['field_0']['buckets']:
        format_bucket_result(bucket, ret_result['terms'], 0)

    return ret_result
@view_config(route_name='date_histogram_aggregations', request_method=['GET', 'POST'])
@debug_log
def date_histogram_aggregations(context, request):
    '''PREDEFINED aggregations which run against type=ExperimentSet'''

    # Defaults - may be overriden in URI params
    date_histogram_fields = ['public_release', 'project_release']
    group_by_fields = ['award.center_title']
    date_histogram_intervals = ['weekly']

    # Mapping of 'date_histogram_interval' options we accept to ElasticSearch interval vocab term.
    interval_to_es_interval = {
        'hourly' : 'hour',
        'daily' : 'day',
        'weekly' : 'week',
        'monthly' : 'month',
        'yearly' : 'year'
    }

    try:
        json_body = request.json_body
        search_param_lists = json_body.get('search_query_params', deepcopy(DEFAULT_BROWSE_PARAM_LISTS))
    except Exception:
        # No/invalid JSON body: take all filters from the query string.
        search_param_lists = request.GET.dict_of_lists()

    if 'group_by' in search_param_lists:
        group_by_fields = search_param_lists['group_by']
        del search_param_lists['group_by']  # We don't wanna use it as search filter.
        # group_by=None / group_by=null disables grouping entirely.
        if len(group_by_fields) == 1 and group_by_fields[0] in ['None', 'null']:
            group_by_fields = None

    if 'date_histogram' in search_param_lists:
        date_histogram_fields = search_param_lists['date_histogram']
        del search_param_lists['date_histogram']  # We don't wanna use it as search filter.

    if 'date_histogram_interval' in search_param_lists:
        date_histogram_intervals = search_param_lists['date_histogram_interval']
        for interval in date_histogram_intervals:
            if interval not in interval_to_es_interval.keys():
                raise IndexError('"{}" is not one of daily, weekly, monthly, or yearly.'.format(interval))
        del search_param_lists['date_histogram_interval']  # We don't wanna use it as search filter.

    if not search_param_lists:
        search_param_lists = deepcopy(DEFAULT_BROWSE_PARAM_LISTS)
        del search_param_lists['award.project']

    if 'ExperimentSet' in search_param_lists['type'] or 'ExperimentSetReplicate' in search_param_lists['type']:
        # Add predefined sub-aggs to collect Exp and File counts from ExpSet items, in addition to getting own doc_count.
        common_sub_agg = deepcopy(SUM_FILES_EXPS_AGGREGATION_DEFINITION)

        # Add on file_size_volume: a "sum" agg over file_size for each of the
        # three file-count fields, by rewriting the cardinality field path.
        for key_name in ['total_exp_raw_files', 'total_exp_processed_files', 'total_expset_processed_files']:
            common_sub_agg[key_name + "_volume"] = {
                "sum" : {
                    "field" : common_sub_agg[key_name]["cardinality"]["field"].replace('.accession.raw', '.file_size')
                }
            }
        # Combined volume across all three, via bucket_script (bucket-level only).
        common_sub_agg["total_files_volume"] = {
            "bucket_script" : {
                "buckets_path": {
                    "expSetProcessedFilesVol": "total_expset_processed_files_volume",
                    "expProcessedFilesVol": "total_exp_processed_files_volume",
                    "expRawFilesVol": "total_exp_raw_files_volume"
                },
                "script" : "params.expSetProcessedFilesVol + params.expProcessedFilesVol + params.expRawFilesVol"
            }
        }

        if group_by_fields is not None:
            # One terms agg per group_by field, each carrying the common counts.
            group_by_agg_dict = {
                group_by_field : {
                    "terms" : {
                        "field" : "embedded." + group_by_field + ".raw",
                        "missing" : TERM_NAME_FOR_NO_VALUE,
                        "size" : 30
                    },
                    "aggs" : common_sub_agg
                }
                for group_by_field in group_by_fields if group_by_field is not None
            }
            histogram_sub_aggs = dict(common_sub_agg, **group_by_agg_dict)
        else:
            histogram_sub_aggs = common_sub_agg
    else:
        if group_by_fields is not None:
            # Do simple date_histogram group_by sub agg, unless is set to 'None'
            histogram_sub_aggs = {
                group_by_field : {
                    "terms" : {
                        "field" : "embedded." + group_by_field + ".raw",
                        "missing" : TERM_NAME_FOR_NO_VALUE,
                        "size" : 30
                    }
                }
                for group_by_field in group_by_fields if group_by_field is not None
            }
        else:
            histogram_sub_aggs = None

    # Create an agg item for each interval in `date_histogram_intervals` x each date field in `date_histogram_fields`
    # TODO: Figure out if we want to align these up instead of do each combination.
    outer_date_histogram_agg = {}
    for interval in date_histogram_intervals:
        for dh_field in date_histogram_fields:
            outer_date_histogram_agg[interval + '_interval_' + dh_field] = {
                "date_histogram" : {
                    "field": "embedded." + dh_field,
                    "interval": interval_to_es_interval[interval],
                    "format": "yyyy-MM-dd"
                }
            }
            if histogram_sub_aggs:
                outer_date_histogram_agg[interval + '_interval_' + dh_field]['aggs'] = histogram_sub_aggs

    # We only need aggregations, not hits -- request zero results.
    search_param_lists['limit'] = search_param_lists['from'] = [0]
    subreq = make_search_subreq(request, '{}?{}'.format('/browse/', urlencode(search_param_lists, True)) )
    search_result = perform_search_request(None, subreq, custom_aggregations=outer_date_histogram_agg)

    # Strip search-response envelope fields we don't need to return.
    for field_to_delete in ['@context', '@id', '@type', '@graph', 'title', 'filters', 'facets', 'sort', 'clear_filters', 'actions', 'columns']:
        if search_result.get(field_to_delete) is None:
            continue
        del search_result[field_to_delete]

    return search_result
@view_config(route_name='add_files_to_higlass_viewconf', request_method='POST')
@debug_log
def add_files_to_higlass_viewconf(context, request):
    """ Add multiple files to the given Higlass view config.

    Args:
        request(obj): Http request object. Assumes request's request is JSON and contains these keys:
            higlass_viewconfig(obj)                  : JSON of the current Higlass views. If None, uses a default view.
            files(array)                             : A list of file uuids to add.
            firstViewLocationAndZoom(array, optional): A list of three numbers indicating the location and zoom levels of the first existing view.
            remove_unneeded_tracks(boolean, optional, default=False): If True, we'll remove tracks that are not needed for the view.
            height(integer, optional, default=300)   : Maximum Height the viewconfig can occupy.

    Returns:
        A dictionary.
            success(bool)           : Boolean indicating success.
            errors(str)             : A string containing errors. Will be None if this is successful.
            new_viewconfig(dict)    : New dict representing the new viewconfig.
            new_genome_assembly(str): A string showing the new genome assembly.
    """
    # Get the view config and its genome assembly. (Use a fall back if none was provided.)
    higlass_viewconfig = request.json_body.get('higlass_viewconfig', None)
    if not higlass_viewconfig:
        # @todo: this block will be removed when a workaround to run tests correctly.
        default_higlass_viewconf = get_item_or_none(request, "00000000-1111-0000-1111-000000000000")
        higlass_viewconfig = default_higlass_viewconf["viewconfig"]

        # Add a view section if the default higlass_viewconfig lacks one
        if "views" not in higlass_viewconfig:
            higlass_viewconfig["views"] = []

    # If no view config could be found, fail
    if not higlass_viewconfig:
        return {
            "success" : False,
            "errors": "No view config found.",
            "new_viewconfig": None,
            "new_genome_assembly" : None
        }

    # Get the list of files.
    file_uuids = request.json_body.get('files')
    if not isinstance(file_uuids, list):
        raise Exception("Expecting list of files.")

    # Collect other parameters from the request.
    first_view_location_and_zoom = request.json_body.get('firstViewLocationAndZoom', [None, None, None])
    remove_unneeded_tracks = request.json_body.get('remove_unneeded_tracks', None)
    genome_assembly = request.json_body.get('genome_assembly', None)
    maximum_height = request.json_body.get('height', 600)

    # Check the height of the display.
    if maximum_height < 100:
        return {
            "success" : False,
            "errors" : "Height cannot be below 100.",
            "new_viewconfig": None,
            "new_genome_assembly" : None
        }

    # Collect more info on each file.
    files_info, errors = get_file_higlass_information(request, file_uuids)
    if errors:
        return {
            "success" : False,
            "errors" : errors,
            "new_viewconfig": None,
            "new_genome_assembly" : None
        }

    # Validate the files to make sure they exist and have the correct genome assemblies.
    validation_check = validate_higlass_file_sources(files_info, genome_assembly)
    if not validation_check["success"]:
        # Pass through only the success/errors keys from the failed check.
        return_keys = ("success", "errors")
        error_response = { key:validation_check[key] for key in return_keys if key in validation_check }
        error_response["new_viewconfig"] = None
        error_response["new_genome_assembly"] = None
        return error_response

    # Extract the current_genome_assembly from the validation check.
    genome_assembly = genome_assembly or validation_check["genome_assembly"]

    views = higlass_viewconfig["views"]

    # For each file
    for current_file in files_info:
        # Try to add this file to the current views.
        views, errors = add_single_file_to_higlass_viewconf(views, current_file, genome_assembly, higlass_viewconfig, first_view_location_and_zoom, maximum_height)

        if errors:
            return {
                "success" : False,
                "errors" : "errors found while adding {file_uuid} : {errors}".format(file_uuid=current_file["uuid"], errors=errors),
                "new_viewconfig": None,
                "new_genome_assembly" : None
            }

    # Remove tracks that we don't need to represent this view conf.
    if remove_unneeded_tracks:
        remove_left_side_if_all_1D(views)

    higlass_viewconfig["zoomFixed"] = False
    higlass_viewconfig["views"] = views
    return {
        "success" : True,
        "errors": "",
        "new_viewconfig" : higlass_viewconfig,
        "new_genome_assembly" : genome_assembly
    }
def get_file_higlass_information(request, file_uuids):
    """Retrieve each file's item data and file format.

    Args:
        request : Network request
        file_uuids(list): A list of strings, where each string is a unique identifier to find a file.

    Returns:
        A 2-tuple of:
        - A list of dicts, one per file, each containing these keys:
            uuid(string)        : The text identifier.
            data(dict)          : Information on the file.
            file_format(string) : The type of file present.
        - An error string; "" when all files were found. On the first
          uuid that cannot be resolved, lookup stops and ([], error)
          is returned.
    """
    files_info = []
    for file_uuid in file_uuids:
        file_data = get_item_or_none(request, file_uuid)
        # Abort on the first unresolvable uuid -- callers treat a non-empty
        # error string as a total failure.  (Fixed: use `is None`, not `== None`.)
        if file_data is None:
            return [], "{uuid} does not exist, aborting".format(uuid=file_uuid)
        files_info.append({
            "uuid" : file_uuid,
            "data" : file_data,
            "file_format" : file_data["file_format"],
        })
    return files_info, ""
def validate_higlass_file_sources(files_info, expected_genome_assembly):
    """Validate that the files exist, are HiGlass-viewable, and agree on genome assembly.

    Args:
        files_info(list) : A list of dicts. Each dict contains the
            file's uuid and data.
        expected_genome_assembly(str, optional, default=None): If provided,
            each file should have this genome assembly. If it's not provided,
            all of the files will be checked to ensure they have a matching
            genome assembly.

    Returns:
        A dictionary with the following keys:
            success(bool)       : True if there were no errors.
            genome_assembly(str): The genome assembly of the files (only on success).
            errors(str)         : An error description ("" on success).
    """
    files_by_genome_assembly = {}
    # (Renamed loop vars: the originals shadowed the `file` builtin and the
    # imported `uuid` module.)
    for file_info in files_info:
        file_uuid = file_info["uuid"]
        file_data = file_info["data"]

        # Each file must have data, a higlass_uid, and a genome_assembly.
        if not file_data:
            return {
                "success" : False,
                "errors" : "File {uuid} does not exist".format(uuid=file_uuid),
            }
        if "higlass_uid" not in file_data:
            return {
                "success" : False,
                "errors" : "File {uuid} does not have higlass_uid".format(uuid=file_uuid)
            }
        if "genome_assembly" not in file_data:
            return {
                "success" : False,
                "errors" : "File {uuid} does not have genome assembly".format(uuid=file_uuid)
            }

        # Group file uuids by genome assembly (insertion order preserved).
        files_by_genome_assembly.setdefault(file_data["genome_assembly"], []).append(file_uuid)

    # Human-readable listing of every assembly that isn't the expected one,
    # used in the error messages below.
    human_readable_ga_listings = [
        "{ga}: {uuids}".format(ga=ga, uuids=", ".join(uuids))
        for ga, uuids in files_by_genome_assembly.items()
        if ga != expected_genome_assembly
    ]

    # Make sure all of the files have the same genome assembly.
    if len(files_info) > 0:
        if expected_genome_assembly:
            if expected_genome_assembly not in files_by_genome_assembly or \
                    len(files_by_genome_assembly.keys()) > 1:
                return {
                    "success" : False,
                    "errors" : "All files are not {expected} genome assembly: {files_by_ga}".format(
                        expected = expected_genome_assembly,
                        files_by_ga = "; ".join(human_readable_ga_listings),
                    )
                }
        elif len(files_by_genome_assembly.keys()) > 1:
            # (Fixed: dropped a dead `expected=` kwarg that the format string
            # below never referenced.)
            return {
                "success" : False,
                "errors" : "Files have multiple genome assemblies: {files_by_ga}".format(
                    files_by_ga = "; ".join(human_readable_ga_listings),
                )
            }

    # Make sure we found a genome assembly.
    if not (expected_genome_assembly or files_by_genome_assembly):
        return {
            "success" : False,
            "errors": "No Genome Assembly provided or found in files."
        }

    # Everything is verified.
    return {
        "success" : True,
        "errors": "",
        "genome_assembly": expected_genome_assembly or list(files_by_genome_assembly.keys())[0]
    }
def add_single_file_to_higlass_viewconf(views, file, genome_assembly, higlass_viewconfig, first_view_location_and_zoom, maximum_height):
    """ Add a single file to the list of views.

    Args:
        views(list)                     : All of the views from the view config.
        file(dict)                      : The file to add.
        genome_assembly(str)            : A string showing the new genome assembly.
        higlass_viewconfig(dict)        : View config description.
        first_view_location_and_zoom(list): 3 numbers (or 3 None) used to describe the camera position of the first view.
        maximum_height(integer)         : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str)  : A string explaining the error. This is None if there is no error.
    """
    # Investigate the base view to see if it has a center track with contents (excluding 2d-chromosome-grid)
    base_view_info = get_view_content_info(views[0])
    base_view_has_center_content = base_view_info["has_center_content"]

    # If there is only one view with no files inside, set the initial domains based on the genome assembly
    if len(views) == 1 and not (base_view_has_center_content or base_view_info["has_left_tracks"] or base_view_info["has_top_tracks"]):
        domain_sizes = get_initial_domains_by_genome_assembly(genome_assembly)
        views[0].update(domain_sizes)

    # Determine the kind of file we're working on:
    # - Is it 1D or 2D? (chromsize is considered 1D)
    # - Is it a reference file? (Positioning rules are different)
    # Each entry maps a file-format @id to its dimensionality, reference role,
    # and the handler function that actually inserts the track.
    file_format_settings = {
        "/file-formats/bg/" : {
            "dimensions": 1,
            "reference": None,
            "function": add_bg_bw_multivec_bed_file,
        },
        "/file-formats/bw/" : {
            "dimensions": 1,
            "reference": None,
            "function": add_bg_bw_multivec_bed_file,
        },
        "/file-formats/bed/" : {
            "dimensions": 1,
            "reference": None,
            "function": add_bg_bw_multivec_bed_file,
        },
        "/file-formats/bigbed/": {
            "dimensions": 1,
            "reference": None,
            "function": add_bigbed_file,
        },
        "/file-formats/beddb/": {
            "dimensions": 1,
            "reference": "gene-annotations",
            "function": add_beddb_file,
        },
        "/file-formats/chromsizes/" : {
            "dimensions": 1,
            "reference": "chromsizes",
            "function": add_chromsizes_file,
        },
        "/file-formats/mcool/" : {
            "dimensions": 2,
            "reference": None,
            "function": add_mcool_hic_file,
        },
        "/file-formats/hic/" : {
            "dimensions": 2,
            "reference": None,
            "function": add_mcool_hic_file,
        },
    }

    file_format = file["file_format"]
    if file_format not in file_format_settings:
        return None, "Unknown file format {file_format}".format(file_format=file_format)
    file_settings = file_format_settings[file_format]

    # Add a new view if all of these are true:
    # - This file is 2D.
    # - The base view has a central track with a 2D file.
    add_new_view = file_settings["dimensions"] == 2 and base_view_has_center_content
    if add_new_view:
        # If there are already 6 views and we need to add a new one, stop and return an error.
        if len(views) >= 6:
            return None, "You cannot have more than 6 views in a single display."

    # Based on the file type, call a subfunction to add the given file.
    return file_settings["function"](
        views,
        file["data"],
        genome_assembly,
        {
            "higlass_viewconfig": higlass_viewconfig,
            "first_view_location_and_zoom": first_view_location_and_zoom,
        },
        maximum_height,
    )
def get_initial_domains_by_genome_assembly(genome_assembly):
    """Get a list of defaults HiGlass data ranges for a file.

    Args:
        genome_assembly(string): Description of the genome assembly.

    Returns:
        A dict with these keys:
            initialXDomain(list): Contains 2 numbers. The HiGlass display will horizontally span all of these data points along the X axis. 0 would be the start of chr1, for example.
            initialYDomain(list): Contains 2 numbers. The HiGlass display will focus on the center of this data (for 2D views) or ignore initialYDomain entirely (for 1D views.)
    """
    # Known assembly sizes; anything unrecognized falls back to 2 Gb.
    known_sizes = {
        "GRCm38": 2725521370,
        "GRCh38": 3088269832,
        "dm6": 137547960,
        "galGal5": 1022704034,
    }
    size = known_sizes.get(genome_assembly, 2000000000)

    # Span from -25% to +125% of the assembly length so the whole genome is
    # visible with a margin on each side.
    low = -size / 4
    high = size * 5 / 4
    return {
        "initialXDomain": [low, high],
        "initialYDomain": [low, high],
    }
def get_view_content_info(view):
    """ Determines if the view has an empty center, and looks for 2d chromosome grids.

    Args:
        view(dict): The view to analyze.

    Returns:
        A dictionary with these keys:
            has_top_tracks(bool)        : True if the view has top side tracks
            has_left_tracks(bool)       : True if the view has left side tracks
            has_center_content(bool)    : True if there is any content in the center
                tracks other than a 2d-chromosome-grid.
            center_chromsize_index(int) : If there is a 2d-chromosome-grid in the
                center, the index in the contents list to find it; None otherwise.
    """
    tracks = view["tracks"]
    view_has_left_tracks = len(tracks.get("left", [])) > 0
    view_has_top_tracks = len(tracks.get("top", [])) > 0

    # Contents of the first center track, if any (may include chromsize grids).
    center_contents = []
    if len(tracks.get("center", [])) > 0:
        center_contents = tracks["center"][0].get("contents", [])

    # BUG FIX: the original compared the literal string "type" against
    # "2d-chromosome-grid" (always unequal), so any non-empty center counted
    # as content and the grid index below was never found. Compare each
    # track's actual "type" value instead.
    view_has_center_content = any(
        track.get("type") != "2d-chromosome-grid" for track in center_contents
    )

    # Determine the index of the chromosome grid (we assume there is only 1).
    view_center_chromsize_index = None
    for index, track in enumerate(center_contents):
        if track.get("type") == "2d-chromosome-grid":
            view_center_chromsize_index = index
            break

    return {
        "has_top_tracks" : view_has_top_tracks,
        "has_left_tracks" : view_has_left_tracks,
        "has_center_content" : view_has_center_content,
        "center_chromsize_index" : view_center_chromsize_index,
    }
def add_bg_bw_multivec_bed_file(views, file, genome_assembly, viewconfig_info, maximum_height):
    """ Add the bedGraph, bed, bigwig, or multivec file to the given views.

    Args:
        views(list) : All of the views from the view config.
        file(dict) : The file to add.
        genome_assembly(str): A string showing the new genome assembly.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # Build the new track; the default rendering is a divergent bar chart.
    new_track = {
        "server": "https://higlass.4dnucleome.org/api/v1",
        "tilesetUid": file["higlass_uid"],
        "options": {
            "name": get_title(file),
            "labelPosition": "topLeft",
            "showMousePosition": True,
            "mousePositionColor": "#999999",
            "labelTextOpacity": 0.6
        },
        "type": "horizontal-divergent-bar",
        "uid": uuid.uuid4(),
    }
    # bed files render as bedlike instead.
    if file["file_format"] == "/file-formats/bed/":
        new_track["type"] = "bedlike"
    # A multivec extra file switches the rendering to a stacked bar chart.
    for extra_file in file.get("extra_files") or []:
        if extra_file["file_format"].endswith("bed.multires.mv5/"):
            new_track["type"] = "horizontal-stacked-bar"
            new_track["options"]["barBorder"] = False
            break
    # Apply any per-file display overrides.
    defaults = file.get("higlass_defaults")
    if defaults:
        new_track["options"].update(defaults)
    return add_1d_file(views, new_track, genome_assembly, maximum_height)
def get_title(file):
    """ Returns a string containing the title for the given file.

    Args:
        file(dict): Describes the file.

    Returns:
        String representing the title.
    """
    # Prefer the track title; fall back to the display title.
    track_info = file.get("track_and_facet_info", {})
    return track_info.get("track_title", file["display_title"])
def add_bigbed_file(views, file, genome_assembly, viewconfig_info, maximum_height):
    """ Use the bigbed file to add to the given views.

    Args:
        views(list) : All of the views from the view config.
        file(dict) : The file to add.
        genome_assembly(str): A string showing the new genome assembly.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # Create a new track. bigbed files are rendered as a thin 1D vector heatmap.
    new_track_base = {
        "server": "https://higlass.4dnucleome.org/api/v1",
        "tilesetUid": file["higlass_uid"],
        "options": {
            "name": get_title(file),
            "colorRange": [],
            "labelPosition": "topLeft",
            "heatmapValueScaling": "log",
        },
        "height": 18,
        "type": "horizontal-vector-heatmap",
        "uid": uuid.uuid4(),
    }

    def get_color_by_index(color_index, known_colors):
        """Use linear interpolation to get a color for the given index.
        Assumes indices and values are between 0 and 255.

        Args:
            color_index(integer): The index we want to get the color for.
            known_colors(dict): A dictionary containing index to value mappings.
                If indices are missing, we'll use linear interpolation to guess
                the value.

        Returns:
            An integer noting the value.
        """
        # If the color_index is in known_colors, return that value
        if color_index in known_colors:
            return known_colors[color_index]
        # We need to linearly interpolate using 2 known values the value is between.
        # Sort all of the indices, adding 0 and 255 if they don't exist.
        known_color_indecies = [k for k in known_colors.keys()]
        if 0 not in known_color_indecies:
            known_color_indecies.append(0)
        if 255 not in known_color_indecies:
            known_color_indecies.append(255)
        known_color_indecies = sorted(known_color_indecies)
        # Get the two nearest indices the color_index is inbetween.
        lower_bound_index = known_color_indecies[0]
        upper_bound_index = known_color_indecies[-1]
        for index in known_color_indecies:
            if index >= color_index:
                upper_bound_index = index
                break
            else:
                # Track the greatest known index still below color_index.
                lower_bound_index = index
        # Get the values for the two bounding indices. Assume 0:0 and 255:255 if they are not provided.
        lower_value = known_colors.get(lower_bound_index, 0)
        upper_value = known_colors.get(upper_bound_index, 255)
        # Begin linear interpolation. First, calculate the slope.
        slope = (upper_value - lower_value) / (upper_bound_index - lower_bound_index)
        # Use the lower bound to discover the offset.
        offset = lower_value - (lower_bound_index * slope)
        # With the slope and the offset, we can calculate the expected value.
        interpolated_color = (slope * color_index) + offset
        return int(interpolated_color)

    # Add the color values for an RGB display. Add known values here and we will linearly interpolate everything else.
    color_range_by_color = {
        "red": {
            0:99,
            128:60,
            255:25,
        },
        "green": {
            0:20,
            128:12,
            255:13,
        },
        "blue": {
            0:99,
            128:60,
            255:25,
        },
    }
    # HiGlass expects a list of 256 strings, each containing an integer.
    for index in range(256):
        # Get derived colors.
        colors = { color : get_color_by_index(index, color_range_by_color[color]) for color in color_range_by_color.keys()}
        new_track_base["options"]["colorRange"].append(
            "rgba({r},{g},{b},1)".format(
                r=colors["red"],
                g=colors["green"],
                b=colors["blue"],
            )
        )
    # Apply any per-file display overrides.
    if file.get("higlass_defaults"):
        new_track_base["options"].update(file["higlass_defaults"])
    return add_1d_file(views, new_track_base, genome_assembly, maximum_height)
def add_1d_file(views, new_track, genome_assembly, maximum_height):
    """ Use file to add to all of view's tracks.

    Args:
        views(list) : All of the views from the view config.
        new_track(dict) : The track to add.
        genome_assembly(str): A string showing the new genome assembly.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    for view in views:
        top_tracks = view["tracks"]["top"]
        # Find the positions of top tracks that are not gene-annotation tracks,
        # so the new track lands after the gene annotations but before the chromsize tracks.
        candidate_positions = [
            i for i, track in enumerate(top_tracks)
            if "gene-annotations" not in track["type"]
        ]
        insert_at = candidate_positions[-1] if candidate_positions else 0
        top_tracks.insert(insert_at, deepcopy(new_track))
    return resize_1d_tracks(views, maximum_height)
def resize_1d_tracks(views, maximum_height):
    """ For each view, resize the top 1D tracks (excluding gene-annotation and chromosome labels.)

    Args:
        views(list) : All of the views from the view config.
        maximum_height(integer): Maximum height for the viewconf to hold all tracks.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    reference_types = ("horizontal-gene-annotations", "horizontal-chromosome-labels")
    for view in views:
        view_info = get_view_content_info(view)
        # Skip to the next view if there are no top tracks.
        if not view_info["has_top_tracks"]:
            continue
        data_tracks = [
            t for t in view["tracks"]["top"]
            if t["type"] not in reference_types + ("horizontal-vector-heatmap",)
        ]
        # Skip to the next view if there are no data tracks.
        if not data_tracks:
            continue
        gene_chromosome_tracks = [t for t in view["tracks"]["top"] if t["type"] in reference_types]
        # BUG FIX: the original used `in ("horizontal-vector-heatmap")` — membership in a
        # plain string (a substring test), not a tuple — so any track whose type was a
        # substring (e.g. "heatmap") matched. Use an exact comparison instead.
        heatmap_tracks = [t for t in view["tracks"]["top"] if t["type"] == "horizontal-vector-heatmap"]
        # Height budget allocated for all of the top tracks.
        remaining_height = maximum_height - 50
        # If there is a central view, the top rows will have less height to work with.
        if view_info["has_center_content"]:
            remaining_height = 100
        # Remove the height reserved by the chromosome and gene-annotation tracks.
        for track in gene_chromosome_tracks:
            remaining_height -= track.get("height", 50)
        # Remove the height reserved by the horizontal-vector-heatmap tracks.
        for track in heatmap_tracks:
            remaining_height -= track.get("height", 18)
        # Evenly divide the remaining height.
        height_per_track = remaining_height / len(data_tracks)
        if view_info["has_center_content"]:
            # Maximize the center track space, so cap the top track height to 35.
            height_per_track = min(height_per_track, 35)
        else:
            # The height should be no more than half the remaining height, capped at 125.
            height_per_track = min(height_per_track, remaining_height / 2, 125)
        # Minimum height is 20.
        height_per_track = max(height_per_track, 20)
        for track in data_tracks:
            # If it's too tall or too short, set it to the fixed height.
            if "height" not in track or track["height"] > height_per_track or track["height"] < height_per_track * 0.8:
                track["height"] = int(height_per_track)
    return views, ""
def add_beddb_file(views, file, genome_assembly, viewconfig_info, maximum_height):
    """ Use the beddb file to add gene-annotation tracks to the given views.

    Args:
        views(list) : All of the views from the view config.
        file(dict) : The file to add.
        genome_assembly(str): A string showing the new genome assembly.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # Shared template for the top and left variants of the track.
    template = {
        "server": "https://higlass.4dnucleome.org/api/v1",
        "tilesetUid": file["higlass_uid"],
        "options": {
            "name": get_title(file),
            "labelPosition": "hidden",
            "showMousePosition": True,
            "mousePositionColor": "#999999",
            "fontSize": 10,  # has to be set explicitly since Higlass has a different default value
            "geneAnnotationHeight": 12  # has to be set explicitly since Higlass has a different default value
        }
    }
    if file.get("higlass_defaults"):
        template["options"].update(file["higlass_defaults"])
    top_track = deepcopy(template)
    top_track["type"] = "horizontal-gene-annotations"
    top_track["height"] = 55  # has to be set explicitly since Higlass has a different default value
    top_track["uid"] = uuid.uuid4()
    left_track = deepcopy(template)
    left_track["type"] = "vertical-gene-annotations"
    left_track["width"] = 55  # has to be set explicitly since Higlass has a different default value
    left_track["uid"] = uuid.uuid4()
    for view in views:
        # Find out about the left and center tracks.
        content_info = get_view_content_info(view)
        # Point the genome position search bar at this file.
        update_genome_position_search_box(view, file)
        # The top track always goes in at position 0; the left track only
        # when the view has left content or central content.
        view["tracks"]["top"].insert(0, top_track)
        if content_info["has_left_tracks"] or content_info["has_center_content"]:
            view["tracks"]["left"].insert(0, left_track)
    return views, ""
def update_genome_position_search_box(view, new_file):
    """ Update the genome position search box for this view so it uses the given file.

    Args:
        view(dict) : Modified in place; holds the search box.
        new_file(dict) : Description of the source file.

    Returns:
        None
    """
    higlass_uid = new_file["higlass_uid"]
    view["autocompleteSource"] = "/api/v1/suggest/?d={uuid}&".format(uuid=higlass_uid)
    # Create the search box with default servers if it is missing.
    search_box = view.setdefault("genomePositionSearchBox", {
        "autocompleteServer" : "https://higlass.4dnucleome.org/api/v1",
        "chromInfoServer" : "https://higlass.4dnucleome.org/api/v1"
    })
    search_box["autocompleteId"] = higlass_uid
    # The genome assembly is optional on the file.
    if "genome_assembly" in new_file:
        search_box["chromInfoId"] = new_file["genome_assembly"]
    search_box["visible"] = True
def add_chromsizes_file(views, file, genome_assembly, viewconfig_info, maximum_height):
    """ Use the chromsizes file to add chromosome label tracks and a 2D grid to the given views.

    Args:
        views(list) : All of the views from the view config.
        file(dict) : The file to add.
        genome_assembly(str): A string showing the new genome assembly.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # Shared 1D template for the top and left label tracks.
    label_template = {
        "server": "https://higlass.4dnucleome.org/api/v1",
        "tilesetUid": file["higlass_uid"],
        "options": {
            "name": get_title(file),
            "showMousePosition": True,
            "mousePositionColor": "#999999",
        }
    }
    if file.get("higlass_defaults"):
        label_template["options"].update(file["higlass_defaults"])
    top_track = deepcopy(label_template)
    top_track["type"] = "horizontal-chromosome-labels"
    top_track["uid"] = uuid.uuid4()
    left_track = deepcopy(label_template)
    left_track["type"] = "vertical-chromosome-labels"
    left_track["uid"] = uuid.uuid4()
    # 2D grid content for the center; the grid doesn't display a name.
    grid_track = create_2d_content(file, "2d-chromosome-grid")
    del grid_track["options"]["name"]
    for view in views:
        # Find out about the left and center tracks.
        content_info = get_view_content_info(view)
        # Append the top label track; the left one only when the view has
        # left content or central content.
        view["tracks"]["top"].append(top_track)
        if content_info["has_left_tracks"] or content_info["has_center_content"]:
            view["tracks"]["left"].append(left_track)
        # Add a 2D chromsize grid overlay on the center (replace an existing grid if present).
        if content_info["has_center_content"]:
            center_contents = view["tracks"]["center"][0]["contents"]
            grid_index = content_info["center_chromsize_index"]
            if grid_index is not None:
                center_contents[grid_index] = grid_track
            else:
                center_contents.append(grid_track)
    return views, ""
def add_mcool_hic_file(views, file, genome_assembly, viewconfig_info, maximum_height):
    """ Use the mcool or hic file to add to the given view.

    Args:
        views(list) : All of the views from the view config.
        file(dict) : The file to add.
        genome_assembly(str): A string showing the new genome assembly.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # mcool/hic files render as a 2D heatmap in a center track.
    heatmap_content = create_2d_content(file, "heatmap")
    return add_2d_file(views, heatmap_content, viewconfig_info, maximum_height)
def add_2d_file(views, new_content, viewconfig_info, maximum_height):
    """ Add the new 2D content generated by the file to add to the first available view (create a new view if needed.)

    Args:
        views(list) : All of the views from the view config.
        new_content(dict) : Description of the new center track.
        viewconfig_info(dict): Information for the viewconfig, including the view parameters and view locks.
        maximum_height(integer) : All of the tracks should fit within this much height or less.

    Returns:
        views(list) : A list of the modified views. None if there is an error.
        error(str) : A string explaining the error. This is None if there is no error.
    """
    # Look at the first view.
    base_view_info = get_view_content_info(views[0])
    # If there is no center non-chromsize content, then this means the base view has an empty central contents.
    if not base_view_info["has_center_content"]:
        # Create a base central track if one doesn't exist.
        if len(views[0]["tracks"]["center"]) == 0:
            views[0]["tracks"]["center"] = [
                {
                    "contents":[],
                    "type": "combined",
                }
            ]
        # Add the file to the center
        views[0]["tracks"]["center"][0]["contents"].append(new_content)
        # Copy the top reference tracks to the left
        copy_top_reference_tracks_into_left(views[0], views)
        # Add the chromsize track as a 2D grid, if it doesn't exist.
        if base_view_info["center_chromsize_index"] == None:
            # Get the chromsize from the top tracks.
            chromsize_tracks = [ t for t in views[0]["tracks"]["top"] if "-chromosome-labels" in t["type"] ]
            contents = {
            }
            if len(chromsize_tracks) > 0:
                # Reuse the tileset and server of the last chromosome-labels track.
                chrom_source = chromsize_tracks[-1]
                for key in ("tilesetUid", "server"):
                    contents[key] = chrom_source.get(key, "")
            # NOTE(review): if no chromosome-labels track exists, this appends a grid
            # with no tilesetUid/server — presumably HiGlass tolerates that; confirm.
            contents["options"] = {}
            contents["type"] = "2d-chromosome-grid"
            # The grid should be the last item to draw so it is always visible.
            views[0]["tracks"]["center"][0]["contents"].append(contents)
        # Resize 1d tracks.
        views, error = resize_1d_tracks(views, maximum_height)
        return views, error
    # If there is central content, then we need to make a new view.
    # Stop if there are already 6 views.
    if len(views) >= 6:
        return None, "You cannot have more than 6 views in a single display."
    # Clone the base view, including tracks. Make sure the view and layout uids are unique.
    new_view = deepcopy(views[0])
    new_view["uid"] = uuid.uuid4()
    new_view["tracks"]["center"][0]["uid"] = uuid.uuid4()
    # Replace the central track with the new file (the first non-grid content is the heatmap).
    for i, track in enumerate(new_view["tracks"]["center"][0]["contents"]):
        if track["type"] != "2d-chromosome-grid":
            new_view["tracks"]["center"][0]["contents"][i] = new_content
            break
    # Change the uid of the chromosome grid on the central track.
    for i, track in enumerate(new_view["tracks"]["center"][0]["contents"]):
        if track["type"] == "2d-chromosome-grid":
            new_view["tracks"]["center"][0]["contents"][i]["uid"] = uuid.uuid4()
    views.append(new_view)
    # Resize/Repack views
    repack_higlass_views(views)
    # Create locks based on the base view so all views pan/zoom together.
    if len(views) > 1:
        for view in views:
            add_zoom_lock_if_needed(viewconfig_info["higlass_viewconfig"], view, viewconfig_info["first_view_location_and_zoom"])
    # Resize 1d tracks.
    views, error = resize_1d_tracks(views, maximum_height)
    return views, error
def create_2d_content(file, viewtype):
    """ Generates a 2D track.

    Args:
        file(dict): Information about the given file.
        viewtype(string): The desired content type.

    Returns:
        A dictionary that describes the content.
    """
    contents = {
        "tilesetUid": file["higlass_uid"],
        "type": viewtype,
        "server": "https://higlass.4dnucleome.org/api/v1",
        # Add specific information for this file.
        "options": {
            "name": get_title(file),
        },
    }
    if file.get("higlass_defaults"):
        contents["options"].update(file["higlass_defaults"])
    return contents
def copy_top_reference_tracks_into_left(target_view, views):
    """ Copy the reference tracks from the top track into the left (if the left doesn't have them already.)

    Args:
        target_view(dict) : View which will be modified to get the new tracks.
        views(list) : The first view contains the top tracks to copy from.

    Returns:
        target_view(dict) : The modified target view.
        (BUG FIX: the original docstring claimed a boolean was returned; the
        function has always returned target_view.)
    """
    if len(views) < 1:
        return target_view
    reference_file_type_mappings = {
        "horizontal-chromosome-labels": "vertical-chromosome-labels",
        "horizontal-gene-annotations": "vertical-gene-annotations",
    }
    # Look through all of the top views for the chromsize and the gene annotation tracks.
    # Make a deep copy of the found reference tracks.
    new_tracks = [
        deepcopy(track)
        for track in views[0]["tracks"]["top"]
        if track["type"] in reference_file_type_mappings
    ]
    # Change the horizontal track types to vertical track types.
    for track in new_tracks:
        # Rename the uid so it doesn't conflict with the top track.
        track_string = str(track.get("uid", ""))
        if track_string.startswith("top"):
            track["uid"] = track_string.replace("top", "left", 1)
        else:
            track["uid"] = uuid.uuid4()
        if track["type"] in reference_file_type_mappings:
            track["type"] = reference_file_type_mappings[ track["type"] ]
        # Swap the height and width, if they are present: the new height comes
        # from the old width and vice versa.
        new_height = track.get("width", None)
        new_width = track.get("height", None)
        if new_height and new_width:
            track["height"] = new_height
            track["width"] = new_width
        elif new_height:
            track["height"] = new_height
            del track["width"]
        elif new_width:
            track["width"] = new_width
            del track["height"]
    # Add the copied tracks to the left side of this view if it doesn't have the track already.
    # Iterate in reverse so insert(0, ...) preserves the original top-track order.
    for track in reversed(new_tracks):
        if not any(t for t in target_view["tracks"]["left"] if t["type"] == track["type"]):
            target_view["tracks"]["left"].insert(0, track)
    return target_view
def repack_higlass_views(views):
    """Set up the higlass views so they fit in a 3 x 2 grid. The packing order is:
    1 2 5
    3 4 6

    Args:
        views(list): Modifies the views and changes their position and size.

    Returns:
        None
    """
    # Only lay out between 1 and 6 views.
    count = len(views)
    if not 1 <= count <= 6:
        return
    # Evenly divide a 12 x 12 area: 1 view fills the row, 2-4 views split it
    # in half, 5-6 views split it in thirds.
    if count == 1:
        width = 12
    elif count <= 4:
        width = 6
    else:
        width = 4
    # One row of views gets the full height; more than 2 views use two rows.
    height = 12 if count <= 2 else 6
    x = y = 0
    for higlass_view in views:
        layout = higlass_view["layout"]
        layout["x"] = x
        layout["y"] = y
        layout["w"] = width
        layout["h"] = height
        # Advance the cursor, wrapping to the next row at the right edge.
        x += width
        if x >= 12:
            x = 0
            y += height
def add_zoom_lock_if_needed(view_config, view, scales_and_center_k):
    """ If there are multiple views, create a lock to keep them at the same position and scale.

    Args:
        view_config (dict) : The HiGlass view config. Will be modified.
        view (dict) : The view to add the lock to. Will be modified.
        scales_and_center_k(list) : 3 numbers used to note the position and zoom level.
            May contain Nones, in which case a position is derived from the base
            view's initial domains.

    Returns:
        True on success; None when no lock was needed (single view, or the
        view is already locked.)
    """
    # If there is only 1 view, then there is no need to add a lock.
    if len(view_config["views"]) <= 1:
        view_config["locationLocks"] = {}
        view_config["zoomLocks"] = {}
        return
    # Get the uid for this view
    view_uid = str(view["uid"])
    # If the view already exists in the viewconf, no work is needed.
    if view_uid in view_config["locationLocks"]["locksByViewUid"]:
        return
    # Find the lock the first view is in.
    base_uid = str(view_config["views"][0]["uid"])
    base_view_x = scales_and_center_k[0]
    base_view_y = scales_and_center_k[1]
    base_view_zoom = scales_and_center_k[2]
    base_initial_x_domain = view_config["views"][0]["initialXDomain"]
    base_initial_y_domain = view_config["views"][0]["initialYDomain"]
    # If there is no base view zoom, calculate it based on the X domain.
    if base_view_x == None and base_view_y == None and base_view_zoom == None:
        # Use the Domain's midway point for the lock's x and y coordinates.
        base_view_x = (base_initial_x_domain[0] + base_initial_x_domain[1]) / 2.0
        base_view_y = (base_initial_y_domain[0] + base_initial_y_domain[1]) / 2.0
        # The zoom level just needs to be the same.
        base_view_zoom = 1
    # Set the location and zoom locks.
    for lock_name in ("locationLocks", "zoomLocks"):
        # Refer to the same lock the base view uses.
        lockUuid = view_config[lock_name]["locksByViewUid"].get(base_uid, None)
        if not lockUuid:
            # The base view doesn't have a lock, so create a new one and add the base view to it.
            lockUuid = str(uuid.uuid4())
            view_config[lock_name]["locksByViewUid"][base_uid] = lockUuid
            view_config[lock_name]["locksDict"][lockUuid] = {}
            view_config[lock_name]["locksDict"][lockUuid][base_uid] = [
                base_view_x,
                base_view_y,
                base_view_zoom
            ]
        else:
            # The base view already has a lock; reuse its recorded coordinates.
            base_view_x = view_config[lock_name]["locksDict"][lockUuid][base_uid][0]
            base_view_y = view_config[lock_name]["locksDict"][lockUuid][base_uid][1]
            base_view_zoom = view_config[lock_name]["locksDict"][lockUuid][base_uid][2]
        # Lock the new view with the base view.
        view_config[lock_name]["locksByViewUid"][view_uid] = lockUuid
        view_config[lock_name]["locksDict"][lockUuid][view_uid] = [
            base_view_x,
            base_view_y,
            base_view_zoom
        ]
    # Copy the initialXDomain and initialYDomain so the view starts aligned with the base view.
    view["initialXDomain"] = view_config["views"][0]["initialXDomain"] or view["initialXDomain"]
    view["initialYDomain"] = view_config["views"][0]["initialYDomain"] or view["initialYDomain"]
    return True
def remove_left_side_if_all_1D(new_views):
    """ If the view config has no 2D files, then remove the left side from the view config.

    Args:
        new_views(list): The views that will make the new HiGlass view config. May be modified.

    Returns:
        True if the left side tracks were removed, False otherwise.
    """
    two_d_types = ("heatmap", "2d-chromosome-grid")
    # Search all views' central contents for any 2D files.
    for view in new_views:
        for center_track in view["tracks"]["center"]:
            if "contents" not in center_track:
                continue
            if any(t["type"] in two_d_types for t in center_track["contents"]):
                # 2D files found; don't remove any tracks.
                return False
    # No 2D content anywhere: clear the left side of every view.
    for view in new_views:
        view["tracks"]["left"] = []
    return True
|
3dfxsoftware/cbss-addons | refs/heads/master | portal_crm/__init__.py | 55 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import contact
|
Distrotech/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/conf/locale/ta/formats.py | 434 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F, Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M, Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
skosukhin/spack | refs/heads/esiwace | var/spack/repos/builtin/packages/libxkbui/package.py | 1 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libxkbui(AutotoolsPackage):
    """X.org libxkbui library."""
    # Upstream project page and release tarball location.
    homepage = "https://cgit.freedesktop.org/xorg/lib/libxkbui/"
    url      = "https://www.x.org/archive/individual/lib/libxkbui-1.0.2.tar.gz"
    # Second argument is the tarball checksum (32 hex chars — presumably md5,
    # the Spack convention of this era).
    version('1.0.2', 'a6210171defde64d9e8bcf6a6f6074b0')
    # X11 library dependencies needed at build/link time.
    depends_on('libx11')
    depends_on('libxt')
    depends_on('libxkbfile')
    # Build-only tooling.
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')
|
kvar/ansible | refs/heads/seas_master_2.9.5 | lib/ansible/modules/cloud/amazon/ec2_group.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by ansible-doc and the
# validate-modules sanity checks.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['stableinterface'],
    'supported_by': 'core',
}
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
requirements: [ boto3 ]
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
- One of and only one of I(name) or I(group_id) is required.
- Required if I(state=present).
required: false
group_id:
description:
- Id of group to delete (works only with absent).
- One of and only one of I(name) or I(group_id) is required.
required: false
version_added: "2.4"
description:
description:
- Description of the security group. Required when C(state) is C(present).
required: false
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied,
no inbound rules will be enabled. Rules list may include its own name in `group_name`.
This allows idempotent loopback additions (e.g. allow group to access itself).
Rule sources list support was added in version 2.4. This allows to define multiple sources per
source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
In version 2.5 support for rule descriptions was added.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied,
a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
was added.
required: false
version_added: "1.6"
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
type: bool
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
type: bool
tags:
version_added: "2.4"
description:
- A dictionary of one or more tags to assign to the security group.
required: false
purge_tags:
version_added: "2.4"
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
tags will not be modified.
required: false
default: yes
type: bool
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
- Preview diff mode support is added in version 2.7.
'''
EXAMPLES = '''
- name: example using security group rule descriptions
ec2_group:
name: "{{ name }}"
description: sg with rule descriptions
vpc_id: vpc-xxxxxxxx
profile: "{{ aws_profile }}"
region: us-east-1
rules:
- proto: tcp
ports:
- 80
cidr_ip: 0.0.0.0/0
rule_desc: allow all on port 80
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
# this should only be needed for EC2 Classic security group rules
# because in a VPC an ELB will use a user-account security group
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
- proto: all
# in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
# traffic on all ports is allowed, regardless of any ports you specify
from_port: 10050 # this value is ignored
to_port: 10050 # this value is ignored
cidr_ip: 10.0.0.0/8
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
cidr_ipv6: 64:ff9b::/96
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
- name: example2 ec2 group
ec2_group:
name: example2
description: an example2 EC2 group
vpc_id: 12345
region: eu-west-1
rules:
# 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- proto: tcp
ports: 22
group_name: example-vpn
- proto: tcp
ports:
- 80
- 443
- 8080-8099
cidr_ip: 0.0.0.0/0
# Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
- proto: tcp
ports:
- 6379
- 26379
group_name:
- example-vpn
- example-redis
- proto: tcp
ports: 5665
group_name: example-vpn
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
cidr_ipv6:
- 2607:F8B0::/32
- 64:ff9b::/96
group_id:
- sg-edcd9784
diff: True
- name: "Delete group by its id"
ec2_group:
region: eu-west-1
group_id: sg-33b4ee5b
state: absent
'''
RETURN = '''
group_name:
description: Security group name
sample: My Security Group
type: str
returned: on create/update
group_id:
description: Security group id
sample: sg-abcd1234
type: str
returned: on create/update
description:
description: Description of security group
sample: My Security Group
type: str
returned: on create/update
tags:
description: Tags associated with the security group
sample:
Name: My Security Group
Purpose: protecting stuff
type: dict
returned: on create/update
vpc_id:
description: ID of VPC to which the security group belongs
sample: vpc-abcd1234
type: str
returned: on create/update
ip_permissions:
description: Inbound rules associated with the security group.
sample:
- from_port: 8182
ip_protocol: tcp
ip_ranges:
- cidr_ip: "1.1.1.1/32"
ipv6_ranges: []
prefix_list_ids: []
to_port: 8182
user_id_group_pairs: []
type: list
returned: on create/update
ip_permissions_egress:
description: Outbound rules associated with the security group.
sample:
- ip_protocol: -1
ip_ranges:
- cidr_ip: "0.0.0.0/0"
ipv6_ranges: []
prefix_list_ids: []
user_id_group_pairs: []
type: list
returned: on create/update
owner_id:
description: AWS Account ID of the security group
sample: 123456789012
type: int
returned: on create/update
'''
import json
import re
import itertools
from copy import deepcopy
from time import sleep
from collections import namedtuple
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.aws.iam import get_aws_account_id
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
from ansible.module_utils._text import to_text
from ansible.module_utils.six import string_types
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
# Canonical internal representation of a single security-group grant:
# port_range is a (from, to) tuple, target_type is one of valid_targets.
Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
# All rule target kinds this module understands (set literal instead of
# set([...]) — same value, idiomatic form).
valid_targets = {'ipv4', 'ipv6', 'group', 'ip_prefix'}
# Cached AWS account id; populated lazily elsewhere in the module.
current_account_id = None
def rule_cmp(a, b):
    """Return True when two Rule tuples are equivalent, ignoring descriptions."""
    wildcard_ranges = ((None, None), (-1, -1))
    # For textually-equal protocols, (None, None) and (-1, -1) port ranges are
    # interchangeable spellings of "all ports".
    ports_equivalent = (
        a.port_range == b.port_range
        or (to_text(a.protocol) == to_text(b.protocol)
            and a.port_range in wildcard_ranges
            and b.port_range in wildcard_ranges)
    )
    if not ports_equivalent:
        return False
    # The remaining fields must match exactly.
    return (a.protocol == b.protocol
            and a.target == b.target
            and a.target_type == b.target_type)
def rules_to_permissions(rules):
    """Serialize every Rule in ``rules`` into a boto3 IpPermission dict."""
    return list(map(to_permission, rules))
def to_permission(rule):
    """Serialize one Rule namedtuple into a boto3 IpPermission dict."""
    permission = {'IpProtocol': rule.protocol}
    permission['FromPort'], permission['ToPort'] = rule.port_range
    if rule.target_type == 'ipv4':
        grant = {'CidrIp': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        permission['IpRanges'] = [grant]
    elif rule.target_type == 'ipv6':
        grant = {'CidrIpv6': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        permission['Ipv6Ranges'] = [grant]
    elif rule.target_type == 'group':
        if isinstance(rule.target, tuple):
            user_id, group_id, group_name = rule.target
            grant = {}
            if user_id:
                grant['UserId'] = user_id
            # group_id/group_name are mutually exclusive - give group_id more
            # precedence as it is more specific
            if group_id:
                grant['GroupId'] = group_id
            elif group_name:
                grant['GroupName'] = group_name
        else:
            grant = {'GroupId': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        permission['UserIdGroupPairs'] = [grant]
    elif rule.target_type == 'ip_prefix':
        grant = {'PrefixListId': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        permission['PrefixListIds'] = [grant]
    elif rule.target_type not in valid_targets:
        raise ValueError('Invalid target type for rule {0}'.format(rule))
    return fix_port_and_protocol(permission)
def rule_from_group_permission(perm):
    """Yield one Rule namedtuple per grant found in a boto3 IpPermission dict.

    A single permission may carry several CIDR/prefix-list/group grants; each
    grant becomes its own Rule.
    """
    def ports_from_permission(p):
        # NOTE(review): assumes FromPort/ToPort are either both absent or both
        # present; a permission carrying only one of them would KeyError below.
        if 'FromPort' not in p and 'ToPort' not in p:
            return (None, None)
        return (int(perm['FromPort']), int(perm['ToPort']))
    # outputs a rule tuple
    for target_key, target_subkey, target_type in [
        ('IpRanges', 'CidrIp', 'ipv4'),
        ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
        ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
    ]:
        if target_key not in perm:
            continue
        for r in perm[target_key]:
            # there may be several IP ranges here, which is ok
            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                r[target_subkey],
                target_type,
                r.get('Description')
            )
    if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
        for pair in perm['UserIdGroupPairs']:
            target = (
                pair.get('UserId', None),
                pair.get('GroupId', None),
                pair.get('GroupName', None),
            )
            if pair.get('UserId', '').startswith('amazon-'):
                # amazon-elb and amazon-prefix rules don't need
                # group-id specified, so remove it when querying
                # from permission
                target = (
                    target[0],
                    None,
                    target[2],
                )
            elif 'VpcPeeringConnectionId' in pair or pair.get('UserId') != current_account_id:
                # BUG FIX: this previously read pair['UserId'], which raised
                # KeyError for a pair without a UserId key (UserId is optional
                # in the EC2 API).  pair.get() keeps identical comparison
                # semantics — the reassigned tuple is the same as above anyway.
                target = (
                    pair.get('UserId', None),
                    pair.get('GroupId', None),
                    pair.get('GroupName', None),
                )
            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                target,
                'group',
                pair.get('Description')
            )
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_security_groups_with_backoff(connection, **kwargs):
    """describe_security_groups wrapped in exponential-backoff retries."""
    return connection.describe_security_groups(**kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def sg_exists_with_backoff(connection, **kwargs):
    """describe_security_groups that treats a missing group as an empty result
    instead of raising InvalidGroup.NotFound."""
    try:
        return connection.describe_security_groups(**kwargs)
    except is_boto3_error_code('InvalidGroup.NotFound'):
        return {'SecurityGroups': []}
def deduplicate_rules_args(rules):
    """Drop exact-duplicate rule dicts, keeping first-occurrence order.

    Rules are keyed by their canonical (sorted-keys) JSON serialization, so
    two dicts with the same content are considered the same rule.
    """
    if rules is None:
        return None
    unique = {}
    for rule in rules:
        unique[json.dumps(rule, sort_keys=True)] = rule
    return list(unique.values())
def validate_rule(module, rule):
    """Fail the module when ``rule`` has unknown keys or conflicting sources."""
    valid_params = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
                    'group_id', 'group_name', 'group_desc',
                    'proto', 'from_port', 'to_port', 'rule_desc')
    if not isinstance(rule, dict):
        module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
    for key in rule:
        if key not in valid_params:
            module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(key, rule))
    # Exactly one source may be given per rule; report the first conflicting
    # pair found (same order as the original elif chain).
    mutually_exclusive = (
        ('group_id', 'cidr_ip'),
        ('group_name', 'cidr_ip'),
        ('group_id', 'cidr_ipv6'),
        ('group_name', 'cidr_ipv6'),
        ('cidr_ip', 'cidr_ipv6'),
        ('group_id', 'group_name'),
    )
    for first, second in mutually_exclusive:
        if first in rule and second in rule:
            module.fail_json(msg='Specify %s OR %s, not both' % (first, second))
            break
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
    """
    Returns tuple of (target_type, target, group_created) after validating rule params.
    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups.
    AWS accepts an ip range or a security group as target of a rule. This
    function validate the rule specification and return either a non-None
    group_id or a non-None ip range.

    Side effects: may create a referenced security group (when group_name is
    unknown and group_desc is supplied) and caches looked-up/created groups
    in ``groups`` under both their id and name.
    """
    # Matches "owner/sg-id/name" style foreign-account group references.
    FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
    group_id = None
    group_name = None
    target_group_created = False
    validate_rule(module, rule)
    if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
        group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
        groups[group_id] = group_instance
        groups[group_name] = group_instance
        # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
        if group_id and group_name:
            group_name = None
        return 'group', (owner_id, group_id, group_name), False
    elif 'group_id' in rule:
        return 'group', rule['group_id'], False
    elif 'group_name' in rule:
        group_name = rule['group_name']
        if group_name == name:
            # Rule refers to the group being managed itself (loopback rule).
            group_id = group['GroupId']
            groups[group_id] = group
            groups[group_name] = group
        elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
            # both are VPC groups, this is ok
            group_id = groups[group_name]['GroupId']
        elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
            # both are EC2 classic, this is ok
            group_id = groups[group_name]['GroupId']
        else:
            auto_group = None
            filters = {'group-name': group_name}
            if vpc_id:
                filters['vpc-id'] = vpc_id
            # if we got here, either the target group does not exist, or there
            # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
            # is bad, so we have to create a new SG because no compatible group
            # exists
            if not rule.get('group_desc', '').strip():
                # retry describing the group once
                try:
                    auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
                    module.fail_json(msg="group %s will be automatically created by rule %s but "
                                         "no description was provided" % (group_name, rule))
                except ClientError as e:  # pylint: disable=duplicate-except
                    module.fail_json_aws(e)
            elif not module.check_mode:
                params = dict(GroupName=group_name, Description=rule['group_desc'])
                if vpc_id:
                    params['VpcId'] = vpc_id
                try:
                    auto_group = client.create_security_group(**params)
                    get_waiter(
                        client, 'security_group_exists',
                    ).wait(
                        GroupIds=[auto_group['GroupId']],
                    )
                except is_boto3_error_code('InvalidGroup.Duplicate'):
                    # The group exists, but didn't show up in any of our describe-security-groups calls
                    # Try searching on a filter for the name, and allow a retry window for AWS to update
                    # the model on their end.
                    try:
                        auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                    except IndexError as e:
                        module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
                    except ClientError as e:
                        module.fail_json_aws(
                            e,
                            msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
            if auto_group is not None:
                group_id = auto_group['GroupId']
                groups[group_id] = auto_group
                groups[group_name] = auto_group
                # NOTE(review): only set when a group object was obtained; in
                # check mode group_id may remain None here.
                target_group_created = True
        return 'group', group_id, target_group_created
    elif 'cidr_ip' in rule:
        return 'ipv4', validate_ip(module, rule['cidr_ip']), False
    elif 'cidr_ipv6' in rule:
        return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
    elif 'ip_prefix' in rule:
        return 'ip_prefix', rule['ip_prefix'], False
    module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
def ports_expand(ports):
    """Expand a list of port specs into (from_port, to_port) tuples.

    Each entry may be an int (single port) or a string: either a single port
    ("80", "-1") or a range ("8080-8099").
    """
    ports_expanded = []
    for port in ports:
        if not isinstance(port, string_types):
            ports_expanded.append((port,) * 2)
        elif '-' in port[1:]:
            # A range like "8080-8099".  Checking past the first character
            # lets a negative single port such as "-1" fall through to the
            # int() branch instead of raising ValueError on int('').
            ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
        else:
            ports_expanded.append((int(port.strip()),) * 2)
    return ports_expanded
def rule_expand_ports(rule):
    """Expand a rule's 'ports' entry into one rule per (from_port, to_port)."""
    if 'ports' not in rule:
        # No shorthand: just normalize string from_port/to_port values to ints.
        if isinstance(rule.get('from_port'), string_types):
            rule['from_port'] = int(rule.get('from_port'))
        if isinstance(rule.get('to_port'), string_types):
            rule['to_port'] = int(rule.get('to_port'))
        return [rule]
    port_specs = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
    expanded = []
    for port_pair in ports_expand(port_specs):
        new_rule = rule.copy()
        del new_rule['ports']
        # sorted() guarantees from_port <= to_port even for reversed ranges.
        new_rule['from_port'], new_rule['to_port'] = sorted(port_pair)
        expanded.append(new_rule)
    return expanded
def rules_expand_ports(rules):
    """Expand the 'ports' shorthand of every rule; pass falsy input through."""
    if not rules:
        return rules
    expanded = []
    for rule in rules:
        expanded.extend(rule_expand_ports(rule))
    return expanded
def rule_expand_source(rule, source_type):
    """Expand one source field of ``rule`` into a list of single-source rules.

    Every other source field is stripped from the copies so each returned
    rule carries exactly one source of type ``source_type``.
    """
    all_source_keys = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
    values = rule[source_type]
    if not isinstance(values, list):
        values = [values]
    expanded = []
    for value in values:
        single_source_rule = rule.copy()
        for key in all_source_keys:
            single_source_rule.pop(key, None)
        single_source_rule[source_type] = value
        expanded.append(single_source_rule)
    return expanded
def rule_expand_sources(rule):
    """Expand every source field present on ``rule`` into individual rules."""
    all_source_keys = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
    expanded = []
    for source_key in all_source_keys:
        if source_key in rule:
            expanded.extend(rule_expand_source(rule, source_key))
    return expanded
def rules_expand_sources(rules):
    """Expand multi-source rules (cidr_ip/group_id/group_name lists) into
    single-source rules; pass falsy input through unchanged."""
    if not rules:
        return rules
    expanded = []
    for rule in rules:
        expanded.extend(rule_expand_sources(rule))
    return expanded
def update_rules_description(module, client, rule_type, group_id, ip_permissions):
    """Push updated rule descriptions for 'in' or 'out' rules of a group.

    No-op in check mode; fails the module on AWS errors.
    """
    if module.check_mode:
        return
    try:
        if rule_type == "in":
            client.update_security_group_rule_descriptions_ingress(
                GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == "out":
            client.update_security_group_rule_descriptions_egress(
                GroupId=group_id, IpPermissions=ip_permissions)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
def fix_port_and_protocol(permission):
    """Normalize a permission in place: ports become ints (None entries are
    dropped) and the protocol becomes text.  Returns the same dict."""
    for port_key in ('FromPort', 'ToPort'):
        if port_key not in permission:
            continue
        value = permission[port_key]
        if value is None:
            del permission[port_key]
        else:
            permission[port_key] = int(value)
    permission['IpProtocol'] = to_text(permission['IpProtocol'])
    return permission
def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
    """Revoke stale ingress/egress permissions; return True if any were revoked."""
    changed = bool(revoke_ingress) or bool(revoke_egress)
    if revoke_ingress:
        revoke(client, module, revoke_ingress, group_id, 'in')
    if revoke_egress:
        revoke(client, module, revoke_egress, group_id, 'out')
    return changed
def revoke(client, module, ip_permissions, group_id, rule_type):
    """Revoke the given permissions ('in' or 'out') from a security group.

    No-op in check mode; fails the module on AWS errors.
    """
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as e:
        label = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(label, ip_permissions))
def add_new_permissions(client, module, new_ingress, new_egress, group_id):
    """Authorize new ingress/egress permissions; return True if any were added."""
    changed = bool(new_ingress) or bool(new_egress)
    if new_ingress:
        authorize(client, module, new_ingress, group_id, 'in')
    if new_egress:
        authorize(client, module, new_egress, group_id, 'out')
    return changed
def authorize(client, module, ip_permissions, group_id, rule_type):
    """Authorize the given permissions ('in' or 'out') on a security group.

    No-op in check mode; fails the module on AWS errors.
    """
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as e:
        label = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(label, ip_permissions))
def validate_ip(module, cidr_ip):
    """Validate an IPv4/IPv6 CIDR string, warning when host bits are set.

    Returns the network form of the CIDR when host bits had to be masked
    off, otherwise the input unchanged.
    """
    split_addr = cidr_ip.split('/')
    if len(split_addr) == 2:
        # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
        # Get the network bits if IPv4, and validate if IPv6.
        try:
            ip = to_subnet(split_addr[0], split_addr[1])
            if ip != cidr_ip:
                module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                            "check the network mask and make sure that only network bits are set: {1}.".format(
                                cidr_ip, ip))
        except ValueError:
            # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
            try:
                # The isinstance() result is intentionally discarded; the real
                # check is ip_network() raising ValueError for invalid input.
                isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
                ip = cidr_ip
            except ValueError:
                # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
                # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
                ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
                if ip6 != cidr_ip:
                    module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                                "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
                return ip6
        return ip
    return cidr_ip
def update_tags(client, module, group_id, current_tags, tags, purge_tags):
    """Reconcile the security group's tags with the requested ``tags``.

    Deletes tags no longer wanted (when purge_tags) and creates/updates the
    rest.  Returns True when any tag change is (or would be) made; check mode
    computes the result without calling AWS.
    """
    tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
    if not module.check_mode:
        if tags_to_delete:
            try:
                client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
        # Add/update tags
        if tags_need_modify:
            try:
                client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
            except (BotoCoreError, ClientError) as e:
                # BUG FIX: this previously called module.fail_json(e, msg=...),
                # passing the exception where fail_json expects msg and
                # producing a TypeError on the error path; use fail_json_aws
                # like the delete branch above.
                module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
    return bool(tags_need_modify or tags_to_delete)
def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
    """Sync descriptions of rules that already exist but whose description
    differs from the desired one.  Matched rules are removed from the desired
    lists in place; returns True when any description update was issued."""
    client = module.client('ec2')
    def extract_desc_updates(present_rules, desired_rules):
        # Rules identical to an existing rule except for their description.
        updates = []
        for present_rule in present_rules:
            matches = [r for r in desired_rules
                       if rule_cmp(r, present_rule) and r.description != present_rule.description]
            for match in matches:
                desired_rules.remove(match)
            updates.extend(matches)
        return updates
    egress_needs_desc_update = extract_desc_updates(present_egress, named_tuple_egress_list)
    ingress_needs_desc_update = extract_desc_updates(present_ingress, named_tuple_ingress_list)
    changed = False
    if ingress_needs_desc_update:
        update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
        changed = True
    if egress_needs_desc_update:
        update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
        changed = True
    return changed
def create_security_group(client, module, name, description, vpc_id):
    """Create the security group and wait until AWS reports it fully formed.

    Returns the re-read group dict, or None in check mode.
    """
    if not module.check_mode:
        params = dict(GroupName=name, Description=description)
        if vpc_id:
            params['VpcId'] = vpc_id
        try:
            group = client.create_security_group(**params)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to create security group")
        # When a group is created, an egress_rule ALLOW ALL
        # to 0.0.0.0/0 is added automatically but it's not
        # reflected in the object returned by the AWS API
        # call. We re-read the group for getting an updated object
        # amazon sometimes takes a couple seconds to update the security group so wait till it exists
        # NOTE(review): this poll has no retry cap — if a VPC group never
        # shows its default egress rule the loop would spin forever.
        while True:
            sleep(3)
            group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
            if group.get('VpcId') and not group.get('IpPermissionsEgress'):
                pass
            else:
                break
        return group
    return None
def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
    """Poll until the group's rules reflect the requested state (best effort).

    Each direction is polled up to ``tries`` times with a 10s sleep between
    attempts; on timeout a warning is emitted rather than a failure.
    Returns the most recently fetched group dict.
    """
    group_id = group['GroupId']
    tries = 6
    def await_rules(group, desired_rules, purge, rule_key):
        # One polling loop for either 'IpPermissions' or 'IpPermissionsEgress'.
        for i in range(tries):
            current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
            if purge and len(current_rules ^ set(desired_rules)) == 0:
                return group
            elif purge:
                conflicts = current_rules ^ set(desired_rules)
                # For cases where set comparison is equivalent, but invalid port/proto exist
                for a, b in itertools.combinations(conflicts, 2):
                    if rule_cmp(a, b):
                        conflicts.discard(a)
                        conflicts.discard(b)
                if not len(conflicts):
                    return group
            elif current_rules.issuperset(desired_rules) and not purge:
                return group
            sleep(10)
            group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
        module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
        return group
    group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
    if 'VpcId' in group and module.params.get('rules_egress') is not None:
        # Egress rules only need waiting on for VPC groups with explicit egress.
        group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
    return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
def group_exists(client, module, vpc_id, group_id, name):
    """Look up a security group by id and/or name (optionally within a VPC).

    Returns (matched group or None, lookup dict of all groups keyed by both
    id and name; within a VPC, same-VPC groups win the name key).
    """
    params = {'Filters': []}
    if group_id:
        params['GroupIds'] = [group_id]
    if name:
        # Add name to filters rather than params['GroupNames']
        # because params['GroupNames'] only checks the default vpc if no vpc is provided
        params['Filters'].append({'Name': 'group-name', 'Values': [name]})
    if vpc_id:
        params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
    # Don't filter by description to maintain backwards compatibility
    try:
        security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
        all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Error in describe_security_groups")
    if not security_groups:
        return None, {}
    groups = {}
    for found in all_groups:
        groups[found['GroupId']] = found
    for found in all_groups:
        groups[found['GroupName']] = found
    if vpc_id:
        # Groups in the requested VPC take precedence for name lookups.
        for found in all_groups:
            if found.get('VpcId') and found['VpcId'] == vpc_id:
                groups[found['GroupName']] = found
    # maintain backwards compatibility by using the last matching group
    return security_groups[-1], groups
def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
    """Fail when rule descriptions are requested but the installed botocore
    lacks the update_security_group_rule_descriptions_* APIs (< 1.7.2)."""
    if not hasattr(client, "update_security_group_rule_descriptions_egress"):
        # BUG FIX: the previous expression
        #   rules if rules else [] + rules_egress if rules_egress else []
        # parsed as `rules if rules else (...)` due to conditional-expression
        # precedence, so egress rules were never checked whenever any ingress
        # rules were supplied.
        all_rules = (rules or []) + (rules_egress or [])
        if any('rule_desc' in rule for rule in all_rules):
            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
def get_diff_final_resource(client, module, security_group):
    """Predict the post-change security group (snake_cased) for --diff output."""
    def get_account_id(security_group, module):
        # Prefer the owner recorded on the group; fall back to an STS lookup
        # (needed for groups that do not exist yet).
        try:
            owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
        except (BotoCoreError, ClientError) as e:
            owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
        return owner_id
    def get_final_tags(security_group_tags, specified_tags, purge_tags):
        # Predict the tag set after apply, honouring purge_tags.
        if specified_tags is None:
            return security_group_tags
        tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
        end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
        end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
        end_result_tags.update(tags_need_modify)
        return end_result_tags
    def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
        # Predict the rule list after apply, honouring purge_rules; each
        # specified rule is reformatted into the snake_cased permission shape.
        if specified_rules is None:
            return security_group_rules
        if purge_rules:
            final_rules = []
        else:
            final_rules = list(security_group_rules)
        specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
        for rule in specified_rules:
            format_rule = {
                'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
                'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
            }
            if rule.get('proto', 'tcp') in ('all', '-1', -1):
                # "all traffic" rules carry no port range at all.
                format_rule['ip_protocol'] = '-1'
                format_rule.pop('from_port')
                format_rule.pop('to_port')
            elif rule.get('ports'):
                if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
                    rule['ports'] = [rule['ports']]
                for port in rule.get('ports'):
                    if isinstance(port, string_types) and '-' in port:
                        format_rule['from_port'], format_rule['to_port'] = port.split('-')
                    else:
                        format_rule['from_port'] = format_rule['to_port'] = port
            elif rule.get('from_port') or rule.get('to_port'):
                format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
                format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
            for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
                if rule.get(source_type):
                    rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
                    if rule.get('rule_desc'):
                        format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
                    else:
                        if not isinstance(rule[source_type], list):
                            rule[source_type] = [rule[source_type]]
                        format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
            if rule.get('group_id') or rule.get('group_name'):
                # Resolve referenced groups so the diff shows their details.
                rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
                format_rule['user_id_group_pairs'] = [{
                    'description': rule_sg.get('description', rule_sg.get('group_desc')),
                    'group_id': rule_sg.get('group_id', rule.get('group_id')),
                    'group_name': rule_sg.get('group_name', rule.get('group_name')),
                    'peering_status': rule_sg.get('peering_status'),
                    'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
                    'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
                    'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
                }]
                for k, v in list(format_rule['user_id_group_pairs'][0].items()):
                    if v is None:
                        format_rule['user_id_group_pairs'][0].pop(k)
            final_rules.append(format_rule)
        # Order final rules consistently
        final_rules.sort(key=get_ip_permissions_sort_key)
        return final_rules
    security_group_ingress = security_group.get('ip_permissions', [])
    specified_ingress = module.params['rules']
    purge_ingress = module.params['purge_rules']
    security_group_egress = security_group.get('ip_permissions_egress', [])
    specified_egress = module.params['rules_egress']
    purge_egress = module.params['purge_rules_egress']
    return {
        'description': module.params['description'],
        'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
        'group_name': security_group.get('group_name', module.params['name']),
        'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
        'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
        'owner_id': get_account_id(security_group, module),
        'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
        'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
def flatten_nested_targets(module, rules):
    """Flatten nested lists inside each rule's cidr_ip / cidr_ipv6 target.

    Some playbooks supply CIDR targets as arbitrarily nested lists; this
    normalises them to a single flat list of strings, in place, and returns
    the (possibly modified) rules list unchanged otherwise.
    """

    def _flatten(targets):
        # Depth-first walk: descend into sub-lists, yield plain strings.
        for entry in targets:
            if isinstance(entry, list):
                for flattened in _flatten(entry):
                    yield flattened
            elif isinstance(entry, string_types):
                yield entry

    if rules is not None:
        for rule in rules:
            nested_key = None
            if isinstance(rule.get('cidr_ip'), list):
                nested_key = 'cidr_ip'
            elif isinstance(rule.get('cidr_ipv6'), list):
                nested_key = 'cidr_ipv6'
            if nested_key is not None:
                rule[nested_key] = list(_flatten(rule[nested_key]))
    return rules
def get_rule_sort_key(dicts):
    """Return a stable sort key for a single rule-target dict.

    Checks, in priority order, cidr_ip, cidr_ipv6, prefix_list_id and
    group_id, and returns the first truthy value found (None otherwise).
    """
    for key in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id', 'group_id'):
        value = dicts.get(key)
        if value:
            return value
    return None


def get_ip_permissions_sort_key(rule):
    """Return a sort key for an IpPermissions-style rule dict.

    Sorts the first non-empty target list *in place* (so whole-rule
    ordering is deterministic) and keys the rule on the first element of
    that list; returns None when the rule has no targets at all.
    """
    target_lists = (
        ('ip_ranges', 'cidr_ip'),
        ('ipv6_ranges', 'cidr_ipv6'),
        ('prefix_list_ids', 'prefix_list_id'),
        ('user_id_group_pairs', 'group_id'),
    )
    for list_name, element_key in target_lists:
        targets = rule.get(list_name)
        if targets:
            targets.sort(key=get_rule_sort_key)
            return targets[0][element_key]
    return None
def main():
    """Module entry point: create, update or delete an EC2 security group.

    Reads the Ansible module parameters, reconciles the requested ingress
    and egress rules against the group's current state in AWS, and exits
    with the resulting security group facts (plus a diff when requested).
    """
    argument_spec = dict(
        name=dict(),
        group_id=dict(),
        description=dict(),
        vpc_id=dict(),
        rules=dict(type='list'),
        rules_egress=dict(type='list'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
        purge_rules=dict(default=True, required=False, type='bool'),
        purge_rules_egress=dict(default=True, required=False, type='bool'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, required=False, type='bool')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'group_id']],
        required_if=[['state', 'present', ['name']]],
    )

    name = module.params['name']
    group_id = module.params['group_id']
    description = module.params['description']
    vpc_id = module.params['vpc_id']
    # Normalise the requested rules: flatten nested CIDR lists, expand
    # port/source shorthands, then drop duplicates.
    rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
    rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
    rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
    rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
    state = module.params.get('state')
    purge_rules = module.params['purge_rules']
    purge_rules_egress = module.params['purge_rules_egress']
    tags = module.params['tags']
    purge_tags = module.params['purge_tags']

    if state == 'present' and not description:
        module.fail_json(msg='Must provide description when state is present.')

    changed = False
    client = module.client('ec2')

    verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
    group, groups = group_exists(client, module, vpc_id, group_id, name)
    group_created_new = not bool(group)

    global current_account_id
    current_account_id = get_aws_account_id(module)

    before = {}
    after = {}

    # Ensure requested group is absent
    if state == 'absent':
        if group:
            # found a match, delete it
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            try:
                if not module.check_mode:
                    client.delete_security_group(GroupId=group['GroupId'])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
            else:
                group = None
                changed = True
        else:
            # no match found, no changes required
            pass

    # Ensure requested group is present
    elif state == 'present':
        if group:
            # existing group
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            if group['Description'] != description:
                module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
                            "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
        else:
            # no match found, create it
            group = create_security_group(client, module, name, description, vpc_id)
            changed = True

    if tags is not None and group is not None:
        current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
        changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)

    # group is None here when state=absent (deleted or never existed)
    if group:
        named_tuple_ingress_list = []
        named_tuple_egress_list = []
        current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
        current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])

        for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
                                                            (rules_egress, 'out', named_tuple_egress_list)]:
            if new_rules is None:
                continue
            for rule in new_rules:
                target_type, target, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                changed |= target_group_created

                # 'all'/-1 protocol means every protocol; ports are meaningless then.
                if rule.get('proto', 'tcp') in ('all', '-1', -1):
                    rule['proto'] = '-1'
                    rule['from_port'] = None
                    rule['to_port'] = None
                try:
                    int(rule.get('proto', 'tcp'))
                    rule['proto'] = to_text(rule.get('proto', 'tcp'))
                    rule['from_port'] = None
                    rule['to_port'] = None
                except ValueError:
                    # rule does not use numeric protocol spec
                    pass
                named_tuple_rule_list.append(
                    Rule(
                        port_range=(rule['from_port'], rule['to_port']),
                        protocol=to_text(rule.get('proto', 'tcp')),
                        target=target, target_type=target_type,
                        description=rule.get('rule_desc'),
                    )
                )

        if module.params.get('rules_egress') is None and 'VpcId' in group:
            # when no egress rules are specified and we're in a VPC,
            # we add in a default allow all out rule, which was the
            # default behavior before egress rules were added
            rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
            if rule in current_egress:
                named_tuple_egress_list.append(rule)
            if rule not in current_egress:
                current_egress.append(rule)

        # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
        present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
        present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))

        if purge_rules:
            revoke_ingress = []
            for p in present_ingress:
                if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
                    revoke_ingress.append(to_permission(p))
        else:
            revoke_ingress = []
        if purge_rules_egress and module.params.get('rules_egress') is not None:
            # BUG FIX: this previously compared with "is []", which is an
            # identity test against a fresh list literal and therefore always
            # False, so an explicitly empty rules_egress never took this
            # branch.  Equality is the intended comparison.
            if module.params.get('rules_egress') == []:
                revoke_egress = [
                    to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
                    if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
                ]
            else:
                revoke_egress = []
                for p in present_egress:
                    if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
                        revoke_egress.append(to_permission(p))
        else:
            revoke_egress = []

        # named_tuple_ingress_list and named_tuple_egress_list got updated by
        # method update_rule_descriptions, deep copy these two lists to new
        # variables for the record of the 'desired' ingress and egress sg permissions
        desired_ingress = deepcopy(named_tuple_ingress_list)
        desired_egress = deepcopy(named_tuple_egress_list)

        changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)

        # Revoke old rules
        changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])

        # NOTE: the dead duplicate list-comprehension assignments of
        # new_ingress_permissions (computed then immediately overwritten) and
        # the unused rule_msg string were removed; rules_to_permissions is the
        # form actually used.
        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
        new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
        # Authorize new rules
        changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])

        if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
            # A new group with no rules provided is already being awaited.
            # When it is created we wait for the default egress rule to be added by AWS
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        elif changed and not module.check_mode:
            # keep pulling until current security group rules match the desired ingress and egress rules
            security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
        else:
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))

    else:
        security_group = {'group_id': None}

    if module._diff:
        if module.params['state'] == 'present':
            after = get_diff_final_resource(client, module, security_group)
            if before.get('ip_permissions'):
                before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
        security_group['diff'] = [{'before': before, 'after': after}]

    module.exit_json(changed=changed, **security_group)
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
|
Narrat/eispice | refs/heads/master | module/subckt.py | 3 | #
# Copyright (C) 2006-2007 Cooper Street Innovations Inc.
# Charles Eidsness <charles@cooper-street.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
This module provides a class that can be used as a base class to
create esipice sub-circuits.
Classes:
Subckt -- Base calss used to define a sub-circuit.
"""
import mutex
_subcktCnt = 0
class Subckt(list):
    """Base class inherited by user-defined sub-circuits.

    Similar to a subckt in Berkeley Spice.  Instances flatten themselves
    into a list of (name, device) tuples because the underlying simulator
    has no hierarchy support.

    Example:

    >>> import eispice
    >>> class Subckt1(eispice.Subckt):
    ...     def __init__(self, pNode, nNode, Rx, Ry):
    ...         self.Rx = eispice.R(pNode, self.node('node'), Rx)
    ...         self.Ry = eispice.R(nNode, self.node('node'), Ry)
    >>> class Subckt2(eispice.Subckt):
    ...     def __init__(self, pNode, nNode, Rx, Ry):
    ...         self.Rx = eispice.R(pNode, self.node('node'), Rx)
    ...         self.Xy = Subckt1(nNode, self.node('node'), Rx, Ry)
    >>> cct = eispice.Circuit("Subckt Test")
    >>> cct.Vx = eispice.V(1, 0, 10)
    >>> cct.Xx = Subckt2(1, 0, 100, 100)
    >>> cct.op()
    >>> cct.check_i('Vx', -10.0 / 300.0)
    True
    """
    def __new__(self, *args, **argsk):
        # NOTE: This is not necessarily the best way to do this.
        # In __new__ the first parameter is actually the *class*, so
        # subcktCnt is stored as a class attribute holding the id assigned
        # to the most recently constructed subckt instance.  node()/device()
        # read it immediately during __init__, before the next instantiation
        # overwrites it — presumably relying on construction order; confirm
        # before refactoring.
        self.subcktCnt = globals()['_subcktCnt']
        globals()['_subcktCnt'] += 1
        return list.__new__(self, args, argsk)

    def node(self, name):
        """Identifies a local node that doesn't exit the Subckt."""
        return "%s@%s" % (self.subcktCnt, name)

    def device(self, name):
        """Identifies a local device that doesn't exit the Subckt."""
        return "%s#%s" % (self.subcktCnt, name)

    def __setattr__(self, name, value):
        """
        Adds a device that can be accessed using its name, and creates a
        flat version by adding a prefix based on the instantiation's id to
        uniquely label the device instances.  This is done because the base
        simulator doesn't support hierarchy and every device needs a unique
        identifier.  If the device being attached is itself a Subckt
        (i.e. iterable as (name, device) pairs) its flat list is merged in.
        """
        self.__dict__[name] = value
        try:
            # A nested Subckt iterates as (name, device) tuples — merge it.
            for (subName, subValue) in value:
                self.append((subName, subValue))
        except TypeError:
            # Plain device: label it with this subckt's unique prefix.
            self.append((self.device(name),value))
# --------------------------------------------------------------------------- #
#                                    Test                                     #
# --------------------------------------------------------------------------- #

if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod(verbose=False)
    # Parenthesized form works as a statement on Python 2 and as the print
    # function on Python 3 (the bare py2 print statement is a py3 SyntaxError).
    print('Testing Complete')
|
Fiware/ops.Fuel-main-dev | refs/heads/master | fuelweb_ui_test/tests/test_deploy.py | 4 | import time
from selenium.webdriver import ActionChains
import browser
from pageobjects.environments import DeployChangesPopup
from pageobjects.header import TaskResultAlert
from pageobjects.node_disks_settings import DisksSettings
from pageobjects.node_interfaces_settings import InterfacesSettings
from pageobjects.nodes import Nodes, RolesPanel, DeleteNodePopup, NodeInfo
from pageobjects.base import PageObject
from tests import preconditions
from tests.base import BaseTestCase
class TestDeploy(BaseTestCase):
    """Selenium UI tests for deploying/changing a simple flat environment."""

    @classmethod
    def setUpClass(cls):
        BaseTestCase.setUpClass()

    def setUp(self):
        """Each test precondition

        Steps:
            1. Create simple environment with default values
            2. Click on created environment
        """
        BaseTestCase.clear_nailgun_database()
        BaseTestCase.setUp(self)
        preconditions.Environment.simple_flat()
        time.sleep(1)

    def test_add_nodes(self):
        """Deploy environment with controller and compute nodes

        Scenario:
            1. Add controller and compute node
            2. Deploy changes
            3. Verify that nodes statuses are ready
        """
        # Add a controller node.
        Nodes().add_nodes.click()
        Nodes().nodes_discovered[0].checkbox.click()
        RolesPanel().controller.click()
        Nodes().apply_changes.click()
        time.sleep(2)
        # Add a compute node.
        Nodes().add_nodes.click()
        time.sleep(1)
        Nodes().nodes_discovered[0].checkbox.click()
        RolesPanel().compute.click()
        Nodes().apply_changes.click()
        time.sleep(1)
        for node in Nodes().nodes:
            self.assertEqual(
                'pending addition', node.status.text.lower(),
                'Node status is Pending Addition')
        # Deploy and wait for the result alert.
        Nodes().deploy_changes.click()
        DeployChangesPopup().deploy.click()
        TaskResultAlert().close.click()
        with Nodes() as n:
            self.assertEqual(2, len(n.nodes), 'Nodes amount')
            for node in n.nodes:
                self.assertEqual('ready', node.status.text.lower(),
                                 'Node status is READY')

    def test_delete_node(self):
        """Delete one node and deploy changes

        Scenario:
            1. Add controller and compute node
            2. Deploy changes
            3. Delete one node
            4. Deploy changes
            5. Verify that only one node is present
        """
        # Reuse the two-node deployment as the starting state.
        self.test_add_nodes()
        with Nodes() as n:
            n.nodes[1].checkbox.click()
            n.delete_nodes.click()
        with DeleteNodePopup() as p:
            p.delete.click()
            p.wait_until_exists()
        time.sleep(1)
        self.assertEqual(
            'pending deletion', Nodes().nodes[1].status.text.lower(),
            'Node status is Pending Deletion')
        Nodes().deploy_changes.click()
        DeployChangesPopup().deploy.click()
        PageObject.click_element(TaskResultAlert(), 'close')
        with Nodes() as n:
            self.assertEqual(1, len(n.nodes), 'Nodes amount')
            for node in n.nodes:
                self.assertEqual('ready', node.status.text.lower(),
                                 'Node status is READY')

    def test_node_configure_networks_is_readonly(self):
        """Configure network interfaces after deploy

        Scenario:
            1. Add controller node
            2. Deploy changes
            3. Select controller node and click configure interfaces
            4. Drag and drop Storage network to eth1
            5. Verify that Storage network can't be dragged and dropped
            6. Apply, Load defaults, Cancel Changes buttons are not active
        """
        Nodes().add_nodes.click()
        Nodes().nodes_discovered[0].checkbox.click()
        RolesPanel().controller.click()
        Nodes().apply_changes.click()
        time.sleep(2)
        Nodes().deploy_changes.click()
        DeployChangesPopup().deploy.click()
        time.sleep(1)
        Nodes().nodes[0].details.click()
        NodeInfo().edit_networks.click()
        with InterfacesSettings() as s:
            # Attempt the drag; after deploy it must be a no-op.
            ActionChains(browser.driver).drag_and_drop(
                s.interfaces[0].networks['storage'],
                s.interfaces[1].networks_box).perform()
            time.sleep(1)
            self.assertNotIn(
                'storage', s.interfaces[1].networks,
                'storage at eht1')
            self.assertFalse(s.apply.is_enabled(), 'Apply is disabled')
            self.assertFalse(s.load_defaults.is_enabled(),
                             'Load defaults is disabled')
            self.assertFalse(s.cancel_changes.is_enabled(),
                             'Cancel changes is disabled')

    def test_node_configure_disks_is_readonly(self):
        """Configure disks after deploy

        Scenario:
            1. Add controller node
            2. Deploy changes
            3. Select controller node and click configure disks
            4. Verify that volume inputs are disabled
            6. Apply, Load defaults, Cancel Changes buttons are not active
        """
        Nodes().add_nodes.click()
        Nodes().nodes_discovered[0].checkbox.click()
        RolesPanel().controller.click()
        Nodes().apply_changes.click()
        time.sleep(2)
        Nodes().deploy_changes.click()
        DeployChangesPopup().deploy.click()
        time.sleep(1)
        Nodes().nodes[0].details.click()
        NodeInfo().edit_disks.click()
        time.sleep(1)
        with DisksSettings() as s:
            for i in range(2):
                self.assertFalse(
                    s.disks[i].volume_group_os.input.is_enabled(),
                    'Base system input is disabled at disk #{0}'.format(i))
                self.assertFalse(
                    s.disks[i].volume_group_image.input.is_enabled(),
                    'Image storage input is disabled at disk #{0}'.format(i))
            self.assertFalse(s.apply.is_enabled(), 'Apply is disabled')
            self.assertFalse(s.load_defaults.is_enabled(),
                             'Load defaults is disabled')
            self.assertFalse(s.cancel_changes.is_enabled(),
                             'Cancel changes is disabled')
|
stackforge/tricircle | refs/heads/master | tricircle/common/restapp.py | 1 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystonemiddleware import auth_token
from oslo_config import cfg
from oslo_middleware import request_id
from oslo_service import service
from tricircle.common import exceptions as t_exceptions
from tricircle.common.i18n import _
def auth_app(app):
    """Wrap a WSGI app with request-id and, optionally, keystone auth.

    Raises InvalidConfigurationOption for any auth_strategy other than
    'noauth' or 'keystone'.
    """
    app = request_id.RequestId(app)

    strategy = cfg.CONF.auth_strategy
    if strategy == 'keystone':
        # NOTE(zhiyuan) pkg_resources will try to load tricircle to get module
        # version, passing "project" as empty string to bypass it
        app = auth_token.AuthProtocol(app, {'project': ''})
    elif strategy != 'noauth':
        raise t_exceptions.InvalidConfigurationOption(
            opt_name='auth_strategy', opt_value=strategy)
    return app
# Module-level singleton; serve() may populate it exactly once.
_launcher = None


def serve(api_service, conf, workers=1):
    """Launch *api_service* under a ProcessLauncher.

    May only be called once per process; a second call raises RuntimeError.
    """
    global _launcher
    if _launcher is not None:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.ProcessLauncher(conf, restart_method='mutate')
    _launcher.launch_service(api_service, workers=workers)
def wait():
    """Block until the services launched via serve() have exited."""
    _launcher.wait()
|
LeMaker/RPi.GPIO_BP | refs/heads/bananapro | test/wait_for_edge.py | 3 | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
from threading import Timer
# Output pin (board numbering) assumed to be wired to the input channel.
PIN_NUM = 12
channel = 7

GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN_NUM, GPIO.OUT)
GPIO.output(PIN_NUM, True)
print("\n value_%d = %d\n" % (PIN_NUM, GPIO.input(PIN_NUM)))

GPIO.setup(channel, GPIO.IN, GPIO.PUD_DOWN)
print("\n value_%d = %d\n" % (channel, GPIO.input(channel)))


def makehigh():
    # Timer callback: report the input channel, then toggle the output pin
    # to generate the edge the main thread is blocking on.
    print("\n value_%d = %d\n" % (channel, GPIO.input(channel)))
    GPIO.output(PIN_NUM, False)
    print("\n value_%d = %d\n" % (PIN_NUM, GPIO.input(PIN_NUM)))


# BUG FIX: the timer must be created and started *before* the blocking
# wait_for_edge() call — in the original ordering wait_for_edge() ran first
# and the timer lines were unreachable, so no edge could ever occur and the
# script hung forever.
t = Timer(1, makehigh)
t.start()

# NOTE(review): makehigh() drives the output low, which looks like a falling
# edge on the channel, yet we wait for RISING — presumably the external
# wiring inverts the signal; confirm against the test rig.
GPIO.wait_for_edge(channel, GPIO.RISING)
|
theflofly/tensorflow | refs/heads/master | tensorflow/python/autograph/pyct/qual_names.py | 24 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
# Simple named wrapper; currently unused directly by QN (see TODO there).
class Symbol(collections.namedtuple('Symbol', ['name'])):
  """Represents a Python symbol."""
class StringLiteral(collections.namedtuple('StringLiteral', ['value'])):
  """Represents a Python string literal (rendered with single quotes)."""

  def __str__(self):
    return "'{}'".format(self.value)

  def __repr__(self):
    return self.__str__()
class NumberLiteral(collections.namedtuple('NumberLiteral', ['value'])):
  """Represents a Python numeric literal."""

  def __str__(self):
    return '{}'.format(self.value)

  def __repr__(self):
    return self.__str__()
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
  """Represents a qualified name.

  A QN is either simple (a bare symbol or literal), an attribute access on
  a parent QN, or a subscript of a parent QN — never both attr and
  subscript at once.
  """

  def __init__(self, base, attr=None, subscript=None):
    if attr is not None and subscript is not None:
      raise ValueError('A QN can only be either an attr or a subscript, not '
                       'both: attr={}, subscript={}.'.format(attr, subscript))
    self._has_attr = False
    self._has_subscript = False
    if attr is not None:
      # Attribute QN: base must itself be a QN, attr a plain string.
      if not isinstance(base, QN):
        raise ValueError(
            'for attribute QNs, base must be a QN; got instead "%s"' % base)
      if not isinstance(attr, str):
        raise ValueError('attr may only be a string; got instead "%s"' % attr)
      self._parent = base
      # TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
      self.qn = (base, attr)
      self._has_attr = True
    elif subscript is not None:
      # Subscript QN: base must be a QN; subscript is itself a QN.
      if not isinstance(base, QN):
        raise ValueError('For subscript QNs, base must be a QN.')
      self._parent = base
      self.qn = (base, subscript)
      self._has_subscript = True
    else:
      # Simple QN: a bare name or literal with no parent.
      if not isinstance(base, (str, StringLiteral, NumberLiteral)):
        # TODO(mdan): Require Symbol instead of string.
        raise ValueError(
            'for simple QNs, base must be a string or a Literal object;'
            ' got instead "%s"' % type(base))
      # NOTE: assert is stripped under -O; relies on callers passing clean names.
      assert '.' not in base and '[' not in base and ']' not in base
      self._parent = None
      self.qn = (base,)

  def is_symbol(self):
    # True for plain-string simple QNs (not literals).
    return isinstance(self.qn[0], str)

  def is_simple(self):
    return len(self.qn) <= 1

  def is_composite(self):
    return len(self.qn) > 1

  def has_subscript(self):
    return self._has_subscript

  def has_attr(self):
    return self._has_attr

  @property
  def parent(self):
    if self._parent is None:
      raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
    return self._parent

  @property
  def owner_set(self):
    """Returns all the symbols (simple or composite) that own this QN.

    In other words, if this symbol was modified, the symbols in the owner set
    may also be affected.

    Examples:
      'a.b[c.d]' has two owners, 'a' and 'a.b'
    """
    owners = set()
    if self.has_attr() or self.has_subscript():
      owners.add(self.parent)
      owners.update(self.parent.owner_set)
    return owners

  @property
  def support_set(self):
    """Returns the set of simple symbols that this QN relies on.

    This would be the smallest set of symbols necessary for the QN to
    statically resolve (assuming properties and index ranges are verified
    at runtime).

    Examples:
      'a.b' has only one support symbol, 'a'
      'a[i]' has two support symbols, 'a' and 'i'
    """
    # TODO(mdan): This might be the set of Name nodes in the AST. Track those?
    roots = set()
    if self.has_attr():
      roots.update(self.parent.support_set)
    elif self.has_subscript():
      roots.update(self.parent.support_set)
      roots.update(self.qn[1].support_set)
    else:
      roots.add(self)
    return roots

  def __hash__(self):
    # Include the kind flags so a[b] and a.b with equal tuples don't collide.
    return hash(self.qn + (self._has_attr, self._has_subscript))

  def __eq__(self, other):
    return (isinstance(other, QN) and self.qn == other.qn and
            self.has_subscript() == other.has_subscript() and
            self.has_attr() == other.has_attr())

  def __str__(self):
    if self.has_subscript():
      return str(self.qn[0]) + '[' + str(self.qn[1]) + ']'
    if self.has_attr():
      return '.'.join(map(str, self.qn))
    else:
      return str(self.qn[0])

  def __repr__(self):
    return str(self)

  def ssf(self):
    """Simple symbol form, e.g. 'a.b' -> 'a_b', 'a[b]' -> 'a_sub_b'."""
    ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
    ssf_string = ''
    for i in range(0, len(self.qn) - 1):
      if self.has_subscript():
        delimiter = '_sub_'
      else:
        delimiter = '_'
      ssf_string += ssfs[i] + delimiter
    return ssf_string + ssfs[-1]

  def ast(self):
    """Rebuild the gast AST node corresponding to this qualified name."""
    # The caller must adjust the context appropriately.
    if self.has_subscript():
      return gast.Subscript(self.parent.ast(), gast.Index(self.qn[-1].ast()),
                            None)
    if self.has_attr():
      return gast.Attribute(self.parent.ast(), self.qn[-1], None)

    base = self.qn[0]
    if isinstance(base, str):
      return gast.Name(base, None, None)
    elif isinstance(base, StringLiteral):
      return gast.Str(base.value)
    elif isinstance(base, NumberLiteral):
      return gast.Num(base.value)
    else:
      assert False, ('the constructor should prevent types other than '
                     'str, StringLiteral and NumberLiteral')
class QnResolver(gast.NodeTransformer):
  """Annotates nodes with QN information.

  Note: Not using NodeAnnos to avoid circular dependencies.
  """

  def visit_Name(self, node):
    node = self.generic_visit(node)
    anno.setanno(node, anno.Basic.QN, QN(node.id))
    return node

  def visit_Attribute(self, node):
    node = self.generic_visit(node)
    # Children are visited first, so the value's QN (if any) already exists.
    if anno.hasanno(node.value, anno.Basic.QN):
      anno.setanno(node, anno.Basic.QN,
                   QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
    return node

  def visit_Subscript(self, node):
    # TODO(mdan): This may no longer apply if we overload getitem.
    node = self.generic_visit(node)
    s = node.slice
    if not isinstance(s, gast.Index):
      # TODO(mdan): Support range and multi-dimensional indices.
      # Continuing silently because some demos use these.
      return node
    if isinstance(s.value, gast.Num):
      subscript = QN(NumberLiteral(s.value.n))
    elif isinstance(s.value, gast.Str):
      subscript = QN(StringLiteral(s.value.s))
    else:
      # The index may be an expression, case in which a name doesn't make sense.
      if anno.hasanno(node.slice.value, anno.Basic.QN):
        subscript = anno.getanno(node.slice.value, anno.Basic.QN)
      else:
        return node
    if anno.hasanno(node.value, anno.Basic.QN):
      anno.setanno(node, anno.Basic.QN,
                   QN(anno.getanno(node.value, anno.Basic.QN),
                      subscript=subscript))
    return node
def resolve(node):
  """Returns `node` with its subtree annotated with qualified names."""
  return QnResolver().visit(node)
def from_str(qn_str):
  """Parses a qualified-name string (e.g. 'a.b[0]') into a QN object."""
  node = parser.parse_expression(qn_str)
  node = resolve(node)
  return anno.getanno(node, anno.Basic.QN)
|
rlugojr/django | refs/heads/master | tests/model_package/models/__init__.py | 580 | # Import all the models from subpackages
from .article import Article
from .publication import Publication
__all__ = ['Article', 'Publication']
|
aYukiSekiguchi/ACCESS-Chromium | refs/heads/master | chrome_frame/combine_libs.py | 79 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(slightlyoff): move to using shared version of this script.
'''This script makes it easy to combine libs and object files to a new lib,
optionally removing some of the object files in the input libs by regular
expression matching.
For usage information, run the script with a --help argument.
'''
import optparse
import os
import re
import subprocess
import sys
def Shell(*args):
  '''Run args as a command line; return the program's stdout lines.

  stderr is merged into stdout.  Raises RuntimeError when the program
  exits with a non-zero status.
  '''
  child = subprocess.Popen(args,
                           stdin=None,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
  lines = child.stdout.readlines()
  child.wait()
  status = child.returncode
  if status != 0:
    raise RuntimeError('%s exited with status %d' % (args[0], status))
  return lines
def CollectRemovals(remove_re, inputs):
  '''Returns a list of all object files in inputs that match remove_re.

  Each input library is listed with 'lib.exe /list', which prints one
  contained object file per line.
  '''
  removals = []
  # FIX: the loop variable was named 'input', shadowing the builtin.
  for lib_path in inputs:
    output = Shell('lib.exe', '/list', lib_path)
    for line in output:
      line = line.rstrip()
      if remove_re.search(line):
        removals.append(line)
  return removals
def CombineLibraries(output, remove_re, inputs):
  '''Combines all the libraries and objects in inputs, while removing any
  object files that match remove_re.
  '''
  removals = []
  if remove_re:
    removals = CollectRemovals(remove_re, inputs)
    # FIX: parenthesized print works on both Python 2 and 3; the bare
    # py2 print statement is a py3 SyntaxError.
    print(removals)

  args = ['lib.exe', '/out:%s' % output]
  args += ['/remove:%s' % obj for obj in removals]
  args += inputs
  Shell(*args)
USAGE = '''usage: %prog [options] <lib or obj>+

Combines input libraries or objects into an output library, while removing
any object file (in the input libraries) that matches a given regular
expression.
'''


def GetOptionParser():
  '''Build and return the optparse parser for this script's command line.'''
  parser = optparse.OptionParser(USAGE)
  parser.add_option('-o', '--output', dest='output',
                    help='write to this output library')
  parser.add_option('-r', '--remove', dest='remove',
                    help='object files matching this regexp will be removed '
                         'from the output library')
  return parser
def Main():
  '''Main function for this script.

  Parses the command line, validates arguments and invokes
  CombineLibraries.  Returns 0 on success (parser.error exits directly).
  '''
  parser = GetOptionParser()
  (opt, args) = parser.parse_args()
  output = opt.output
  remove = opt.remove
  if not output:
    parser.error('You must specify an output file')
  if not args:
    parser.error('You must specify at least one object or library')
  output = output.strip()
  # FIX: opt.remove is None when -r is not given; unconditionally calling
  # .strip() raised AttributeError for the common no-removal case.
  if remove:
    remove = remove.strip()
  if remove:
    try:
      # FIX: compile the stripped pattern (previously compiled the raw
      # opt.remove, ignoring the strip above).
      remove_re = re.compile(remove)
    except re.error:
      # FIX: narrowed from a bare 'except:' which would also swallow
      # KeyboardInterrupt/SystemExit.
      parser.error('%s is not a valid regular expression' % remove)
  else:
    remove_re = None
  if sys.platform != 'win32' and sys.platform != 'cygwin':
    parser.error('this script only works on Windows for now')
  # If this is set, we can't capture lib.exe's output.
  if 'VS_UNICODE_OUTPUT' in os.environ:
    del os.environ['VS_UNICODE_OUTPUT']

  CombineLibraries(output, remove_re, args)
  return 0


if __name__ == '__main__':
  sys.exit(Main())
|
Pragmatismo/Pigrow | refs/heads/master | scripts/gui/graph_modules/graph_day_night.py | 1 |
def read_graph_options():
    '''
    Returns a dictionary of settings and their default values for use by
    the remote gui.
    '''
    # (key, default) pairs; returned to the caller as a plain dict in
    # this declaration order.
    option_defaults = (
        ("use_time", "lamp"),  # sun or lamp
        ("latitude", "51.50"),
        ("longitude", "0.12"),
        ("light_on_time_hour", "7"),
        ("light_on_time_min", "0"),
        ("light_off_time_hour", "22"),
        ("light_off_time_min", "00"),
        ("label_duration", "false"),
        ("title_text", ""),
        ("show_time_period", "true"),
        ("color_cycle", "false"),
        ("line_style", "-"),
        ("marker", ""),
        ("show_grid", "true"),
        ("major_ticks", ""),
        ("minor_ticks", "1"),
        ("ylabel", ""),
    )
    return dict(option_defaults)
def make_graph(list_of_datasets, graph_path, ymax="", ymin="", size_h="", size_v="", dh="", th="", tc="", dc="", extra=[]):
    """Plot datasets as lines over alternating day/night background bands.

    list_of_datasets -- list of [date_list, value_list, key_list] triples
    graph_path       -- file path the finished image is saved to
    ymax, ymin       -- optional y-axis limits as strings ("" = automatic)
    size_h, size_v   -- figure size in inches
    dh, th, tc, dc   -- unused here; kept for the shared graph-module interface
    extra            -- dict of options (see read_graph_options); any empty
                       value loads the defaults
    """
    print("Making a day/night graph graph...")
    import matplotlib
    import datetime
    matplotlib.use('agg')  # headless backend: output only goes to a file
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    import matplotlib.ticker as plticker
    day_color = "yellow"
    night_color = "darkblue"
    # BUG FIX: the default for `extra` is a list, so the original test
    # `extra == {}` could never be true and the defaults were never
    # loaded (then extra['use_time'] raised). Treat any falsy value as
    # "use the defaults".
    if not extra:
        extra = read_graph_options()
    # set variables to settings from dictionary converting to the appropriate type
    use_time = extra['use_time'].lower()
    latitude = float(extra['latitude'])
    longitude = float(extra['longitude'])
    light_on_time_hour = extra['light_on_time_hour']
    light_on_time_min = extra['light_on_time_min']
    light_off_time_hour = extra['light_off_time_hour']
    light_off_time_min = extra['light_off_time_min']
    label_duration = extra['label_duration']
    title_text = extra['title_text']
    color_cycle = extra['color_cycle'].lower()
    if ',' in color_cycle:
        # BUG FIX: the split result was previously discarded; keep it so a
        # comma-separated option becomes a list of colour names.
        color_cycle = color_cycle.split(",")
    line_style = extra['line_style']
    marker = extra['marker']
    line_flags = marker + line_style
    show_grid = extra['show_grid'].lower()
    major_ticks = extra['major_ticks']
    minor_ticks = extra['minor_ticks']
    show_time_period = extra["show_time_period"]
    ylabel = extra['ylabel']
    #import the tools we'll be using
    if use_time == "sun":
        from suntime import Sun # to install run the command pip3 install suntime
        sun = Sun(latitude, longitude)
    def make_dict_of_sets(date_list, value_list, key_list):
        # make a dictionary containing every day's list of dates and values,
        # plus light_markers (alternating light-on/off datetimes) and the
        # matching durations used for the optional band labels.
        dictionary_of_sets = {}
        light_markers = []
        durations = []
        for log_item_pos in range(0, len(date_list)):
            day_group = date_list[log_item_pos].strftime("%Y:%m:%d")
            log_time = date_list[log_item_pos]
            #log_time = log_time.replace(year=1980, month=1, day=1)
            if day_group in dictionary_of_sets:
                # Read existing lists of dates and values
                values_to_graph = dictionary_of_sets[day_group][0]
                dates_to_graph = dictionary_of_sets[day_group][1]
                # add current value and date to lists
                values_to_graph.append(value_list[log_item_pos])
                dates_to_graph.append(log_time)
            else:
                # create new date and value lists if the day_group doesn't exist yet
                values_to_graph = [value_list[log_item_pos]]
                dates_to_graph = [log_time]
                # create light on and off values for lamp,
                # or sunrise and sunset markers for sun mode
                day_text_split = day_group.split(":")
                ymd_dayname = datetime.date(int(day_text_split[0]), int(day_text_split[1]), int(day_text_split[2]))
                if use_time == "sun":
                    sunrise = sun.get_local_sunrise_time(ymd_dayname)
                    sunset = sun.get_local_sunset_time(ymd_dayname)
                    light_markers.append(sunrise)
                    light_markers.append(sunset)
                    duration = sunset - sunrise
                    print(duration)
                    durations.append(duration)
                    durations.append("")
                else:
                    light_on = day_group + " " + light_on_time_hour + ":" + light_on_time_min + ":00"
                    light_off = day_group + " " + light_off_time_hour + ":" + light_off_time_min + ":00"
                    light_on = datetime.datetime.strptime(light_on, "%Y:%m:%d %H:%M:%S")
                    light_off = datetime.datetime.strptime(light_off, "%Y:%m:%d %H:%M:%S")
                    light_markers.append(light_on)
                    light_markers.append(light_off)
                    duration = light_off - light_on
                    print(duration)
                    durations.append(duration)
                    durations.append("")
            # put the lists of values and dates into the dictionary of sets under the daygroup key
            dictionary_of_sets[day_group]=[values_to_graph, dates_to_graph]
        return dictionary_of_sets, light_markers, durations
    # define a graph space
    fig, ax = plt.subplots(figsize=(size_h, size_v))
    # apply a custom colour cycle when configured.  BUG FIX: a single
    # colour name is wrapped in a list, otherwise matplotlib iterates the
    # string and cycles its individual characters as colour codes.
    if isinstance(color_cycle, list):
        ax.set_prop_cycle(color=color_cycle)
    elif not color_cycle == 'false' and not color_cycle.strip() == '':
        ax.set_prop_cycle(color=[color_cycle])
    # cycle through and make plot
    for x in list_of_datasets:
        date_list = x[0]
        value_list = x[1]
        key_list = x[2]
        dictionary_of_sets, light_markers, durations = make_dict_of_sets(date_list, value_list, key_list)
        print(len(light_markers), len(durations))
        ax.plot(date_list, value_list, label=key_list[0], lw=1)
    # shade the background between consecutive light markers, alternating
    # day/night colours (uses the markers from the last dataset plotted)
    flip_color = day_color
    for x in range(0, len(light_markers)-1):
        pos1 = mdates.date2num(light_markers[x])
        pos2 = mdates.date2num(light_markers[x+1])
        ax.axvspan(pos1, pos2, color=flip_color, alpha=0.3)
        text_pos = pos2
        if label_duration == "true":
            if not ymin == "":
                label_height = float(ymin)
            else:
                label_height = 0
            ax.text(text_pos, label_height, " " + str(durations[x]), rotation=90,va='bottom',ha='right')
        if flip_color == night_color:
            flip_color = day_color
        else:
            flip_color = night_color
        #plt.axvline(x, color='darkblue', linewidth=5,alpha=0.3)
    # organise the graphing area
    if not major_ticks == "":
        loc = plticker.MultipleLocator(base=float(major_ticks)) # this locator puts ticks at regular intervals
        ax.yaxis.set_major_locator(loc)
    if not minor_ticks == "":
        loc = plticker.MultipleLocator(base=float(minor_ticks)) # this locator puts ticks at regular intervals
        ax.yaxis.set_minor_locator(loc)
    if show_grid == "true":
        plt.grid(axis='y')
    if show_time_period == "true":
        # typo fix: "Perod" -> "Period" in the user-facing title
        title_text = title_text + "\nTime Period; " + str(date_list[0].strftime("%b-%d %H:%M")) + " to " + str(date_list[-1].strftime("%b-%d %H:%M"))
    plt.title(title_text)
    if len(list_of_datasets) > 1:
        ax.legend()
    ax.xaxis_date()
    fig.autofmt_xdate()
    plt.ylabel(ylabel)
    if not ymax == "":
        plt.ylim(ymax=float(ymax))
    if not ymin == "":
        plt.ylim(ymin=float(ymin))
    # save the graph and tidy up our workspace
    plt.savefig(graph_path)
    print("divided days created and saved to " + graph_path)
    plt.close(fig)
|
sinmaniphel/py_isear_dataset | refs/heads/master | py_isear/isear_loader.py | 1 | import py_isear.enums as enums
import csv
class IsearSubset:
    """A pairing of column labels with the per-entry values for those columns."""
    def __init__(self, labels, values):
        # labels: list of column names; values: list of per-entry value lists
        self.labels = labels
        self.values = values
class IsearDataSet:
    """Holds the loaded ISEAR data: attribute subset, target subset and the
    free-text situation descriptions."""
    def __init__(self,
                 data=None,
                 target=None,
                 text_data=None):
        # BUG FIX: the original defaults (IsearSubset([], []) and []) were
        # single shared instances created once at class-definition time;
        # build fresh defaults per call instead.
        self.__data = data if data is not None else IsearSubset([], [])
        self.__target = target if target is not None else IsearSubset([], [])
        self.__text_data = text_data if text_data is not None else []
    def get_data(self):
        """Return the attribute values, one list per entry."""
        return self.__data.values
    def get_target(self):
        """Return the target values, one list per entry."""
        return self.__target.values
    def get_data_label_at(self, i):
        """Return the i-th attribute column label."""
        return self.__data.labels[i]
    def get_target_label_at(self, i):
        """Return the i-th target column label."""
        return self.__target.labels[i]
    def get_freetext_content(self):
        """Return the free-text (SIT) column contents."""
        return self.__text_data
class NoSuchFieldException(Exception):
    """Raised when a requested field name is not a known ISEAR column.

    BUG FIX: now derives from Exception -- the original plain class could
    not be raised under Python 3 (raising a non-BaseException instance is
    a TypeError).
    """
    def __init__(self, field_name):
        message = "No such field in dataset : " + field_name
        super(NoSuchFieldException, self).__init__(message)
        self.message = message
    def get_message(self):
        """Return the human-readable error message."""
        return self.message
class IsearLoader:
    """Loads the pipe-delimited ISEAR dataset file into an IsearDataSet,
    extracting the configured attribute columns, target columns and
    (optionally) the free-text SIT column."""
    def load_isear(self, s_isear_path):
        """Parse the ISEAR file at s_isear_path and return an IsearDataSet.

        The isear file extracted for the purpose of this initial
        loading is a pipe delimited csv-like file with headings.
        """
        entry_attributes = []
        text_data = []
        entry_target = []
        # BUG FIX: use a context manager so the file handle is closed even
        # on error (the original opened the file and never closed it).
        with open(s_isear_path, "r") as f_isear:
            isear_reader = csv.reader(f_isear,
                                      delimiter="|",
                                      quotechar='"')
            i = 0
            for isear_row in isear_reader:
                if i == 0:
                    # skip the heading row
                    i = i + 1
                    continue
                result = self.__parse_entry(isear_row,
                                            i,
                                            text_data)
                entry_attributes.append(result["attributes"])
                entry_target.append(result["target"])
                i = i + 1
        attributes_subset = IsearSubset(self.attribute_list,
                                        entry_attributes)
        target_subset = IsearSubset(self.target_list,
                                    entry_target)
        return IsearDataSet(attributes_subset,
                            target_subset,
                            text_data)
    def __parse_entry(self,
                      isear_row,   # the csv row of the entry
                      index,       # row number (kept for symmetry/debugging)
                      text_data):  # accumulator for the free-text column
        """Split one csv row into its attribute and target values."""
        i_col = 0
        l_attributes = []
        l_target = []
        # start parsing the columns
        for isear_col in isear_row:
            # we need to know to which field we are refering
            # handling the excess columns
            if i_col >= len(enums.CONST_ISEAR_CODES):
                break
            s_cur_col = enums.CONST_ISEAR_CODES[i_col]
            # for further test this will tell whether we are in the SIT column,
            # which is a text column
            b_is_sit = bool(s_cur_col == "SIT")
            if b_is_sit:
                if self.provide_text:
                    # collect the free text only when requested
                    text_data.append(isear_col)
            else:
                # numeric column: route it to attributes and/or targets
                if s_cur_col in self.attribute_list:
                    i_isear_col = int(isear_col)
                    l_attributes.append(i_isear_col)
                if s_cur_col in self.target_list:
                    i_isear_col = int(isear_col)
                    l_target.append(i_isear_col)
            # next column
            i_col = i_col + 1
        # we will return a pretty "free form" object
        return {"attributes": l_attributes,
                "target": l_target}
    def __init__(self,
                 attribute_list=[],
                 target_list=[],
                 provide_text=True):
        # list of attributes to extract, please refer to enums.py
        self.attribute_list = []
        self.set_attribute_list(attribute_list)
        # list of targets to extract
        self.target_list = []
        self.set_target_list(target_list)
        # provide the text, true by default
        self.provide_text = provide_text
    # compares attribute existence in the Isear labels
    def __check_attr_exists(self, attribute):
        return attribute in enums.CONST_ISEAR_CODES
    def set_attribute_list(self, attrs):
        """Set a list of attributes to extract
        Args:
            attrs (list): a list of strings refering Isear fields .
        Returns:
            self. in order to ease fluent programming (loader.set().set())
        Raises:
            NoSuchFieldException
        """
        self.attribute_list = []
        for attr in attrs:
            self.add_attribute(attr)
        return self
    def set_target_list(self, target):
        """Set a list of fields to extract as target
        Args:
            target (list): a list of strings refering Isear fields .
        Returns:
            self. in order to ease fluent programming (loader.set().set())
        Raises:
            NoSuchFieldException
        """
        self.target_list = []
        for tgt in target:
            self.add_target(tgt)
        return self
    def set_provide_text(self, is_provide_text):
        """ Tell the extractor whether to load the free text field.
        Behaviour is true by default
        Args:
            is_provide_text (bool): whether to provide the text field or not
        Return
            self. For fluent API
        """
        self.provide_text = is_provide_text
        return self
    def add_attribute(self, attr):
        """Append one attribute field; raises NoSuchFieldException if unknown."""
        b_att_ex = self.__check_attr_exists(attr)
        if b_att_ex is not True:
            ex = NoSuchFieldException(attr)
            raise ex
        self.attribute_list.append(attr)
        return self
    def add_target(self, attr):
        """Append one target field; raises NoSuchFieldException if unknown."""
        b_att_ex = self.__check_attr_exists(attr)
        if b_att_ex is not True:
            ex = NoSuchFieldException(attr)
            raise ex
        self.target_list.append(attr)
        return self
# def load_isear(self):
|
methoxid/micropystat | refs/heads/master | tests/extmod/uheapq1.py | 70 | try:
import uheapq as heapq
except:
import heapq
# heappop on an empty heap must raise IndexError; the printed line is
# compared against the test's expected output.
try:
    heapq.heappop([])
except IndexError:
    print("IndexError")
# heappush requires a mutable list; pushing onto a tuple must raise TypeError.
try:
    heapq.heappush((), 1)
except TypeError:
    print("TypeError")
def pop_and_print(h):
    """Drain heap *h* smallest-first and print the values space-separated."""
    drained = []
    while h:
        drained.append(str(heapq.heappop(h)))
    print(' '.join(drained))
# build a small heap by pushing, show its internal layout, then drain it
h = []
heapq.heappush(h, 3)
heapq.heappush(h, 1)
heapq.heappush(h, 2)
print(h)
pop_and_print(h)
# heapify an unordered list, push a few more values, and drain again;
# all printed lines are compared against the expected test output
h = [4, 3, 8, 9, 10, 2, 7, 11, 5]
heapq.heapify(h)
print(h)
heapq.heappush(h, 1)
heapq.heappush(h, 6)
heapq.heappush(h, 12)
print(h)
pop_and_print(h)
|
flodolo/bedrock | refs/heads/master | bedrock/mozorg/tests/test_hierarchy.py | 9 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mock import patch
from bedrock.mozorg.hierarchy import PageNode, PageRoot
from bedrock.mozorg.tests import TestCase
class TestPageNode(TestCase):
    """Tests for PageNode: parent wiring, full_path construction, tree
    navigation (root/breadcrumbs/previous/next) and url resolution."""
    def test_children_parents(self):
        """
        If a node is given children in the constructor, the children must mark
        the node as their parent.
        """
        children = [PageNode('test'), PageNode('test2')]
        parent = PageRoot('parent', children=children)
        for child in children:
            assert child.parent == parent
    def test_full_path(self):
        """
        full_path should return the path of this node and all of its parents
        joined by slashes.
        """
        child = PageNode('test', path='asdf')
        PageRoot('test', path='blah', children=[
            PageNode('test', path='whoo', children=[child])
        ])
        assert child.full_path == 'blah/whoo/asdf'
    def test_full_path_empty(self):
        """
        If one of a node's parents have an empty path, they should not be
        included in the full path.
        """
        child = PageNode('test', path='asdf')
        PageRoot('', path='blah', children=[PageNode('', children=[child])])
        assert child.full_path == 'blah/asdf'
    @patch('bedrock.mozorg.hierarchy.page')
    def test_page(self, page):
        """
        If a pagenode is given a template, it should provide a page for
        inclusion in a urlconf.
        """
        page.return_value = 'testreturn'
        assert PageNode('test').page is None
        node = PageNode('test', path='blah', template='test.html')
        parent = PageRoot('testparent', path='yo', children=[node])
        assert node.page == 'testreturn'
        page.assert_called_with('yo/blah', 'test.html', node_root=parent,
                                node=node)
    def test_path_to_root(self):
        """
        path_to_root should return an iterable of nodes following the route from
        the child node to the root of the tree.
        """
        child1 = PageNode('test')
        child2 = PageNode('test', children=[child1])
        root = PageRoot('test', children=[child2, PageNode('test')])
        assert list(child1.path_to_root) == [child1, child2, root]
    def test_breadcrumbs(self):
        """
        breadcrumbs should return a list of nodes following the path from the
        root to the child node.
        """
        child1 = PageNode('test')
        child2 = PageNode('test', children=[child1])
        root = PageRoot('test', children=[child2, PageNode('test')])
        assert list(child1.breadcrumbs) == [root, child2, child1]
    def test_root(self):
        """root should return the root of the page tree."""
        child1 = PageNode('test')
        child2 = PageNode('test', children=[child1])
        root = PageRoot('test', children=[child2, PageNode('test')])
        assert child1.root == root
    def test_no_root(self):
        """If the root of a tree is not a PageRoot, raise a ValueError."""
        child1 = PageNode('test')
        child2 = PageNode('test', children=[child1])
        PageNode('test', children=[child2, PageNode('test')])
        self.assertRaises(ValueError, lambda: child1.root)
    def test_previous(self):
        """
        Previous should return the previous sibling node, or None if one doesn't
        exist.
        """
        child1 = PageNode('', template='test1.html')
        child2 = PageNode('', template='test2.html')
        PageRoot('', children=[child1, child2])
        assert child2.previous == child1
        assert child1.previous is None
    def test_previous_cross(self):
        """
        If a node has no siblings, attempt to cross over to the children of the
        parent's sibling.
        """
        # Diagram of the final tree:
        #           root
        #          /    \
        #         O      O--
        #        /      /   \
        #       O      O     O
        #      /      / \   / \
        #     c1     c2 c3 c4  O
        child1 = PageNode('', template='test1.html')
        child2 = PageNode('', template='test2.html')
        child3 = PageNode('', template='test3.html')
        child4 = PageNode('', template='test4.html')
        root = PageRoot('', template='root.html', children=[
            PageNode('', children=[
                PageNode('', children=[child1])
            ]),
            PageNode('', children=[
                PageNode('', children=[child2, child3]),
                PageNode('', children=[child4, PageNode('')])
            ])
        ])
        assert root.previous is None
        assert child1.previous == root
        assert child2.previous == child1
        assert child3.previous == child2
        assert child4.previous == child3
    def test_next(self):
        """
        Next should return the next sibling node, or None if one doesn't exist.
        """
        child1 = PageNode('', template='test1.html')
        child2 = PageNode('', template='test1.html')
        PageRoot('', children=[child1, child2])
        assert child1.next == child2
        assert child2.next is None
    def test_next_cross(self):
        """
        If a node has no siblings, attempt to cross over to the children of the
        parent's sibling.
        """
        # Diagram of the final tree:
        #           root
        #          /    \
        #         O      O--
        #        /      /   \
        #       O      O     O
        #      /      / \   / \
        #     c1     c2 c3 c4  O
        child1 = PageNode('', template='test1.html')
        child2 = PageNode('', template='test2.html')
        child3 = PageNode('', template='test3.html')
        child4 = PageNode('', template='test4.html')
        root = PageRoot('', template='root.html', children=[
            PageNode('', children=[
                PageNode('', children=[child1])
            ]),
            PageNode('', children=[
                PageNode('', children=[child2, child3]),
                PageNode('', children=[child4, PageNode('')])
            ])
        ])
        assert root.next == child1
        assert child1.next == child2
        assert child2.next == child3
        assert child3.next == child4
        assert child4.next is None
    @patch('bedrock.mozorg.hierarchy.reverse')
    def test_url(self, reverse):
        """If a node has a page, url should return the url for that page."""
        node = PageRoot('test', path='asdf/qwer', template='fake.html')
        reverse.return_value = 'asdf'
        assert node.url == 'asdf'
        reverse.assert_called_with('fake')
    @patch('bedrock.mozorg.hierarchy.reverse')
    def test_url_child(self, reverse):
        """
        If a node doesn't have a page, but has children, it should return the
        url of its first child.
        """
        child1 = PageNode('test', path='asdf/qwer', template='fake.html')
        child2 = PageNode('test', path='bb/qr', template='fake2.html')
        parent = PageRoot('', children=[child1, child2])
        reverse.return_value = 'asdf'
        assert parent.url == 'asdf'
        reverse.assert_called_with('fake')
    def test_url_none(self):
        """If a node doesn't have a page or children, url should return None."""
        node = PageNode('')
        assert node.url is None
class TestPageRoot(TestCase):
    """Tests for PageRoot's urlconf generation."""
    @patch.object(PageNode, 'page')
    def test_as_urlpatterns(self, page):
        """
        as_urlpatterns should return a urlconf with the pages for all the nodes
        included in the tree.
        """
        child1 = PageNode('child1', path='asdf/qwer', template='fake.html')
        child2 = PageNode('child2', path='bb/qr', template='fake2.html')
        parent = PageNode('parent', children=[child1, child2])
        root = PageRoot('root', path='badsbi', template='fake3.html',
                        children=[parent])
        # Mocking properties: make each node's `page` resolve to its
        # display_name so the returned list is easy to assert against.
        page.__get__ = lambda mock, self, cls: self.display_name
        assert root.as_urlpatterns() == ['root', 'child1', 'child2']
|
Bodidze/21v-python | refs/heads/master | unit_02/calc/9.py | 2 | # Program make a simple calculator that can add, subtract, multiply and divide using functions
# define functions
def add(x, y):
    """Return the sum of the two numbers."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of the two numbers."""
    product = x * y
    return product
def divide(x, y):
    """Return x divided by y (ZeroDivisionError propagates for y == 0)."""
    quotient = x / y
    return quotient
def myhelp():
    # Print the usage banner listing the numeric operation codes
    # (Python 2 print statements; the multi-line string is the banner body).
    print " "*3 + "=" * 59
    print """ |                                                         |
   |  Usage operation:                                       |
   |   0   Display this usage message                        |
   |   1   Add                                               |
   |   2   Subtract                                          |
   |   3   Multiply                                          |
   |   4   Divide                                            |
   |                                                         | """
    print " "*3 + "=" * 59
# take input from the user
def menu():
    # Display the numbered operation menu (the mixed str-method calls look
    # like deliberate string-formatting demos) and return the user's choice
    # as a string; empty input falls back to '0' (help).
    print("Select operation:".upper().center(24, '~'))
    print("0.help".capitalize().ljust(16, '~'))
    print("1.add".title())
    print("2.Subtract")
    print("3.Multiply")
    print("4.Divide\n")
    print "=" * 21
    choice = raw_input("enter choice(0/1/2/3/4):")
    return str(choice) if choice != '' else '0'
# Drive the calculator: show the menu once, then run the chosen operation.
# NOTE(review): under Python 2, input() evaluates the typed text as an
# expression before int() is applied -- verify raw_input was not intended.
choice = menu()
if choice == '0':
    myhelp()
else:
    num1 = int(input("Enter first number: "))
    num2 = int(input("Enter second number: "))
    if choice == '1':
        print('The sum of {0} and {1} is {2}'.format(num1, num2, add(num1,num2)))
    elif choice == '2':
        print('The subtract of {0} and {1} is {2}'.format(num1, num2, subtract(num1,num2)))
    elif choice == '3':
        print('The multiply of {0} and {1} is int: {2:d} hex: {2:x}'.format(num1, num2, multiply(num1,num2)))
    elif choice == '4':
        print('The divide of {0} and {1} is {2:+08.2f}'.format(num1, num2, divide(num1,num2)))
    else:
        # any unrecognised choice re-shows the usage banner
        myhelp()
|
seibert/numba | refs/heads/master | numba/tests/test_dataflow.py | 2 | import warnings
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import types, errors
from numba.tests.support import (TestCase, CompilationCache,
skip_tryexcept_supported)
# Compilation flag presets used by the tests below:
# - enable_pyobj_flags: object mode allowed as a fallback
# - force_pyobj_flags: always compile in object mode
# - no_pyobj_flags: nopython mode only (no flags set)
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
# NOTE: these fixture functions are compiled by TestDataFlow below; their
# exact statement shapes (chained assignments, ternaries, and/or chains,
# loop/break structure) are the bytecode patterns under test, so they must
# not be "simplified".
def assignments(a):
    # chained assignment: both names bind the same str object
    b = c = str(a)
    return b + c
def assignments2(a):
    # triple chained assignment
    b = c = d = str(a)
    return b + c + d
# Use cases for issue #503
def var_propagate1(a, b):
    c = (a if a > b else b) + 5
    return c
def var_propagate2(a, b):
    c = 5 + (a if a > b else b + 12) / 2.0
    return c
def var_propagate3(a, b):
    # short-circuit and/or used as a value expression
    c = 5 + (a > b and a or b)
    return c
def var_propagate4(a, b):
    c = 5 + (a - 1 and b + 1) or (a + 1 and b - 1)
    return c
# Issue #480
def chained_compare(a):
    return 1 < a < 3
# Issue #591
def stack_effect_error(x):
    i = 2
    c = 1
    if i == x:
        for i in range(3):
            c = i
    return i + c
# Some more issues with stack effect and blocks
def for_break(n, x):
    # exercises for/else interaction with break
    for i in range(n):
        n = 0
        if i == x:
            break
    else:
        n = i
    return i, n
# Issue #571
def var_swapping(a, b, c, d, e):
    a, b = b, a
    c, d, e = e, c, d
    a, b, c, d = b, c, d, a
    return a + b + c + d +e
def unsupported_op_code():
    # needs unsupported "SETUP_EXCEPT" opcode
    try:
        pass
    except:
        pass
class TestDataFlow(TestCase):
    """Compile the fixture functions above and check the compiled results
    match the interpreter, in object mode and (where noted) nopython mode."""
    def setUp(self):
        self.cache = CompilationCache()
        # All tests here should run without warnings
        self.w_cm = warnings.catch_warnings()
        self.w_cm.__enter__()
        warnings.simplefilter("error")
        # some builds of NumPy use a Cython that reports spurious
        # ufunc object size mismatch warnings. These are safe to
        # ignore and not generated by later Cython versions.
        warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    def tearDown(self):
        self.w_cm.__exit__(None, None, None)
    def test_assignments(self, flags=force_pyobj_flags):
        pyfunc = assignments
        cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
        cfunc = cr.entry_point
        for x in [-1, 0, 1]:
            self.assertPreciseEqual(pyfunc(x), cfunc(x))
    def test_assignments2(self, flags=force_pyobj_flags):
        pyfunc = assignments2
        cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
        cfunc = cr.entry_point
        for x in [-1, 0, 1]:
            self.assertPreciseEqual(pyfunc(x), cfunc(x))
        if flags is force_pyobj_flags:
            cfunc("a")
    # The dataflow analysis must be good enough for native mode
    # compilation to succeed, hence the no_pyobj_flags in the following tests.
    def run_propagate_func(self, pyfunc, args):
        # helper: compile in nopython mode and compare against the interpreter
        cr = self.cache.compile(pyfunc, (types.int32, types.int32),
                                flags=no_pyobj_flags)
        cfunc = cr.entry_point
        self.assertPreciseEqual(cfunc(*args), pyfunc(*args))
    def test_var_propagate1(self):
        self.run_propagate_func(var_propagate1, (2, 3))
        self.run_propagate_func(var_propagate1, (3, 2))
    def test_var_propagate2(self):
        self.run_propagate_func(var_propagate2, (2, 3))
        self.run_propagate_func(var_propagate2, (3, 2))
    def test_var_propagate3(self):
        self.run_propagate_func(var_propagate3, (2, 3))
        self.run_propagate_func(var_propagate3, (3, 2))
        self.run_propagate_func(var_propagate3, (2, 0))
        self.run_propagate_func(var_propagate3, (-1, 0))
        self.run_propagate_func(var_propagate3, (0, 2))
        self.run_propagate_func(var_propagate3, (0, -1))
    def test_var_propagate4(self):
        self.run_propagate_func(var_propagate4, (1, 1))
        self.run_propagate_func(var_propagate4, (1, 0))
        self.run_propagate_func(var_propagate4, (1, -1))
        self.run_propagate_func(var_propagate4, (0, 1))
        self.run_propagate_func(var_propagate4, (0, 0))
        self.run_propagate_func(var_propagate4, (0, -1))
        self.run_propagate_func(var_propagate4, (-1, 1))
        self.run_propagate_func(var_propagate4, (-1, 0))
        self.run_propagate_func(var_propagate4, (-1, -1))
    def test_chained_compare(self, flags=force_pyobj_flags):
        pyfunc = chained_compare
        cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
        cfunc = cr.entry_point
        for x in [0, 1, 2, 3, 4]:
            self.assertPreciseEqual(pyfunc(x), cfunc(x))
    def test_chained_compare_npm(self):
        self.test_chained_compare(no_pyobj_flags)
    def test_stack_effect_error(self, flags=force_pyobj_flags):
        # Issue #591: POP_BLOCK must undo all stack pushes done inside
        # the block.
        pyfunc = stack_effect_error
        cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
        cfunc = cr.entry_point
        for x in (0, 1, 2, 3):
            self.assertPreciseEqual(pyfunc(x), cfunc(x))
    def test_stack_effect_error_npm(self):
        self.test_stack_effect_error(no_pyobj_flags)
    def test_var_swapping(self, flags=force_pyobj_flags):
        pyfunc = var_swapping
        cr = compile_isolated(pyfunc, (types.int32,) * 5, flags=flags)
        cfunc = cr.entry_point
        args = tuple(range(0, 10, 2))
        self.assertPreciseEqual(pyfunc(*args), cfunc(*args))
    def test_var_swapping_npm(self):
        self.test_var_swapping(no_pyobj_flags)
    def test_for_break(self, flags=force_pyobj_flags):
        # BREAK_LOOP must unwind the current inner syntax block.
        pyfunc = for_break
        cr = compile_isolated(pyfunc, (types.intp, types.intp), flags=flags)
        cfunc = cr.entry_point
        for (n, x) in [(4, 2), (4, 6)]:
            self.assertPreciseEqual(pyfunc(n, x), cfunc(n, x))
    def test_for_break_npm(self):
        self.test_for_break(no_pyobj_flags)
    @skip_tryexcept_supported
    def test_unsupported_op_code(self, flags=force_pyobj_flags):
        # try/except should be rejected with a clear UnsupportedError
        pyfunc = unsupported_op_code
        with self.assertRaises(errors.UnsupportedError) as raises:
            compile_isolated(pyfunc, (), flags=flags)
        msg="SETUP_EXCEPT"
        self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
|
tarballs-are-good/sympy | refs/heads/master | sympy/concrete/tests/test_gosper.py | 5 | def test_normal():
pass
def test_gosper():
    # Placeholder: tests for Gosper's algorithm not yet implemented.
    pass
|
IronLanguages/ironpython2 | refs/heads/master | Src/StdLib/Lib/test/leakers/test_ctypes.py | 173 |
# Taken from Lib/ctypes/test/test_keeprefs.py, PointerToStructure.test().
from ctypes import Structure, c_int, POINTER
import gc
def leak_inner():
    # Define two ctypes Structures where RECT holds a POINTER(POINT) field.
    # NOTE(review): per the file header this reproduces the keeprefs leak;
    # presumably the POINTER type cache keeps these classes alive -- confirm
    # against Lib/ctypes/test/test_keeprefs.py.
    class POINT(Structure):
        _fields_ = [("x", c_int)]
    class RECT(Structure):
        _fields_ = [("a", POINTER(POINT))]
def leak():
    # Entry point for the leak detector: perform the allocation, then
    # collect so only genuinely-leaked objects remain.
    leak_inner()
    gc.collect()
|
smmribeiro/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/filemerge.py | 90 | # filemerge.py - file-level merge handling for Mercurial
#
# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import short
from i18n import _
import util, simplemerge, match, error
import os, tempfile, re, filecmp
def _toolstr(ui, tool, part, default=""):
return ui.config("merge-tools", tool + "." + part, default)
def _toolbool(ui, tool, part, default=False):
return ui.configbool("merge-tools", tool + "." + part, default)
def _toollist(ui, tool, part, default=[]):
return ui.configlist("merge-tools", tool + "." + part, default)
internals = {}
def internaltool(name, trymerge, onfailure=None):
    '''return a decorator for populating internal merge tool table'''
    fullname = 'internal:' + name
    def register(func):
        # prepend the tool's public name to its docstring (used for help
        # output), tag the merge attributes, then record it in the table
        func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
        func.trymerge = trymerge
        func.onfailure = onfailure
        internals[fullname] = func
        return func
    return register
def _findtool(ui, tool):
    """Resolve `tool` to something runnable: the tool name itself for
    internal tools, an executable path otherwise, or None if not found."""
    if tool in internals:
        return tool
    # Windows registry lookup: regkey/regkeyalt name a key whose regname
    # value (plus regappend) points at the tool's executable.
    for kn in ("regkey", "regkeyalt"):
        k = _toolstr(ui, tool, kn)
        if not k:
            continue
        p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
        if p:
            p = util.findexe(p + _toolstr(ui, tool, "regappend"))
            if p:
                return p
    # fall back to the configured executable name (default: the tool name)
    exe = _toolstr(ui, tool, "executable", tool)
    return util.findexe(util.expandpath(exe))
def _picktool(repo, ui, path, binary, symlink):
    """Choose a merge tool for `path`, returning (tool name, shell-quoted
    path or None).  Priority order: ui.forcemerge, $HGMERGE, merge-patterns,
    configured merge-tools by priority, then internal merge/prompt."""
    def check(tool, pat, symlink, binary):
        # a tool is usable if it exists and supports the file's traits
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False
    # forcemerge comes from command line arguments, highest priority
    force = ui.config('ui', 'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if toolpath:
            return (force, util.shellquote(toolpath))
        else:
            # mimic HGMERGE if given tool not found
            return (force, force)
    # HGMERGE takes next precedence
    hgmerge = os.environ.get("HGMERGE")
    if hgmerge:
        return (hgmerge, hgmerge)
    # then patterns
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False):
            toolpath = _findtool(ui, tool)
            return (tool, util.shellquote(toolpath))
    # then merge tools
    tools = {}
    for k, v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
    names = tools.keys()
    # sort by descending priority (negated for ascending sort)
    tools = sorted([(-p, t) for t, p in tools.items()])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        if uimerge not in names:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary):
            toolpath = _findtool(ui, t)
            return (t, util.shellquote(toolpath))
    # internal merge or prompt as last resort
    if symlink or binary:
        return "internal:prompt", None
    return "internal:merge", None
def _eoltype(data):
"Guess the EOL type of a file"
if '\0' in data: # binary
return None
if '\r\n' in data: # Windows
return '\r\n'
if '\r' in data: # Old Mac
return '\r'
if '\n' in data: # UNIX
return '\n'
return None # unknown
def _matcheol(file, origfile):
    "Convert EOL markers in a file to match origfile"
    # detect the line-ending style of the reference file first
    tostyle = _eoltype(util.readfile(origfile))
    if tostyle:
        data = util.readfile(file)
        style = _eoltype(data)
        if style:
            newdata = data.replace(style, tostyle)
            # rewrite only if the conversion actually changed something
            if newdata != data:
                util.writefile(file, newdata)
@internaltool('prompt', False)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Asks the user which of the local or the other version to keep as
    the merged version."""
    # note: the docstring above is surfaced as user help via internaltool
    ui = repo.ui
    fd = fcd.path()
    # default (0) keeps local; any other choice takes the other version
    if ui.promptchoice(_(" no tool found to merge %s\n"
                         "keep (l)ocal or take (o)ther?") % fd,
                       (_("&Local"), _("&Other")), 0):
        return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
    else:
        return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
@internaltool('local', False)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Uses the local version of files as the merged version."""
    # nothing to do: the working copy already holds the local version
    return 0
@internaltool('other', False)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Uses the other version of files as the merged version."""
    # overwrite the working copy with the other side's data and flags
    repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
@internaltool('fail', False)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # non-zero return marks the file unresolved
    return 1
def _premerge(repo, toolconf, files):
    """Attempt a quiet simplemerge before invoking the real tool.
    Returns 0 when the premerge fully resolved the file, 1 when the
    configured tool still needs to run."""
    tool, toolpath, binary, symlink = toolconf
    if symlink:
        return 1
    a, b, c, back = files
    ui = repo.ui
    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not binary)
    except error.ConfigError:
        # non-boolean setting: only 'keep' is accepted
        premerge = _toolstr(ui, tool, "premerge").lower()
        valid = 'keep'.split()
        if premerge not in valid:
            _valid = ', '.join(["'" + v + "'" for v in valid])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))
    if premerge:
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(" premerge successful\n")
            return 0
        if premerge != 'keep':
            # 'keep' leaves conflict markers in place for the tool;
            # otherwise restore the pristine copy before the tool runs
            util.copyfile(back, a) # restore from backup and try again
    return 1 # continue merging
@internaltool('merge', True,
              _("merging %s incomplete! "
                "(edit conflicts, then use 'hg resolve --mark')\n"))
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file."""
    tool, toolpath, binary, symlink = toolconf
    if symlink:
        # simplemerge operates on file contents and cannot merge link targets
        repo.ui.warn(_('warning: internal:merge cannot merge symlinks '
                       'for %s\n') % fcd.path())
        return False, 1
    r = _premerge(repo, toolconf, files)
    if r:
        # premerge left conflicts: run simplemerge with visible markers
        a, b, c, back = files
        ui = repo.ui
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
        return True, r
    return False, 0
@internaltool('dump', True)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``."""
    r = _premerge(repo, toolconf, files)
    if r:
        # write the three inputs next to the file for a manual merge
        a, b, c, back = files
        fd = fcd.path()
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
    return False, r
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files):
    """Run an external merge tool (after a premerge attempt), substituting
    $local/$base/$other/$output in its configured argument string and
    exporting HG_* context variables into its environment."""
    r = _premerge(repo, toolconf, files)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = dict(HG_FILE=fcd.path(),
                   HG_MY_NODE=short(mynode),
                   HG_OTHER_NODE=str(fco.changectx()),
                   HG_BASE_NODE=str(fca.changectx()),
                   HG_MY_ISLINK='l' in fcd.flags(),
                   HG_OTHER_ISLINK='l' in fco.flags(),
                   HG_BASE_ISLINK='l' in fca.flags())
        ui = repo.ui
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            # tool writes to the original path; feed it the backup as input
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: util.shellquote(util.localpath(s)))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)
        return True, r
    return False, 0
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file

    Returns None if the two sides are already identical, otherwise the
    merge result code (0 = success, non-zero = unresolved/failed).
    """
    def temp(prefix, ctx):
        # write ctx's data to a uniquely-named temp file and return its path
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name
    if not fco.cmp(fcd): # files identical?
        return None
    ui = repo.ui
    fd = fcd.path()
    binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
               (tool, fd, binary, symlink))
    if tool in internals:
        func = internals[tool]
        trymerge = func.trymerge
        onfailure = func.onfailure
    else:
        func = _xmerge
        trymerge = True
        onfailure = _("merging %s failed!\n")
    toolconf = tool, toolpath, binary, symlink
    # tools that never attempt a content merge (prompt/local/other/fail)
    # run directly without temp files or a backup
    if not trymerge:
        return func(repo, mynode, orig, fcd, fco, fca, toolconf)
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    back = a + ".orig"   # pristine copy, restored/compared against later
    util.copyfile(a, back)
    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)
    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
    needcheck, r = func(repo, mynode, orig, fcd, fco, fca, toolconf,
                        (a, b, c, back))
    if not needcheck:
        if r:
            if onfailure:
                ui.warn(onfailure % fd)
        else:
            os.unlink(back)
        os.unlink(b)
        os.unlink(c)
        return r
    # post-merge verification, driven by the tool's check* settings
    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1
    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
                           (_("&Yes"), _("&No")), 1):
            r = 1
    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in _toollist(ui, tool, "check")):
        # an unchanged output often means the tool was cancelled
        if filecmp.cmp(a, back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?") % fd,
                               (_("&Yes"), _("&No")), 1):
                r = 1
    if _toolbool(ui, tool, "fixeol"):
        _matcheol(a, back)
    if r:
        if onfailure:
            ui.warn(onfailure % fd)
    else:
        os.unlink(back)
    os.unlink(b)
    os.unlink(c)
    return r
# tell hggettext to extract docstrings from these functions:
# NOTE(review): presumably the internal merge tools' docstrings are shown
# to users and therefore need translation -- confirm against hggettext.
i18nfunctions = internals.values()
|
edx/course-discovery | refs/heads/master | course_discovery/apps/course_metadata/migrations/0171_historicalcourserun.py | 1 | # Generated by Django 1.11.15 on 2019-04-24 16:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import djchoices.choices
import simple_history.models
import uuid
class Migration(migrations.Migration):
    """Create the HistoricalCourseRun snapshot table.

    Auto-generated by Django (makemigrations) for django-simple-history:
    the table mirrors CourseRun's fields and adds the history bookkeeping
    columns (history_id, history_date, history_change_reason, history_type,
    history_user).  Foreign keys use db_constraint=False and DO_NOTHING so
    snapshots survive deletion of the referenced rows.  Do not hand-edit
    the field definitions.
    """

    dependencies = [
        ('ietf_language_tags', '0001_squashed_0005_fix_language_tag_names_again'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course_metadata', '0170_courserun_go_live_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='HistoricalCourseRun',
            fields=[
                # 'id' is intentionally a plain indexed IntegerField (not a
                # primary key): many history rows share one CourseRun id.
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('draft', models.BooleanField(default=False, help_text='Is this a draft version?')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, verbose_name='UUID')),
                ('key', models.CharField(max_length=255)),
                ('status', models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})])),
                ('title_override', models.CharField(blank=True, default=None, help_text="Title specific for this run of a course. Leave this value blank to default to the parent course's title.", max_length=255, null=True)),
                ('start', models.DateTimeField(blank=True, db_index=True, null=True)),
                ('end', models.DateTimeField(blank=True, db_index=True, null=True)),
                ('go_live_date', models.DateTimeField(blank=True, null=True)),
                ('enrollment_start', models.DateTimeField(blank=True, null=True)),
                ('enrollment_end', models.DateTimeField(blank=True, db_index=True, null=True)),
                ('announcement', models.DateTimeField(blank=True, null=True)),
                ('short_description_override', models.TextField(blank=True, default=None, help_text="Short description specific for this run of a course. Leave this value blank to default to the parent course's short_description attribute.", null=True)),
                ('full_description_override', models.TextField(blank=True, default=None, help_text="Full description specific for this run of a course. Leave this value blank to default to the parent course's full_description attribute.", null=True)),
                ('min_effort', models.PositiveSmallIntegerField(blank=True, help_text='Estimated minimum number of hours per week needed to complete a course run.', null=True)),
                ('max_effort', models.PositiveSmallIntegerField(blank=True, help_text='Estimated maximum number of hours per week needed to complete a course run.', null=True)),
                ('weeks_to_complete', models.PositiveSmallIntegerField(blank=True, help_text='Estimated number of weeks needed to complete this course run.', null=True)),
                ('pacing_type', models.CharField(blank=True, choices=[('instructor_paced', 'Instructor-paced'), ('self_paced', 'Self-paced')], db_index=True, max_length=255, null=True, validators=[djchoices.choices.ChoicesValidator({'instructor_paced': 'Instructor-paced', 'self_paced': 'Self-paced'})])),
                ('enrollment_count', models.IntegerField(blank=True, default=0, help_text='Total number of learners who have enrolled in this course run', null=True)),
                ('recent_enrollment_count', models.IntegerField(blank=True, default=0, help_text='Total number of learners who have enrolled in this course run in the last 6 months', null=True)),
                ('card_image_url', models.URLField(blank=True, null=True)),
                ('hidden', models.BooleanField(default=False)),
                ('mobile_available', models.BooleanField(default=False)),
                ('course_overridden', models.BooleanField(default=False, help_text='Indicates whether the course relation has been manually overridden.')),
                ('reporting_type', models.CharField(choices=[('mooc', 'mooc'), ('spoc', 'spoc'), ('test', 'test'), ('demo', 'demo'), ('other', 'other')], default='mooc', max_length=255)),
                ('eligible_for_financial_aid', models.BooleanField(default=True)),
                ('license', models.CharField(blank=True, db_index=True, max_length=255)),
                ('outcome_override', models.TextField(blank=True, default=None, help_text="'What You Will Learn' description for this particular course run. Leave this value blank to default to the parent course's Outcome attribute.", null=True)),
                ('has_ofac_restrictions', models.BooleanField(default=False, verbose_name='Add OFAC restriction text to the FAQ section of the Marketing site')),
                # History bookkeeping columns added by django-simple-history.
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('course', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='course_metadata.Course')),
                ('draft_version', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='course_metadata.CourseRun')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('language', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='ietf_language_tags.LanguageTag')),
                ('syllabus', models.ForeignKey(blank=True, db_constraint=False, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='course_metadata.SyllabusItem')),
                ('video', models.ForeignKey(blank=True, db_constraint=False, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='course_metadata.Video')),
            ],
            options={
                'verbose_name': 'historical course run',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
    ]
|
noroot/zulip | refs/heads/master | zerver/test_messages.py | 114 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db.models import Q
from django.conf import settings
from sqlalchemy.sql import (
and_, select, column, compiler
)
from django.test import TestCase
from zerver.lib import bugdown
from zerver.decorator import JsonableError
from zerver.lib.test_runner import slow
from zerver.views.messages import (
exclude_muting_conditions, get_sqlalchemy_connection,
get_old_messages_backend, ok_to_include_history,
NarrowBuilder,
)
from zilencer.models import Deployment
from zerver.lib.test_helpers import (
AuthedTestCase, POSTRequestMock,
get_user_messages,
message_ids, message_stream_count,
most_recent_message,
queries_captured,
)
from zerver.models import (
MAX_MESSAGE_LENGTH, MAX_SUBJECT_LENGTH,
Client, Message, Realm, Recipient, Stream, Subscription, UserMessage, UserProfile,
get_display_recipient, get_recipient, get_realm, get_stream, get_user_profile_by_email,
)
from zerver.lib.actions import (
check_message, check_send_message,
create_stream_if_needed,
do_add_subscription, do_create_user,
)
import datetime
import time
import re
import ujson
def get_sqlalchemy_query_params(query):
    """Compile `query` against the active connection's dialect and return
    its bound-parameter mapping."""
    sql_compiler = compiler.SQLCompiler(get_sqlalchemy_connection().dialect,
                                        query)
    sql_compiler.compile()
    return sql_compiler.params
def fix_ws(s):
    """Normalize whitespace for loose string comparison.

    Coerces `s` to str, collapses every run of whitespace (spaces, tabs,
    newlines) to a single space, and strips leading/trailing whitespace.
    Used by the tests below to compare generated SQL text.
    """
    # r'\s+' (raw string) avoids the invalid '\s' escape sequence that the
    # original non-raw literal relied on (a SyntaxWarning on modern Python).
    return re.sub(r'\s+', ' ', str(s)).strip()
def get_recipient_id_for_stream_name(realm, stream_name):
    """Return the id of the Recipient row backing the named stream."""
    stream_id = get_stream(stream_name, realm).id
    return get_recipient(Recipient.STREAM, stream_id).id
def mute_stream(realm, user_profile, stream_name):
    """Mute `stream_name` for `user_profile` by clearing in_home_view on
    the user's subscription."""
    target_stream = Stream.objects.get(realm=realm, name=stream_name)
    target_recipient = Recipient.objects.get(type_id=target_stream.id,
                                             type=Recipient.STREAM)
    sub = Subscription.objects.get(recipient=target_recipient,
                                   user_profile=user_profile)
    sub.in_home_view = False
    sub.save()
class NarrowBuilderTest(AuthedTestCase):
    def test_add_term(self):
        """Each narrow operator should compile to the expected WHERE clause."""
        realm = get_realm('zulip.com')
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        builder = NarrowBuilder(user_profile, column('id'))
        raw_query = select([column("id")], None, "zerver_message")

        def check(term, where_clause):
            query = builder.add_term(raw_query, term)
            self.assertTrue(where_clause in str(query))

        # Table of (narrow term, expected SQL fragment) pairs.
        cases = [
            (dict(operator='stream', operand='Scotland'),
             'WHERE recipient_id = :recipient_id_1'),
            (dict(operator='is', operand='private'),
             'WHERE type = :type_1 OR type = :type_2'),
        ]
        for operand in ['starred', 'mentioned', 'alerted']:
            cases.append((dict(operator='is', operand=operand),
                          'WHERE (flags & :flags_1) != :param_1'))
        cases.extend([
            (dict(operator='topic', operand='lunch'),
             'WHERE upper(subject) = upper(:param_1)'),
            (dict(operator='sender', operand='othello@zulip.com'),
             'WHERE sender_id = :param_1'),
            (dict(operator='pm-with', operand='othello@zulip.com'),
             'WHERE sender_id = :sender_id_1 AND recipient_id = :recipient_id_1 OR sender_id = :sender_id_2 AND recipient_id = :recipient_id_2'),
            (dict(operator='id', operand=555),
             'WHERE id = :param_1'),
            (dict(operator='search', operand='"french fries"'),
             'WHERE (lower(content) LIKE lower(:content_1) OR lower(subject) LIKE lower(:subject_1)) AND (search_tsvector @@ plainto_tsquery(:param_2, :param_3))'),
            (dict(operator='has', operand='attachment'),
             'WHERE has_attachment'),
            (dict(operator='has', operand='image'),
             'WHERE has_image'),
            (dict(operator='has', operand='link'),
             'WHERE has_link'),
        ])
        for term, where_clause in cases:
            check(term, where_clause)

        # Once a stream is muted, the in:home narrow must exclude it.
        mute_stream(realm, user_profile, 'Verona')
        check(dict(operator='in', operand='home'),
              'WHERE recipient_id NOT IN (:recipient_id_1)')
class IncludeHistoryTest(AuthedTestCase):
    def test_ok_to_include_history(self):
        """Spot-check which narrows are allowed to pull in historical
        messages (messages sent before the user subscribed)."""
        realm = get_realm('zulip.com')
        create_stream_if_needed(realm, 'public_stream')

        def allowed(narrow):
            return ok_to_include_history(narrow, realm)

        # Negated stream searches should not include history.
        self.assertFalse(allowed(
            [dict(operator='stream', operand='public_stream', negated=True)]))

        # Definitely forbid seeing history on private streams.
        self.assertFalse(allowed(
            [dict(operator='stream', operand='private_stream')]))

        # History doesn't apply to PMs.
        self.assertFalse(allowed([dict(operator='is', operand='private')]))

        # If we are looking for something like starred messages, there is
        # no point in searching historical messages.
        self.assertFalse(allowed(
            [dict(operator='stream', operand='public_stream'),
             dict(operator='is', operand='starred')]))

        # simple True case
        self.assertTrue(allowed(
            [dict(operator='stream', operand='public_stream')]))

        # Topic/search refinements on a public stream still allow history.
        self.assertTrue(allowed(
            [dict(operator='stream', operand='public_stream'),
             dict(operator='topic', operand='whatever'),
             dict(operator='search', operand='needle in haystack')]))
class TestCrossRealmPMs(AuthedTestCase):
    """Check the rules for private messages across realm boundaries.

    Only designated cross-realm bots and members of the special zulip.com
    realm may exchange PMs with users of other realms.

    Improvements over the previous version: realm/deployment boilerplate is
    factored into _add_realms(), the deprecated assertEquals alias is
    replaced with assertEqual (consistent with the rest of this file), and
    test_three_different_realms now really puts its third user on the third
    realm (it was previously created on 2.example.com by mistake).
    """

    def setUp(self):
        # Designate the test bot as a cross-realm bot for these tests.
        settings.CROSS_REALM_BOT_EMAILS.add('test-og-bot@zulip.com')
        dep = Deployment()
        dep.base_api_url = "https://zulip.com/api/"
        dep.base_site_url = "https://zulip.com/"
        # We need to save the object before we can access
        # the many-to-many relationship 'realms'
        dep.save()
        dep.realms = [get_realm("zulip.com")]
        dep.save()

    def create_user(self, email):
        """Register a user for `email` and return the new UserProfile."""
        username, domain = email.split('@')
        self.register(username, 'test', domain=domain)
        return get_user_profile_by_email(email)

    def _add_realms(self, *domains):
        """Create one Realm per domain and attach them all to the test
        deployment."""
        deployment = Deployment.objects.filter()[0]
        for domain in domains:
            deployment.realms.add(Realm.objects.create(domain=domain))

    def _assert_single_message_from(self, receiver, sender):
        """Assert `receiver` has exactly one message and `sender` sent it."""
        messages = get_user_messages(receiver)
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0].sender.pk, sender.pk)

    def test_same_realm(self):
        """Users on the same realm can PM each other"""
        self._add_realms('1.example.com')
        user1 = self.create_user('user1@1.example.com')
        user2 = self.create_user('user2@1.example.com')
        self.send_message(user1.email, user2.email, Recipient.PERSONAL)
        self._assert_single_message_from(user2, user1)

    def test_different_realms(self):
        """Users on the different realms can not PM each other"""
        self._add_realms('1.example.com', '2.example.com')
        user1_email = 'user1@1.example.com'
        self.create_user(user1_email)
        user2_email = 'user2@2.example.com'
        self.create_user(user2_email)
        with self.assertRaisesRegexp(JsonableError,
                                     'You can\'t send private messages outside of your organization.'):
            self.send_message(user1_email, user2_email, Recipient.PERSONAL)

    def test_three_different_realms(self):
        """Users on three different realms can not PM each other"""
        self._add_realms('1.example.com', '2.example.com', '3.example.com')
        user1_email = 'user1@1.example.com'
        self.create_user(user1_email)
        user2_email = 'user2@2.example.com'
        self.create_user(user2_email)
        user3_email = 'user3@3.example.com'
        self.create_user(user3_email)
        with self.assertRaisesRegexp(JsonableError,
                                     'You can\'t send private messages outside of your organization.'):
            self.send_message(user1_email, [user2_email, user3_email],
                              Recipient.PERSONAL)

    def test_from_zulip_realm(self):
        """OG Users in the zulip.com realm can PM any realm"""
        self._add_realms('1.example.com')
        user1 = self.create_user('test-og-bot@zulip.com')
        user2 = self.create_user('user2@1.example.com')
        self.send_message(user1.email, user2.email, Recipient.PERSONAL)
        self._assert_single_message_from(user2, user1)

    def test_to_zulip_realm(self):
        """All users can PM users in the zulip.com realm"""
        self._add_realms('1.example.com')
        user1 = self.create_user('user1@1.example.com')
        user2 = self.create_user('test-og-bot@zulip.com')
        self.send_message(user1.email, user2.email, Recipient.PERSONAL)
        self._assert_single_message_from(user2, user1)

    def test_zulip_realm_can_not_join_realms(self):
        """Adding a zulip.com user to a PM will not let you cross realms"""
        self._add_realms('1.example.com', '2.example.com')
        user1_email = 'user1@1.example.com'
        self.create_user(user1_email)
        user2_email = 'user2@2.example.com'
        self.create_user(user2_email)
        user3_email = 'test-og-bot@zulip.com'
        self.create_user(user3_email)
        with self.assertRaisesRegexp(JsonableError,
                                     'You can\'t send private messages outside of your organization.'):
            self.send_message(user1_email, [user2_email, user3_email],
                              Recipient.PERSONAL)
class PersonalMessagesTest(AuthedTestCase):

    def test_auto_subbed_to_personals(self):
        """
        Newly created users are auto-subbed to the ability to receive
        personals.
        """
        self.register("test", "test")
        user_profile = get_user_profile_by_email('test@zulip.com')
        before_count = message_stream_count(user_profile)
        self.send_message("test@zulip.com", "test@zulip.com", Recipient.PERSONAL)
        after_count = message_stream_count(user_profile)
        self.assertEqual(after_count, before_count + 1)
        recipient = Recipient.objects.get(type_id=user_profile.id,
                                          type=Recipient.PERSONAL)
        self.assertEqual(most_recent_message(user_profile).recipient, recipient)

    @slow(0.36, "checks several profiles")
    def test_personal_to_self(self):
        """
        If you send a personal to yourself, only you see it.
        """
        old_user_profiles = list(UserProfile.objects.all())
        self.register("test1", "test1")

        old_messages = [message_stream_count(profile)
                        for profile in old_user_profiles]
        self.send_message("test1@zulip.com", "test1@zulip.com", Recipient.PERSONAL)
        new_messages = [message_stream_count(profile)
                        for profile in old_user_profiles]
        # Nobody who existed beforehand should have received anything.
        self.assertEqual(old_messages, new_messages)

        user_profile = get_user_profile_by_email("test1@zulip.com")
        recipient = Recipient.objects.get(type_id=user_profile.id,
                                          type=Recipient.PERSONAL)
        self.assertEqual(most_recent_message(user_profile).recipient, recipient)

    def assert_personal(self, sender_email, receiver_email, content="test content"):
        """
        Send a private message from `sender_email` to `receiver_email` and check
        that only those two parties actually received the message.
        """
        sender = get_user_profile_by_email(sender_email)
        receiver = get_user_profile_by_email(receiver_email)
        sender_count = message_stream_count(sender)
        receiver_count = message_stream_count(receiver)

        bystanders = UserProfile.objects.filter(~Q(email=sender_email) &
                                                ~Q(email=receiver_email))
        old_other_messages = [message_stream_count(profile)
                              for profile in bystanders]

        self.send_message(sender_email, receiver_email, Recipient.PERSONAL, content)

        # Users outside the conversation don't get the message.
        new_other_messages = [message_stream_count(profile)
                              for profile in bystanders]
        self.assertEqual(old_other_messages, new_other_messages)

        # The personal message is in the streams of both the sender and receiver.
        self.assertEqual(message_stream_count(sender), sender_count + 1)
        self.assertEqual(message_stream_count(receiver), receiver_count + 1)

        recipient = Recipient.objects.get(type_id=receiver.id,
                                          type=Recipient.PERSONAL)
        self.assertEqual(most_recent_message(sender).recipient, recipient)
        self.assertEqual(most_recent_message(receiver).recipient, recipient)

    @slow(0.28, "assert_personal checks several profiles")
    def test_personal(self):
        """
        If you send a personal, only you and the recipient see it.
        """
        self.login("hamlet@zulip.com")
        self.assert_personal("hamlet@zulip.com", "othello@zulip.com")

    @slow(0.28, "assert_personal checks several profiles")
    def test_non_ascii_personal(self):
        """
        Sending a PM containing non-ASCII characters succeeds.
        """
        self.login("hamlet@zulip.com")
        self.assert_personal("hamlet@zulip.com", "othello@zulip.com", u"hümbüǵ")
class StreamMessagesTest(AuthedTestCase):
    """Tests of sending messages to streams.

    Improvement over the previous version: the bare `assert(...)` in
    test_message_mentions is now self.assertTrue (a bare assert is stripped
    under `python -O` and gives a poor failure message).
    """

    def assert_stream_message(self, stream_name, subject="test subject",
                              content="test content"):
        """
        Check that messages sent to a stream reach all subscribers to that stream.
        """
        subscribers = self.users_subscribed_to_stream(stream_name, "zulip.com")
        old_subscriber_messages = [message_stream_count(sub)
                                   for sub in subscribers]

        non_subscribers = [profile for profile in UserProfile.objects.all()
                           if profile not in subscribers]
        old_non_subscriber_messages = [message_stream_count(profile)
                                       for profile in non_subscribers]

        a_subscriber_email = subscribers[0].email
        self.login(a_subscriber_email)
        self.send_message(a_subscriber_email, stream_name, Recipient.STREAM,
                          subject, content)

        # Did all of the subscribers get the message?
        new_subscriber_messages = [message_stream_count(sub)
                                   for sub in subscribers]

        # Did non-subscribers not get the message?
        new_non_subscriber_messages = [message_stream_count(profile)
                                       for profile in non_subscribers]

        self.assertEqual(old_non_subscriber_messages, new_non_subscriber_messages)
        self.assertEqual(new_subscriber_messages,
                         [count + 1 for count in old_subscriber_messages])

    def test_not_too_many_queries(self):
        """Sending a stream message should stay within the DB query budget."""
        recipient_list = ['hamlet@zulip.com', 'iago@zulip.com', 'cordelia@zulip.com', 'othello@zulip.com']
        for email in recipient_list:
            self.subscribe_to_stream(email, "Denmark")

        sender_email = 'hamlet@zulip.com'
        sender = get_user_profile_by_email(sender_email)
        message_type_name = "stream"
        (sending_client, _) = Client.objects.get_or_create(name="test suite")
        stream = 'Denmark'
        subject = 'foo'
        content = 'whatever'
        realm = sender.realm

        def send_message():
            check_send_message(sender, sending_client, message_type_name, [stream],
                               subject, content, forwarder_user_profile=sender, realm=realm)

        send_message() # prime the caches
        with queries_captured() as queries:
            send_message()

        self.assert_length(queries, 7)

    def test_message_mentions(self):
        """An @**mention** sets the mentioned flag on the target's UserMessage."""
        user_profile = get_user_profile_by_email("iago@zulip.com")
        self.subscribe_to_stream(user_profile.email, "Denmark")
        self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
                          content="test @**Iago** rules")
        message = most_recent_message(user_profile)
        # assertTrue instead of a bare assert: survives `python -O` and
        # reports a proper test failure rather than a plain AssertionError.
        self.assertTrue(UserMessage.objects.get(user_profile=user_profile,
                                                message=message).flags.mentioned.is_set)

    def test_stream_message_mirroring(self):
        """Forged (mirrored) sends are allowed only for API super users."""
        from zerver.lib.actions import do_change_is_admin
        user_profile = get_user_profile_by_email("iago@zulip.com")

        payload = {"type": "stream",
                   "to": "Verona",
                   "sender": "cordelia@zulip.com",
                   "client": "test suite",
                   "subject": "announcement",
                   "content": "Everyone knows Iago rules",
                   "forged": "true",
                   "email": user_profile.email,
                   "api-key": user_profile.api_key}

        do_change_is_admin(user_profile, True, 'api_super_user')
        result = self.client.post("/api/v1/send_message", payload)
        self.assert_json_success(result)

        do_change_is_admin(user_profile, False, 'api_super_user')
        result = self.client.post("/api/v1/send_message", payload)
        self.assert_json_error(result, "User not authorized for this query")

    @slow(0.28, 'checks all users')
    def test_message_to_stream(self):
        """
        If you send a message to a stream, everyone subscribed to the stream
        receives the messages.
        """
        self.assert_stream_message("Scotland")

    @slow(0.37, 'checks all users')
    def test_non_ascii_stream_message(self):
        """
        Sending a stream message containing non-ASCII characters in the stream
        name, subject, or message body succeeds.
        """
        self.login("hamlet@zulip.com")

        # Subscribe everyone to a stream with non-ASCII characters.
        non_ascii_stream_name = u"hümbüǵ"
        realm = Realm.objects.get(domain="zulip.com")
        stream, _ = create_stream_if_needed(realm, non_ascii_stream_name)
        for user_profile in UserProfile.objects.filter(realm=realm):
            do_add_subscription(user_profile, stream, no_log=True)

        self.assert_stream_message(non_ascii_stream_name, subject=u"hümbüǵ",
                                   content=u"hümbüǵ")
class MessageDictTest(AuthedTestCase):
    @slow(1.6, 'builds lots of messages')
    def test_bulk_message_fetching(self):
        """Bulk-fetching raw message rows should be fast and query-frugal."""
        realm = Realm.objects.get(domain="zulip.com")
        sender = get_user_profile_by_email('othello@zulip.com')
        receiver = get_user_profile_by_email('hamlet@zulip.com')
        pm_recipient = Recipient.objects.get(type_id=receiver.id,
                                             type=Recipient.PERSONAL)
        stream, _ = create_stream_if_needed(realm, 'devel')
        stream_recipient = Recipient.objects.get(type_id=stream.id,
                                                 type=Recipient.STREAM)
        sending_client, _ = Client.objects.get_or_create(name="test suite")

        # Create 300 PMs and 300 stream messages.
        for i in range(300):
            for recipient in (pm_recipient, stream_recipient):
                Message(
                    sender=sender,
                    recipient=recipient,
                    subject='whatever',
                    content='whatever %d' % i,
                    pub_date=datetime.datetime.now(),
                    sending_client=sending_client,
                    last_edit_time=datetime.datetime.now(),
                    edit_history='[]'
                ).save()

        ids = [row['id'] for row in Message.objects.all().values('id')]
        num_ids = len(ids)
        self.assertTrue(num_ids >= 600)

        start = time.time()
        with queries_captured() as queries:
            rows = list(Message.get_raw_db_rows(ids))
            for row in rows:
                Message.build_dict_from_raw_db_row(row, False)
        elapsed = time.time() - start
        # Make sure we don't take longer than 1ms per message to extract messages.
        self.assertTrue(elapsed < 0.001 * num_ids)
        self.assert_length(queries, 7)
        self.assertEqual(len(rows), num_ids)

    def test_applying_markdown(self):
        """Rendering via the raw-row path should populate rendered_content."""
        sender = get_user_profile_by_email('othello@zulip.com')
        receiver = get_user_profile_by_email('hamlet@zulip.com')
        recipient = Recipient.objects.get(type_id=receiver.id,
                                          type=Recipient.PERSONAL)
        sending_client, _ = Client.objects.get_or_create(name="test suite")
        message = Message(
            sender=sender,
            recipient=recipient,
            subject='whatever',
            content='hello **world**',
            pub_date=datetime.datetime.now(),
            sending_client=sending_client,
            last_edit_time=datetime.datetime.now(),
            edit_history='[]'
        )
        message.save()

        # An important part of this test is to get the message through this exact code path,
        # because there is an ugly hack we need to cover.  So don't just say "row = message".
        row = Message.get_raw_db_rows([message.id])[0]
        msg_dict = Message.build_dict_from_raw_db_row(row, apply_markdown=True)
        expected_content = '<p>hello <strong>world</strong></p>'
        self.assertEqual(msg_dict['content'], expected_content)

        message = Message.objects.get(id=message.id)
        self.assertEqual(message.rendered_content, expected_content)
        self.assertEqual(message.rendered_content_version, bugdown.version)
class MessagePOSTTest(AuthedTestCase):
def test_message_to_self(self):
"""
Sending a message to a stream to which you are subscribed is
successful.
"""
self.login("hamlet@zulip.com")
result = self.client.post("/json/send_message", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_success(result)
def test_api_message_to_self(self):
"""
Same as above, but for the API view
"""
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
result = self.client.post("/api/v1/send_message", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"email": email,
"api-key": api_key})
self.assert_json_success(result)
def test_api_message_with_default_to(self):
"""
Sending messages without a to field should be sent to the default
stream for the user_profile.
"""
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.default_sending_stream = get_stream('Verona', user_profile.realm)
user_profile.save()
result = self.client.post("/api/v1/send_message", {"type": "stream",
"client": "test suite",
"content": "Test message no to",
"subject": "Test subject",
"email": email,
"api-key": api_key})
self.assert_json_success(result)
sent_message = Message.objects.all().order_by('-id')[0]
self.assertEqual(sent_message.content, "Test message no to")
def test_message_to_nonexistent_stream(self):
"""
Sending a message to a nonexistent stream fails.
"""
self.login("hamlet@zulip.com")
self.assertFalse(Stream.objects.filter(name="nonexistent_stream"))
result = self.client.post("/json/send_message", {"type": "stream",
"to": "nonexistent_stream",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_error(result, "Stream does not exist")
def test_personal_message(self):
"""
Sending a personal message to a valid username is successful.
"""
self.login("hamlet@zulip.com")
result = self.client.post("/json/send_message", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_success(result)
def test_personal_message_to_nonexistent_user(self):
"""
Sending a personal message to an invalid email returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client.post("/json/send_message", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "nonexistent"})
self.assert_json_error(result, "Invalid email 'nonexistent'")
def test_invalid_type(self):
"""
Sending a message of unknown type returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client.post("/json/send_message", {"type": "invalid type",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Invalid message type")
def test_empty_message(self):
"""
Sending a message that is empty or only whitespace should fail
"""
self.login("hamlet@zulip.com")
result = self.client.post("/json/send_message", {"type": "private",
"content": " ",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Message must not be empty")
def test_mirrored_huddle(self):
"""
Sending a mirrored huddle message works
"""
self.login("starnine@mit.edu")
result = self.client.post("/json/send_message", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["starnine@mit.edu",
"espuser@mit.edu"])})
self.assert_json_success(result)
def test_mirrored_personal(self):
"""
Sending a mirrored personal message works
"""
self.login("starnine@mit.edu")
result = self.client.post("/json/send_message", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_success(result)
def test_duplicated_mirrored_huddle(self):
"""
Sending two mirrored huddles in the row return the same ID
"""
msg = {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["sipbcert@mit.edu",
"starnine@mit.edu"])}
self.login("starnine@mit.edu")
result1 = self.client.post("/json/send_message", msg)
self.login("sipbcert@mit.edu")
result2 = self.client.post("/json/send_message", msg)
self.assertEqual(ujson.loads(result1.content)['id'],
ujson.loads(result2.content)['id'])
def test_long_message(self):
    """
    Sending a message longer than the maximum message length succeeds but is
    truncated: the stored content keeps MAX_MESSAGE_LENGTH - 3 characters
    plus a "..." marker.
    """
    self.login("hamlet@zulip.com")
    long_message = "A" * (MAX_MESSAGE_LENGTH + 1)
    post_data = {"type": "stream", "to": "Verona", "client": "test suite",
                 "content": long_message, "subject": "Test subject"}
    result = self.client.post("/json/send_message", post_data)
    self.assert_json_success(result)
    # The most recently inserted message is the one we just sent.
    sent_message = Message.objects.all().order_by('-id')[0]
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(sent_message.content,
                     "A" * (MAX_MESSAGE_LENGTH - 3) + "...")
def test_long_topic(self):
    """
    Sending a message with a topic longer than the maximum topic length
    succeeds, but the topic is truncated to MAX_SUBJECT_LENGTH - 3
    characters plus a "..." marker.
    """
    self.login("hamlet@zulip.com")
    long_topic = "A" * (MAX_SUBJECT_LENGTH + 1)
    post_data = {"type": "stream", "to": "Verona", "client": "test suite",
                 "content": "test content", "subject": long_topic}
    result = self.client.post("/json/send_message", post_data)
    self.assert_json_success(result)
    # The most recently inserted message is the one we just sent.
    sent_message = Message.objects.all().order_by('-id')[0]
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(sent_message.subject,
                     "A" * (MAX_SUBJECT_LENGTH - 3) + "...")
class GetOldMessagesTest(AuthedTestCase):
    """Tests for /json/get_old_messages: well-formed responses, narrow
    operators, parameter validation, muting, and the exact SQL the
    backend generates for each narrow."""

    def post_with_params(self, modified_params):
        # Start from a minimal valid request and overlay the caller's params;
        # returns the decoded JSON payload of a successful response.
        post_params = {"anchor": 1, "num_before": 1, "num_after": 1}
        post_params.update(modified_params)
        result = self.client.post("/json/get_old_messages", dict(post_params))
        self.assert_json_success(result)
        return ujson.loads(result.content)

    def check_well_formed_messages_response(self, result):
        # Every returned message must carry the full set of client-facing fields.
        self.assertIn("messages", result)
        self.assertIsInstance(result["messages"], list)
        for message in result["messages"]:
            for field in ("content", "content_type", "display_recipient",
                          "avatar_url", "recipient_id", "sender_full_name",
                          "sender_short_name", "timestamp"):
                self.assertIn(field, message)
            # TODO: deprecate soon in favor of avatar_url
            self.assertIn('gravatar_hash', message)

    def get_query_ids(self):
        # Collect the database ids needed to instantiate the SQL templates
        # used by the query-snapshot tests below.
        hamlet_user = get_user_profile_by_email('hamlet@zulip.com')
        othello_user = get_user_profile_by_email('othello@zulip.com')
        query_ids = {}
        scotland_stream = get_stream('Scotland', hamlet_user.realm)
        query_ids['scotland_recipient'] = get_recipient(Recipient.STREAM, scotland_stream.id).id
        query_ids['hamlet_id'] = hamlet_user.id
        query_ids['othello_id'] = othello_user.id
        query_ids['hamlet_recipient'] = get_recipient(Recipient.PERSONAL, hamlet_user.id).id
        query_ids['othello_recipient'] = get_recipient(Recipient.PERSONAL, othello_user.id).id
        return query_ids

    def test_successful_get_old_messages(self):
        """
        A call to /json/get_old_messages with valid parameters returns a list of
        messages.
        """
        self.login("hamlet@zulip.com")
        result = self.post_with_params(dict())
        self.check_well_formed_messages_response(result)
        # We have to support the legacy tuple style while there are old
        # clients around, which might include third party home-grown bots.
        narrow = [['pm-with', 'othello@zulip.com']]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        # The modern dict-style narrow must work as well.
        narrow = [dict(operator='pm-with', operand='othello@zulip.com')]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)

    def test_get_old_messages_with_narrow_pm_with(self):
        """
        A request for old messages with a narrow by pm-with only returns
        conversations with that user.
        """
        me = 'hamlet@zulip.com'

        def dr_emails(dr):
            # Canonical comma-joined participant list (always includes me).
            return ','.join(sorted(set([r['email'] for r in dr] + [me])))

        personals = [m for m in get_user_messages(get_user_profile_by_email(me))
                     if m.recipient.type == Recipient.PERSONAL
                     or m.recipient.type == Recipient.HUDDLE]
        if not personals:
            # FIXME: This is bad. We should use test data that is guaranteed
            # to contain some personals for every user. See #617.
            return
        emails = dr_emails(get_display_recipient(personals[0].recipient))
        self.login(me)
        narrow = [dict(operator='pm-with', operand=emails)]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        for message in result["messages"]:
            self.assertEqual(dr_emails(message['display_recipient']), emails)

    def test_get_old_messages_with_narrow_stream(self):
        """
        A request for old messages with a narrow by stream only returns
        messages for that stream.
        """
        self.login("hamlet@zulip.com")
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        realm = Realm.objects.get(domain="zulip.com")
        stream, _ = create_stream_if_needed(realm, "Scotland")
        do_add_subscription(get_user_profile_by_email("hamlet@zulip.com"),
                            stream, no_log=True)
        self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM)
        messages = get_user_messages(get_user_profile_by_email("hamlet@zulip.com"))
        stream_messages = filter(lambda msg: msg.recipient.type == Recipient.STREAM,
                                 messages)
        stream_name = get_display_recipient(stream_messages[0].recipient)
        stream_id = stream_messages[0].recipient.id
        narrow = [dict(operator='stream', operand=stream_name)]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        for message in result["messages"]:
            self.assertEqual(message["type"], "stream")
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_old_messages_with_narrow_stream_mit_unicode_regex(self):
        """
        A request for old messages for a user in the mit.edu realm with unicode
        stream name should be correctly escaped in the database query.
        """
        self.login("starnine@mit.edu")
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        realm = Realm.objects.get(domain="mit.edu")
        lambda_stream, _ = create_stream_if_needed(realm, u"\u03bb-stream")
        do_add_subscription(get_user_profile_by_email("starnine@mit.edu"),
                            lambda_stream, no_log=True)
        # A second stream whose name is a regex-metacharacter extension of the
        # first ("." must not act as a wildcard in the generated query).
        lambda_stream_d, _ = create_stream_if_needed(realm, u"\u03bb-stream.d")
        do_add_subscription(get_user_profile_by_email("starnine@mit.edu"),
                            lambda_stream_d, no_log=True)
        self.send_message("starnine@mit.edu", u"\u03bb-stream", Recipient.STREAM)
        self.send_message("starnine@mit.edu", u"\u03bb-stream.d", Recipient.STREAM)
        narrow = [dict(operator='stream', operand=u'\u03bb-stream')]
        result = self.post_with_params(dict(num_after=2, narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        messages = get_user_messages(get_user_profile_by_email("starnine@mit.edu"))
        stream_messages = filter(lambda msg: msg.recipient.type == Recipient.STREAM,
                                 messages)
        # NOTE(review): MIT-realm stream narrows match name prefixes (".d"
        # relatives), hence both messages are expected — confirm against the
        # backend's narrow semantics.
        self.assertEqual(len(result["messages"]), 2)
        for i, message in enumerate(result["messages"]):
            self.assertEqual(message["type"], "stream")
            stream_id = stream_messages[i].recipient.id
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_old_messages_with_narrow_topic_mit_unicode_regex(self):
        """
        A request for old messages for a user in the mit.edu realm with unicode
        topic name should be correctly escaped in the database query.
        """
        self.login("starnine@mit.edu")
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        realm = Realm.objects.get(domain="mit.edu")
        stream, _ = create_stream_if_needed(realm, "Scotland")
        do_add_subscription(get_user_profile_by_email("starnine@mit.edu"),
                            stream, no_log=True)
        self.send_message("starnine@mit.edu", "Scotland", Recipient.STREAM,
                          subject=u"\u03bb-topic")
        self.send_message("starnine@mit.edu", "Scotland", Recipient.STREAM,
                          subject=u"\u03bb-topic.d")
        narrow = [dict(operator='topic', operand=u'\u03bb-topic')]
        result = self.post_with_params(dict(num_after=2, narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        messages = get_user_messages(get_user_profile_by_email("starnine@mit.edu"))
        stream_messages = filter(lambda msg: msg.recipient.type == Recipient.STREAM,
                                 messages)
        # Both the topic and its ".d" relative are expected (MIT semantics).
        self.assertEqual(len(result["messages"]), 2)
        for i, message in enumerate(result["messages"]):
            self.assertEqual(message["type"], "stream")
            stream_id = stream_messages[i].recipient.id
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_old_messages_with_narrow_sender(self):
        """
        A request for old messages with a narrow by sender only returns
        messages sent by that person.
        """
        self.login("hamlet@zulip.com")
        # We need to send a message here to ensure that we actually
        # have a stream message in this narrow view.
        self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM)
        self.send_message("othello@zulip.com", "Scotland", Recipient.STREAM)
        self.send_message("othello@zulip.com", "hamlet@zulip.com", Recipient.PERSONAL)
        self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM)
        narrow = [dict(operator='sender', operand='othello@zulip.com')]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow)))
        self.check_well_formed_messages_response(result)
        for message in result["messages"]:
            self.assertEqual(message["sender_email"], "othello@zulip.com")

    def test_get_old_messages_with_only_searching_anchor(self):
        """
        Test that specifying an anchor but 0 for num_before and num_after
        returns at most 1 message.
        """
        self.login("cordelia@zulip.com")
        anchor = self.send_message("cordelia@zulip.com", "Scotland", Recipient.STREAM)
        # Anchor matches the narrow: exactly the anchor message comes back.
        narrow = [dict(operator='sender', operand='cordelia@zulip.com')]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow),
                                            anchor=anchor, num_before=0,
                                            num_after=0))
        self.check_well_formed_messages_response(result)
        self.assertEqual(len(result['messages']), 1)
        # Anchor does not match the narrow: nothing comes back.
        narrow = [dict(operator='is', operand='mentioned')]
        result = self.post_with_params(dict(narrow=ujson.dumps(narrow),
                                            anchor=anchor, num_before=0,
                                            num_after=0))
        self.check_well_formed_messages_response(result)
        self.assertEqual(len(result['messages']), 0)

    def test_missing_params(self):
        """
        anchor, num_before, and num_after are all required
        POST parameters for get_old_messages.
        """
        self.login("hamlet@zulip.com")
        required_args = (("anchor", 1), ("num_before", 1), ("num_after", 1))
        # Drop each required argument in turn and expect a specific error.
        for i in range(len(required_args)):
            post_params = dict(required_args[:i] + required_args[i + 1:])
            result = self.client.post("/json/get_old_messages", post_params)
            self.assert_json_error(result,
                                   "Missing '%s' argument" % (required_args[i][0],))

    def test_bad_int_params(self):
        """
        num_before, num_after, and narrow must all be non-negative
        integers or strings that can be converted to non-negative integers.
        """
        self.login("hamlet@zulip.com")
        other_params = [("narrow", {}), ("anchor", 0)]
        int_params = ["num_before", "num_after"]
        bad_types = (False, "", "-1", -1)
        for idx, param in enumerate(int_params):
            for type in bad_types:
                # Rotate through every bad type for every integer
                # parameter, one at a time.
                post_params = dict(other_params + [(param, type)] + \
                                   [(other_param, 0) for other_param in \
                                    int_params[:idx] + int_params[idx + 1:]]
                                   )
                result = self.client.post("/json/get_old_messages", post_params)
                self.assert_json_error(result,
                                       "Bad value for '%s': %s" % (param, type))

    def test_bad_narrow_type(self):
        """
        narrow must be a list of string pairs.
        """
        self.login("hamlet@zulip.com")
        other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)]
        bad_types = (False, 0, '', '{malformed json,',
                     '{foo: 3}', '[1,2]', '[["x","y","z"]]')
        for type in bad_types:
            post_params = dict(other_params + [("narrow", type)])
            result = self.client.post("/json/get_old_messages", post_params)
            self.assert_json_error(result,
                                   "Bad value for 'narrow': %s" % (type,))

    def test_old_empty_narrow(self):
        """
        '{}' is accepted to mean 'no narrow', for use by old mobile clients.
        """
        self.login("hamlet@zulip.com")
        all_result = self.post_with_params({})
        narrow_result = self.post_with_params({'narrow': '{}'})
        for r in (all_result, narrow_result):
            self.check_well_formed_messages_response(r)
        # '{}' must behave exactly like an absent narrow.
        self.assertEqual(message_ids(all_result), message_ids(narrow_result))

    def test_bad_narrow_operator(self):
        """
        Unrecognized narrow operators are rejected.
        """
        self.login("hamlet@zulip.com")
        for operator in ['', 'foo', 'stream:verona', '__init__']:
            narrow = [dict(operator=operator, operand='')]
            params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
            result = self.client.post("/json/get_old_messages", params)
            self.assert_json_error_contains(result,
                                            "Invalid narrow operator: unknown operator")

    def exercise_bad_narrow_operand(self, operator, operands, error_msg):
        # Helper: POST each bad operand under the given operator and expect
        # the error message in the response.
        other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)]
        for operand in operands:
            post_params = dict(other_params + [
                ("narrow", ujson.dumps([[operator, operand]]))])
            result = self.client.post("/json/get_old_messages", post_params)
            self.assert_json_error_contains(result, error_msg)

    def test_bad_narrow_stream_content(self):
        """
        If an invalid stream name is requested in get_old_messages, an error is
        returned.
        """
        self.login("hamlet@zulip.com")
        bad_stream_content = (0, [], ["x", "y"])
        self.exercise_bad_narrow_operand("stream", bad_stream_content,
                                         "Bad value for 'narrow'")

    def test_bad_narrow_one_on_one_email_content(self):
        """
        If an invalid 'pm-with' is requested in get_old_messages, an
        error is returned.
        """
        self.login("hamlet@zulip.com")
        bad_stream_content = (0, [], ["x","y"])
        self.exercise_bad_narrow_operand("pm-with", bad_stream_content,
                                         "Bad value for 'narrow'")

    def test_bad_narrow_nonexistent_stream(self):
        # A syntactically valid but unknown stream name is rejected.
        self.login("hamlet@zulip.com")
        self.exercise_bad_narrow_operand("stream", ['non-existent stream'],
                                         "Invalid narrow operator: unknown stream")

    def test_bad_narrow_nonexistent_email(self):
        # A syntactically valid but unknown user email is rejected.
        self.login("hamlet@zulip.com")
        self.exercise_bad_narrow_operand("pm-with", ['non-existent-user@zulip.com'],
                                         "Invalid narrow operator: unknown user")

    def test_message_without_rendered_content(self):
        """Older messages may not have rendered_content in the database"""
        m = Message.objects.all().order_by('-id')[0]
        # Simulate a legacy row that predates stored rendered content.
        m.rendered_content = m.rendered_content_version = None
        m.content = 'test content'
        # Use to_dict_uncached directly to avoid having to deal with memcached
        d = m.to_dict_uncached(True)
        self.assertEqual(d['content'], '<p>test content</p>')

    def common_check_get_old_messages_query(self, query_params, expected):
        # Run the backend view with captured queries and assert the tagged
        # get_old_messages SQL exactly matches `expected`.
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as queries:
            get_old_messages_backend(request, user_profile)
        for query in queries:
            if "/* get_old_messages */" in query['sql']:
                sql = query['sql'].replace(" /* get_old_messages */", '')
                self.assertEqual(sql, expected)
                return
        self.fail("get_old_messages query not found")

    def test_use_first_unread_anchor(self):
        # With use_first_unread_anchor, the unread-lookup query must still
        # exclude the user's muted topics.
        realm = get_realm('zulip.com')
        create_stream_if_needed(realm, 'devel')
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        user_profile.muted_topics = ujson.dumps([['Scotland', 'golf'], ['devel', 'css'], ['bogus', 'bogus']])
        user_profile.save()
        query_params = dict(
            use_first_unread_anchor='true',
            anchor=0,
            num_before=0,
            num_after=0,
            narrow='[["stream", "Scotland"]]'
        )
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as queries:
            get_old_messages_backend(request, user_profile)
        # Only the unread-anchor lookup query is of interest here.
        queries = filter(lambda q: q['sql'].startswith("SELECT message_id, flags"), queries)
        ids = {}
        for stream_name in ['Scotland']:
            stream = get_stream(stream_name, realm)
            ids[stream_name] = get_recipient(Recipient.STREAM, stream.id).id
        cond = '''AND NOT (recipient_id = {Scotland} AND upper(subject) = upper('golf'))'''
        cond = cond.format(**ids)
        self.assertTrue(cond in queries[0]['sql'])

    def test_exclude_muting_conditions(self):
        # exclude_muting_conditions should translate muted topics (and, with
        # no narrow, muted streams) into the expected SQL predicates.
        realm = get_realm('zulip.com')
        create_stream_if_needed(realm, 'devel')
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        user_profile.muted_topics = ujson.dumps([['Scotland', 'golf'], ['devel', 'css'], ['bogus', 'bogus']])
        user_profile.save()
        # Narrowed to Scotland: only the Scotland/golf mute applies.
        narrow = [
            dict(operator='stream', operand='Scotland'),
        ]
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        query = select([column("id").label("message_id")], None, "zerver_message")
        query = query.where(*muting_conditions)
        expected_query = '''
            SELECT id AS message_id
            FROM zerver_message
            WHERE NOT (recipient_id = :recipient_id_1 AND upper(subject) = upper(:upper_1))
            '''
        self.assertEqual(fix_ws(query), fix_ws(expected_query))
        params = get_sqlalchemy_query_params(query)
        self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Scotland'))
        self.assertEqual(params['upper_1'], 'golf')
        # No narrow: muted streams and all muted topics apply.
        mute_stream(realm, user_profile, 'Verona')
        narrow = []
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        query = select([column("id")], None, "zerver_message")
        query = query.where(and_(*muting_conditions))
        expected_query = '''
            SELECT id
            FROM zerver_message
            WHERE recipient_id NOT IN (:recipient_id_1)
            AND NOT
            (recipient_id = :recipient_id_2 AND upper(subject) = upper(:upper_1) OR
            recipient_id = :recipient_id_3 AND upper(subject) = upper(:upper_2))'''
        self.assertEqual(fix_ws(query), fix_ws(expected_query))
        params = get_sqlalchemy_query_params(query)
        self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Verona'))
        self.assertEqual(params['recipient_id_2'], get_recipient_id_for_stream_name(realm, 'Scotland'))
        self.assertEqual(params['upper_1'], 'golf')
        self.assertEqual(params['recipient_id_3'], get_recipient_id_for_stream_name(realm, 'devel'))
        self.assertEqual(params['upper_2'], 'css')

    def test_get_old_messages_queries(self):
        # Snapshot-test the exact SQL for the three anchor/window shapes:
        # after-only, before-only, and both directions (UNION ALL).
        query_ids = self.get_query_ids()
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10}, sql)
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 100 ORDER BY message_id DESC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 0}, sql)
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM ((SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 99 ORDER BY message_id DESC \n LIMIT 10) UNION ALL (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 100 ORDER BY message_id ASC \n LIMIT 11)) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 10}, sql)

    def test_get_old_messages_with_narrow_queries(self):
        # Snapshot-test the exact SQL generated for each narrow operator.
        query_ids = self.get_query_ids()
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["pm-with", "othello@zulip.com"]]'},
                                                 sql)
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (flags & 2) != 0 AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["is", "starred"]]'},
                                                 sql)
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {othello_id} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["sender", "othello@zulip.com"]]'},
                                                 sql)
        # Note: a pure stream narrow queries zerver_message directly (no
        # flags column, no zerver_usermessage join).
        sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["stream", "Scotland"]]'},
                                                 sql)
        sql_template = "SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND upper(subject) = upper('blah') AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["topic", "blah"]]'},
                                                 sql)
        sql_template = "SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND upper(subject) = upper('blah') AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["stream", "Scotland"], ["topic", "blah"]]'},
                                                 sql)
        # Narrow to pms with yourself
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {hamlet_id} AND recipient_id = {hamlet_recipient} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["pm-with", "hamlet@zulip.com"]]'},
                                                 sql)
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id = {scotland_recipient} AND (flags & 2) != 0 AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["stream", "Scotland"], ["is", "starred"]]'},
                                                 sql)

    def test_get_old_messages_with_search_queries(self):
        # Snapshot-test the full-text-search SQL (tsquery matching plus the
        # ts_match_locs_array highlight columns).
        query_ids = self.get_query_ids()
        sql_template = "SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT message_id, flags, subject, rendered_content, ts_match_locs_array('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping')) AS content_matches, ts_match_locs_array('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping')) AS subject_matches \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["search", "jumping"]]'},
                                                 sql)
        sql_template = "SELECT anon_1.message_id, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT id AS message_id, subject, rendered_content, ts_match_locs_array('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping')) AS content_matches, ts_match_locs_array('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping')) AS subject_matches \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["stream", "Scotland"], ["search", "jumping"]]'},
                                                 sql)
        # Quoted phrases additionally produce ILIKE substring conditions.
        sql_template = 'SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT message_id, flags, subject, rendered_content, ts_match_locs_array(\'zulip.english_us_search\', rendered_content, plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AS content_matches, ts_match_locs_array(\'zulip.english_us_search\', escape_html(subject), plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AS subject_matches \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (content ILIKE \'%jumping%\' OR subject ILIKE \'%jumping%\') AND (search_tsvector @@ plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_old_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
                                                  'narrow': '[["search", "\\"jumping\\" quickly"]]'},
                                                 sql)
class EditMessageTest(AuthedTestCase):
    """Tests for /json/update_message: content/subject edits, cache
    consistency, and topic-edit propagation modes."""

    def check_message(self, msg_id, subject=None, content=None):
        # Verify the cached dict matches a fresh uncached render, and
        # optionally check the stored subject/content; returns the Message.
        msg = Message.objects.get(id=msg_id)
        cached = msg.to_dict(False)
        uncached = msg.to_dict_uncached(False)
        self.assertEqual(cached, uncached)
        if subject:
            self.assertEqual(msg.subject, subject)
        if content:
            self.assertEqual(msg.content, content)
        return msg

    def test_save_message(self):
        # This is also tested by a client test, but here we can verify
        # the cache against the database
        self.login("hamlet@zulip.com")
        msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                   subject="editing", content="before edit")
        # Edit the content only.
        result = self.client.post("/json/update_message", {
            'message_id': msg_id,
            'content': 'after edit'
        })
        self.assert_json_success(result)
        self.check_message(msg_id, content="after edit")
        # Edit the subject only.
        result = self.client.post("/json/update_message", {
            'message_id': msg_id,
            'subject': 'edited'
        })
        self.assert_json_success(result)
        self.check_message(msg_id, subject="edited")

    def test_propagate_topic_forward(self):
        """change_later must retopic the edited message and every LATER
        message with the same topic in the same stream, leaving earlier
        messages, other streams, and other topics untouched."""
        self.login("hamlet@zulip.com")
        # Send order matters: id1 < id2 < ... < id5.
        id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        id2 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
                                subject="topic1")
        id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic2")
        id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        result = self.client.post("/json/update_message", {
            'message_id': id1,
            'subject': 'edited',
            'propagate_mode': 'change_later'
        })
        self.assert_json_success(result)
        self.check_message(id1, subject="edited")
        self.check_message(id2, subject="edited")
        self.check_message(id3, subject="topic1")   # other stream: untouched
        self.check_message(id4, subject="topic2")   # other topic: untouched
        self.check_message(id5, subject="edited")

    def test_propagate_all_topics(self):
        """change_all must retopic every same-stream/same-topic message,
        regardless of whether it is before or after the edited one."""
        self.login("hamlet@zulip.com")
        id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        id2 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
                                subject="topic1")
        id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic2")
        id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic1")
        id6 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
                                subject="topic3")
        # Edit id2 (not the first) to prove the change reaches id1 too.
        result = self.client.post("/json/update_message", {
            'message_id': id2,
            'subject': 'edited',
            'propagate_mode': 'change_all'
        })
        self.assert_json_success(result)
        self.check_message(id1, subject="edited")
        self.check_message(id2, subject="edited")
        self.check_message(id3, subject="topic1")   # other stream: untouched
        self.check_message(id4, subject="topic2")   # other topic: untouched
        self.check_message(id5, subject="edited")
        self.check_message(id6, subject="topic3")   # other topic: untouched
class StarTests(AuthedTestCase):
    """Tests for starring/un-starring messages via /json/update_message_flags."""

    def change_star(self, messages, add=True):
        # Helper: add (or remove, when add=False) the "starred" flag on the
        # given message ids.
        return self.client.post("/json/update_message_flags",
                                {"messages": ujson.dumps(messages),
                                 "op": "add" if add else "remove",
                                 "flag": "starred"})

    def test_change_star(self):
        """
        You can set a message as starred/un-starred through
        /json/update_message_flags.
        """
        self.login("hamlet@zulip.com")
        message_ids = [self.send_message("hamlet@zulip.com", "hamlet@zulip.com",
                                         Recipient.PERSONAL, "test")]
        # Star a message.
        result = self.change_star(message_ids)
        self.assert_json_success(result)
        for msg in self.get_old_messages():
            if msg['id'] in message_ids:
                self.assertEqual(msg['flags'], ['starred'])
            else:
                self.assertEqual(msg['flags'], ['read'])
        # Remove the stars.
        result = self.change_star(message_ids, False)
        self.assert_json_success(result)
        for msg in self.get_old_messages():
            if msg['id'] in message_ids:
                self.assertEqual(msg['flags'], [])

    def test_new_message(self):
        """
        New messages aren't starred.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        content = "Test message for star"
        self.send_message(test_email, "Verona", Recipient.STREAM,
                          content=content)
        # Fetch the most recent UserMessage row for this user.
        sent_message = UserMessage.objects.filter(
            user_profile=get_user_profile_by_email(test_email)
        ).order_by("id").reverse()[0]
        self.assertEqual(sent_message.message.content, content)
        self.assertFalse(sent_message.flags.starred)
class AttachmentTest(TestCase):
    """Tests for Message content classifiers: content_has_attachment,
    content_has_image, and content_has_link."""

    def test_basics(self):
        # Attachments: user-upload URLs/paths count; plain links do not.
        self.assertFalse(Message.content_has_attachment('whatever'))
        self.assertFalse(Message.content_has_attachment('yo http://foo.com'))
        self.assertTrue(Message.content_has_attachment('yo\n https://staging.zulip.com/user_uploads/'))
        self.assertTrue(Message.content_has_attachment('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.jpg foo'))
        self.assertTrue(Message.content_has_attachment('https://humbug-user-uploads.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
        self.assertTrue(Message.content_has_attachment('https://humbug-user-uploads-test.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
        # Images: only image-extension uploads count; PDFs and bare links don't.
        self.assertFalse(Message.content_has_image('whatever'))
        self.assertFalse(Message.content_has_image('yo http://foo.com'))
        self.assertFalse(Message.content_has_image('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
        # NOTE(review): this list mixes dotted and undotted entries, so the
        # "photo-10.%s" template yields e.g. "photo-10..bmp" (double dot) for
        # the dotted ones and "photo-10.jpeg" for "jpeg". The matcher
        # apparently tolerates both — confirm against content_has_image.
        for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp", ".JPG"]:
            content = 'yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.%s foo' % (ext,)
            self.assertTrue(Message.content_has_image(content))
        self.assertTrue(Message.content_has_image('https://humbug-user-uploads.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
        self.assertTrue(Message.content_has_image('https://humbug-user-uploads-test.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
        # Links: any http(s) URL or upload path counts.
        self.assertFalse(Message.content_has_link('whatever'))
        self.assertTrue(Message.content_has_link('yo\n http://foo.com'))
        self.assertTrue(Message.content_has_link('yo\n https://example.com?spam=1&eggs=2'))
        self.assertTrue(Message.content_has_link('yo /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
        self.assertTrue(Message.content_has_link('https://humbug-user-uploads.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
        self.assertTrue(Message.content_has_link('https://humbug-user-uploads-test.s3.amazonaws.com/sX_TIQx/screen-shot.jpg'))
class CheckMessageTest(AuthedTestCase):
    def test_basic_check_message_call(self):
        """check_message() accepts a plain stream message and preserves the sender."""
        sender = get_user_profile_by_email('othello@zulip.com')
        client, _ = Client.objects.get_or_create(name="test suite")
        stream_name = 'integration'
        stream, _ = create_stream_if_needed(Realm.objects.get(domain="zulip.com"), stream_name)
        message_type_name = 'stream'
        # The original assigned message_to = None and immediately overwrote
        # it; the dead assignment was removed.
        message_to = [stream_name]
        subject_name = 'issue'
        message_content = 'whatever'
        ret = check_message(sender, client, message_type_name, message_to,
                            subject_name, message_content)
        self.assertEqual(ret['message'].sender.email, 'othello@zulip.com')

    def test_bot_pm_feature(self):
        # We send a PM to a bot's owner if their bot sends a message to
        # an unsubscribed stream
        parent = get_user_profile_by_email('othello@zulip.com')
        bot = do_create_user(
            email='othello-bot@zulip.com',
            password='',
            realm=parent.realm,
            full_name='',
            short_name='',
            active=True,
            bot=True,
            bot_owner=parent
        )
        bot.last_reminder = None
        sender = bot
        client, _ = Client.objects.get_or_create(name="test suite")
        stream_name = 'integration'
        stream, _ = create_stream_if_needed(Realm.objects.get(domain="zulip.com"), stream_name)
        message_type_name = 'stream'
        # Dead `message_to = None` assignment removed here as well.
        message_to = [stream_name]
        subject_name = 'issue'
        message_content = 'whatever'
        old_count = message_stream_count(parent)
        ret = check_message(sender, client, message_type_name, message_to,
                            subject_name, message_content)
        new_count = message_stream_count(parent)
        # The owner should have received exactly one extra message (the PM).
        self.assertEqual(new_count, old_count + 1)
        self.assertEqual(ret['message'].sender.email, 'othello-bot@zulip.com')
|
shingonoide/odoo | refs/heads/deverp_8.0 | addons/l10n_ar/__init__.py | 2120 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
patryk4815/django-jinja-bootstrap-form | refs/heads/master | example/app/models.py | 10644 | from django.db import models
# Create your models here.
|
wilvk/ansible | refs/heads/devel | test/units/modules/network/nuage/nuage_module.py | 57 | # -*- coding: utf-8 -*-
# (c) 2017, Nokia
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests.mock import patch
from units.modules.utils import set_module_args as _set_module_args, AnsibleExitJson, AnsibleFailJson, ModuleTestCase
from nose.plugins.skip import SkipTest
try:
from vspk import v5_0 as vsdk
from bambou import nurest_session
except ImportError:
raise SkipTest('Nuage Ansible modules requires the vspk and bambou python libraries')
def set_module_args(args):
    """Fill in default Nuage auth settings, then delegate to the shared helper."""
    default_auth = {
        'api_username': 'csproot',
        'api_password': 'csproot',
        'api_enterprise': 'csp',
        'api_url': 'https://localhost:8443',
        'api_version': 'v5_0'
    }
    # Only inject the defaults when the caller did not supply credentials.
    args.setdefault('auth', default_auth)
    return _set_module_args(args)
class MockNuageResponse(object):
    """Minimal stand-in for a Nuage API response object."""

    def __init__(self, status_code, reason, errors):
        # Mirror the attributes that code under test reads from a response.
        self.status_code = status_code
        self.reason = reason
        self.errors = errors
class MockNuageConnection(object):
    """Minimal stand-in for a Nuage connection wrapping a mock response."""

    def __init__(self, status_code, reason, errors):
        # The code under test only inspects connection.response.
        self.response = MockNuageResponse(status_code, reason, errors)
class TestNuageModule(ModuleTestCase):
    # Base test case that replaces the VSD session used by the Nuage
    # modules with a local fake, so tests never touch the network.
    def setUp(self):
        super(TestNuageModule, self).setUp()
        def session_start(self):
            # Replacement for NUVSDSession.start(): fabricate a root user
            # object and register this session object as the current bambou
            # session context instead of authenticating against a real VSD.
            self._root_object = vsdk.NUMe()
            self._root_object.enterprise_id = 'enterprise-id'
            nurest_session._NURESTSessionCurrentContext.session = self
            return self
        # Patch is started here and explicitly stopped in tearDown.
        self.session_mock = patch('vspk.v5_0.NUVSDSession.start', new=session_start)
        self.session_mock.start()
    def tearDown(self):
        super(TestNuageModule, self).tearDown()
        # Undo the patch so later tests see the real session behaviour.
        self.session_mock.stop()
|
orestkreminskyi/taf | refs/heads/master | taf/plugins/pytest_onsenv.py | 2 | # Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``pytest_onsenv.py``
`Creates env fixture for ons test cases`
"""
import time
import sys
import pytest
from testlib import testenv
from testlib import common3
from testlib import switch_general
from testlib import dev_linux_host
# WORKAROUND: add fix from pytest 2.6 (fix issue498: https://bitbucket.org/hpk42/pytest/commits/6a5904c4816cebd3e146a4277c0ad5021b131753#chg-_pytest/python.py)
def finish(self):
    """Run registered fixture finalizers in LIFO order, then drop the cache."""
    try:
        finalizers = self._finalizer  # pylint: disable=protected-access
        while finalizers:
            finalizers.pop()()
    finally:
        # Invalidate the cached fixture value even when a finalizer raised.
        if hasattr(self, "cached_result"):
            del self.cached_result
def _check_pytest_version(version, max_version):
""" Check if version is less or equal to the max_version.
Args:
version(str): product version
max_version(str): max product version
Returns:
bool: True/False
"""
version_list = version.split('.')
max_version_list = max_version.split('.')
i = 0
while i <= len(version_list):
if len(version_list) == i:
return True
if len(max_version_list) == i:
return False
if int(max_version_list[i]) > int(version_list[i]):
return True
if int(max_version_list[i]) == int(version_list[i]):
i += 1
continue
if int(max_version_list[i]) < int(version_list[i]):
return False
return False
# Apply the monkey-patch only on pytest versions up to 2.5.2; newer pytest
# already contains the upstream fix for issue498.
if _check_pytest_version(pytest.__version__, '2.5.2'):
    from _pytest.python import FixtureDef  # pylint: disable=no-name-in-module
    FixtureDef.finish = finish
# WORKAROUND END
# Valid values for the --testenv command line option (see pytest_addoption).
TESTENV_OPTIONS = ["none", "simplified2", "simplified3", "simplified4", "simplified5", "golden",
                   "diamond", "mixed"]
def pytest_addoption(parser):
    """TAF specific options.

    Registers all environment-related command line options on the pytest
    parser: environment/setup selection, device check policies, link
    verification mode and the UI drivers to use.

    NOTE(review): the '%default' placeholders in the help strings are
    optparse-style; verify that this pytest version's parser expands them.
    """
    # Environment and setup file selection.
    parser.addoption("--env", action="store", default=None,
                     help="Testing environment, '%default' by default.")
    parser.addoption("--setup_file", action="store", default=None, dest="setup",
                     help="Environment setup, '%default' by default.")
    parser.addoption("--build_path", action="store", default="/opt/simswitch",
                     help="Path to build, '%default' by default.")
    # Environment lifecycle control.
    parser.addoption("--get_only", action="store_true", default=False,
                     help="Do not start environment, only connect to exists one. %default by default.")
    parser.addoption("--leave_on", action="store_true", default=False,
                     help="Do not shutdown environment after the end of tests. %default by default.")
    parser.addoption("--setup_scope", action="store", default="module",
                     choices=["session", "module", "class", "function"],
                     help="Setup scope (session | module | class | function). '%default' by default.")
    # Device check policies applied around each test case (see EnvTest).
    parser.addoption("--call_check", action="store", default="fast",
                     choices=["none", "complete", "fast", "sanity_check_only"],
                     help="Check method for devices on test case call (none | complete | fast | sanity_check_only). '%default' by default.")
    parser.addoption("--teardown_check", action="store", default="sanity_check_only",
                     choices=["none", "complete", "fast", "sanity_check_only"],
                     help="Check method for devices on test case teardown (none | complete | fast | sanity_check_only). '%default' by default.")
    parser.addoption("--testenv", action="store", default="none",
                     choices=TESTENV_OPTIONS,
                     help=(
                         "Verify environment before starting tests ({}). '%default' by default.".format(
                             " | ".join(TESTENV_OPTIONS))))
    parser.addoption("--use_parallel_init", action="store_true", default=False,
                     help="Use threads for simultaneous switches processing. %default by default.")
    parser.addoption("--fail_ctrl", action="store", default="restart",
                     choices=["stop", "restart", "ignore"],
                     help="Action on device failure (stop | restart | ignore). '%default' by default.")
    parser.addoption("--ixia_clear_ownership", action="store_true", default=False,
                     help="Clear IXIA ports ownership on session start. %default by default.")
    # UI driver selection for switches and linux hosts.
    # use --switch_ui also to support eventual migration away from --ui
    parser.addoption("--ui", "--switch_ui", action="store", default="ons_xmlrpc",
                     choices=list(switch_general.UI_MAP.keys()),
                     help="User Interface to configure switch ({}). '%default' by default.".format(
                         " | ".join(switch_general.UI_MAP)))
    parser.addoption("--lhost_ui", action="store", default="linux_bash",
                     choices=list(dev_linux_host.UI_MAP.keys()),
                     help="User Interface to configure lhost ({}). '%default' by default.".format(
                         " | ".join(dev_linux_host.UI_MAP)))
def setup_scope():
    """Return the value of the --setup_scope command line option.

    Reads sys.argv directly because this runs at import/decoration time,
    before pytest has parsed its options.

    BUG FIX: the original only recognised the "--setup_scope=session"
    form; the space-separated "--setup_scope session" form raised an
    IndexError that was silently swallowed, so the option was ignored.
    Both forms are now supported.

    Returns:
        str: the requested scope, or "module" when absent/malformed.
    """
    _setup_scope = "module"
    for index, arg in enumerate(sys.argv):
        if not arg.startswith("--setup_scope"):
            continue
        if "=" in arg:
            _setup_scope = arg.split("=", 1)[1]
        elif index + 1 < len(sys.argv):
            # Space-separated form: the value is the next argv token.
            _setup_scope = sys.argv[index + 1]
        break
    return _setup_scope
def pytest_configure(config):
    """Register the env plugin, or abort the run when no setup file was given."""
    if not config.option.setup:
        # A setup file is mandatory for this plugin to make sense.
        config.ctlogger.error("SETUP")
        pytest.exit("SETUP")
    config.pluginmanager.register(OnsEnvPlugin(), "_onsenv")
def pytest_unconfigure(config):
    """Drop and unregister the env plugin stored on the config, if any."""
    plugin = getattr(config, "_onsenv", None)
    if not plugin:
        return
    del config._onsenv
    config.pluginmanager.unregister(plugin)
class Env(object):
    """Wrapper that brings the testing environment up and tears it down."""

    def __init__(self, request, env):
        self.env = env
        self.option = request.config.option

    def create(self):
        """Initialize devices, wire cross connections and verify links once."""
        self.env.initialize()
        # Cross devices that are not configured to autoconnect at create
        # time must be wired explicitly.
        for cross in list(getattr(self.env, "cross", {}).values()):
            if not cross.autoconnect:
                cross.cross_connect(cross.connections)
        # Read and store env properties
        self.env.env_prop = testenv.get_env_prop(self.env)
        already_checked = getattr(self.env, "testenv_checkstatus", False)
        # Run the link verification suite only when requested on the CLI
        # and not already performed for this environment.
        if self.option.testenv != "none" and not already_checked:
            verify = getattr(testenv.TestLinks(self.env),
                             "test_links_{0}".format(self.option.testenv))
            verify()
            self.env.testenv_checkstatus = True

    def destroy(self):
        """Shut the whole environment down."""
        self.env.shutdown()
class EnvTest(object):
    """Cleanup/Check testing environment around a single test case.
    """

    def __init__(self, request, env):
        self.request = request
        self.env = env
        # call_status stays False until setup() completes; teardown()
        # restarts the environment when a setup failure is detected.
        self.request.node.call_status = False

    def setup(self):
        """Cleanup/Check testing environment on test case setup.
        """
        _start_time = time.time()
        mode = self.env.opts.call_check
        # Clean up environment before new case
        if mode == "fast":
            self.env.cleanup()
        if mode == "complete":
            self.env.shutdown()
            self.env.initialize()
        if mode == "sanity_check_only":
            self.env.check()
        self.request.node.call_status = True
        _duration = time.time() - _start_time
        self.request.config.ctlogger.debug("PROFILING: env fixture setup duration = %s. Item: %s" % (_duration, self.request.node.name))
        self.request.config.ctlogger.debug("Exit env fixture setup. Item: %s" % self.request.node.name)

    def teardown(self):
        """Cleanup/Check testing environment on test case teardown.
        """
        self.request.config.ctlogger.debug("Entering env fixture teardown. Item: %s" % self.request.node.name)
        _start_time = time.time()
        mode = self.env.opts.teardown_check
        # Check environment
        if mode == "fast":
            self.env.cleanup()
        if mode == "complete" or not self.request.node.call_status:
            self.env.shutdown()
            self.env.initialize()
        if mode == "sanity_check_only":
            self.env.check()
        _duration = time.time() - _start_time
        self.request.config.ctlogger.info("PROFILING: env fixture teardown duration = %s. Item: %s" % (_duration, self.request.node.name))
        self.request.config.ctlogger.debug("Exit env fixture teardown hook. Item: %s" % self.request.node.name)
class OnsEnvPlugin(object):
    # Plugin providing the env_init -> env_main -> env fixture chain:
    # env_init validates CLI options once per session, env_main starts and
    # stops the devices (scope taken from --setup_scope), env performs the
    # per-test cleanup/check.
    @pytest.fixture(scope='session')
    def env_init(self, request):
        """Validate command line options.

        Args:
            request(pytest.request): pytest request

        Returns:
            testlib.common3.Environment: Environment instance
        """
        # These options are also constrained by `choices` in
        # pytest_addoption; the explicit checks keep older pytest happy.
        if request.config.option.setup_scope not in {"session", "module", "class", "function"}:
            request.config.ctlogger.error("Incorrect --setup_scope option.")
            pytest.exit("Incorrect --setup_scope option.")
        if request.config.option.call_check not in {"none", "complete", "fast", "sanity_check_only"}:
            request.config.ctlogger.error("Incorrect --call_check option.")
            pytest.exit("Incorrect --call_check option.")
        if request.config.option.teardown_check not in {"none", "complete", "fast", "sanity_check_only"}:
            request.config.ctlogger.error("Incorrect --teardown_check option.")
            pytest.exit("Incorrect --teardown_check option.")
        if request.config.option.fail_ctrl not in {"stop", "restart", "ignore"}:
            request.config.ctlogger.error("Incorrect --fail_ctrl option.")
            pytest.exit("Incorrect --fail_ctrl option.")
        request.config.env.testenv_checkstatus = False
        return request.config.env
    # NOTE: setup_scope() is evaluated at class-definition (import) time,
    # which is why it parses sys.argv itself instead of using pytest config.
    @pytest.fixture(scope=setup_scope())
    def env_main(self, request, env_init):
        """Start/stop devices from environment.

        Args:
            request(pytest.request): pytest request

        Returns:
            testlib.common3.Environment: Environment instance
        """
        env_wrapper = Env(request, env_init)
        # Register the finalizer before create() so a failed create still
        # shuts the environment down.
        request.addfinalizer(env_wrapper.destroy)
        env_wrapper.create()
        return env_init
    @pytest.fixture
    def env(self, request, env_main):
        """Clear devices from environment.

        Args:
            request(pytest fixture): pytest.request

        Returns:
            testlib.common3.Environment: Environment instance
        """
        env = EnvTest(request, env_main)
        request.addfinalizer(env.teardown)
        env.setup()
        return env_main
    def pytest_sessionstart(self, session):
        # Build the Environment object once, as soon as the session starts.
        session.config.ctlogger.debug("Session start...")
        # Define environment
        session.config.env = common3.Environment(session.config.option)
|
Pexego/odoo | refs/heads/master | openerp/tools/misc.py | 17 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
    """Locate executable `name` on the PATH; return None when not found."""
    try:
        return which(name)
    except IOError:
        # `which` signals a missing executable with IOError.
        return None
def find_pg_tool(name):
    """Locate a PostgreSQL client tool, honouring the `pg_path` config option."""
    pg_path = config['pg_path']
    # The config value may be unset or the literal string 'None'.
    search_path = pg_path if pg_path and pg_path != 'None' else None
    try:
        return which(name, path=search_path)
    except IOError:
        return None
def exec_pg_command(name, *args):
    """Run a PostgreSQL client tool, discarding its output.

    @param name name of the tool (e.g. 'pg_dump')
    @param args extra command line arguments
    @return the tool's exit code
    @raise Exception when the tool cannot be located
    """
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    args2 = (prog,) + args
    # BUG FIX: os.devnull was previously opened in the default read mode,
    # so the child process could not actually write its output to it;
    # open it for writing to properly discard stdout/stderr.
    with open(os.devnull, 'w') as dn:
        return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT)
def exec_pg_command_pipe(name, *args):
    """Spawn a PostgreSQL client tool and return its (stdin, stdout) pipes."""
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # on win32, passing close_fds=True is not compatible
    # with redirecting std[in/err/out]
    process = subprocess.Popen((prog,) + args, bufsize= -1,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               close_fds=(os.name=="posix"))
    return process.stdin, process.stdout
def exec_command_pipe(name, *args):
    """Spawn an executable found on the PATH and return its (stdin, stdout) pipes."""
    prog = find_in_path(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # on win32, passing close_fds=True is not compatible
    # with redirecting std[in/err/out]
    process = subprocess.Popen((prog,) + args, bufsize= -1,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               close_fds=(os.name=="posix"))
    return process.stdin, process.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.

    Resolution order: absolute paths are re-rooted below an addons path or
    the root path when possible; relative names are searched first in every
    addons path, then in the root path. Zipped modules are handled by
    _fileopen.

    Example::

    >>> file_open('hr/report/timesheer.xsl')
    >>> file_open('addons/hr/report/timesheet.xsl')
    >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)

    @param name name of the file
    @param mode file open mode
    @param subdir subdirectory
    @param pathinfo if True returns tuple (fileobject, filepath)

    @return fileobject if pathinfo is False else (fileobject, filepath)
    """
    import openerp.modules as addons
    adps = addons.module.ad_paths
    rtp = os.path.normcase(os.path.abspath(config['root_path']))

    # Keep the caller-supplied name for error messages.
    basename = name

    if os.path.isabs(name):
        # It is an absolute path
        # Is it below 'addons_path' or 'root_path'?
        name = os.path.normcase(os.path.normpath(name))
        for root in adps + [rtp]:
            root = os.path.normcase(os.path.normpath(root)) + os.sep
            if name.startswith(root):
                # Strip the matching root so the name becomes relative.
                base = root.rstrip(os.sep)
                name = name[len(base) + 1:]
                break
        else:
            # It is outside the OpenERP root: skip zipfile lookup.
            base, name = os.path.split(name)
            return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)

    # Names already under 'addons/' are looked up relative to the addons
    # paths without the 'addons/' prefix.
    if name.replace(os.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name2 = name[7:]
    elif subdir:
        name = os.path.join(subdir, name)
        if name.replace(os.sep, '/').startswith('addons/'):
            subdir = 'addons'
            name2 = name[7:]
        else:
            name2 = name

    # First, try to locate in addons_path
    if subdir:
        for adp in adps:
            try:
                return _fileopen(name2, mode=mode, basedir=adp,
                                 pathinfo=pathinfo, basename=basename)
            except IOError:
                pass

    # Second, try to locate in root_path
    return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
    """Open `path` below `basedir`, falling back to zipped-module lookup.

    @param path relative path of the file
    @param mode file open mode
    @param basedir directory the path is resolved against
    @param pathinfo if True return (fileobject, filepath) instead of fileobject
    @param basename original caller-supplied name, used in error messages
    @raise IOError when the file cannot be found
    """
    name = os.path.normpath(os.path.join(basedir, path))

    if basename is None:
        basename = name
    # Give higher priority to module directories, which is
    # a more common case than zipped modules.
    if os.path.isfile(name):
        fo = open(name, mode)
        if pathinfo:
            return fo, name
        return fo

    # Support for loading modules in zipped form.
    # This will not work for zipped modules that are sitting
    # outside of known addons paths.
    head = os.path.normpath(path)
    zipname = False
    # Walk the path from the leaf upwards, looking for '<prefix>.zip'
    # archives containing the remaining tail of the path.
    while os.sep in head:
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        zpath = os.path.join(basedir, head + '.zip')
        if zipfile.is_zipfile(zpath):
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(zpath)
            try:
                # Read the member into an in-memory buffer; archive member
                # names always use forward slashes.
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except Exception:
                # Member missing or unreadable: keep walking upwards.
                pass
    # Not found
    if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or deleted' % basename)
    raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten an arbitrarily nested list of elements into a flat list.

    Author: Christophe Simonis (christophe@tinyerp.com)

    Examples::
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    def isiterable(x):
        # Strings are treated as scalars: under Python 3 they define
        # __iter__, which would otherwise cause infinite recursion.
        # (Under Python 2, str has no __iter__, so this check is a no-op.)
        return hasattr(x, "__iter__") and not isinstance(x, str)
    r = []
    for e in list:
        if isiterable(e):
            # BUG FIX: the original used map(r.append, flatten(e)), which
            # relies on map() being eager; under Python 3 map() is lazy and
            # the appends would never execute.
            r.extend(flatten(e))
        else:
            r.append(e)
    return r
def reverse_enumerate(l):
    """Like enumerate, but iterating from the end of the sequence backwards.

    NOTE: Python 2 only (uses izip/xrange); returns an iterator of
    (index, item) pairs starting at the last element.

    Usage::
    >>> a = ['a', 'b', 'c']
    >>> it = reverse_enumerate(a)
    >>> it.next()
    (2, 'c')
    >>> it.next()
    (1, 'b')
    >>> it.next()
    (0, 'a')
    >>> it.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration
    """
    return izip(xrange(len(l)-1, -1, -1), reversed(l))
class UpdateableStr(local):
    """Thread-local holder for a string that can be swapped in place (used in wizards)."""

    def __init__(self, string=''):
        self.string = string

    def __str__(self):
        return str(self.string)

    def __repr__(self):
        return str(self.string)

    def __nonzero__(self):
        # Python 2 truthiness: an empty wrapped string means False.
        return bool(self.string)
class UpdateableDict(local):
    """Thread-local dict holder to use in wizards.

    Every mapping operation is delegated to the wrapped ``dict`` instance
    stored in ``self.dict``, which can be swapped out in place.
    """

    def __init__(self, dict=None):
        # Avoid a shared mutable default: create a fresh dict per instance.
        if dict is None:
            dict = {}
        self.dict = dict

    def __str__(self):
        return str(self.dict)

    def __repr__(self):
        return str(self.dict)

    def clear(self):
        return self.dict.clear()

    def keys(self):
        return self.dict.keys()

    def __setitem__(self, i, y):
        self.dict.__setitem__(i, y)

    def __getitem__(self, i):
        return self.dict.__getitem__(i)

    def copy(self):
        return self.dict.copy()

    def iteritems(self):
        return self.dict.iteritems()

    def iterkeys(self):
        return self.dict.iterkeys()

    def itervalues(self):
        return self.dict.itervalues()

    def pop(self, k, d=None):
        return self.dict.pop(k, d)

    def popitem(self):
        return self.dict.popitem()

    def setdefault(self, k, d=None):
        return self.dict.setdefault(k, d)

    def update(self, E, **F):
        # BUG FIX: the original called self.dict.update(E, F), passing the
        # keyword dict as a second positional argument; dict.update()
        # accepts at most one positional argument, so every call raised
        # TypeError. Forward the keyword arguments properly instead.
        return self.dict.update(E, **F)

    def values(self):
        return self.dict.values()

    def get(self, k, d=None):
        return self.dict.get(k, d)

    def has_key(self, k):
        # Python 2 idiom, kept for backward compatibility.
        return self.dict.has_key(k)

    def items(self):
        return self.dict.items()

    def __cmp__(self, y):
        return self.dict.__cmp__(y)

    def __contains__(self, k):
        return self.dict.__contains__(k)

    def __delitem__(self, y):
        return self.dict.__delitem__(y)

    def __eq__(self, y):
        return self.dict.__eq__(y)

    def __ge__(self, y):
        return self.dict.__ge__(y)

    def __gt__(self, y):
        return self.dict.__gt__(y)

    def __hash__(self):
        return self.dict.__hash__()

    def __iter__(self):
        return self.dict.__iter__()

    def __le__(self, y):
        return self.dict.__le__(y)

    def __len__(self):
        return self.dict.__len__()

    def __lt__(self, y):
        return self.dict.__lt__(y)

    def __ne__(self, y):
        return self.dict.__ne__(y)
class currency(float):
    """Deprecated float subclass rounded to a fixed accuracy.

    .. warning::

       Don't use ! Use res.currency.round()
    """

    def __init__(self, value, accuracy=2, rounding=None):
        # Default rounding step derived from the decimal accuracy.
        self.rounding = 10 ** -accuracy if rounding is None else rounding
        self.accuracy = accuracy

    def __new__(cls, value, accuracy=2, rounding=None):
        # The numeric value is fixed (rounded) at construction time.
        return float.__new__(cls, round(value, accuracy))
def to_xml(s):
    """Escape &, < and > for inclusion in XML text content."""
    escaped = s
    # '&' must be escaped first so freshly produced entities stay intact.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        escaped = escaped.replace(char, entity)
    return escaped
def get_iso_codes(lang):
    """Shorten a locale code like 'fr_FR' to 'fr' when the territory repeats the language."""
    parts = lang.split('_')
    # 'fr_FR' -> 'fr', but 'fr_BE' is kept as-is.
    if len(parts) > 1 and parts[0] == parts[1].lower():
        return parts[0]
    return lang
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BS': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
    """ Returns all languages supported by OpenERP for translation

    :returns: a list of (lang_code, lang_name) pairs, sorted by language name
    :rtype: [(str, unicode)]
    """
    # NOTE: iteritems() is Python 2 only; the result is sorted on the
    # human-readable name, not on the language code.
    return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
    """Return the id of the user's company followed by all descendant company ids."""
    def _descendants(cr, ids):
        # Recursively collect ids of companies whose parent is in `ids`.
        if not ids:
            return []
        cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
        children = [row[0] for row in cr.fetchall()]
        children.extend(_descendants(cr, children))
        return children
    cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
    user_comp = cr.fetchone()[0]
    if not user_comp:
        return []
    return [user_comp] + _descendants(cr, [user_comp])
def mod10r(number):
    """
    Input number : account or invoice number
    Output return: the same number completed with the recursive mod10
    key
    """
    # Recursive mod-10 lookup table (Swiss ESR check digit scheme).
    codec = (0, 9, 4, 6, 8, 2, 7, 1, 3, 5)
    report = 0
    for char in number:
        # Non-digit characters are kept in the output but do not affect
        # the check digit.
        if char.isdigit():
            report = codec[(int(char) + report) % 10]
    return number + str((10 - report) % 10)
def human_size(sz):
    """
    Return the size in a human readable format

    `sz` may be a byte count, or any object with a length (e.g. a string),
    in which case its length is used. Returns False for a falsy input.
    """
    if not sz:
        return False
    units = ('bytes', 'Kb', 'Mb', 'Gb')
    # BUG FIX: the original tested isinstance(sz, basestring), which raises
    # NameError under Python 3; duck-type on __len__ instead, which keeps
    # the Python 2 behaviour and also covers bytes and other sized objects.
    if hasattr(sz, '__len__'):
        sz = len(sz)
    s, i = float(sz), 0
    while s >= 1024 and i < len(units)-1:
        s /= 1024
        i += 1
    return "%0.2f %s" % (s, units[i])
def logged(f):
    """Decorator logging a call's arguments, result and duration at DEBUG level."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        from pprint import pformat
        lines = ['Call -> function: %r' % f]
        for i, arg in enumerate(args):
            lines.append(' arg %02d: %s' % (i, pformat(arg)))
        for key, value in kwargs.items():
            lines.append(' kwarg %10s: %s' % (key, pformat(value)))
        started = time.time()
        res = f(*args, **kwargs)
        lines.append(' result: %s' % pformat(res))
        lines.append(' time delta: %s' % (time.time() - started))
        # One multi-line record per call keeps the log readable.
        _logger.debug('\n'.join(lines))
        return res
    return wrapper
class profile(object):
    """Decorator profiling every call of the wrapped function with cProfile.

    Stats are dumped to `fname`, or to "<function name>.cprof" when no
    file name was given.
    """

    def __init__(self, fname=None):
        self.fname = fname

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            profiler = cProfile.Profile()
            result = profiler.runcall(f, *args, **kwargs)
            # f.func_name is the Python 2 spelling of f.__name__.
            profiler.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
            return result
        return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
    """Return the known icon names as ``(value, label)`` selection pairs.

    Positional and keyword arguments are accepted for API compatibility
    with field selection callables, but are ignored.
    """
    return [(name, name) for name in __icons_list]
def detect_ip_addr():
    """Return a best-effort external IP address (or hostname) for this host.

    Very crude heuristic: do not rely on this for binding to an interface,
    but it can serve as the basis for constructing a remote URL to the
    server.  Falls back to ``'localhost'`` whenever detection fails.
    """
    def _detect_ip_addr():
        # Inner helper doing the real work; any failure is caught by the
        # caller and turned into the 'localhost' fallback.
        from array import array
        from struct import pack, unpack
        try:
            import fcntl
        except ImportError:
            fcntl = None
        ip_addr = None
        if not fcntl: # not UNIX:
            # No ioctl available (e.g. Windows): resolve our own hostname.
            host = socket.gethostname()
            ip_addr = socket.gethostbyname(host)
        else: # UNIX:
            # get all interfaces:
            # ioctl 0x8912 (SIOCGIFCONF on Linux) fills `names` with packed
            # ifreq records and returns the number of bytes actually written.
            nbytes = 128 * 32
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            names = array('B', '\0' * nbytes)
            #print 'names: ', names
            outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
            namestr = names.tostring()
            # try 64 bit kernel:
            # 64-bit kernels use 40-byte ifreq records (name at offset 0,
            # IPv4 address at offset 20); skip the loopback interface.
            for i in range(0, outbytes, 40):
                name = namestr[i:i+16].split('\0', 1)[0]
                if name != 'lo':
                    ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
                    break
            # try 32 bit kernel:
            # 32-bit kernels use 32-byte records; query each non-loopback
            # interface's address via ioctl 0x8915 (SIOCGIFADDR on Linux).
            if ip_addr is None:
                ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
                for ifname in [iface for iface in ifaces if iface != 'lo']:
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
        return ip_addr or 'localhost'
    try:
        ip_addr = _detect_ip_addr()
    except Exception:
        # Permissions problems, exotic platforms, etc.: safe fallback.
        ip_addr = 'localhost'
    return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
    """Attempt to return the "standard name" of the current timezone on a win32 system.
    @return the standard name of the current win32 timezone, or False if it cannot be found.
    """
    tz_name = False
    if sys.platform == "win32":
        try:
            import _winreg
            registry = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
            tz_key = _winreg.OpenKey(registry, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0, _winreg.KEY_ALL_ACCESS)
            # QueryValueEx returns (value, type-code); keep only the value.
            tz_name = str(_winreg.QueryValueEx(tz_key, "StandardName")[0])
            _winreg.CloseKey(tz_key)
            _winreg.CloseKey(registry)
        except Exception:
            # Missing key / no registry access: report failure via False.
            pass
    return tz_name
def detect_server_timezone():
    """Attempt to detect the timezone to use on the server side.
    Defaults to UTC if no working timezone can be found.
    @return the timezone identifier as expected by pytz.timezone.
    """
    try:
        import pytz
    except Exception:
        _logger.warning("Python pytz module is not available. "
            "Timezone will be set to UTC by default.")
        return 'UTC'
    # Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
    # Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
    # Option 3: the environment variable TZ
    sources = [ (config['timezone'], 'OpenERP configuration'),
                (time.tzname[0], 'time.tzname'),
                (os.environ.get('TZ',False),'TZ environment variable'), ]
    # Option 4: OS-specific: /etc/timezone on Unix
    if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            # Unreadable file (permissions, race after exists()): keep False.
            pass
        finally:
            # BUG FIX: 'f' was closed unconditionally, which raised NameError
            # when open() itself failed; only close if it was actually opened.
            if f is not None:
                f.close()
        sources.append((tz_value,"/etc/timezone file"))
    # Option 5: timezone info from registry on Win32
    if sys.platform == "win32":
        # Timezone info is stored in windows registry.
        # However this is not likely to work very well as the standard name
        # of timezones in windows is rarely something that is known to pytz.
        # But that's ok, it is always possible to use a config option to set
        # it explicitly.
        sources.append((get_win32_timezone(),"Windows Registry"))
    # Return the first candidate that pytz actually recognizes.
    for (value,source) in sources:
        if value:
            try:
                tz = pytz.timezone(value)
                _logger.info("Using timezone %s obtained from %s.", tz.zone, source)
                return value
            except pytz.UnknownTimeZoneError:
                _logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
    _logger.warning("No valid timezone could be detected, using default UTC "
        "timezone. You can specify it explicitly with option 'timezone' in "
        "the server configuration.")
    return 'UTC'
def get_server_timezone():
    """Return the canonical server-side timezone name.

    The server always exchanges naive timestamps expressed in UTC, so this
    is hard-coded rather than detected.
    """
    return "UTC"
# Canonical, timezone-naive formats the server uses when exchanging
# dates/times with clients and when storing them in the database.
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = DEFAULT_SERVER_DATE_FORMAT + " " + DEFAULT_SERVER_TIME_FORMAT
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
# A value of '' means "no portable equivalent": the directive is
# stripped from the format string entirely.
DATETIME_FORMATS_MAP = {
        '%C': '', # century
        '%D': '%m/%d/%Y', # modified %y->%Y
        '%e': '%d',
        '%E': '', # special modifier
        '%F': '%Y-%m-%d',
        '%g': '%Y', # modified %y->%Y
        '%G': '%Y',
        '%h': '%b',
        '%k': '%H',
        '%l': '%I',
        '%n': '\n',
        '%O': '', # special modifier
        '%P': '%p',
        '%R': '%H:%M',
        '%r': '%I:%M:%S %p',
        '%s': '', # num of seconds since epoch
        '%T': '%H:%M:%S',
        '%t': ' ', # tab
        '%u': ' %w',
        '%V': '%W',
        '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
        '%+': '%Y-%m-%d %H:%M:%S',
        # %Z is a special case that causes 2 problems at least:
        #  - the timezone names we use (in res_user.context_tz) come
        #    from pytz, but not all these names are recognized by
        #    strptime(), so we cannot convert in both directions
        #    when such a timezone is selected and %Z is in the format
        #  - %Z is replaced by an empty string in strftime() when
        #    there is not tzinfo in a datetime value (e.g when the user
        #    did not pick a context_tz). The resulting string does not
        #    parse back if the format requires %Z.
        # As a consequence, we strip it completely from format strings.
        # The user can always have a look at the context_tz in
        # preferences to check the timezone.
        '%z': '',
        '%Z': '',
}
# Mapping of single-letter strftime directives to their LDML equivalents.
POSIX_TO_LDML = {
    'a': 'E',
    'A': 'EEEE',
    'b': 'MMM',
    'B': 'MMMM',
    #'c': '',
    'd': 'dd',
    'H': 'HH',
    'I': 'hh',
    'j': 'DDD',
    'm': 'MM',
    'M': 'mm',
    'p': 'a',
    'S': 'ss',
    'U': 'w',
    'w': 'e',
    'W': 'w',
    'y': 'yy',
    'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes an UTC timezone
    # for naive datetime objects
    #'z': 'Z',
    #'Z': 'z',
}
def posix_to_ldml(fmt, locale):
    """ Converts a posix/strftime pattern into an LDML date format pattern.
    :param fmt: non-extended C89/C90 strftime pattern
    :param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
    :return: unicode
    """
    out = []
    literal = []  # pending run of bare letters, emitted quoted
    percent = False

    def flush_literal():
        # LDML treats letters as pattern characters, so literal alphabetic
        # text from the input must be wrapped in single quotes.
        if literal:
            out.append("'")
            out.append(''.join(literal))
            out.append("'")
            del literal[:]

    for ch in fmt:
        if not percent and ch.isalpha():
            # accumulate bare letters; an embedded quote is doubled per LDML
            literal.append("''" if ch == "'" else ch)
            continue
        flush_literal()
        if percent:
            if ch == '%':  # escaped percent
                out.append('%')
            elif ch == 'x':  # date format, short seems to match
                out.append(locale.date_formats['short'].pattern)
            elif ch == 'X':  # time format, seems to include seconds. short does not
                out.append(locale.time_formats['medium'].pattern)
            else:  # look up format char in static mapping
                out.append(POSIX_TO_LDML[ch])
            percent = False
        elif ch == '%':
            percent = True
        else:
            out.append(ch)
    # flush anything remaining in the literal buffer
    flush_literal()
    return ''.join(out)
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
        tz_offset=True, ignore_unparsable_time=True):
    """
    Convert a source timestamp string into a destination timestamp string, attempting to apply the
    correct offset if both the server and local timezone are recognized, or no
    offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
    WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
             the client would not be able to format/offset it correctly. DO NOT use it for returning
             date fields directly, these are supposed to be handled by the client!!
    @param src_tstamp_str: the str value containing the timestamp in the server timezone.
    @param src_format: the format to use when parsing the server timestamp.
    @param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
    @param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
    @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
                                   using src_format or formatted using dst_format.
    @return local/client formatted timestamp, expressed in the local/client timezone if possible
            and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
    """
    if not src_tstamp_str:
        return False
    res = src_tstamp_str
    if src_format and dst_format:
        # find out server timezone
        server_tz = get_server_timezone()
        try:
            # dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
            dt_value = datetime.strptime(src_tstamp_str, src_format)
            if tz_offset and dst_tz_name:
                try:
                    import pytz
                    src_tz = pytz.timezone(server_tz)
                    dst_tz = pytz.timezone(dst_tz_name)
                    src_dt = src_tz.localize(dt_value, is_dst=True)
                    dt_value = src_dt.astimezone(dst_tz)
                except Exception:
                    # Unknown timezone or pytz missing: silently fall back to
                    # reformatting without any offset applied.
                    pass
            res = dt_value.strftime(dst_format)
        except Exception:
            # Normal ways to end up here are if strptime or strftime failed
            if not ignore_unparsable_time:
                return False
    return res
def split_every(n, iterable, piece_maker=tuple):
    """Splits an iterable into length-n pieces. The last piece will be shorter
    if ``n`` does not evenly divide the iterable length.
    @param ``piece_maker``: function to build the pieces
                            from the slices (tuple,list,...)
    """
    it = iter(iterable)
    while True:
        chunk = piece_maker(islice(it, n))
        if not chunk:
            # source exhausted: an empty piece terminates the generator
            break
        yield chunk
if __name__ == '__main__':
    # When executed directly, run the doctests embedded in this module's
    # docstrings (e.g. the examples on unquote / UnquoteEvalContext).
    import doctest
    doctest.testmod()
class upload_data_thread(threading.Thread):
    """Background thread POSTing survey data to the openerp.com website.

    Best-effort only: every network or encoding failure is swallowed so an
    unreachable endpoint never affects the caller.
    """
    def __init__(self, email, data, type):
        # Keep the payload as a list of pairs, ready for urlencode().
        self.args = [('email',email),('type',type),('data',data)]
        super(upload_data_thread,self).__init__()
    def run(self):
        try:
            import urllib
            args = urllib.urlencode(self.args)
            fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
            fp.read()
            fp.close()
        except Exception:
            # Fire-and-forget: ignore any failure silently.
            pass
def upload_data(email, data, type='SURVEY'):
    """Send ``data`` to the OpenERP survey endpoint in a background thread.

    Returns True immediately; the upload itself is fire-and-forget.
    """
    worker = upload_data_thread(email, data, type)
    worker.start()
    return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
    """Read ``field`` for the given ``ids`` and group the ids by its value.

    :param string field: name of the field to read and group by; many2one
                         values (tuples) are keyed by their database id.
    :return: mapping of field values to the list of ids that have it
    :rtype: dict
    """
    grouped = {}
    for record in obj.read(cr, uid, ids, [field], context=context):
        value = record[field]
        if isinstance(value, tuple):
            # many2one fields read as (id, name): group on the id only
            value = value[0]
        grouped.setdefault(value, []).append(record['id'])
    return grouped
def get_and_group_by_company(cr, uid, obj, ids, context=None):
    # Convenience wrapper: group the given ids by their 'company_id' value.
    return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
    """Follow a dotted attribute path (e.g. ``'a.b.c'``) starting at ``obj``."""
    target = obj
    for part in attr.split("."):
        target = getattr(target, part)
    return target
def attrgetter(*items):
    """Return a callable fetching the given, possibly dotted, attributes.

    With a single name the callable returns that attribute's value; with
    several names it returns a tuple of values, mirroring operator.attrgetter.
    """
    if len(items) == 1:
        single = items[0]
        return lambda obj: resolve_attr(obj, single)
    def getter(obj):
        return tuple(resolve_attr(obj, item) for item in items)
    return getter
class unquote(str):
    """A str subclass whose repr() is the bare string itself, with no
    enclosing quotation marks and no escaping. The name comes from Lisp's
    unquote.

    Useful to preserve or inject bare variable names inside dicts whose
    repr() will later be eval()'ed. Use with care.

    For example, ``repr(unquote('active_id'))`` is ``active_id`` — no
    surrounding quotes — and a dict containing one prints the bare name.
    """
    def __repr__(self):
        # the string itself *is* the representation
        return self
class UnquoteEvalContext(defaultdict):
    """Evaluation context returning an ``unquote`` string for every missing
    name looked up during evaluation.

    Mostly useful for evaluating OpenERP domains/contexts that may refer to
    names unknown at eval time, so that converting the result back to a
    string preserves the original names (e.g. ``section_id`` survives a
    round-trip through ``eval``).

    **Warning**: used as the context of ``eval()``/``safe_eval()`` it will
    shadow the builtins, which may cause other failures depending on what
    is evaluated.
    """
    def __init__(self, *args, **kwargs):
        # default_factory is None: missing keys go through __missing__ below
        super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
    def __missing__(self, key):
        # hand back the name itself, repr-transparent
        return unquote(key)
class mute_logger(object):
    """Temporarily suppress logging for the given logger names.

    Works both as a context manager and as a decorator::

        @mute_logger('openerp.plic.ploc')
        def do_stuff():
            blahblah()

        with mute_logger('openerp.foo.bar'):
            do_suff()
    """
    def __init__(self, *loggers):
        self.loggers = loggers

    def filter(self, record):
        # This object doubles as a logging.Filter rejecting every record.
        return 0

    def __enter__(self):
        for name in self.loggers:
            assert isinstance(name, basestring),\
                "A logger name must be a string, got %s" % type(name)
            logging.getLogger(name).addFilter(self)

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        for name in self.loggers:
            logging.getLogger(name).removeFilter(self)

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
# private placeholder marking stream exhaustion
_ph = object()
class CountingStream(object):
    """ Stream wrapper counting the number of element it has yielded. Similar
    role to ``enumerate``, but for use when the iteration process of the stream
    isn't fully under caller control (the stream can be iterated from multiple
    points including within a library)

    ``start`` allows overriding the starting index (the index before the first
    item is returned).

    On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
    by one.

    .. attribute:: index

        ``int``, index of the last yielded element in the stream. If the stream
        has ended, will give an index 1-past the stream
    """
    def __init__(self, stream, start=-1):
        self.stream = iter(stream)
        self.index = start
        self.stopped = False

    def __iter__(self):
        return self

    def next(self):
        if self.stopped:
            raise StopIteration()
        self.index += 1
        value = next(self.stream, _ph)
        if value is _ph:
            # underlying stream exhausted; stay stopped from now on
            self.stopped = True
            raise StopIteration()
        return value
def stripped_sys_argv(*strip_args):
    """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
    # Always strip these options in addition to any caller-supplied ones.
    strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
    assert all(config.parser.has_option(s) for s in strip_args)
    takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
    # Partition into long ('--xxx') and short ('-x') options: sorted() puts
    # all '--' options first, so groupby yields exactly two groups.
    longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
    longs_eq = tuple(l + '=' for l in longs if takes_value[l])
    args = sys.argv[:]
    def strip(args, i):
        # Drop an argv element when it is a stripped option itself (including
        # the '--opt=value' form) or the value of the preceding stripped option.
        return args[i].startswith(shorts) \
            or args[i].startswith(longs_eq) or (args[i] in longs) \
            or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
    return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
    """
    An immutable mapping that returns the same fixed value for every key.
    Useful as a default value for mapping-typed method parameters.
    """
    __slots__ = ['_value']

    def __init__(self, val):
        self._value = val

    def __len__(self):
        """
        Reported as empty: defaultdict grows its length per requested key,
        which does not seem useful to emulate here.
        """
        return 0

    def __iter__(self):
        """
        Likewise, no keys are ever reported even after lookups.
        """
        return iter([])

    def __getitem__(self, item):
        # every key, of any type, maps to the constant
        return self._value
def dumpstacks(sig=None, frame=None):
    """ Signal handler: dump a stack trace for each existing thread.

    The ``sig``/``frame`` parameters match the signal-handler signature and
    are unused; the aggregated report is emitted via the module logger.
    """
    code = []
    def extract_stack(stack):
        # Yield traceback lines in the classic "File ..., line ..." format.
        for filename, lineno, name, line in traceback.extract_stack(stack):
            yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
            if line:
                yield "  %s" % (line.strip(),)
    # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
    # modified for python 2.5 compatibility
    threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
                        for th in threading.enumerate()])
    for threadId, stack in sys._current_frames().items():
        thread_info = threads_info.get(threadId)
        code.append("\n# Thread: %s (id:%s) (uid:%s)" %
                    (thread_info and thread_info['name'] or 'n/a',
                     threadId,
                     thread_info and thread_info['uid'] or 'n/a'))
        for line in extract_stack(stack):
            code.append(line)
    if openerp.evented:
        # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
        import gc
        from greenlet import greenlet
        for ob in gc.get_objects():
            if not isinstance(ob, greenlet) or not ob:
                continue
            code.append("\n# Greenlet: %r" % (ob,))
            for line in extract_stack(ob.gr_frame):
                code.append(line)
    _logger.info("\n".join(code))
class frozendict(dict):
    """ An implementation of an immutable dictionary. """
    # Every mutating dict method is overridden to raise, so an instance can
    # be shared (e.g. as a default value) without risk of modification.
    # NOTE: this only blocks mutation; it does not make instances hashable.
    def __delitem__(self, key):
        raise NotImplementedError("'__delitem__' not supported on frozendict")
    def __setitem__(self, key, val):
        raise NotImplementedError("'__setitem__' not supported on frozendict")
    def clear(self):
        raise NotImplementedError("'clear' not supported on frozendict")
    def pop(self, key, default=None):
        raise NotImplementedError("'pop' not supported on frozendict")
    def popitem(self):
        raise NotImplementedError("'popitem' not supported on frozendict")
    def setdefault(self, key, default=None):
        raise NotImplementedError("'setdefault' not supported on frozendict")
    def update(self, *args, **kwargs):
        raise NotImplementedError("'update' not supported on frozendict")
@contextmanager
def ignore(*exc):
    """Context manager silently swallowing the given exception classes.

    Example: ``with ignore(OSError): os.unlink(path)``.
    Equivalent in spirit to Python 3.4's ``contextlib.suppress``.
    """
    try:
        yield
    except exc:
        pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
# (newer werkzeug deprecates the explicit quote argument, so it is only
# passed to older versions where it is required to escape quotes).
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
    def html_escape(text):
        # old werkzeug: quote escaping must be requested explicitly
        return werkzeug.utils.escape(text, quote=True)
else:
    def html_escape(text):
        # modern werkzeug: passing quote would emit a DeprecationWarning
        return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hideaki-t/whoosh-igo | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# encoding: utf-8
# Packaging metadata for whoosh-igo: Japanese-language tokenizers for the
# Whoosh full-text search library.
import io
from setuptools import setup
setup(
    name='whoosh-igo',
    version='0.7',
    description='tokenizers for Whoosh designed for Japanese language',
    # PyPI long description: README followed by the changelog.
    long_description= io.open('README', encoding='utf-8').read() + "\n\n" + io.open('CHANGES', encoding='utf-8').read(),
    author='Hideaki Takahashi',
    author_email='mymelo@gmail.com',
    url='https://github.com/hideaki-t/whoosh-igo/',
    classifiers=['Development Status :: 4 - Beta',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: Apache Software License',
                 'Natural Language :: Japanese',
                 'Operating System :: OS Independent',
                 'Operating System :: Microsoft :: Windows',
                 'Operating System :: POSIX :: Linux',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3.3',
                 'Programming Language :: Python :: 3.4',
                 'Programming Language :: Python :: 3.5',
                 'Topic :: Scientific/Engineering :: Information Analysis',
                 'Topic :: Software Development :: Libraries :: Python Modules',
                 'Topic :: Text Processing :: Linguistic',
                 ],
    keywords=['japanese', 'tokenizer',],
    license='Apache License, Version 2.0',
    packages=['whooshjp'],
)
|
dubvulture/tensor_fcn | refs/heads/master | tensor_fcn/dataset_reader/__init__.py | 1 | from __future__ import absolute_import
from tensor_fcn.dataset_reader.ade_dataset import ADE_Dataset
|
ddki/my_study_project | refs/heads/master | language/python/frameworks/flask/venv/lib/python2.7/site-packages/setuptools/command/build_ext.py | 27 | import os
import sys
import itertools
import imp
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
try:
    # Attempt to use Cython for building extensions, if available
    from Cython.Distutils.build_ext import build_ext as _build_ext
    # Additionally, assert that the compiler module will load
    # also. Ref #1229.
    __import__('Cython.Compiler.Main')
except ImportError:
    # Cython absent or broken: fall back to the plain distutils command.
    _build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
def _customize_compiler_for_shlib(compiler):
    """Run distutils' compiler customization, with OSX-specific overrides
    so shared libraries are produced as .dylib files."""
    if sys.platform == "darwin":
        # building .dylib requires additional compiler flags on OSX; here we
        # temporarily substitute the pyconfig.h variables so that distutils'
        # 'customize_compiler' uses them before we build the shared libraries.
        tmp = _CONFIG_VARS.copy()
        try:
            # XXX Help! I don't have any idea whether these are right...
            _CONFIG_VARS['LDSHARED'] = (
                "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
            _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
            _CONFIG_VARS['SO'] = ".dylib"
            customize_compiler(compiler)
        finally:
            # always restore the original config vars, even on failure
            _CONFIG_VARS.clear()
            _CONFIG_VARS.update(tmp)
    else:
        customize_compiler(compiler)
# Platform capability flags:
#  - have_rtld: RTLD_NOW-style dynamic loading is available (via the old
#    'dl' module) -- enables the dlopen lines in generated stub loaders
#  - use_stubs: stub loaders are emitted for shared-library extensions
#  - libtype: how Library extensions are built ('shared' or 'static';
#    may be overridden to 'static' further below)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
    use_stubs = True
elif os.name != 'nt':
    try:
        import dl
        use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
    except ImportError:
        pass
# emit a stub-loader source line only when RTLD support is available
if_dl = lambda s: s if have_rtld else ''
def get_abi3_suffix():
    """Return the file extension for an abi3-compliant Extension()"""
    # Scan the interpreter's registered C-extension suffixes for the
    # stable-ABI one; implicitly returns None when there is no match.
    for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
        if '.abi3' in suffix:  # Unix
            return suffix
        elif suffix == '.pyd':  # Windows
            return suffix
class build_ext(_build_ext):
    """Setuptools' build_ext command: extends distutils/Cython build_ext with
    support for shared ``Library`` extensions, abi3 filenames and generated
    stub loaders."""
    def run(self):
        """Build extensions in build directory, then copy if --inplace"""
        old_inplace, self.inplace = self.inplace, 0
        _build_ext.run(self)
        self.inplace = old_inplace
        if old_inplace:
            self.copy_extensions_to_source()
    def copy_extensions_to_source(self):
        """Copy each built extension back next to its source package
        (used for --inplace builds)."""
        build_py = self.get_finalized_command('build_py')
        for ext in self.extensions:
            fullname = self.get_ext_fullname(ext.name)
            filename = self.get_ext_filename(fullname)
            modpath = fullname.split('.')
            package = '.'.join(modpath[:-1])
            package_dir = build_py.get_package_dir(package)
            dest_filename = os.path.join(package_dir,
                                         os.path.basename(filename))
            src_filename = os.path.join(self.build_lib, filename)
            # Always copy, even if source is older than destination, to ensure
            # that the right extensions for the current Python/platform are
            # used.
            copy_file(
                src_filename, dest_filename, verbose=self.verbose,
                dry_run=self.dry_run
            )
            if ext._needs_stub:
                self.write_stub(package_dir or os.curdir, ext, True)
    def get_ext_filename(self, fullname):
        """Compute the output filename, adjusting for abi3 extensions,
        shared Library objects and stub-loader ('dl-' prefixed) modules."""
        filename = _build_ext.get_ext_filename(self, fullname)
        if fullname in self.ext_map:
            ext = self.ext_map[fullname]
            use_abi3 = (
                six.PY3
                and getattr(ext, 'py_limited_api')
                and get_abi3_suffix()
            )
            if use_abi3:
                # swap the version-specific suffix for the stable-ABI one
                so_ext = _get_config_var_837('EXT_SUFFIX')
                filename = filename[:-len(so_ext)]
                filename = filename + get_abi3_suffix()
            if isinstance(ext, Library):
                fn, ext = os.path.splitext(filename)
                return self.shlib_compiler.library_filename(fn, libtype)
            elif use_stubs and ext._links_to_dynamic:
                d, fn = os.path.split(filename)
                return os.path.join(d, 'dl-' + fn)
        return filename
    def initialize_options(self):
        """Reset command state; extends the distutils defaults with
        shared-library bookkeeping."""
        _build_ext.initialize_options(self)
        self.shlib_compiler = None
        self.shlibs = []
        self.ext_map = {}
    def finalize_options(self):
        """Resolve options and annotate each extension with the private
        attributes (_full_name, _links_to_dynamic, _needs_stub, _file_name)
        the rest of the command relies on."""
        _build_ext.finalize_options(self)
        self.extensions = self.extensions or []
        self.check_extensions_list(self.extensions)
        self.shlibs = [ext for ext in self.extensions
                       if isinstance(ext, Library)]
        if self.shlibs:
            self.setup_shlib_compiler()
        for ext in self.extensions:
            ext._full_name = self.get_ext_fullname(ext.name)
        for ext in self.extensions:
            fullname = ext._full_name
            self.ext_map[fullname] = ext
            # distutils 3.1 will also ask for module names
            # XXX what to do with conflicts?
            self.ext_map[fullname.split('.')[-1]] = ext
            ltd = self.shlibs and self.links_to_dynamic(ext) or False
            ns = ltd and use_stubs and not isinstance(ext, Library)
            ext._links_to_dynamic = ltd
            ext._needs_stub = ns
            filename = ext._file_name = self.get_ext_filename(fullname)
            libdir = os.path.dirname(os.path.join(self.build_lib, filename))
            if ltd and libdir not in ext.library_dirs:
                ext.library_dirs.append(libdir)
            if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
                ext.runtime_library_dirs.append(os.curdir)
    def setup_shlib_compiler(self):
        """Create and configure the dedicated compiler used to build
        shared ``Library`` extensions."""
        compiler = self.shlib_compiler = new_compiler(
            compiler=self.compiler, dry_run=self.dry_run, force=self.force
        )
        _customize_compiler_for_shlib(compiler)
        if self.include_dirs is not None:
            compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                compiler.undefine_macro(macro)
        if self.libraries is not None:
            compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            compiler.set_link_objects(self.link_objects)
        # hack so distutils' build_extension() builds a library instead
        compiler.link_shared_object = link_shared_object.__get__(compiler)
    def get_export_symbols(self, ext):
        """Library extensions export their own symbol list; others use
        the distutils default (exporting the init function)."""
        if isinstance(ext, Library):
            return ext.export_symbols
        return _build_ext.get_export_symbols(self, ext)
    def build_extension(self, ext):
        """Build one extension, temporarily swapping in the shared-library
        compiler for ``Library`` instances and emitting a stub if needed."""
        ext._convert_pyx_sources_to_lang()
        _compiler = self.compiler
        try:
            if isinstance(ext, Library):
                self.compiler = self.shlib_compiler
            _build_ext.build_extension(self, ext)
            if ext._needs_stub:
                cmd = self.get_finalized_command('build_py').build_lib
                self.write_stub(cmd, ext)
        finally:
            # always restore the regular compiler
            self.compiler = _compiler
    def links_to_dynamic(self, ext):
        """Return true if 'ext' links to a dynamic lib in the same package"""
        # XXX this should check to ensure the lib is actually being built
        # XXX as dynamic, and not just using a locally-found version or a
        # XXX static-compiled version
        libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
        pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
        return any(pkg + libname in libnames for libname in ext.libraries)
    def get_outputs(self):
        """Add generated stub files to the distutils output list."""
        return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
    def __get_stubs_outputs(self):
        # assemble the base name for each extension that needs a stub
        ns_ext_bases = (
            os.path.join(self.build_lib, *ext._full_name.split('.'))
            for ext in self.extensions
            if ext._needs_stub
        )
        # pair each base with the extension
        pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
        return list(base + fnext for base, fnext in pairs)
    def __get_output_extensions(self):
        # all file extensions a stub may be written/compiled to
        yield '.py'
        yield '.pyc'
        if self.get_finalized_command('build_py').optimize:
            yield '.pyo'
    def write_stub(self, output_dir, ext, compile=False):
        """Write the .py stub loader that imports the dynamic module at
        runtime; optionally byte-compile it and drop the source."""
        log.info("writing stub loader for %s to %s", ext._full_name,
                 output_dir)
        stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
                     '.py')
        if compile and os.path.exists(stub_file):
            raise DistutilsError(stub_file + " already exists! Please delete.")
        if not self.dry_run:
            f = open(stub_file, 'w')
            f.write(
                '\n'.join([
                    "def __bootstrap__():",
                    "   global __bootstrap__, __file__, __loader__",
                    "   import sys, os, pkg_resources, imp" + if_dl(", dl"),
                    "   __file__ = pkg_resources.resource_filename"
                    "(__name__,%r)"
                    % os.path.basename(ext._file_name),
                    "   del __bootstrap__",
                    "   if '__loader__' in globals():",
                    "       del __loader__",
                    if_dl("   old_flags = sys.getdlopenflags()"),
                    "   old_dir = os.getcwd()",
                    "   try:",
                    "     os.chdir(os.path.dirname(__file__))",
                    if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
                    "     imp.load_dynamic(__name__,__file__)",
                    "   finally:",
                    if_dl("     sys.setdlopenflags(old_flags)"),
                    "     os.chdir(old_dir)",
                    "__bootstrap__()",
                    "" # terminal \n
                ])
            )
            f.close()
        if compile:
            from distutils.util import byte_compile
            byte_compile([stub_file], optimize=0,
                         force=True, dry_run=self.dry_run)
            optimize = self.get_finalized_command('install_lib').optimize
            if optimize > 0:
                byte_compile([stub_file], optimize=optimize,
                             force=True, dry_run=self.dry_run)
            if os.path.exists(stub_file) and not self.dry_run:
                os.unlink(stub_file)
# Choose the link_shared_object implementation bound onto the shlib
# compiler in setup_shlib_compiler() above.
if use_stubs or os.name == 'nt':
    # Build shared libraries
    #
    def link_shared_object(
            self, objects, output_libname, output_dir=None, libraries=None,
            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
            target_lang=None):
        # Delegate to the compiler's generic link step as a shared library.
        self.link(
            self.SHARED_LIBRARY, objects, output_libname,
            output_dir, libraries, library_dirs, runtime_library_dirs,
            export_symbols, debug, extra_preargs, extra_postargs,
            build_temp, target_lang
        )
else:
    # Build static libraries everywhere else
    libtype = 'static'
    def link_shared_object(
            self, objects, output_libname, output_dir=None, libraries=None,
            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
            target_lang=None):
        # XXX we need to either disallow these attrs on Library instances,
        # or warn/abort here if set, or something...
        # libraries=None, library_dirs=None, runtime_library_dirs=None,
        # export_symbols=None, extra_preargs=None, extra_postargs=None,
        # build_temp=None
        assert output_dir is None  # distutils build_ext doesn't pass this
        output_dir, filename = os.path.split(output_libname)
        basename, ext = os.path.splitext(filename)
        if self.library_filename("x").startswith('lib'):
            # strip 'lib' prefix; this is kludgy if some platform uses
            # a different prefix
            basename = basename[3:]
        self.create_static_lib(
            objects, basename, output_dir, debug, target_lang
        )
def _get_config_var_837(name):
"""
In https://github.com/pypa/setuptools/pull/837, we discovered
Python 3.3.0 exposes the extension suffix under the name 'SO'.
"""
if sys.version_info < (3, 3, 1):
name = 'SO'
return get_config_var(name)
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/mobile/shared_shear_mite_broodling.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the 'shear mite broodling' creature template.

    :param kernel: the SWGpy kernel passed by the template loader
                   (unused directly here, kept for the loader's API).
    """
    result = Creature()
    result.template = "object/mobile/shared_shear_mite_broodling.iff"
    result.attribute_template_id = 9
    result.stfName("monster_name","shear_mite")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    # NOTE: removed a stray trailing '|' token after this return, which made
    # the module unparsable.
    return result
davgibbs/django | refs/heads/master | tests/unmanaged_models/tests.py | 296 | from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):
    """Tests that managed and unmanaged model pairs map onto the same tables."""
    def test_simple(self):
        """
        The main test here is that all the models can be created without
        any database errors. We also do some simple insertion and lookup
        tests to show that the second set of models does refer to the
        tables from the first set.
        """
        # Insert some data into one set of models.
        a = A01.objects.create(f_a="foo", f_b=42)
        B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
        c = C01.objects.create(f_a="barney", f_b=1)
        c.mm_a = [a]
        # ... and pull it out via the other set.
        a2 = A02.objects.all()[0]
        self.assertIsInstance(a2, A02)
        self.assertEqual(a2.f_a, "foo")
        b2 = B02.objects.all()[0]
        self.assertIsInstance(b2, B02)
        self.assertEqual(b2.f_a, "fred")
        self.assertIsInstance(b2.fk_a, A02)
        self.assertEqual(b2.fk_a.f_a, "foo")
        self.assertEqual(list(C02.objects.filter(f_a=None)), [])
        resp = list(C02.objects.filter(mm_a=a.id))
        self.assertEqual(len(resp), 1)
        self.assertIsInstance(resp[0], C02)
        self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
    """Check when a many-to-many intermediary table is (not) created."""

    def test_many_to_many_between_unmanaged(self):
        """
        The intermediary table between two unmanaged models should not be created.
        """
        m2m_table = Unmanaged2._meta.get_field('mm').m2m_db_table()
        existing_tables = connection.introspection.table_names()
        self.assertNotIn(m2m_table, existing_tables, "Table '%s' should not exist, but it does." % m2m_table)

    def test_many_to_many_between_unmanaged_and_managed(self):
        """
        An intermediary table between a managed and an unmanaged model should be created.
        """
        m2m_table = Managed1._meta.get_field('mm').m2m_db_table()
        existing_tables = connection.introspection.table_names()
        self.assertIn(m2m_table, existing_tables, "Table '%s' does not exist." % m2m_table)
|
zzrcxb/Pugoo | refs/heads/master | DataManagement/gobase.py | 1 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import *
TableBase = declarative_base()
class GoBase(TableBase):
    """ORM row describing one stored Go game record (table ``training``)."""

    __tablename__ = 'training'

    filehash = Column(String(44), primary_key=True)    # unique content hash
    fileformat = Column(String(10), default=None)
    filesource = Column(String(64), default=None)
    rawfilepath = Column(String(256), nullable=False)  # path to the raw record
    size = Column(Integer, nullable=False)
    rule = Column(String(32), nullable=False)
    komi = Column(Float, nullable=False)
    result = Column(Float, nullable=False)
    handicap = Column(Float, nullable=False)

    def __repr__(self):
        # Bug fix: this previously read ``self.fileformatm`` (a typo), which
        # raised AttributeError whenever an instance was repr()'d or printed.
        return '<GoBase(filehash = %s, fileformat = %s, filesource = %s, rawfilepath = %s, ' \
               'size = %s, rule = %s, komi = %s, result = %s, handicap=%s)>' \
               % \
               (self.filehash, self.fileformat, self.filesource,
                self.rawfilepath, self.size, self.rule, self.komi, self.result, self.handicap)

    def __str__(self):
        return self.__repr__()
|
Lagfix/skynet | refs/heads/master | study.py | 1 | # Hello World 파이썬 테스트
# study.py
# Hello-World style Python test script; prints a Korean greeting
# ("Hello, this is a test.") to stdout.
print("안녕하세요 테스트 입니다.")
|
RentennaDev/partial | refs/heads/master | partial/templating.py | 1 | from jinja2 import Environment, BaseLoader, TemplateNotFound
from partial.bundleComponent import BundleComponent
from partial.scanner import ClassNotFound
class TemplateBundleComponent(BundleComponent):
    """Bundle component registering template sources under the 'template'
    component type (looked up by name in _PartialLoader.get_source)."""
    type = 'template'
def renderTemplate(template, context=None):
    """Render the named bundle template with *context* (a dict, or None)."""
    render_context = context if context is not None else {}
    compiled = env.get_template(template)
    return compiled.render(**render_context)
class _PartialLoader(BaseLoader):
    """Jinja2 loader that resolves template names from partial's bundle
    component registry instead of the filesystem."""

    def get_source(self, environment, template):
        # Local import — presumably to avoid a circular dependency at
        # module load time; confirm against package layout.
        from partial import scanner
        try:
            component = scanner.getBundleComponent('template', template)
            # Jinja2 contract: (source, filename, uptodate). No backing
            # file, and the lambda marks the source as always up to date.
            return (component, None, lambda: True)
        except ClassNotFound:
            raise TemplateNotFound(template)
env = Environment(
loader=_PartialLoader(),
)
from partial import routing
env.globals['url'] = routing.generate
def _partial(partialName, **kwargs):
    """Template global: render the named partial, passing the keyword
    arguments through as its context."""
    # Local import — presumably avoids a circular dependency; confirm.
    from partial import render
    return render.partial(partialName, kwargs)
env.globals['partial'] = _partial
def _plural(number, singular, plural=None):
    """Template global: format a count with its correctly pluralized noun,
    e.g. (2, 'cat') -> '2 cats'."""
    noun = _pluralName(number, singular, plural)
    return "%s %s" % (number, noun)
env.globals['plural'] = _plural
def _pluralName(number, singular, plural=None):
if number == 1:
return singular
elif plural is None:
return "%ss" % singular
else:
return plural
env.globals['pluralName'] = _pluralName
def _nullable(val):
    """Jinja2 ``finalize`` hook: render None as the empty string and decode
    byte strings as UTF-8 (replacing undecodable bytes)."""
    if val is None:
        return ""
    elif isinstance(val, str):
        # NOTE(review): ``unicode`` exists only on Python 2; on Python 3
        # this branch would raise NameError — confirm intended runtime.
        return unicode(val, encoding='utf-8', errors='replace')
    else:
        return val
env.finalize = _nullable |
RedhawkSDR/integration-gnuhawk | refs/heads/master | components/streams_to_stream_cc_1i/tests/test_streams_to_stream_cc_1i.py | 1 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
    """Test for all component implementations in streams_to_stream_cc_1i"""

    def testScaBasicBehavior(self):
        """Launch the component and verify SCA-required basics: property
        query coverage, port availability, start/stop and teardown."""
        #######################################################################
        # Launch the component with the default execparams
        execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
        execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
        self.launch(execparams)

        #######################################################################
        # Verify the basic state of the component
        self.assertNotEqual(self.comp, None)
        self.assertEqual(self.comp.ref._non_existent(), False)
        self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)

        #######################################################################
        # Validate that query returns all expected parameters
        # Query of '[]' should return the following set of properties
        expectedProps = []
        expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
        expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
        props = self.comp.query([])
        props = dict((x.id, any.from_any(x.value)) for x in props)
        # Query may return more than expected, but not less
        for expectedProp in expectedProps:
            # Membership test instead of dict.has_key(): identical behavior
            # on Python 2 and still valid on Python 3 (has_key was removed).
            self.assertEquals(expectedProp.id in props, True)

        #######################################################################
        # Verify that all expected ports are available
        for port in self.scd.get_componentfeatures().get_ports().get_uses():
            port_obj = self.comp.getPort(str(port.get_usesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)

        for port in self.scd.get_componentfeatures().get_ports().get_provides():
            port_obj = self.comp.getPort(str(port.get_providesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a(port.get_repid()), True)

        #######################################################################
        # Make sure start and stop can be called without throwing exceptions
        self.comp.start()
        self.comp.stop()

        #######################################################################
        # Simulate regular component shutdown
        self.comp.releaseObject()

    # TODO Add additional tests here
    #
    # See:
    #   ossie.utils.bulkio.bulkio_helpers,
    #   ossie.utils.bluefile.bluefile_helpers
    # for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../streams_to_stream_cc_1i.spd.xml") # By default tests all implementations
|
timblechmann/supercollider | refs/heads/novacollider/tip | editors/sced/scedwin/py/ConfigurationDialog.py | 44 | # sced (SuperCollider mode for gedit)
#
# Copyright 2012 Jakob Leben
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
from Settings import Settings
def on_pref_widget_notify_sensitive(widget, spec):
    """Keep a preference widget's attached label in sync with the widget's
    own sensitivity state."""
    linked_label = widget.get_data("pref-label")
    if linked_label is None:
        return
    linked_label.set_sensitive(widget.props.sensitive)
# FIXME: implement custom widget (or custom widget sequence) as well
def create_pref_section(title, wlabels=[], custom=[]):
    """Build a preferences section: a bold *title* above a two-column table
    of label/widget rows.

    wlabels -- list of (label_text, widget) pairs; a None label makes the
               widget span both columns.
    custom  -- reserved for future custom widgets; only its length
               contributes to the table's row count here.
    NOTE(review): the mutable list defaults are read-only in this function,
    so they are not the classic shared-default bug — but worth confirming
    no caller mutates them.
    """
    vbox = gtk.VBox(spacing=6)

    label = gobject.new(gtk.Label, label="<b>%s</b>" % title,
                        use_markup=True,
                        xalign=0)
    vbox.pack_start(label, expand=False)
    label.show()

    # Indent the table contents relative to the section title.
    align = gobject.new(gtk.Alignment, left_padding=12)
    vbox.pack_start(align, expand=False)
    align.show()

    table = gobject.new(gtk.Table,
                        n_rows=len(wlabels) + len(custom),
                        n_columns=2,
                        row_spacing=6,
                        column_spacing=12)
    align.add(table)
    table.show()

    for i in range(len(wlabels)):
        l, widget = wlabels[i]
        label = gobject.new(gtk.Label, label=l, xalign=0)
        # Mirror the widget's sensitivity onto its label (see
        # on_pref_widget_notify_sensitive).
        widget.connect("notify::sensitive", on_pref_widget_notify_sensitive)
        widget.set_data("pref-label", label)
        if l is not None:
            table.attach(label, 0, 1, i, i + 1,
                         xoptions=gtk.FILL, yoptions=gtk.FILL)
            table.attach(widget, 1, 2, i, i + 1,
                         xoptions=gtk.EXPAND | gtk.FILL, yoptions=gtk.FILL)
        else:
            # No label: let the widget occupy both columns.
            table.attach(widget, 0, 2, i, i + 1,
                         xoptions=gtk.EXPAND | gtk.FILL, yoptions=gtk.FILL)

    table.show_all()
    return vbox
# FIXME: fix notification
class ConfigurationDialog(gtk.Dialog):
    """Modal dialog for editing sced's settings: the SuperCollider folder,
    plus optional advanced interpreter command and runtime folder."""

    __gsignals__ = {
        "response": "override",
    } # __gsignals__

    def __init__(self, plugin):
        gtk.Dialog.__init__(self, title=_("Sced configuration"),
                            flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                            buttons = (
                                gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                                gtk.STOCK_OK, gtk.RESPONSE_ACCEPT
                            ))
        self.set_default_response(gtk.RESPONSE_ACCEPT)
        # Settings object shared with the plugin; written back on OK.
        self.__settings = plugin.settings()
        self.__create_page_general()

    def __create_filesystem_entry( self, chooser, action, stock = gtk.STOCK_OPEN ):
        """Return (box, entry): a text entry plus a button that opens the
        shared file *chooser* with *action* and copies the selection back
        into the entry."""
        entry = gtk.Entry()
        btn = gtk.Button(stock=stock)
        box = gtk.HBox()
        box.add(entry)
        box.add(btn)

        def run_dialog(btn):
            # Seed the chooser with the entry's current path.
            chooser.set_action(action)
            chooser.set_filename(entry.get_text())
            response = chooser.run()
            chooser.hide()
            if response == gtk.RESPONSE_ACCEPT:
                entry.set_text(chooser.get_filename())

        btn.connect("clicked", run_dialog)
        return (box, entry)

    def __create_page_general(self):
        """Build the dialog's single page: create widgets, fill them from
        settings, wire up the basic/advanced toggle, and lay them out."""
        # create views
        chooser = gtk.FileChooserDialog(
            parent = self,
            title = "Choose interpreter program",
            buttons = (
                gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                gtk.STOCK_OK, gtk.RESPONSE_ACCEPT
            )
        )
        chooser.set_select_multiple(False)

        sc_dir_view, sc_dir_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER )

        adv_view = gtk.CheckButton()

        cmd_view, cmd_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_OPEN )

        wd_view, wd_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER )

        def toggle_advanced(advanced):
            # Basic folder entry and advanced entries are mutually exclusive.
            sc_dir_view.set_sensitive(not advanced)
            cmd_view.set_sensitive(advanced)
            wd_view.set_sensitive(advanced)

        adv_view.connect("toggled", lambda btn: toggle_advanced(btn.get_active()) )

        # fill in the data
        sets = self.__settings
        if sets.sc_dir is not None:
            sc_dir_entry.set_text(sets.sc_dir)
        adv_view.set_active(sets.advanced is True)
        if sets.sclang_cmd is not None:
            cmd_entry.set_text(sets.sclang_cmd)
        if sets.sclang_work_dir is not None:
            wd_entry.set_text(sets.sclang_work_dir)

        toggle_advanced(sets.advanced is True)

        # Keep references for do_response().
        self.__adv_check = adv_view
        self.__sc_dir_entry = sc_dir_entry
        self.__cmd_entry = cmd_entry
        self.__wd_entry = wd_entry

        # lay out
        section = create_pref_section("Basic", [
            ("SuperCollider folder:", sc_dir_view),
            ("Advanced settings:", adv_view),
        ])
        section.props.border_width = 12
        self.vbox.add(section)
        section.show()

        section = create_pref_section("Interpreter options", [
            ("Command:", cmd_view),
            ("Runtime folder:", wd_view)
        ])
        section.props.border_width = 12
        self.vbox.add(section)
        section.show()

    def do_response(self, response):
        """Persist the settings on OK; destroy the dialog in either case."""
        if response == gtk.RESPONSE_ACCEPT:
            sets = self.__settings
            sets.sc_dir = self.__sc_dir_entry.get_text()
            sets.advanced = self.__adv_check.get_active()
            sets.sclang_work_dir = self.__wd_entry.get_text()
            sets.sclang_cmd = self.__cmd_entry.get_text()
            sets.save()
        self.destroy()
|
rlugojr/django | refs/heads/master | tests/forms_tests/widget_tests/test_selectdatewidget.py | 24 | from datetime import date
from django.forms import DateField, Form, SelectDateWidget
from django.test import override_settings
from django.utils import translation
from django.utils.dates import MONTHS_AP
from .base import WidgetTest
class SelectDateWidgetTest(WidgetTest):
maxDiff = None
widget = SelectDateWidget(
years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'),
)
def test_render_empty(self):
self.check_html(self.widget, 'mydate', '', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_render_none(self):
"""
Rendering the None or '' values should yield the same output.
"""
self.assertHTMLEqual(
self.widget.render('mydate', None),
self.widget.render('mydate', ''),
)
def test_render_string(self):
self.check_html(self.widget, 'mydate', '2010-04-15', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected>April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected>15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_render_datetime(self):
self.assertHTMLEqual(
self.widget.render('mydate', date(2010, 4, 15)),
self.widget.render('mydate', '2010-04-15'),
)
def test_render_invalid_date(self):
"""
Invalid dates should still render the failed date.
"""
self.check_html(self.widget, 'mydate', '2010-02-31', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected>February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected>31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
))
def test_custom_months(self):
widget = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.check_html(widget, 'mydate', '', html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>
"""
))
def test_selectdate_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
def test_selectdate_empty_label(self):
w = SelectDateWidget(years=('2014',), empty_label='empty_label')
# Rendering the default state with empty_label setted as string.
self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
# Rendering the default state with empty_label tuple.
self.assertHTMLEqual(
w.render('mydate', ''),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>
""",
)
with self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.'):
SelectDateWidget(years=('2014',), empty_label=('not enough', 'values'))
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n(self):
w = SelectDateWidget(
years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016')
)
self.assertEqual(
w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-2010',
)
self.assertHTMLEqual(
w.render('date', '13-08-2010'),
"""
<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected>13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected>augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Even with an invalid date, the widget should reflect the entered value (#17401).
self.assertEqual(w.render('mydate', '2010-02-30').count('selected'), 3)
# Years before 1900 should work.
w = SelectDateWidget(years=('1899',))
self.assertEqual(
w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-1899',
)
def test_value_omitted_from_data(self):
self.assertIs(self.widget.value_omitted_from_data({}, {}, 'field'), True)
self.assertIs(self.widget.value_omitted_from_data({'field_month': '12'}, {}, 'field'), False)
self.assertIs(self.widget.value_omitted_from_data({'field_year': '2000'}, {}, 'field'), False)
self.assertIs(self.widget.value_omitted_from_data({'field_day': '1'}, {}, 'field'), False)
data = {'field_day': '1', 'field_month': '12', 'field_year': '2000'}
self.assertIs(self.widget.value_omitted_from_data(data, {}, 'field'), False)
|
Spanarchie/pyRest | refs/heads/master | pyRest/lib/python2.7/site-packages/pkg_resources.py | 64 | """Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, time, re, imp, types, zipfile, zipimport
import warnings
import stat
# --- Python 2/3 compatibility shims ---------------------------------------

try:
    # Python 2 location.
    from urlparse import urlparse, urlunparse
except ImportError:
    # Python 3 location.
    from urllib.parse import urlparse, urlunparse

try:
    frozenset
except NameError:
    # Pre-2.4 fallback: frozenset was not yet a builtin.
    from sets import ImmutableSet as frozenset

try:
    # Python 2 branch: 'basestring' only exists on Python 2.
    basestring
    next = lambda o: o.next()
    from cStringIO import StringIO as BytesIO
    def exec_(code, globs=None, locs=None):
        # Python 2 equivalent of Python 3's exec(code, globs, locs).
        if globs is None:
            # Default to the calling frame's namespace.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
except NameError:
    # Python 3 branch.
    basestring = str
    from io import BytesIO
    exec_ = eval("exec")
    def execfile(fn, globs=None, locs=None):
        # Re-implementation of Python 2's execfile() builtin.
        if globs is None:
            globs = globals()
        if locs is None:
            locs = globs
        exec_(compile(open(fn).read(), fn, 'exec'), globs, locs)
import functools
reduce = functools.reduce
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
    """Sandbox-bypassing version of ensure_directory(): recursively create
    the parent directory of *name* using the captured os primitives."""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    parent, leaf = split(name)
    if not (parent and leaf) or isdir(parent):
        return
    _bypass_ensure_directory(parent)
    mkdir(parent, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
    """Define module-level state variables and record their type tag in
    _state_vars so __getstate__/__setstate__ can snapshot/restore them."""
    module_globals = globals()
    for var_name, value in kw.items():
        module_globals[var_name] = value
        _state_vars[var_name] = vartype
def __getstate__():
    """Snapshot every registered module state variable into a dict."""
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        # v is the type tag ('dict', 'object', ...); dispatch to _sget_<v>.
        state[k] = g['_sget_'+v](g[k])
    return state
def __setstate__(state):
    """Restore module state previously captured by __getstate__()."""
    g = globals()
    for k, v in state.items():
        # Dispatch to the _sset_<type tag> restorer for each variable.
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
    """Snapshot a dict-typed state variable (shallow copy)."""
    return val.copy()

def _sset_dict(key, ob, state):
    """Restore a dict-typed state variable in place."""
    ob.clear()
    ob.update(state)

def _sget_object(val):
    """Snapshot an object that manages its own state."""
    return val.__getstate__()

def _sset_object(key, ob, state):
    """Restore an object that manages its own state."""
    ob.__setstate__(state)

# 'none'-typed state variables are neither saved nor restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform(); m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            # Replace the build-time OS version with the running OS version.
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            pass # not Mac OS X
    return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base class for errors raised during dependency resolution."""

    def __repr__(self):
        # e.g. "DistributionNotFound(Requirement.parse('foo'),)"
        return self.__class__.__name__ + repr(self.args)


class VersionConflict(ResolutionError):
    """The version already installed/active conflicts with the one requested."""


class DistributionNotFound(ResolutionError):
    """No distribution could be located that satisfies the requirement."""


class UnknownExtra(ResolutionError):
    """The distribution does not define the named "extra feature"."""
# Maps a PEP 302 loader type (or None, for plain filesystem imports) to a
# factory that builds an IResourceProvider for modules using that loader.
# Populated via register_loader_type().
_provider_factories = {}

# Major Python version string, e.g. "2.7" (first three chars of sys.version).
PY_MAJOR = sys.version[:3]

# Distribution "precedence" constants: when several distributions satisfy the
# same requirement, higher values are preferred.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` as the provider maker for `loader_type`.

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``;
    `provider_factory` is a callable that, given a *module* object, returns
    an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement."""
    if isinstance(moduleOrReq, Requirement):
        # Resolve the requirement against the global working set, activating
        # it on demand if it is not already active.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # Otherwise it is a module name: make sure it is imported, then adapt
    # its loader to a provider.
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions.

    XXX Currently this is the same as ``distutils.util.get_platform()``,
    but it needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    # Older Pythons on Mac OS X report a bare "darwin"-style platform;
    # rebuild a "macosx-<major>.<minor>-<arch>" string from system info.
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        return "macosx-%d.%d-%s" % (
            int(version[0]), int(version[1]), _macosx_arch(machine))
    except ValueError:
        # Someone running a non-Mac darwin system: fall through to the
        # default platform string.
        return plat
# Matches modern egg platform strings like "macosx-10.6-intel";
# groups are (major, minor, arch).
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
# Matches legacy pre-setuptools-0.6 strings like "darwin-8.11.1-i386";
# groups are the darwin *kernel* version components plus the arch.
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    # Unspecified platforms are compatible with anything; identical
    # platforms are trivially compatible.
    if provided is None or required is None or provided == required:
        return True

    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False

    # Mac OS X special cases
    provMac = macosVersionString.match(provided)
    if not provMac:
        # Backwards compatibility for packages built before setuptools 0.6,
        # which used a "darwin-<kernel>" designation instead of "macosx-".
        provDarwin = darwinVersionString.match(provided)
        if provDarwin:
            dversion = int(provDarwin.group(1))
            macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
            if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                #import warnings
                #warnings.warn("Mac eggs should be rebuilt to "
                #    "use the macosx designation instead of darwin.",
                #    category=DeprecationWarning)
                return True
        # egg isn't macosx or legacy darwin
        return False

    # Same OS major version and machine type?
    if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
        return False

    # The required OS minor update must be >= the provided one.
    if int(provMac.group(2)) > int(reqMac.group(2)):
        return False

    return True
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script."""
    # Run the script in the *caller's* global namespace, wiped clean except
    # for its __name__, so the script behaves like a __main__ module.
    caller_globals = sys._getframe(1).f_globals
    preserved_name = caller_globals['__name__']
    caller_globals.clear()
    caller_globals['__name__'] = preserved_name
    require(dist_spec)[0].run_script(script_name, caller_globals)

run_main = run_script   # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string."""
    # Progressively coerce: string -> Requirement -> Distribution.
    if isinstance(dist, basestring):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist`, or raise ImportError."""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map."""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``."""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface specification for objects exposing distribution metadata.

    NOTE: the methods below deliberately omit ``self`` — this class is an
    interface description (documentation of the expected contract), not a
    base class intended to be instantiated or called directly.
    """

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources.

    Like ``IMetadataProvider``, this is an interface specification; the
    methods omit ``self`` on purpose.
    """

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: ordered path items (duplicates allowed, mirroring sys.path)
        # entry_keys: path entry -> list of distribution keys found on it
        # by_key: distribution key -> the single "active" Distribution
        # callbacks: subscribers notified whenever a distribution is added
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req)     # XXX add more info
        else:
            return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # The script executes in the *caller's* globals, wiped clean except
        # for __name__, so it behaves like a freshly started __main__.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set.  If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)

        if entry is None:
            entry = dist.location
        # Index the distribution under both the given entry and its own
        # location, so lookups by either path find it.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return      # ignore hidden distros

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers registered via subscribe().
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        # Work off a reversed copy so .pop(0) processes the original order.
        requirements = list(requirements)[::-1]  # set up the stack
        processed = {}  # set of processed requirements
        best = {}  # key -> dist
        to_activate = []

        while requirements:
            req = requirements.pop(0)   # process dependencies breadth-first
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        #       "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)

                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            # Queue this dist's own requirements (reversed so pop(0) keeps
            # them in declared order).
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True

        return to_activate    # return list of distros to activate

    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
    ):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions)  # add plugins+libs to sys.path
            print 'Could not load', errors        # display errors

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories.  The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method.  The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort()  # scan project names in alphabetic order

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        # shadow_set: a scratch copy of this working set, so trial resolution
        # of each plugin can't disturb the real one.
        shadow_set = self.__class__([])
        list(map(shadow_set.add, self))   # put all our entries in shadow_set

        for project_name in plugin_projects:

            for dist in plugin_env[project_name]:

                req = [dist.as_requirement()]

                try:
                    resolvees = shadow_set.resolve(req, env, installer)

                except ResolutionError:
                    v = sys.exc_info()[1]
                    error_info[dist] = v    # save error info
                    if fallback:
                        continue    # try the next older version of project
                    else:
                        break       # give up on this project, keep going

                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))

                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))

        for dist in needed:
            self.add(dist)

        return needed

    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # Fan a newly activated distribution out to all subscribers.
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        # Pickle/copy support: snapshot all four state containers.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    # NOTE(review): both defaults below are evaluated once, at class
    # definition time (standard Python default-argument semantics), so the
    # "current platform" is the one detected at import time.
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # _distmap: lowercased project key -> list of Distributions
        # _cache: project key -> same list, kept sorted newest-first
        self._distmap = {}
        self._cache = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Keys are stored lowercased; normalize before the real lookup.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []

        if project_name not in self._cache:
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)

        return self._cache[project_name]

    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                # Keep any cached, sorted view up to date.
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])

    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The merged environment filters nothing (platform/python None) so
        # every distribution from both operands is retained.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new

AvailableDistributions = Environment    # XXX backward compatibility
class ExtractionError(RuntimeError):
    """Raised when a resource cannot be extracted to the egg cache.

    Instances carry extra context attributes set by the resource manager:

    manager
        the resource manager that raised this exception

    cache_path
        the base directory for resource extraction

    original_error
        the underlying exception that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""

    # Base path for extracted resources; None means "use get_default_cache()".
    extraction_path = None

    def __init__(self):
        # Tracks every path handed out by get_cache_path(), for cleanup.
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap whatever exception is currently being handled in an
        # ExtractionError that carries the cache path for diagnostics.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()

        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory?  You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            # Convert any failure into a descriptive ExtractionError.
            self.extraction_error()

        self._warn_unsafe_extraction_path(extract_path)

        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """

        if os.name == 'posix':
            # Make the resource executable
            # 0x16D == 0555, 0xFFF == 07777 (hex literals for py3 source compat)
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF  # 0555, 07777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )

        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX  -- not implemented; currently a no-op returning None.
def get_default_cache():
    """Determine the default egg-cache location.

    Returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, returns a "Python-Eggs" subdirectory of the
    first available "Application Data"-style directory; on all other
    systems, returns ``~/.python-eggs``.

    Raises RuntimeError on Windows when none of the candidate home
    directory environment variables is set.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass

    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')

    app_data = 'Application Data'   # XXX this may be locale-specific!
    # Candidate locations, best first: each entry is a tuple of environment
    # variables whose values are joined, plus an optional subdirectory.
    app_homes = [
        (('APPDATA',), None),       # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data),    # 95/98/ME
    ]

    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                # A required variable is missing; try the next candidate.
                break
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # Fixed typo in user-facing message: "enviroment" -> "environment".
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any run of characters other than letters, digits, and ``.`` is
    collapsed into a single ``-``.
    """
    illegal_run = re.compile('[^A-Za-z0-9.]+')
    return illegal_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string.

    Spaces become dots; every other run of non-alphanumeric, non-dot
    characters is collapsed into a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.compile('[^A-Za-z0-9.]+').sub('-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Runs of characters other than letters, digits, and ``.`` become a
    single ``_``; the result is always lowercased.
    """
    sanitized = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any ``-`` characters are currently replaced with ``_``.
    """
    return '_'.join(name.split('-'))
# Names usable in PEP 426 environment markers, grouped by the dotted prefix
# they appear under (e.g. "os.name", "platform.version"); entries with an
# empty list are bare names ("python_version", "extra", ...).
_marker_names = {
    'os': ['name'], 'sys': ['platform'],
    'platform': ['version','machine','python_implementation'],
    'python_version': [], 'python_full_version': [], 'extra':[],
}

# Lazily-evaluated current values for each underscore-form marker name.
_marker_values = {
    'os_name': lambda: os.name,
    'sys_platform': lambda: sys.platform,
    'python_full_version': lambda: sys.version.split()[0],
    'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
    'platform_version': lambda: _platinfo('version'),
    'platform_machine': lambda: _platinfo('machine'),
    'python_implementation': lambda: _platinfo('python_implementation') or _pyimp(),
}
def _platinfo(attr):
try:
import platform
except ImportError:
return ''
return getattr(platform, attr, lambda:'')()
def _pyimp():
if sys.platform=='cli':
return 'IronPython'
elif sys.platform.startswith('java'):
return 'Jython'
elif '__pypy__' in sys.builtin_module_names:
return 'PyPy'
else:
return 'CPython'
def invalid_marker(text):
    """Validate `text` as a PEP 426 environment marker.

    Returns the SyntaxError instance if the marker is invalid, or False
    if it is valid.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as exc:
        return exc
    else:
        return False
def evaluate_marker(text, extra=None, _ops={}):
    """
    Evaluate a PEP 426 environment marker on CPython 2.4+.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'parser' module, which is not implemented on
    Jython and has been superseded by the 'ast' module in Python 2.6 and
    later.
    """
    # _ops is an intentional mutable default: it caches the node-type ->
    # handler dispatch table across calls, filled in on the first call.
    if not _ops:

        from token import NAME, STRING
        import token, symbol, operator

        def and_test(nodelist):
            # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
            return reduce(operator.and_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)])

        def test(nodelist):
            # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
            return reduce(operator.or_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)])

        def atom(nodelist):
            t = nodelist[1][0]
            if t == token.LPAR:
                if nodelist[2][0] == token.RPAR:
                    raise SyntaxError("Empty parentheses")
                return interpret(nodelist[2])
            raise SyntaxError("Language feature not supported in environment markers")

        def comparison(nodelist):
            if len(nodelist)>4:
                raise SyntaxError("Chained comparison not allowed in environment markers")
            comp = nodelist[2][1]
            cop = comp[1]
            if comp[0] == NAME:
                # Two-token operators arrive as a NAME node: "not in"/"is not".
                if len(nodelist[2]) == 3:
                    if cop == 'not':
                        cop = 'not in'
                    else:
                        cop = 'is not'
            try:
                cop = _ops[cop]
            except KeyError:
                raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
            return cop(evaluate(nodelist[1]), evaluate(nodelist[3]))

        _ops.update({
            symbol.test: test, symbol.and_test: and_test, symbol.atom: atom,
            symbol.comparison: comparison, 'not in': lambda x,y: x not in y,
            'in': lambda x,y: x in y, '==': operator.eq, '!=': operator.ne,
        })
        if hasattr(symbol,'or_test'):
            _ops[symbol.or_test] = test

    def interpret(nodelist):
        # Collapse single-child wrapper nodes down to the meaningful node.
        while len(nodelist)==2: nodelist = nodelist[1]
        try:
            op = _ops[nodelist[0]]
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
            # NOTE(review): the raise below is unreachable (the one above
            # always fires first); kept for parity with the original source.
            raise SyntaxError("Language feature not supported in environment markers: "+symbol.sym_name[nodelist[0]])
        return op(nodelist)

    def evaluate(nodelist):
        while len(nodelist)==2: nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        #while len(name)==2: name = name[1]
        if kind==NAME:
            try:
                op = _marker_values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind==STRING:
            s = nodelist[1]
            # Only simple single-quoted string literals are allowed: no
            # triple quotes and no backslash escapes.
            if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                    or '\\' in s:
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            return s[1:-1]
        raise SyntaxError("Language feature not supported in environment markers")

    # Parse the marker text and walk the resulting concrete syntax tuple.
    return interpret(parser.expr(text).totuple(1)[1])
def _markerlib_evaluate(text):
    """
    Evaluate a PEP 426 environment marker using markerlib.

    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    """
    import _markerlib
    # markerlib speaks Metadata 1.2 (PEP 345) variable names, which use
    # dots; PEP 426 markers use underscores.  Rename the keys accordingly.
    environment = _markerlib.default_environment()
    for dotted in list(environment.keys()):
        environment[dotted.replace('.', '_')] = environment.pop(dotted)
    try:
        return _markerlib.interpret(text, environment)
    except NameError:
        # markerlib reports unknown marker variables as NameError; surface
        # them uniformly as marker syntax errors.
        err = sys.exc_info()[1]
        raise SyntaxError(err.args[0])

if 'parser' not in globals():
    # Fall back to the less-complete markerlib implementation when the
    # 'parser' module is unavailable (e.g. on Jython).
    evaluate_marker = _markerlib_evaluate
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # Subclasses that understand eggs fill these in; with egg_info unset,
    # the base class answers all metadata queries with "nothing".
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        # Root all resource lookups at the directory containing the module.
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))

    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        # Python 3: metadata is text, so decode the raw bytes as UTF-8.
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))

    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))

    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []

    def run_script(self,script_name,namespace):
        # Execute a script shipped in the egg's EGG-INFO/scripts directory
        # within `namespace`, normalizing line endings first.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            execfile(script_filename, namespace, namespace)
        else:
            # Script exists only inside a zipped egg: register its source
            # with linecache so tracebacks can display it, then exec the
            # in-memory text.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec_(script_code, namespace, namespace)

    # The _has/_isdir/_listdir primitives must be supplied by a provider
    # subclass registered for the concrete loader type.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto `base` using os-native
        # separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )

# Fallback provider for any loader type without a more specific registration.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self,module):
        NullProvider.__init__(self,module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # Walk up the directory tree looking for a '.egg' component; the
        # loop terminates at the filesystem root, where os.path.split
        # stops changing the path.
        while path!=old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Resource and metadata provider for plain filesystem directories."""

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        # Read the whole file as bytes; explicit close keeps this working
        # on interpreters predating the 'with' statement.
        handle = open(path, 'rb')
        try:
            return handle.read()
        finally:
            handle.close()

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

register_loader_type(type(None), DefaultProvider)

if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that answers every request with 'nothing here'."""

    module_path = None

    def _has(self, path):
        return False
    # Directory checks behave exactly like existence checks here.
    _isdir = _has

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module.
        pass

# Shared, stateless instance used as the default metadata provider.
empty_provider = EmptyProvider()
def build_zipmanifest(path):
    """
    Map every member of the zip archive at `path` to its ZipInfo object.

    This builds a dictionary similar to the zipimport directory cache,
    except that values are ZipInfo instances rather than tuples.  The
    tuple fields correspond to:

    * [0] - zipinfo.filename (with "/" translated to os.sep on stock
      pythons; identical on pypy)
    * [1] - zipinfo.compress_type
    * [2] - zipinfo.compress_size
    * [3] - zipinfo.file_size
    * [4] - len(utf-8 encoding of filename) if zipinfo & 0x800,
      len(ascii encoding of filename) otherwise
    * [5] - (zipinfo.date_time[0] - 1980) << 9 |
      zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
    * [6] - (zipinfo.date_time[3] - 1980) << 11 |
      zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
    * [7] - zipinfo.CRC

    Keys are member names with '/' replaced by os.sep.
    """
    manifest = {}
    archive = zipfile.ZipFile(path)
    # ZipFile has no __exit__ on Python 3.1, so close manually.
    try:
        for member in archive.namelist():
            key = member.replace('/', os.sep)
            info = archive.getinfo(member)
            assert info is not None
            manifest[key] = info
    finally:
        archive.close()
    return manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    # Cached list of "eager" resources (native libs etc.) that must all be
    # extracted together; filled lazily by _get_eager_resources().
    eagers = None

    def __init__(self, module):
        EggProvider.__init__(self,module)
        self.zipinfo = build_zipmanifest(self.loader.archive)
        # All virtual paths handled by this provider begin with the
        # archive path plus a separator.
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )

    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )

    def get_resource_filename(self, manager, resource_name):
        # Resources inside a zip have no real filename; extract into the
        # manager's cache directory and return the extracted path.
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Any eager resource forces extraction of all of them.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
        #1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        # Directories: extract each child, return the directory's name.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            # Write to a unique temp name then rename into place, so a
            # concurrent extractor never observes a partially-written file.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size!=size or stat.st_mtime!=timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        f = open(file_path, 'rb')
        file_contents = f.read()
        f.close()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Union of lines from native_libs.txt and eager_resources.txt.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build and cache {directory subpath: [child names]} for
        # every directory implied by the archive's member names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))

    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))

register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for
    PKG-INFO, which is treated as existing and whose content is the
    contents of the file at the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        # Universal-newline mode normalizes line endings across platforms.
        handle = open(self.path, 'rU')
        content = handle.read()
        handle.close()
        return content

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # `path` is the importable base directory; `egg_info` points at
        # the metadata directory inside (or beside) it.
        self.egg_info = egg_info
        self.module_path = path
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zipinfo = build_zipmanifest(importer.archive)
        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            # The importer targets a subdirectory inside the archive.
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""

    def __init__(self, path=None):
        self.path = path

    def find_module(self, fullname, path=None):
        subname = fullname.split(".")[-1]
        # A dotted name can only be resolved by a wrapper bound to the
        # parent package's directory; the unbound wrapper handles only
        # top-level names (searching sys.path via imp).
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            search = None
        else:
            search = [self.path]
        try:
            found = imp.find_module(subname, search)
        except ImportError:
            return None
        return ImpLoader(*found)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""

    def __init__(self, file, filename, etc):
        self.file = file
        self.filename = filename
        self.etc = etc

    def load_module(self, fullname):
        try:
            module = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            # imp.find_module may have opened a file handle; always
            # release it, even if loading fails.
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return module
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item

    If there is no importer, this returns a wrapper around the builtin import
    machinery.  The returned importer is only cached if it was created by a
    path hook.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                pass
            else:
                break
        else:
            # No path hook claimed this item; cache the miss as None.
            importer = None
        sys.path_importer_cache.setdefault(path_item,importer)

    if importer is None:
        # Fall back to a wrapper around the builtin import machinery.
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer

try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    # Python 2.5+: prefer the stdlib equivalents and drop our versions.
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
_declare_state('dict', _distribution_finders = {})

def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `distribution_finder` is a callable that, passed a
    path item and the importer instance, yields ``Distribution`` instances
    found on that path item.  See ``pkg_resources.find_on_path`` for an
    example."""
    _distribution_finders[importer_type] = distribution_finder

def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Look up the importer for the path item, then dispatch to the finder
    # registered for that importer's type.
    handler = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, handler)
    return finder(handler, path_item, only)
def find_in_zip(importer, path_item, only=False):
    """Yield distributions found in the zip/egg archive at `path_item`."""
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if not subitem.endswith('.egg'):
            continue
        # Recurse into eggs bundled inside the archive ("baskets").
        subpath = os.path.join(path_item, subitem)
        sub_importer = zipimport.zipimporter(subpath)
        for dist in find_in_zip(sub_importer, subpath):
            yield dist

register_finder(zipimport.zipimporter, find_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()

register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single-file metadata (e.g. a bare PKG-INFO)
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # nested egg (directory or zip): delegate to the
                    # finder registered for its own importer type
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # development egg link: the first non-blank line names
                    # the directory to scan for distributions
                    entry_file = open(os.path.join(path_item, entry))
                    try:
                        entry_lines = entry_file.readlines()
                    finally:
                        entry_file.close()
                    for line in entry_lines:
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break

register_finder(ImpWrapper,find_on_path)

if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})

def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `namespace_handler` is a callable with a signature
    like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # First sighting: create an empty namespace module and hook it
        # onto its parent package.
        module = sys.modules[packageName] = imp.new_module(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # Append the new entry, re-execute the package's __init__, then
        # restore the extended path in case loading replaced __path__.
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # Serialize against other import activity; namespace bookkeeping and
    # module creation must not interleave with concurrent imports.
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare every ancestor as a namespace package,
            # and search the immediate parent's __path__ instead of
            # sys.path for this package's entries.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                # Recurse so nested namespace packages also pick up the
                # newly-added path entry.
                fixup_namespace_packages(subpath, package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for existing in module.__path__:
        if _normalize_cached(existing) == normalized:
            # An equivalent entry is already on __path__; nothing to add.
            return None
    return subpath

register_namespace_handler(ImpWrapper, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_bootstrap is not None:
    register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)

def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler for importer types with no notion of subpaths."""
    return None

register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        # A single string: split and emit its interesting lines.
        for raw in strs.splitlines():
            line = raw.strip()
            if line and not line.startswith('#'):
                # skip blank lines/comments
                yield line
    else:
        # An iterable of strings (possibly nested): flatten recursively.
        for element in strs:
            for line in yield_lines(element):
                yield line
# Pre-compiled matchers for the requirement / entry-point mini-language
# used by parse_requirements() and Requirement.parse().
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match

# Parse '<name>-<version>-py<pyver>-<platform>' egg basenames; everything
# after the name is optional.
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
# Split a version string into numeric / alpha / separator components.
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
# Canonicalize pre-release spellings; '-' marks a post-release branch and
# 'dev' sorts below every other pre-release tag.
replace = {'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@'}.get

def _parse_version_parts(s):
    # Yield comparable fragments: numbers zero-padded to 8 digits, alpha
    # parts prefixed with '*', dots dropped.
    for raw in component_re.split(s):
        part = replace(raw, raw)
        if not part or part == '.':
            continue
        if part[:1] in '0123456789':
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield '*' + part
    # ensure that alpha/beta/candidate are before final
    yield '*final'

def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    key = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            if part < '*final':
                # remove '-' before a prerelease tag
                while key and key[-1] == '*final-':
                    key.pop()
            # remove trailing zeros from each series of numeric parts
            while key and key[-1] == '00000000':
                key.pop()
        key.append(part)
    return tuple(key)
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize the extras by round-tripping them through a
        # synthetic requirement string.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, env=None, installer=None):
        # Optionally resolve this entry point's requirements, then import
        # the module and walk the dotted attribute chain.
        if require: self.require(env, installer)
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry

    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # Resolve and activate everything this entry point's extras need.
        list(map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer)))

    #@classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1,extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                # Validate extras via Requirement.parse; version specs
                # inside the brackets are a syntax error here.
                value,extras = value.split('[',1)
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    parse = classmethod(parse)

    #@classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    parse_group = classmethod(parse_group)

    #@classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        # Accept either a {group: lines} dict or INI-style section text.
        if isinstance(data,dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                # Content before the first [section] header is only legal
                # when it is blank.
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
    parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""

    # Name of the metadata file that version info is read from; subclasses
    # for other formats override this.
    PKG_INFO = 'PKG-INFO'

    def __init__(self,
        location=None, metadata=None, project_name=None, version=None,
        py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
        ):
        # `location` is the sys.path entry; `metadata` is a provider object
        # (defaults to the shared empty provider).  When `version` is not
        # given it is parsed lazily from metadata on first access.
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider
    #@classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        # Build a Distribution by parsing an egg/dist-info basename of the
        # form 'name-version-pyX.Y-platform' (all but name optional).
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            # Dispatch to the Distribution subclass for this extension.
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    from_location = classmethod(from_location)

    # Composite sort/equality key: (parsed version, precedence, key,
    # location without any md5 fragment, python version, platform).
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    def __hash__(self): return hash(self.hashcmp)

    # Rich comparisons all delegate to the composite `hashcmp` key, so
    # ordering and equality stay mutually consistent.
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    #@property
    def key(self):
        # Lowercased project name; computed once and cached.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    key = property(key)

    #@property
    def parsed_version(self):
        # Sortable tuple form of `version` (see parse_version); cached.
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv
    parsed_version = property(parsed_version)

    #@property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Not supplied at construction time: scan the metadata file
            # for a 'Version:' header.
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                # for/else: the loop found no 'Version:' line at all.
                raise ValueError(
                    "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
                )
    version = property(version)
    #@property
    def _dep_map(self):
        # Map {extra-or-None: [Requirement, ...]} parsed from requires.txt
        # / depends.txt.  A section named "extra:marker" contributes its
        # requirements only when the environment marker is valid and true.
        # Cached on first access.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            extra, marker = extra.split(':',1)
                            if invalid_marker(marker):
                                reqs=[] # XXX warn
                            elif not evaluate_marker(marker):
                                reqs=[]
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    _dep_map = property(_dep_map)

    def requires(self,extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        # Unconditional requirements first, then each requested extra's.
        deps.extend(dm.get(None,()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self,name):
        # Yield the lines of metadata file `name`; yields nothing when the
        # file is absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            # Now actually importable: refresh declared namespace packages
            # and declare the ones this distribution provides.
            fixup_namespace_packages(self.location)
            list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-'+self.platform
        return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            # Never forward private/dunder lookups: avoids recursion during
            # unpickling and keeps provider internals hidden.
            raise AttributeError(attr)
        return getattr(self._provider, attr)
#@classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        """Build a Distribution from a filesystem path.

        The path is normalized (and cached) and the basename is used to
        infer project name/version via ``from_location``.
        """
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    from_filename = classmethod(from_filename)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parse entry_points.txt once and cache the resulting map.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # Work on a normalized copy of `path` so comparisons are reliable,
        # but mutate the caller's `path` in lock-step.
        npath= [(p and _normalize_cached(p) or p) for p in path]
        bp = None
        for p, item in enumerate(npath):
            if item==nloc:
                # Already present; fall through to duplicate removal below.
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Neither the location nor its parent dir found: append at end.
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np  # ha!
        return
    def check_version_conflict(self):
        """Warn when modules from top_level.txt were already imported from a
        location other than this distribution's own location."""
        if self.key=='setuptools':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            # Not imported yet, or a namespace package: nothing to conflict.
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages
            ):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                # Imported from inside this distribution: fine.
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            # Only fill in values the caller did not explicitly override.
            kw.setdefault(attr, getattr(self,attr,None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
#@property
def extras(self):
return [dep for dep in self._dep_map if dep]
extras = property(extras)
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches a version between '(' / ',' delimiters so a bare version in a
    # Requires-Dist spec can be prefixed with '==' (e.g. '(1.0)' -> '==1.0').
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            from email.parser import Parser
            # METADATA is RFC 822-style; parse it with the email machinery.
            self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
            return self._pkg_info
    @property
    def _dep_map(self):
        # Lazily computed {extra-or-None: [Requirement, ...]} mapping.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from _markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Attach the compiled environment marker for later evaluation.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            # Requirements whose marker holds when `extra` is requested.
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Per-extra deps exclude those already required unconditionally.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Metadata-directory suffix -> Distribution subclass used to represent it.
_distributionImpl = {'.egg': Distribution,
                   '.egg-info': Distribution,
                   '.dist-info': DistInfoDistribution }
def issue_warning(*args,**kw):
    """Issue a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # Ran off the top of the stack; keep the deepest level reached.
        pass
    from warnings import warn
    warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Parse a comma-separated list of ITEM matches starting at line[p],
        # following backslash continuations onto subsequent lines until
        # TERMINATOR matches.  Returns (current line, new position, items).
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    line = next(lines); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end()   # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line,p)
        if match:
            # Bracketed extras list, e.g. name[extra1,extra2]
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
    """Sort `dists` in place, best (highest ``hashcmp``) first.

    Sorting on a key function instead of decorate/sort avoids comparing
    Distribution objects directly when two entries have equal ``hashcmp``
    values, which is undefined (and a TypeError on Python 3).
    """
    dists.sort(key=lambda dist: dist.hashcmp, reverse=True)
class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Sort specs by parsed version so __contains__ can scan in order.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        # hashCmp ignores surface spelling: it uses the lowercase key and
        # parsed versions, so equivalent requirements compare equal.
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        """True when `item` (Distribution, version string, or parsed
        version) satisfies every version spec of this requirement."""
        if isinstance(item,Distribution):
            if item.key != self.key: return False
            if self.index: item = item.parsed_version  # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        for parsed,trans,op,ver in self.index:
            # Each comparison result selects an action from the per-operator
            # transition string: T/F decide immediately, +/- set a tentative
            # result, '.' leaves it unchanged (see state_machine below).
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F': return False
            elif action=='T': return True
            elif action=='+': last = True
            elif action=='-' or last is None:   last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    #@staticmethod
    def parse(s):
        """Parse exactly one requirement string into a Requirement."""
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs)==1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
    parse = staticmethod(parse)
# Per-operator action table used by Requirement.__contains__.  The 3-char
# string is indexed by the comparison result of (candidate vs spec version):
# position 0 for '=', 1 for '>', -1 for '<' (per the '=><' legend).
# 'T' = definitely matches, 'F' = definitely fails, '+'/'-' set a tentative
# result, '.' = no effect.
state_machine = {
    # =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls,type):
        # Python 2 classic class: graft `object` onto it so a real __mro__
        # exists, then drop the synthetic subclass itself from the result.
        class cls(cls,object): pass
        return cls.__mro__[1:]
    return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the MRO and return the first registered factory; None if absent.
    mro = _get_mro(getattr(ob, '__class__', type(ob)))
    candidates = (registry[t] for t in mro if t in registry)
    return next(candidates, None)
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists.

    Creates any missing intermediate directories.  Tolerates the race
    where another process creates the directory between the isdir()
    check and the makedirs() call; any other OSError propagates.
    """
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # Re-raise unless the directory now exists (concurrent creation).
            if not os.path.isdir(dirname):
                raise
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # New header: emit the finished section, unless it is an entirely
        # empty leading ``None`` section.
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []
    # wrap up last segment
    yield section, content
def _mkstemp(*args,**kw):
    """mkstemp() variant that bypasses the sandboxed os.open hook.

    `os_open` is presumably the real, unsandboxed os.open saved elsewhere
    in this module -- TODO confirm.
    """
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
    # Re-export every public ResourceManager method as a module-level API.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Convenience module-level aliases for the master working set's API.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]; list(map(working_set.add_entry,sys.path)) # match order
|
daodaoliang/python-phonenumbers | refs/heads/dev | python/phonenumbers/shortdata/region_LK.py | 10 | """Auto-generated file, do not edit by hand. LK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_LK = PhoneMetadata(id='LK', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='11[02689]', possible_number_pattern='\\d{3}', example_number='119'),
short_code=PhoneNumberDesc(national_number_pattern='11[024-9]', possible_number_pattern='\\d{3}', example_number='119'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
|
130s/bloom | refs/heads/master | test/system_tests/common.py | 1 | """
Common tools for system tests
"""
from __future__ import print_function
import os
from ..utils.common import bloom_answer
from ..utils.common import change_directory
from ..utils.common import user
def create_release_repo(upstream_url, upstream_type, upstream_branch='',
                        rosdistro='groovy'):
    """Create a local 'foo' release git repository and return its file:// URL."""
    user('mkdir foo_release')
    with change_directory('foo_release'):
        user('git init .')
        # Scripted responses for the interactive git-bloom-config prompts.
        config_answers = [
            'y',
            'foo',
            upstream_url,
            upstream_type,
            '',
            '',
            upstream_branch,
            rosdistro,
        ]
        with bloom_answer(config_answers):
            user('git-bloom-config new ' + str(rosdistro))
        repo_url = 'file://' + os.getcwd()
    return repo_url
|
chenokay/ripozo | refs/heads/master | ripozo_tests/__init__.py | 4 | __author__ = 'Tim Martin'
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)s - [%(module)s - %(filename)s '
'- %(lineno)d - %(levelname)s]: %(message)s')
logger = logging.getLogger(__name__)
from ripozo_tests import unit, integration |
rghe/ansible | refs/heads/devel | contrib/inventory/lxc_inventory.py | 79 | #!/usr/bin/env python
#
# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
# <florian@hastexo.com>
# Based in part on:
# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import print_function
import sys
import lxc
import json
def build_dict():
    """Returns a dictionary keyed to the defined LXC groups. All
    containers, including the ones not in any group, are included in the
    "all" group."""
    # Enumerate all containers, and list the groups they are in. Also,
    # implicitly add every container to the 'all' group.
    containers = {}
    for container_name in lxc.list_containers():
        groups = lxc.Container(container_name).get_config_item('lxc.group') or []
        containers[container_name] = ['all'] + groups
    # Extract the groups, flatten the list, and remove duplicates
    all_groups = set()
    for group_list in containers.values():
        all_groups.update(group_list)
    # Create a dictionary for each group (including the 'all' group)
    inventory = {}
    for group in all_groups:
        members = [name for name, group_list in containers.items()
                   if group in group_list]
        inventory[group] = {'hosts': members,
                            'vars': {'ansible_connection': 'lxc'}}
    return inventory
def main(argv):
    """Returns a JSON dictionary as expected by Ansible.

    Supports the standard dynamic-inventory protocol: ``--list`` dumps
    the full group/host structure, ``--host <host>`` dumps per-host vars.
    Exits non-zero on invalid usage so Ansible detects the failure
    (previously the error was printed but the script still exited 0).
    """
    result = build_dict()
    if len(argv) == 2 and argv[1] == '--list':
        json.dump(result, sys.stdout)
    elif len(argv) == 3 and argv[1] == '--host':
        # All containers share the same connection var; no per-host data.
        json.dump({'ansible_connection': 'lxc'}, sys.stdout)
    else:
        print("Need an argument, either --list or --host <host>", file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
|
cessationoftime/nixops | refs/heads/efsMountSecurityGroups | tests/functional/test_encrypted_links.py | 22 | from os import path
from nose import tools, SkipTest
from tests.functional import generic_deployment_test
from nixops.ssh_util import SSHCommandFailed
from nixops.util import devnull
import sys
import time
import signal
import subprocess
parent_dir = path.dirname(__file__)
logical_spec = '%s/encrypted-links.nix' % (parent_dir)
class TestEncryptedLinks(generic_deployment_test.GenericDeploymentTest):
    """Deploy two VirtualBox machines and verify that the encrypted link
    between them carries traffic, and stops doing so once the
    encrypted-links target is shut down."""
    def setup(self):
        super(TestEncryptedLinks,self).setup()
        self.depl.nix_exprs = [ logical_spec ]
    def test_deploy(self):
        # Skip the whole test when VirtualBox is not installed.
        if subprocess.call(["VBoxManage", "--version"],
                           stdout=devnull,
                           stderr=devnull) != 0:
            raise SkipTest("VirtualBox is not available")
        self.depl.debug = True
        self.depl.deploy()
        # !!! Shouldn't need this, instead the encrypted links target
        # should wait until the link is active...
        time.sleep(1)
        self.ping("machine1", "machine2")
        self.ping("machine2", "machine1")
        self.depl.machines["machine1"].run_command("systemctl stop encrypted-links.target")
        # With the tunnel down, pings in both directions must fail.
        with tools.assert_raises(SSHCommandFailed):
            self.ping("machine1", "machine2")
        with tools.assert_raises(SSHCommandFailed):
            self.ping("machine2", "machine1")
    def ping(self, machine1, machine2):
        # Single ICMP probe over the machine's encrypted-link alias.
        self.depl.machines[machine1].run_command("ping -c1 {0}-encrypted".format(machine2))
|
saeki-masaki/glance | refs/heads/master | glance/tests/functional/v1/test_copy_to_file.py | 11 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests copying images to a Glance API server which uses a filesystem-
based storage backend.
"""
import hashlib
import tempfile
import time
import httplib2
from oslo_serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.functional.store_utils import get_http_uri
from glance.tests.functional.store_utils import setup_http
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki
class TestCopyToFile(functional.FunctionalTest):
    """
    Functional tests for copying images from the HTTP storage
    backend to file
    """
    # NOTE: every test spins up real Glance servers and talks to them over
    # HTTP; each test is responsible for start_servers()/stop_servers().
    def _do_test_copy_from(self, from_store, get_uri):
        """
        Ensure we can copy from an external image in from_store.
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        setup_http(self)
        # POST /images with public image to be stored in from_store,
        # to stand in for the 'external' image
        image_data = "*" * FIVE_KB
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'external',
                   'X-Image-Meta-Store': from_store,
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status, content)
        data = jsonutils.loads(content)
        original_image_id = data['image']['id']
        copy_from = get_uri(self, original_image_id)
        # POST /images with public image copied from_store (to file)
        headers = {'X-Image-Meta-Name': 'copied',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Glance-API-Copy-From': copy_from}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status, content)
        data = jsonutils.loads(content)
        copy_image_id = data['image']['id']
        self.assertNotEqual(copy_image_id, original_image_id)
        # GET image and make sure image content is as expected
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              copy_image_id)
        def _await_status(expected_status):
            # Poll HEAD (up to ~1s) until the copy reaches expected_status.
            for i in range(100):
                time.sleep(0.01)
                http = httplib2.Http()
                response, content = http.request(path, 'HEAD')
                self.assertEqual(200, response.status)
                if response['x-image-meta-status'] == expected_status:
                    return
            self.fail('unexpected image status %s' %
                      response['x-image-meta-status'])
        _await_status('active')
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual(str(FIVE_KB), response['content-length'])
        self.assertEqual("*" * FIVE_KB, content)
        self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                         hashlib.md5(content).hexdigest())
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("copied", data['image']['name'])
        # DELETE original image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              original_image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        # GET image again to make sure the existence of the original
        # image in from_store is not depended on
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              copy_image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual(str(FIVE_KB), response['content-length'])
        self.assertEqual("*" * FIVE_KB, content)
        self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                         hashlib.md5(content).hexdigest())
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("copied", data['image']['name'])
        # DELETE copied image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              copy_image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        self.stop_servers()
    @skip_if_disabled
    def test_copy_from_http_store(self):
        """
        Ensure we can copy from an external image in HTTP store.
        """
        self._do_test_copy_from('file', get_http_uri)
    @skip_if_disabled
    def test_copy_from_http_exists(self):
        """Ensure we can copy from an external image in HTTP."""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        setup_http(self)
        copy_from = get_http_uri(self, 'foobar')
        # POST /images with public image copied from HTTP (to file)
        headers = {'X-Image-Meta-Name': 'copied',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Glance-API-Copy-From': copy_from}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status, content)
        data = jsonutils.loads(content)
        copy_image_id = data['image']['id']
        self.assertEqual('queued', data['image']['status'], content)
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              copy_image_id)
        def _await_status(expected_status):
            # Poll HEAD (up to ~1s) until the copy reaches expected_status.
            for i in range(100):
                time.sleep(0.01)
                http = httplib2.Http()
                response, content = http.request(path, 'HEAD')
                self.assertEqual(200, response.status)
                if response['x-image-meta-status'] == expected_status:
                    return
            self.fail('unexpected image status %s' %
                      response['x-image-meta-status'])
        _await_status('active')
        # GET image and make sure image content is as expected
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual(str(FIVE_KB), response['content-length'])
        self.assertEqual("*" * FIVE_KB, content)
        self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                         hashlib.md5(content).hexdigest())
        # DELETE copied image
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
        self.stop_servers()
    @skip_if_disabled
    def test_copy_from_http_nonexistent_location_url(self):
        # Ensure HTTP 404 response returned when try to create
        # image with non-existent http location URL.
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        setup_http(self)
        uri = get_http_uri(self, 'foobar')
        copy_from = uri.replace('images', 'snafu')
        # POST /images with public image copied from HTTP (to file)
        headers = {'X-Image-Meta-Name': 'copied',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Glance-API-Copy-From': copy_from}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(404, response.status, content)
        expected = 'HTTP datastore could not find image at URI.'
        self.assertIn(expected, content)
        self.stop_servers()
    @skip_if_disabled
    def test_copy_from_file(self):
        """
        Ensure we can't copy from file
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        with tempfile.NamedTemporaryFile() as image_file:
            image_file.write("XXX")
            image_file.flush()
            copy_from = 'file://' + image_file.name
        # POST /images with public image copied from file (to file)
        headers = {'X-Image-Meta-Name': 'copied',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Glance-API-Copy-From': copy_from}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(400, response.status, content)
        expected = 'External sources are not supported: \'%s\'' % copy_from
        msg = 'expected "%s" in "%s"' % (expected, content)
        self.assertIn(expected, content, msg)
        self.stop_servers()
    @skip_if_disabled
    def test_copy_from_swift_config(self):
        """
        Ensure we can't copy from swift+config
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        # POST /images with public image copied from file (to file)
        headers = {'X-Image-Meta-Name': 'copied',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Glance-API-Copy-From': 'swift+config://xxx'}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(400, response.status, content)
        expected = 'External sources are not supported: \'swift+config://xxx\''
        msg = 'expected "%s" in "%s"' % (expected, content)
        self.assertIn(expected, content, msg)
        self.stop_servers()
|
hamzehd/edx-platform | refs/heads/master | lms/djangoapps/notifier_api/serializers.py | 61 | from django.contrib.auth.models import User
from django.http import Http404
from rest_framework import serializers
from openedx.core.djangoapps.course_groups.cohorts import is_course_cohorted
from notification_prefs import NOTIFICATION_PREF_KEY
from lang_pref import LANGUAGE_KEY
class NotifierUserSerializer(serializers.ModelSerializer):
    """
    A serializer containing all information about a user needed by the notifier
    (namely the user's name, email address, notification and language
    preferences, and course enrollment and cohort information).
    Because these pieces of information reside in different tables, this is
    designed to work well with prefetch_related and select_related, which
    require the use of all() instead of get() or filter(). The following fields
    should be prefetched on the user objects being serialized:
    * profile
    * preferences
    * courseenrollment_set
    * course_groups
    * roles__permissions
    """
    name = serializers.SerializerMethodField()
    preferences = serializers.SerializerMethodField()
    course_info = serializers.SerializerMethodField()
    def get_name(self, user):
        # The display name lives on the related profile, not on User itself.
        return user.profile.name
    def get_preferences(self, user):
        # Only the language and notification preferences are relevant to the
        # notifier; all other preference keys are filtered out.
        return {
            pref.key: pref.value
            for pref
            in user.preferences.all()
            if pref.key in [LANGUAGE_KEY, NOTIFICATION_PREF_KEY]
        }
    def get_course_info(self, user):
        # course_id -> cohort id, for every cohort the user belongs to.
        cohort_id_map = {
            cohort.course_id: cohort.id
            for cohort in user.course_groups.all()
        }
        # Courses where some role grants the 'see_all_cohorts' permission.
        see_all_cohorts_set = {
            role.course_id
            for role in user.roles.all()
            for perm in role.permissions.all() if perm.name == "see_all_cohorts"
        }
        ret = {}
        for enrollment in user.courseenrollment_set.all():
            if enrollment.is_active:
                try:
                    ret[unicode(enrollment.course_id)] = {
                        "cohort_id": cohort_id_map.get(enrollment.course_id),
                        "see_all_cohorts": (
                            enrollment.course_id in see_all_cohorts_set or
                            not is_course_cohorted(enrollment.course_id)
                        ),
                    }
                except Http404: # is_course_cohorted raises this if course does not exist
                    pass
        return ret
    class Meta(object):
        model = User
        fields = ("id", "email", "name", "preferences", "course_info")
        read_only_fields = ("id", "email")
|
wweiradio/django | refs/heads/master | django/core/serializers/pyyaml.py | 439 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
    """SafeDumper with representers for Django-relevant Python types."""
    def represent_decimal(self, data):
        # Emit Decimal as a plain string scalar to avoid precision loss.
        return self.represent_scalar('tag:yaml.org,2002:str', str(data))
    def represent_ordered_dict(self, data):
        # Emit OrderedDict as an ordinary mapping, preserving item order.
        return self.represent_mapping('tag:yaml.org,2002:map', data.items())
# Register the custom representers on the dumper class.
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
    """
    Convert a queryset to YAML.
    """
    internal_use_only = False
    def handle_field(self, obj, field):
        # A nasty special case: base YAML doesn't support serialization of time
        # types (as opposed to dates or datetimes, which it does support). Since
        # we want to use the "safe" serializer for better interoperability, we
        # need to do something with those pesky times. Converting 'em to strings
        # isn't perfect, but it's better than a "!!python/time" type which would
        # halt deserialization under any other language.
        if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
            self._current[field.name] = str(getattr(obj, field.name))
        else:
            super(Serializer, self).handle_field(obj, field)
    def end_serialization(self):
        # Dump all accumulated python-serialized objects in a single pass.
        yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
    def getvalue(self):
        # Grand-parent super
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data, yielding DeserializedObjects.
    """
    data = stream_or_string
    # Normalize bytes input to text first.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    # Wrap plain strings in a file-like object; streams pass through as-is.
    if isinstance(data, six.string_types):
        source = StringIO(data)
    else:
        source = data
    try:
        for deserialized in PythonDeserializer(yaml.load(source, Loader=SafeLoader), **options):
            yield deserialized
    except GeneratorExit:
        # Normal generator shutdown must not be converted into an error.
        raise
    except Exception as exc:
        # Re-raise anything else as a DeserializationError, keeping the
        # original traceback for debugging.
        six.reraise(DeserializationError, DeserializationError(exc), sys.exc_info()[2])
|
kustodian/ansible | refs/heads/devel | lib/ansible/module_utils/pure.py | 56 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
HAS_PURESTORAGE = True
try:
from purestorage import purestorage
except ImportError:
HAS_PURESTORAGE = False
HAS_PURITY_FB = True
try:
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
except ImportError:
HAS_PURITY_FB = False
from functools import wraps
from os import environ
from os import path
import platform
VERSION = 1.2
USER_AGENT_BASE = 'Ansible'
API_AGENT_VERSION = 1.5
def get_system(module):
    """Return System Object or Fail"""
    # Identify ourselves to the array with a descriptive user-agent string.
    agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }
    url = module.params['fa_url']
    token = module.params['api_token']
    env_url = environ.get('PUREFA_URL')
    env_token = environ.get('PUREFA_API')
    # Prefer explicit module arguments; fall back to environment variables.
    if url and token:
        system = purestorage.FlashArray(url, api_token=token, user_agent=agent)
    elif env_url and env_token:
        system = purestorage.FlashArray(env_url, api_token=env_token, user_agent=agent)
    else:
        module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
    # Verify the credentials actually work before handing the client back.
    try:
        system.get()
    except Exception:
        module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
    return system
def get_blade(module):
    """Return a logged-in FlashBlade client object or fail the module.

    Credentials come from the ``fb_url``/``api_token`` module arguments,
    falling back to the PUREFB_URL/PUREFB_API environment variables.
    Fails the module when neither source is available or login fails.
    """
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }

    def _connect(host, api_token):
        # Build the client, log in, and tag our user agent when the API
        # version supports it.  Shared by both credential sources.
        blade = PurityFb(host)
        blade.disable_verify_ssl()
        try:
            blade.login(api_token)
            if API_AGENT_VERSION in blade.api_version.list_versions().versions:
                blade._api_client.user_agent = user_agent
        except rest.ApiException:
            module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
        return blade

    blade_name = module.params['fb_url']
    api = module.params['api_token']
    if blade_name and api:
        blade = _connect(blade_name, api)
    elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
        blade = _connect(environ.get('PUREFB_URL'), environ.get('PUREFB_API'))
    else:
        module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
    return blade
def purefa_argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    # Connection options shared by every FlashArray module.
    spec = {
        'fa_url': dict(),
        'api_token': dict(no_log=True),
    }
    return spec
def purefb_argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    # Connection options shared by every FlashBlade module.
    spec = {
        'fb_url': dict(),
        'api_token': dict(no_log=True),
    }
    return spec
|
CoinProjects/XDE2 | refs/heads/master | contrib/linearize/linearize-hashes.py | 1 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2015 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class XDE2RPC:
    """Minimal JSON-RPC 1.1 client for the XDE2 daemon's HTTP interface."""
    # Request id counter, incremented once per call.  NOTE(review):
    # "self.OBJID += 1" reads the class attribute but writes an instance
    # attribute, so each instance gets its own counter after the first call.
    OBJID = 1
    def __init__(self, host, port, username, password):
        # Pre-compute the HTTP Basic auth header from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Plain HTTP connection; last argument is the 30-second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        # Perform one JSON-RPC round trip.  Returns the decoded 'result'
        # value, the server's 'error' object when one is returned, or
        # None on transport/decoding problems (after printing a message).
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblock(self, hash, verbose=True):
        # Thin wrapper around the daemon's 'getblock' RPC.
        return self.rpc('getblock', [hash, verbose])
    def getblockhash(self, index):
        # Thin wrapper around the daemon's 'getblockhash' RPC.
        return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
    # Walk the chain from min_height to max_height (inclusive) and print
    # one block hash per line, querying the daemon over JSON-RPC.
    rpc = XDE2RPC(settings['host'], settings['port'],
        settings['rpcuser'], settings['rpcpassword'])
    for height in xrange(settings['min_height'], settings['max_height']+1):
        hash = rpc.getblockhash(height)
        print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 31500
background-color: rgb(0, 0, 0);
alternate-background-color: rgb(86, 0, 120);
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
|
campbe13/openhatch | refs/heads/master | vendor/packages/gdata/src/gdata/analytics/__init__.py | 261 | #!/usr/bin/python
#
# Original Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that this module will not function without specifically adding
# 'analytics': [ #Google Analytics
# 'https://www.google.com/analytics/feeds/'],
# to CLIENT_LOGIN_SCOPES in the gdata/service.py file
"""Contains extensions to Atom objects used with Google Analytics."""
__author__ = 'api.suryasev (Sal Uryasev)'
import atom
import gdata
GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009'
class TableId(gdata.GDataEntry):
  """tableId element: the Analytics table id carried inside an account entry."""
  _tag = 'tableId'
  _namespace = GAN_NAMESPACE
class Property(gdata.GDataEntry):
  """A generic name/value property element attached to an Analytics entry."""
  _tag = 'property'
  _namespace = GAN_NAMESPACE
  # Inherit the parsing maps from GDataEntry, then register our XML attributes.
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'
  def __init__(self, name=None, value=None, *args, **kwargs):
    self.name = name
    self.value = value
    super(Property, self).__init__(*args, **kwargs)
  def __str__(self):
    # Renders as the bare value so properties can be used like strings.
    return self.value
  def __repr__(self):
    return self.value
class AccountListEntry(gdata.GDataEntry):
  """An Atom entry describing one Google Analytics account/profile.

  Carries the profile's tableId elements and its name/value properties.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  # Inherit parsing maps, then register the Analytics-specific child elements.
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}tableId' % GAN_NAMESPACE] = ('tableId',
                                              [TableId])
  _children['{%s}property' % GAN_NAMESPACE] = ('property',
                                               [Property])
  def __init__(self, tableId=None, property=None,
               *args, **kwargs):
    self.tableId = tableId
    self.property = property
    super(AccountListEntry, self).__init__(*args, **kwargs)
def AccountListEntryFromString(xml_string):
  """Converts an XML string into an AccountListEntry object.

  Args:
    xml_string: string The XML describing a Document List feed entry.

  Returns:
    A AccountListEntry object corresponding to the given XML.
  """
  parsed_entry = atom.CreateClassFromXMLString(AccountListEntry, xml_string)
  return parsed_entry
class AccountListFeed(gdata.GDataFeed):
  """A feed whose entries are AccountListEntry objects (one per profile)."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  # Inherit parsing maps, then make feed entries parse as AccountListEntry.
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [AccountListEntry])
def AccountListFeedFromString(xml_string):
  """Converts an XML string into an AccountListFeed object.

  Args:
    xml_string: string The XML describing an AccountList feed.

  Returns:
    An AccountListFeed object corresponding to the given XML.
    All properties are also linked to with a direct reference
    from each entry object for convenience. (e.g. entry.AccountName)
  """
  feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string)
  # Alias every property onto its entry (with the 'ga:' prefix stripped)
  # and expose each tableId's text content as a .value attribute.
  for account in feed.entry:
    for prop in account.property:
      account.__dict__[prop.name.replace('ga:', '')] = prop
    for table_id in account.tableId:
      table_id.__dict__['value'] = table_id.text
  return feed
class Dimension(gdata.GDataEntry):
  """A dimension element of a data-feed entry (parallel to Metric)."""
  _tag = 'dimension'
  _namespace = GAN_NAMESPACE
  # Inherit parsing maps, then register the dimension's XML attributes.
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'
  _attributes['type'] = 'type'
  _attributes['confidenceInterval'] = 'confidence_interval'
  def __init__(self, name=None, value=None, type=None,
               confidence_interval = None, *args, **kwargs):
    self.name = name
    self.value = value
    self.type = type
    self.confidence_interval = confidence_interval
    super(Dimension, self).__init__(*args, **kwargs)
  def __str__(self):
    # Renders as the bare value so dimensions can be used like strings.
    return self.value
  def __repr__(self):
    return self.value
class Metric(gdata.GDataEntry):
  """A metric element of a data-feed entry (parallel to Dimension)."""
  _tag = 'metric'
  _namespace = GAN_NAMESPACE
  # Inherit parsing maps, then register the metric's XML attributes.
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'
  _attributes['type'] = 'type'
  _attributes['confidenceInterval'] = 'confidence_interval'
  def __init__(self, name=None, value=None, type=None,
               confidence_interval = None, *args, **kwargs):
    self.name = name
    self.value = value
    self.type = type
    self.confidence_interval = confidence_interval
    super(Metric, self).__init__(*args, **kwargs)
  def __str__(self):
    # Renders as the bare value so metrics can be used like strings.
    return self.value
  def __repr__(self):
    return self.value
class AnalyticsDataEntry(gdata.GDataEntry):
  """The Google Analytics version of an Atom Entry.

  One row of report data: a list of Dimension and a list of Metric elements.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  # Inherit parsing maps, then register the Analytics child elements.
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}dimension' % GAN_NAMESPACE] = ('dimension',
                                                [Dimension])
  _children['{%s}metric' % GAN_NAMESPACE] = ('metric',
                                             [Metric])
  def __init__(self, dimension=None, metric=None, *args, **kwargs):
    self.dimension = dimension
    self.metric = metric
    super(AnalyticsDataEntry, self).__init__(*args, **kwargs)
class AnalyticsDataFeed(gdata.GDataFeed):
  """A feed containing a list of Google Analytics data entries."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  # Inherit parsing maps, then make feed entries parse as AnalyticsDataEntry.
  # (A stray no-op string expression that followed these attributes has
  # been removed; the class docstring above is the real documentation.)
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [AnalyticsDataEntry])
def AnalyticsDataFeedFromString(xml_string):
  """Converts an XML string into an AccountListFeed object.

  Args:
    xml_string: string The XML describing an AccountList feed.

  Returns:
    An AccountListFeed object corresponding to the given XML.
    Each metric and dimension is also referenced directly from
    the entry for easier access. (e.g. entry.keyword.value)
  """
  feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string)
  # Alias every metric/dimension onto its entry, stripping the 'ga:' prefix.
  if feed.entry:
    for row in feed.entry:
      for metric in row.metric:
        row.__dict__[metric.name.replace('ga:', '')] = metric
      if row.dimension is not None:
        for dimension in row.dimension:
          row.__dict__[dimension.name.replace('ga:', '')] = dimension
  return feed
|
simar7/build-mozharness | refs/heads/master | configs/unittests/linux_unittest.py | 3 | import os
import platform
# OS Specifics
# All paths are rooted at a 'build' directory under the current working dir.
ABS_WORK_DIR = os.path.join(os.getcwd(), 'build')
BINARY_PATH = os.path.join(ABS_WORK_DIR, "firefox", "firefox-bin")
INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.tar.bz2")
XPCSHELL_NAME = "xpcshell"
# Linux executables carry no filename suffix.
EXE_SUFFIX = ''
# Linux test slaves need the screen saver disabled; mouse/screen
# adjustment is a Windows-only preflight step and stays off here.
DISABLE_SCREEN_SAVER = True
ADJUST_MOUSE_AND_SCREEN = False
# Pick the breakpad minidump_stackwalk binary matching the host bitness.
if platform.architecture()[0] == '64bit':
    MINIDUMP_STACKWALK_PATH = "%(abs_work_dir)s/tools/breakpad/linux64/minidump_stackwalk"
else:
    MINIDUMP_STACKWALK_PATH = "%(abs_work_dir)s/tools/breakpad/linux/minidump_stackwalk"
#####
# Mozharness unit-test configuration for Linux slaves.  Keys are consumed by
# the desktop_unittest script; %(...)s placeholders are filled in at runtime.
config = {
    "buildbot_json_path": "buildprops.json",
    "exes": {
        'python': '/tools/buildbot/bin/python',
        'virtualenv': ['/tools/buildbot/bin/python', '/tools/misc-python/virtualenv.py'],
    },
    "find_links": [
        "http://pypi.pvt.build.mozilla.org/pub",
        "http://pypi.pub.build.mozilla.org/pub",
    ],
    "pip_index": False,
    ###
    "installer_path": INSTALLER_PATH,
    "binary_path": BINARY_PATH,
    "xpcshell_name": XPCSHELL_NAME,
    "exe_suffix": EXE_SUFFIX,
    # Entry-point script for each harness, relative to the tests directory.
    "run_file_names": {
        "mochitest": "runtests.py",
        "webapprt": "runtests.py",
        "reftest": "runreftest.py",
        "xpcshell": "runxpcshelltests.py",
        "cppunittest": "runcppunittests.py",
        "jittest": "jit_test.py",
        "mozbase": "test.py"
    },
    # Which parts of the tests zip to extract: common dirs plus per-suite dirs.
    "minimum_tests_zip_dirs": ["bin/*", "certs/*", "modules/*", "mozbase/*", "config/*"],
    "specific_tests_zip_dirs": {
        "mochitest": ["mochitest/*"],
        "webapprt": ["mochitest/*"],
        "reftest": ["reftest/*", "jsreftest/*"],
        "xpcshell": ["xpcshell/*"],
        "cppunittest": ["cppunittests/*"],
        "jittest": ["jit-test/*"],
        "mozbase": ["mozbase/*"]
    },
    # test harness options are located in the gecko tree
    "in_tree_config": "config/mozharness/linux_config.py",
    # local mochi suites
    "all_mochitest_suites": {
        "plain1": ["--total-chunks=5", "--this-chunk=1", "--chunk-by-dir=4"],
        "plain2": ["--total-chunks=5", "--this-chunk=2", "--chunk-by-dir=4"],
        "plain3": ["--total-chunks=5", "--this-chunk=3", "--chunk-by-dir=4"],
        "plain4": ["--total-chunks=5", "--this-chunk=4", "--chunk-by-dir=4"],
        "plain5": ["--total-chunks=5", "--this-chunk=5", "--chunk-by-dir=4"],
        "plain": [],
        "plain-chunked": ["--chunk-by-dir=4"],
        "chrome": ["--chrome"],
        "browser-chrome": ["--browser-chrome"],
        "browser-chrome-1": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
        "browser-chrome-2": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
        "browser-chrome-3": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
        "browser-chrome-chunked": ["--browser-chrome", "--chunk-by-dir=5"],
        "mochitest-gl": ["--manifest=tests/mochitest/tests/dom/canvas/test/mochitest-subsuite-webgl.ini"],
        "mochitest-devtools-chrome": ["--browser-chrome", "--subsuite=devtools"],
        "mochitest-devtools-chrome-1": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
        "mochitest-devtools-chrome-2": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
        "mochitest-devtools-chrome-3": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
        "mochitest-devtools-chrome-chunked": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5"],
        "jetpack-package": ["--jetpack-package"],
        "jetpack-addon": ["--jetpack-addon"],
        "a11y": ["--a11y"],
        "plugins": ['--setpref=dom.ipc.plugins.enabled=false',
                    '--setpref=dom.ipc.plugins.enabled.x86_64=false',
                    '--ipcplugins']
    },
    # local webapprt suites
    "all_webapprt_suites": {
        "chrome": ["--webapprt-chrome", "--browser-arg=-test-mode"],
        "content": ["--webapprt-content"]
    },
    # local reftest suites
    "all_reftest_suites": {
        "reftest": ["tests/reftest/tests/layout/reftests/reftest.list"],
        "crashtest": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
        "jsreftest": ["--extra-profile-file=tests/jsreftest/tests/user.js", "tests/jsreftest/tests/jstests.list"],
        "reftest-ipc": {'env': {'MOZ_OMTC_ENABLED': '1',
                                'MOZ_DISABLE_CONTEXT_SHARING_GLX': '1'},
                        'options': ['--setpref=browser.tabs.remote=true',
                                    '--setpref=browser.tabs.remote.autostart=true',
                                    '--setpref=layers.offmainthreadcomposition.testing.enabled=true',
                                    '--setpref=layers.async-pan-zoom.enabled=true',
                                    'tests/reftest/tests/layout/reftests/reftest-sanity/reftest.list']},
        "reftest-no-accel": ['--setpref=layers.acceleration.force-enabled=disabled',
                             'tests/reftest/tests/layout/reftests/reftest.list'],
        "crashtest-ipc": {'env': {'MOZ_OMTC_ENABLED': '1',
                                  'MOZ_DISABLE_CONTEXT_SHARING_GLX': '1'},
                          'options': ['--setpref=browser.tabs.remote=true',
                                      '--setpref=browser.tabs.remote.autostart=true',
                                      '--setpref=layers.offmainthreadcomposition.testing.enabled=true',
                                      '--setpref=layers.async-pan-zoom.enabled=true',
                                      'tests/reftest/tests/testing/crashtest/crashtests.list']},
    },
    "all_xpcshell_suites": {
        "xpcshell": ["--manifest=tests/xpcshell/tests/all-test-dirs.list",
                     "%(abs_app_dir)s/" + XPCSHELL_NAME]
    },
    "all_cppunittest_suites": {
        "cppunittest": ['tests/cppunittests']
    },
    "all_jittest_suites": {
        "jittest": [],
        "jittest1": ["--total-chunks=2", "--this-chunk=1"],
        "jittest2": ["--total-chunks=2", "--this-chunk=2"],
        "jittest-chunked": [],
    },
    "all_mozbase_suites": {
        "mozbase": []
    },
    # Commands run on the slave before any test suite starts.
    "run_cmd_checks_enabled": True,
    "preflight_run_cmd_suites": [
        # NOTE 'enabled' is only here while we have unconsolidated configs
        {
            "name": "disable_screen_saver",
            "cmd": ["xset", "s", "off", "s", "reset"],
            "halt_on_failure": False,
            "architectures": ["32bit", "64bit"],
            "enabled": DISABLE_SCREEN_SAVER
        },
        {
            "name": "run mouse & screen adjustment script",
            "cmd": [
                # when configs are consolidated this python path will only show
                # for windows.
                "python", "../scripts/external_tools/mouse_and_screen_resolution.py",
                "--configuration-url",
                "https://hg.mozilla.org/%(branch)s/raw-file/%(revision)s/" +
                "testing/machine-configuration.json"],
            "architectures": ["32bit"],
            "halt_on_failure": True,
            "enabled": ADJUST_MOUSE_AND_SCREEN
        },
    ],
    "repos": [{"repo": "https://hg.mozilla.org/build/tools"}],
    "vcs_output_timeout": 1000,
    "minidump_stackwalk_path": MINIDUMP_STACKWALK_PATH,
    "minidump_save_path": "%(abs_work_dir)s/../minidumps",
    "buildbot_max_log_size": 52428800,
    "default_blob_upload_servers": [
        "https://blobupload.elasticbeanstalk.com",
    ],
    "blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
}
|
Russell-IO/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcspanner.py | 88 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcspanner
version_added: "2.3"
short_description: Create and Delete Instances/Databases on Spanner
description:
- Create and Delete Instances/Databases on Spanner.
See U(https://cloud.google.com/spanner/docs) for an overview.
requirements:
- python >= 2.6
- google-auth >= 0.5.0
- google-cloud-spanner >= 0.23.0
notes:
- Changing the configuration on an existing instance is not supported.
author:
- Tom Melendez (@supertom) <tom@supertom.com>
options:
configuration:
description:
- Configuration the instance should use.
- Examples are us-central1, asia-east1 and europe-west1.
required: yes
instance_id:
description:
- GCP spanner instance name.
required: yes
database_name:
description:
- Name of database contained on the instance.
force_instance_delete:
description:
- To delete an instance, this argument must exist and be true (along with state being equal to absent).
type: bool
default: 'no'
instance_display_name:
description:
- Name of Instance to display.
- If not specified, instance_id will be used instead.
node_count:
description:
- Number of nodes in the instance.
default: 1
state:
description:
- State of the instance or database. Applies to the most granular resource.
- If a C(database_name) is specified we remove it.
- If only C(instance_id) is specified, that is what is removed.
choices: [ absent, present ]
default: present
'''
# Usage examples shown by ansible-doc.  Fixed: the third task had
# "- gcspanner:" (a new list item) instead of "gcspanner:" nested under
# its "- name:" task, which is invalid playbook YAML.
EXAMPLES = '''
- name: Create instance
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    state: present
    node_count: 1

- name: Create database
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    database_name: '{{ database_name }}'
    state: present

- name: Delete instance (and all databases)
  gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    state: absent
    force_instance_delete: yes
'''
RETURN = '''
state:
description: The state of the instance or database. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
database_name:
description: Name of database.
returned: When database name is specified
type: str
sample: "mydatabase"
instance_id:
description: Name of instance.
returned: Always
type: str
sample: "myinstance"
previous_values:
description: List of dictionaries containing previous values prior to update.
returned: When an instance update has occurred and a field has been modified.
type: dict
sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
updated:
description: Boolean field to denote an update has occurred.
returned: When an update has occurred.
type: bool
sample: True
'''
# Feature detection: these flags gate the friendly fail_json() messages in
# main() instead of letting a bare ImportError escape.
try:
    from ast import literal_eval
    HAS_PYTHON26 = True
except ImportError:
    HAS_PYTHON26 = False
try:
    from google.cloud import spanner
    from google.gax.errors import GaxError
    HAS_GOOGLE_CLOUD_SPANNER = True
except ImportError as e:
    HAS_GOOGLE_CLOUD_SPANNER = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
from ansible.module_utils.six import string_types
# Spanner client identification and minimum supported SDK version.
CLOUD_CLIENT = 'google-cloud-spanner'
CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
def get_spanner_configuration_name(config_name, project_name):
    """Build the fully-qualified resource name of a regional instance config.

    :param config_name: short region name, e.g. 'us-central1'.
    :param project_name: GCP project id.
    :return: 'projects/<project>/instanceConfigs/regional-<region>'
    """
    return 'projects/{0}/instanceConfigs/regional-{1}'.format(project_name,
                                                              config_name)
def instance_update(instance):
    """
    Call update method on spanner client.

    Note: A ValueError exception is thrown despite the client succeeding.
    So, we validate the node_count and instance_display_name parameters and then
    ignore the ValueError exception.

    :param instance: a Spanner instance object
    :type instance: class `google.cloud.spanner.Instance`
    :returns True on success, raises ValueError on type error.
    :rtype ``bool``
    """
    # Validate both fields first; when both are invalid the display-name
    # message wins (same precedence as the original accumulation).
    problem = ''
    if not isinstance(instance.node_count, int):
        problem = 'node_count must be an integer %s (%s)' % (
            instance.node_count, type(instance.node_count))
    if instance.display_name and not isinstance(instance.display_name,
                                                string_types):
        problem = 'instance_display_name must be an string %s (%s)' % (
            instance.display_name, type(instance.display_name))
    if problem:
        raise ValueError(problem)
    try:
        instance.update()
    except ValueError:
        # The client raises ValueError even when the update succeeds; ignore.
        pass
    return True
def main():
    """Module entry point: reconcile Spanner instance/database state.

    Ensures the instance (and optionally one database) described by the
    module parameters is present or absent, then exits via
    module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            instance_id=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            database_name=dict(type='str'),
            configuration=dict(type='str', required=True),
            node_count=dict(type='int', default=1),
            instance_display_name=dict(type='str'),
            force_instance_delete=dict(type='bool', default=False),
            service_account_email=dict(type='str'),
            credentials_file=dict(type='str'),
            project_id=dict(type='str'),
        ),
    )
    # Fail early, with actionable messages, when dependencies are missing.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_GOOGLE_CLOUD_SPANNER:
        module.fail_json(msg="Please install google-cloud-spanner.")
    if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
        module.fail_json(msg="Please install %s client version %s" %
                         (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
    # Collect the parameters this module acts on.
    mod_params = {}
    mod_params['state'] = module.params.get('state')
    mod_params['instance_id'] = module.params.get('instance_id')
    mod_params['database_name'] = module.params.get('database_name')
    mod_params['configuration'] = module.params.get('configuration')
    mod_params['node_count'] = module.params.get('node_count', None)
    mod_params['instance_display_name'] = module.params.get('instance_display_name')
    mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
    creds, params = get_google_cloud_credentials(module)
    spanner_client = spanner.Client(project=params['project_id'],
                                    credentials=creds,
                                    user_agent=CLOUD_CLIENT_USER_AGENT)
    changed = False
    json_output = {}
    # i: instance handle; d: optional database handle on that instance.
    i = None
    if mod_params['instance_id']:
        config_name = get_spanner_configuration_name(
            mod_params['configuration'], params['project_id'])
        i = spanner_client.instance(mod_params['instance_id'],
                                    configuration_name=config_name)
    d = None
    if mod_params['database_name']:
        # TODO(supertom): support DDL
        ddl_statements = ''
        d = i.database(mod_params['database_name'], ddl_statements)
    if mod_params['state'] == 'absent':
        # Remove the most granular resource. If database is specified
        # we remove it. If only instance is specified, that is what is removed.
        if d is not None and d.exists():
            d.drop()
            changed = True
        else:
            if i.exists():
                if mod_params['force_instance_delete']:
                    i.delete()
                else:
                    module.fail_json(
                        msg=(("Cannot delete Spanner instance: "
                              "'force_instance_delete' argument not specified")))
                changed = True
    elif mod_params['state'] == 'present':
        if not i.exists():
            # Instance is new: create it with the requested shape.
            i = spanner_client.instance(mod_params['instance_id'],
                                        configuration_name=config_name,
                                        display_name=mod_params['instance_display_name'],
                                        node_count=mod_params['node_count'] or 1)
            i.create()
            changed = True
        else:
            # update instance: reload current state, apply changed fields,
            # and report the previous values back to the caller.
            i.reload()
            inst_prev_vals = {}
            if i.display_name != mod_params['instance_display_name']:
                inst_prev_vals['instance_display_name'] = i.display_name
                i.display_name = mod_params['instance_display_name']
            if mod_params['node_count']:
                if i.node_count != mod_params['node_count']:
                    inst_prev_vals['node_count'] = i.node_count
                    i.node_count = mod_params['node_count']
            if inst_prev_vals:
                changed = instance_update(i)
                json_output['updated'] = changed
                json_output['previous_values'] = {'instance': inst_prev_vals}
        if d:
            if not d.exists():
                d.create()
                d.reload()
                changed = True
    json_output['changed'] = changed
    json_output.update(mod_params)
    module.exit_json(**json_output)
|
IronLanguages/ironpython3 | refs/heads/master | Src/StdLib/Lib/unittest/suite.py | 22 | """TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    # When True, run() drops its reference to each test after running it so
    # completed TestCase instances can be garbage collected.
    _cleanup = True
    def __init__(self, tests=()):
        self._tests = []
        # Count of tests removed via _removeTestAtIndex; keeps
        # countTestCases() accurate after cleanup.
        self._removed_tests = 0
        self.addTests(tests)
    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)
    def __iter__(self):
        return iter(self._tests)
    def countTestCases(self):
        # Include tests already removed by cleanup, then sum the live ones.
        cases = self._removed_tests
        for test in self:
            if test:
                cases += test.countTestCases()
        return cases
    def addTest(self, test):
        # sanity checks
        if not callable(test):
            raise TypeError("{} is not callable".format(repr(test)))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)
    def addTests(self, tests):
        # A string is iterable but is almost certainly a caller mistake.
        if isinstance(tests, str):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)
    def run(self, result):
        for index, test in enumerate(self):
            if result.shouldStop:
                break
            test(result)
            if self._cleanup:
                self._removeTestAtIndex(index)
        return result
    def _removeTestAtIndex(self, index):
        """Stop holding a reference to the TestCase at index."""
        try:
            test = self._tests[index]
        except TypeError:
            # support for suite implementations that have overridden self._tests
            pass
        else:
            # Some unittest tests add non TestCase/TestSuite objects to
            # the suite.
            if hasattr(test, 'countTestCases'):
                self._removed_tests += test.countTestCases()
            self._tests[index] = None
    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)
    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result, debug=False):
        # Only the outermost suite of a run performs the final class/module
        # teardown; nested suites must not tear fixtures down early.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for index, test in enumerate(self):
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # Crossing into a concrete TestCase: finish the previous
                # class's teardown, then run module/class setup fixtures.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class- or module-level setup failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue

            if not debug:
                test(result)
            else:
                # Debug mode propagates exceptions instead of recording them.
                test.debug()

            if self._cleanup:
                self._removeTestAtIndex(index)

        if topLevel:
            # End of the outermost run: flush remaining fixture teardown.
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        # _DebugResult makes the fixture helpers re-raise rather than record.
        debug = _DebugResult()
        self.run(debug, True)

    ################################

    def _handleClassSetUp(self, test, result):
        # Run setUpClass exactly once per class transition; a failure is
        # recorded on the class so its remaining tests get skipped.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _get_previous_module(self, result):
        # Module name of the class whose tests ran last, or None.
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        # When execution crosses into a new module, tear the old module
        # down and run the new module's setUpModule (if defined).
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)

        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        # Report a fixture failure as if it came from a pseudo-test named
        # errorName; SkipTest is forwarded to addSkip when available.
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            # setUpModule failed, so tearDownModule must not run.
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _tearDownPreviousClass(self, test, result):
        # Run tearDownClass for the previous class when the next test
        # belongs to a different class; called with test=None at run end.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    # Minimal stand-in for a TestResult: just enough state for
    # TestSuite.run()'s fixture bookkeeping; fixture helpers re-raise
    # when they see this type instead of recording errors.
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
|
aruizramon/alec_erpnext | refs/heads/master | erpnext/hr/report/employee_leave_balance/employee_leave_balance.py | 19 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.hr.doctype.leave_application.leave_application \
import get_leave_allocation_records, get_leave_balance_on, get_approved_leaves_for_period
def execute(filters=None):
    """Report entry point: return (columns, rows) for the leave-balance report."""
    # Ordered by name so per-leave-type columns appear deterministically.
    types = frappe.db.sql_list("select name from `tabLeave Type` order by name asc")
    return get_columns(types), get_data(filters, types)
def get_columns(leave_types):
    """Build report column definitions: employee info, then a Taken and a
    Balance column for each leave type."""
    headers = [
        _("Employee") + ":Link/Employee:150",
        _("Employee Name") + "::200",
        _("Department") + "::150",
    ]
    for lt in leave_types:
        headers.extend([
            _(lt) + " " + _("Taken") + ":Float:160",
            _(lt) + " " + _("Balance") + ":Float:160",
        ])
    return headers
def get_data(filters, leave_types):
    """Return one row per active employee: identity columns followed by
    (taken, balance) per leave type, matching get_columns()."""
    # Allocation records as of the report end date, keyed by employee.
    allocation_records_based_on_to_date = get_leave_allocation_records(filters.to_date)

    active_employees = frappe.get_all("Employee",
        filters = { "status": "Active", "company": filters.company},
        fields = ["name", "employee_name", "department"])

    data = []
    for employee in active_employees:
        row = [employee.name, employee.employee_name, employee.department]
        for leave_type in leave_types:
            # leaves taken
            leaves_taken = get_approved_leaves_for_period(employee.name, leave_type,
                filters.from_date, filters.to_date)
            # closing balance
            closing = get_leave_balance_on(employee.name, leave_type, filters.to_date,
                allocation_records_based_on_to_date.get(employee.name, frappe._dict()))

            row += [leaves_taken, closing]

        data.append(row)

    return data
nacc/autotest | refs/heads/master | server/base_utils_unittest.py | 2 | #!/usr/bin/python
__author__ = 'raphtee@google.com (Travis Miller)'
import unittest
try:
import autotest.common as common
except ImportError:
import common
from autotest.server import utils
class UtilsTest(unittest.TestCase):
    """Unit tests for machine-tupling and machine-string parsing helpers
    in autotest's server utils."""

    def setUp(self):
        # define out machines here
        self.machines = ['mach1', 'mach2', 'mach3', 'mach4', 'mach5',
                         'mach6', 'mach7']

        self.ntuples = [['mach1', 'mach2'], ['mach3', 'mach4'],
                        ['mach5', 'mach6']]
        self.failures = []
        self.failures.append(('mach7', "machine can not be tupled"))

    def test_form_cell_mappings(self):
        # Seven machines pair off into three 2-tuples; the odd one out is
        # reported in the failures list.
        (ntuples, failures) = utils.form_ntuples_from_machines(self.machines)
        self.assertEquals(self.ntuples, ntuples)
        self.assertEquals(self.failures, failures)

    # parse_machine() test cases
    def test_parse_machine_good(self):
        '''test that parse_machine() is outputting the correct data'''
        gooddata = (('host', ('host', 'root', '', 22)),
                    ('host:21', ('host', 'root', '', 21)),
                    ('user@host', ('host', 'user', '', 22)),
                    ('user:pass@host', ('host', 'user', 'pass', 22)),
                    ('user:pass@host:1234', ('host', 'user', 'pass', 1234)),
                    )
        for machine, result in gooddata:
            self.assertEquals(utils.parse_machine(machine), result)

    def test_parse_machine_override(self):
        '''Test that parse_machine() defaults can be overridden'''
        self.assertEquals(utils.parse_machine('host', 'bob', 'foo', 1234),
                          ('host', 'bob', 'foo', 1234))

    def test_parse_machine_bad(self):
        '''test that bad data passed to parse_machine() will raise an exception'''
        baddata = (('host:port', ValueError),  # pass a non-integer string for port
                   ('host:22:33', ValueError),  # pass two ports
                   (':22', ValueError),  # neglect to pass a hostname #1
                   ('user@', ValueError),  # neglect to pass a hostname #2
                   ('user@:22', ValueError),  # neglect to pass a hostname #3
                   (':pass@host', ValueError),  # neglect to pass a username
                   )
        for machine, exception in baddata:
            self.assertRaises(exception, utils.parse_machine, machine)
if __name__ == "__main__":
unittest.main()
|
mingkaic/rocnnet | refs/heads/master | app/pylib/tf_rl/utils/event_queue.py | 1 | import time
from queue import PriorityQueue
class EqItem(object):
    """Function and scheduled execution timestamp.

    This class is needed because if we used a plain tuple instead,
    Python would occasionally complain that it does not know how to
    compare functions (when two items carry equal timestamps)."""

    def __init__(self, ts, f):
        # ts: absolute timestamp at which f should run; f: the callable.
        self.ts = ts
        self.f = f

    def __lt__(self, other):
        # Ordering considers only the timestamp, never the callable.
        return self.ts < other.ts

    def __eq__(self, other):
        return self.ts == other.ts
class EventQueue(object):
    """A priority queue of callables keyed by execution timestamp.

    The earliest-scheduled event fires first. In its current form it is
    NOT thread safe.
    """

    def __init__(self):
        self.q = PriorityQueue()

    def schedule(self, f, ts):
        """Arrange for callable ``f`` to execute at absolute time ``ts``."""
        self.q.put(EqItem(ts, f))

    def schedule_recurring(self, f, interval):
        """Run ``f`` every ``interval`` seconds, starting ``interval``
        seconds from now."""
        def repeating():
            f()
            # Re-arm after each execution so the event keeps recurring.
            self.schedule(repeating, time.time() + interval)
        self.schedule(repeating, time.time() + interval)

    def run(self):
        """Execute queued events as close to their timestamps as possible.

        Blocks forever, sleeping until each next event is due.
        """
        while True:
            item = self.q.get()
            delay = item.ts - time.time()
            if delay > 0:
                time.sleep(delay)
            item.f()
|
40423131/2016fallcadp_hw | refs/heads/gh-pages | course/onshape.py | 39 | # https://www.onshape.com/ 只有對特定的非營利機構, 才可免費使用
# 為何使用 OnShape? 全雲端, 用瀏覽器或手機平板的 App 就能開啟使用
# OnShape 開放 FeatureScript 系統程式碼, 代表什麼意義?
# 利用 OnShape 所提供的 12 週課程教材學習 OnShape, 但是要如何有效利用協同與網路來呈現教材內容或者學習成果?
|
bouthilx/fuel | refs/heads/master | fuel/bin/fuel_info.py | 19 | #!/usr/bin/env python
"""Fuel utility for extracting metadata."""
import argparse
import os
import h5py
message_prefix_template = 'Metadata for {}'
message_body_template = """
The command used to generate this file is
{}
Relevant versions are
H5PYDataset {}
fuel.converters {}
"""
def main(args=None):
    """Entry point for `fuel-info` script.

    This function can also be imported and used from Python.

    Parameters
    ----------
    args : iterable, optional (default: None)
        A list of arguments that will be passed to Fuel's information
        utility. If this argument is not specified, `sys.argv[1:]` will
        be used.

    """
    parser = argparse.ArgumentParser(
        description='Extracts metadata from a Fuel-converted HDF5 file.')
    parser.add_argument("filename", help="HDF5 file to analyze")
    # Bug fix: `args` was previously ignored -- parse_args() was called
    # with no arguments and always fell back to sys.argv[1:], contradicting
    # the documented behaviour above. Passing `args` through restores it
    # (argparse uses sys.argv[1:] itself when args is None).
    args = parser.parse_args(args)

    with h5py.File(args.filename, 'r') as h5file:
        interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
        fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
        fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')

    message_prefix = message_prefix_template.format(
        os.path.basename(args.filename))
    message_body = message_body_template.format(
        fuel_convert_command, interface_version, fuel_convert_version)

    message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
                       message_body])
    print(message)
if __name__ == "__main__":
main()
|
Thomsen22/MissingMoney | refs/heads/master | Day Ahead Market - 24 Bus/main.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 14:13:34 2016
@author: Søren
"""
# Own modules
import optimization as results
# Runs the optimization and unpacks the day-ahead market results:
# generator dataframe, wind/solar penetration shares and total costs.
gen_dataframe, wind_penetration, solar_penetration, totalwindcost, totalsolarcost = results.marketoptimization()
|
zhaochl/python-utils | refs/heads/master | verify_code/Imaging-1.1.7/Sane/demo_numarray.py | 8 | #!/usr/bin/env python
#
# Shows how to scan a 16 bit grayscale image into a numarray object
#
# Get the path set up to find PIL modules if not installed yet:
import sys ; sys.path.append('../PIL')
from numarray import *
import sane
import Image
def toImage(arr):
    # Convert a numarray array into an 8-bit grayscale PIL image.
    if arr.type().bytes == 1:
        # need to swap coordinates btw array and image (with [::-1])
        im = Image.fromstring('L', arr.shape[::-1], arr.tostring())
    else:
        # Wider integer data (e.g. a 16-bit scan): shift to zero and
        # rescale into 0..255 before building the 8-bit image.
        arr_c = arr - arr.min()
        arr_c *= (255./arr_c.max())
        arr = arr_c.astype(UInt8)
        # need to swap coordinates btw array and image (with [::-1])
        im = Image.fromstring('L', arr.shape[::-1], arr.tostring())
    return im
# Initialise SANE and open the first available scanner device.
print 'SANE version:', sane.init()
print 'Available devices=', sane.get_devices()
s = sane.open(sane.get_devices()[0][0])

# Set scan parameters
s.mode = 'gray'
s.br_x=320. ; s.br_y=240.

print 'Device parameters:', s.get_parameters()

# Request a 16-bit scan and display it (rescaled to 8 bits by toImage).
s.depth=16
arr16 = s.arr_scan()
toImage(arr16).show()
|
jaimeMF/youtube-dl | refs/heads/master | youtube_dl/extractor/rtbf.py | 23 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unescapeHTML,
)
class RTBFIE(InfoExtractor):
    """Extractor for rtbf.be video pages (including the ouftivi subsite)."""
    _VALID_URL = r'https?://(?:www\.)?rtbf\.be/(?:video/[^?]+\?.*\bid=|ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
        'md5': '799f334ddf2c0a582ba80c44655be570',
        'info_dict': {
            'id': '1921274',
            'ext': 'mp4',
            'title': 'Les Diables au coeur (épisode 2)',
            'duration': 3099,
        }
    }, {
        # geo restricted
        'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442',
        'only_matching': True,
    }, {
        'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858',
        'only_matching': True,
    }]

    # (JSON source key, format_id) pairs, ordered lowest to highest quality.
    _QUALITIES = [
        ('mobile', 'mobile'),
        ('web', 'SD'),
        ('url', 'MD'),
        ('high', 'HD'),
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.rtbf.be/video/embed?id=%s' % video_id, video_id)

        data = self._parse_json(
            unescapeHTML(self._search_regex(
                r'data-media="([^"]+)"', webpage, 'data video')),
            video_id)

        # Robustness fix: 'provider' may be absent from the JSON payload;
        # the previous data.get('provider').lower() raised AttributeError
        # on such pages.
        provider = data.get('provider')
        if provider and provider.lower() == 'youtube':
            video_url = data.get('downloadUrl') or data.get('url')
            return self.url_result(video_url, 'Youtube')

        # Robustness fix: tolerate a missing 'sources' mapping instead of
        # raising KeyError; the result is then an empty format list.
        sources = data.get('sources') or {}
        formats = []
        for key, format_id in self._QUALITIES:
            format_url = sources.get(key)
            if format_url:
                formats.append({
                    'format_id': format_id,
                    'url': format_url,
                })

        return {
            'id': video_id,
            'formats': formats,
            'title': data['title'],
            'description': data.get('description') or data.get('subtitle'),
            'thumbnail': data.get('thumbnail'),
            'duration': data.get('duration') or data.get('realDuration'),
            'timestamp': int_or_none(data.get('created')),
            'view_count': int_or_none(data.get('viewCount')),
        }
|
pilou-/ansible | refs/heads/devel | lib/ansible/modules/notification/snow_record.py | 24 | #!/usr/bin/python
# Copyright (c) 2017 Tim Rightnour <thegarbledone@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: snow_record
short_description: Create/Delete/Update records in ServiceNow
version_added: "2.5"
description:
- Creates/Deletes/Updates a single record in ServiceNow
options:
instance:
description:
- The service now instance name
required: true
username:
description:
- User to connect to ServiceNow as
required: true
password:
description:
- Password for username
required: true
table:
description:
- Table to query for records
required: false
default: incident
state:
description:
- If C(present) is supplied with a C(number)
argument, the module will attempt to update the record with
the supplied data. If no such record exists, a new one will
be created. C(absent) will delete a record.
choices: [ present, absent ]
required: true
data:
description:
- key, value pairs of data to load into the record.
See Examples. Required for C(state:present)
number:
description:
- Record number to update. Required for C(state:absent)
required: false
lookup_field:
description:
- Changes the field that C(number) uses to find records
required: false
default: number
attachment:
description:
- Attach a file to the record
required: false
requirements:
- python pysnow (pysnow)
author:
- Tim Rightnour (@garbled1)
'''
EXAMPLES = '''
- name: Grab a user record
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: 62826bf03710200044e0bfc8bcbe5df1
table: sys_user
lookup_field: sys_id
- name: Create an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
data:
short_description: "This is a test incident opened by Ansible"
severity: 3
priority: 2
register: new_incident
- name: Delete the record we just made
snow_record:
username: admin
password: xxxxxxx
instance: dev99999
state: absent
number: "{{new_incident['record']['number']}}"
- name: Delete a non-existant record
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: absent
number: 9872354
failed_when: false
- name: Update an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: INC0000055
data:
work_notes : "Been working all day on this thing."
- name: Attach a file to an incident
snow_record:
username: ansible_test
password: my_password
instance: dev99999
state: present
number: INC0000055
attachment: README.md
tags: attach
'''
RETURN = '''
record:
description: Record data from Service Now
type: dict
returned: when supported
attached_file:
description: Details of the file that was attached via C(attachment)
type: dict
returned: when supported
'''
import os
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native
# Pull in pysnow
HAS_PYSNOW = False
PYSNOW_IMP_ERR = None
try:
import pysnow
HAS_PYSNOW = True
except ImportError:
PYSNOW_IMP_ERR = traceback.format_exc()
def run_module():
    """Create, update or delete a single ServiceNow record.

    Reads the module parameters, connects to ServiceNow via pysnow and
    performs the requested operation, honouring Ansible check mode.
    Results and failures are reported through the AnsibleModule
    exit_json/fail_json API.
    """
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = dict(
        instance=dict(default=None, type='str', required=True),
        username=dict(default=None, type='str', required=True, no_log=True),
        password=dict(default=None, type='str', required=True, no_log=True),
        table=dict(type='str', required=False, default='incident'),
        state=dict(choices=['present', 'absent'],
                   type='str', required=True),
        number=dict(default=None, required=False, type='str'),
        data=dict(default=None, required=False, type='dict'),
        lookup_field=dict(default='number', required=False, type='str'),
        attachment=dict(default=None, required=False, type='str')
    )
    module_required_if = [
        ['state', 'absent', ['number']],
    ]

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_if=module_required_if
    )

    # check for pysnow
    if not HAS_PYSNOW:
        module.fail_json(msg=missing_required_lib('pysnow'), exception=PYSNOW_IMP_ERR)

    params = module.params
    instance = params['instance']
    username = params['username']
    password = params['password']
    table = params['table']
    state = params['state']
    number = params['number']
    data = params['data']
    lookup_field = params['lookup_field']

    result = dict(
        changed=False,
        instance=instance,
        table=table,
        number=number,
        lookup_field=lookup_field
    )

    # check for attachments
    if params['attachment'] is not None:
        attach = params['attachment']
        b_attach = to_bytes(attach, errors='surrogate_or_strict')
        if not os.path.exists(b_attach):
            module.fail_json(msg="Attachment {0} not found".format(attach))
        result['attachment'] = attach
    else:
        attach = None

    # Connect to ServiceNow
    try:
        conn = pysnow.Client(instance=instance, user=username,
                             password=password)
    except Exception as detail:
        module.fail_json(msg='Could not connect to ServiceNow: {0}'.format(str(detail)), **result)

    # Deal with check mode
    if module.check_mode:

        # if we are in check mode and have no number, we would have created
        # a record. We can only partially simulate this
        if number is None:
            result['record'] = dict(data)
            result['changed'] = True

        # do we want to check if the record is non-existent?
        elif state == 'absent':
            try:
                record = conn.query(table=table, query={lookup_field: number})
                res = record.get_one()
                result['record'] = dict(Success=True)
                result['changed'] = True
            except pysnow.exceptions.NoResults:
                result['record'] = None
            except Exception as detail:
                module.fail_json(msg="Unknown failure in query record: {0}".format(str(detail)), **result)

        # Let's simulate modification
        else:
            try:
                record = conn.query(table=table, query={lookup_field: number})
                res = record.get_one()
                # Bug fix: `data` may be None when only an attachment is
                # supplied; the previous code called data.items()
                # unconditionally and crashed in check mode, while the
                # real (non-check) path guards with `if data is not None`.
                if data is not None:
                    for key, value in data.items():
                        res[key] = value
                    result['changed'] = True
                if attach is not None:
                    # Attaching a file would also modify the record.
                    result['changed'] = True
                result['record'] = res
            except pysnow.exceptions.NoResults:
                snow_error = "Record does not exist"
                module.fail_json(msg=snow_error, **result)
            except Exception as detail:
                module.fail_json(msg="Unknown failure in query record: {0}".format(str(detail)), **result)
        module.exit_json(**result)

    # now for the real thing: (non-check mode)

    # are we creating a new record?
    if state == 'present' and number is None:
        try:
            record = conn.insert(table=table, payload=dict(data))
        except pysnow.UnexpectedResponse as e:
            snow_error = "Failed to create record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        result['record'] = record
        result['changed'] = True

    # we are deleting a record
    elif state == 'absent':
        try:
            record = conn.query(table=table, query={lookup_field: number})
            res = record.delete()
        except pysnow.exceptions.NoResults:
            # Deleting a record that is already gone is treated as success.
            res = dict(Success=True)
        except pysnow.exceptions.MultipleResults:
            snow_error = "Multiple record match"
            module.fail_json(msg=snow_error, **result)
        except pysnow.UnexpectedResponse as e:
            snow_error = "Failed to delete record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        except Exception as detail:
            snow_error = "Failed to delete record: {0}".format(str(detail))
            module.fail_json(msg=snow_error, **result)
        result['record'] = res
        result['changed'] = True

    # We want to update a record
    else:
        try:
            record = conn.query(table=table, query={lookup_field: number})

            if data is not None:
                res = record.update(dict(data))
                result['record'] = res
                result['changed'] = True
            else:
                res = record.get_one()
                result['record'] = res
            if attach is not None:
                res = record.attach(b_attach)
                result['changed'] = True
                result['attached_file'] = res

        except pysnow.exceptions.MultipleResults:
            snow_error = "Multiple record match"
            module.fail_json(msg=snow_error, **result)
        except pysnow.exceptions.NoResults:
            snow_error = "Record does not exist"
            module.fail_json(msg=snow_error, **result)
        except pysnow.UnexpectedResponse as e:
            snow_error = "Failed to update record: {0}, details: {1}".format(e.error_summary, e.error_details)
            module.fail_json(msg=snow_error, **result)
        except Exception as detail:
            snow_error = "Failed to update record: {0}".format(str(detail))
            module.fail_json(msg=snow_error, **result)

    module.exit_json(**result)
def main():
    """Script entry point: delegate to run_module()."""
    run_module()
if __name__ == '__main__':
main()
|
nikhilsaraf/Twitter-Analytics | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py | 152 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import re
from pip._vendor.six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build the tree-walker bound to a concrete ElementTree module.

    Returns ``locals()`` (consumed by ``moduleFactoryFactory``) exposing a
    ``TreeWalker`` class for the given ElementTree implementation.
    """
    # Bug fix: `string_types` is used in the isinstance assertion below but
    # was never imported at module level (only `text_type` is), so the type
    # check raised NameError at runtime. Import it here from the six copy
    # this file already depends on.
    from pip._vendor.six import string_types

    ElementTree = ElementTreeImplementation
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            if not(hasattr(node, "tag")):
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text

            else:
                assert isinstance(node.tag, string_types), type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                return None
            else:
                if element.text:
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element's subtree comes its tail text, then the
                # next sibling in the parent.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None

    return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
gspilio/nova | refs/heads/master | nova/conductor/__init__.py | 9 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo.config.cfg
from nova.conductor import api as conductor_api
def API(*args, **kwargs):
    """Factory for a conductor API instance.

    Returns LocalAPI when local conductor use is requested, either via the
    ``use_local`` keyword or the ``[conductor] use_local`` config option;
    otherwise the RPC-based API. Remaining arguments are forwarded to the
    chosen class.
    """
    prefer_local = kwargs.pop('use_local', False)
    if oslo.config.cfg.CONF.conductor.use_local or prefer_local:
        cls = conductor_api.LocalAPI
    else:
        cls = conductor_api.API
    return cls(*args, **kwargs)
|
CitizenB/ansible | refs/heads/devel | test/units/parsing/test_data_loader.py | 1 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping
class TestDataLoader(unittest.TestCase):
    """Tests for DataLoader's parsing of JSON and YAML file contents.

    _get_file_contents is patched so no real files are read; it returns
    a (contents, show_content) tuple.
    """

    def setUp(self):
        self._loader = DataLoader()

    def tearDown(self):
        pass

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_json_from_file(self, mock_def):
        mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True)
        output = self._loader.load_from_file('dummy_json.txt')
        self.assertEqual(output, dict(a=1,b=2,c=3))

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_yaml_from_file(self, mock_def):
        mock_def.return_value = ("""
        a: 1
        b: 2
        c: 3
        """, True)
        output = self._loader.load_from_file('dummy_yaml.txt')
        self.assertEqual(output, dict(a=1,b=2,c=3))

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_fail_from_file(self, mock_def):
        # Deliberately malformed YAML must surface as AnsibleParserError.
        mock_def.return_value = ("""
        TEXT:
            ***
               NOT VALID
        """, True)
        self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
class TestDataLoaderWithVault(unittest.TestCase):
    """Tests for DataLoader decrypting vault-encrypted file contents."""

    def setUp(self):
        self._loader = DataLoader()
        # Vault password matching the fixture ciphertext below.
        self._loader.set_vault_password('ansible')

    def tearDown(self):
        pass

    @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
    def test_parse_from_vault_1_1_file(self):
        # Vault 1.1 ciphertext of the YAML document "foo: bar".
        vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
        if PY3:
            builtins_name = 'builtins'
        else:
            builtins_name = '__builtin__'
        with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
            output = self._loader.load_from_file('dummy_vault.txt')
            self.assertEqual(output, dict(foo='bar'))
|
Ashvio/ProPlayerInfoBot | refs/heads/master | src/DatabaseCreater.py | 1 | from src.DatabaseManager import DatabaseManager, load_db
if __name__ == "__main__":
    # flush=True so the progress text (printed without a trailing newline)
    # actually appears before each long-running step instead of sitting in
    # the stdout buffer until the following "[DONE]" line flushes it.
    print("Building database...", end="", flush=True)
    manager = DatabaseManager()
    print("[DONE]")

    print("Finding videos...")
    manager.find_videos()
    print("[DONE]")

    print("Saving database...", end="", flush=True)
    manager.save_db("../Databases/dict-2-25-16.db")
    print("[DONE]")

    print("Exiting...")
aam-at/tensorflow | refs/heads/master | tensorflow/python/data/benchmarks/filter_benchmark.py | 22 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.filter()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
# TODO(b/119837791): Add eager benchmarks.
class FilterBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for `tf.data.Dataset.filter()`."""

  def _benchmark(self, predicate, name):
    # Infinite dataset of scalar `True` values filtered by `predicate`;
    # the harness measures per-element overhead over 100k elements.
    dataset = (
        dataset_ops.Dataset.from_tensors(True).repeat(None).filter(predicate))
    self.run_and_report_benchmark(dataset, num_elements=100000, name=name)

  def benchmark_simple_function(self):
    self._benchmark(array_ops.identity, "simple_function")

  def benchmark_return_component_optimization(self):
    # NOTE(review): the name suggests this exercises the optimization for
    # predicates that return an input component unchanged -- confirm
    # against the tf.data optimization pass.
    self._benchmark(lambda x: x, "return_component")
# Run the benchmarks when executed as a script.
if __name__ == "__main__":
  benchmark_base.test.main()
|
yati-sagade/incubator-airflow | refs/heads/master | airflow/contrib/hooks/wasb_hook.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.hooks.base_hook import BaseHook
from azure.storage.blob import BlockBlobService
class WasbHook(BaseHook):
    """
    Interacts with Azure Blob Storage through the wasb:// protocol.

    Additional options passed in the 'extra' field of the connection will be
    passed to the `BlockBlobService()` constructor. For example, authenticate
    using a SAS token by adding {"sas_token": "YOUR_TOKEN"}.

    :param wasb_conn_id: Reference to the wasb connection.
    :type wasb_conn_id: str
    """

    def __init__(self, wasb_conn_id='wasb_default'):
        self.conn_id = wasb_conn_id
        # Eagerly resolve the connection so misconfiguration fails fast.
        self.connection = self.get_conn()

    def get_conn(self):
        """Return the BlockBlobService object."""
        connection = self.get_connection(self.conn_id)
        # Everything in 'extra' is forwarded verbatim to BlockBlobService.
        extra_options = connection.extra_dejson
        return BlockBlobService(account_name=connection.login,
                                account_key=connection.password,
                                **extra_options)

    def check_for_blob(self, container_name, blob_name, **kwargs):
        """
        Check if a blob exists on Azure Blob Storage.

        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.exists()` takes.
        :type kwargs: object
        :return: True if the blob exists, False otherwise.
        :rtype: bool
        """
        return self.connection.exists(container_name, blob_name, **kwargs)

    def check_for_prefix(self, container_name, prefix, **kwargs):
        """
        Check if a prefix exists on Azure Blob storage.

        :param container_name: Name of the container.
        :type container_name: str
        :param prefix: Prefix of the blob.
        :type prefix: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.list_blobs()` takes.
        :type kwargs: object
        :return: True if blobs matching the prefix exist, False otherwise.
        :rtype: bool
        """
        # Ask for a single result: one match is enough to answer the question.
        first_match = self.connection.list_blobs(container_name, prefix,
                                                 num_results=1, **kwargs)
        return any(True for _ in first_match)

    def load_file(self, file_path, container_name, blob_name, **kwargs):
        """
        Upload a file to Azure Blob Storage.

        :param file_path: Path to the file to load.
        :type file_path: str
        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.create_blob_from_path()` takes.
        :type kwargs: object
        """
        # Reorder the argument order from airflow.hooks.S3_hook.load_file.
        self.connection.create_blob_from_path(container_name, blob_name,
                                              file_path, **kwargs)

    def load_string(self, string_data, container_name, blob_name, **kwargs):
        """
        Upload a string to Azure Blob Storage.

        :param string_data: String to load.
        :type string_data: str
        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.create_blob_from_text()` takes.
        :type kwargs: object
        """
        # Reorder the argument order from airflow.hooks.S3_hook.load_string.
        self.connection.create_blob_from_text(container_name, blob_name,
                                              string_data, **kwargs)
|
bitkwan/mcupdater | refs/heads/master | updater.py | 1 | # -*- coding: utf-8 -*-
import os,tempfile,platform,urllib.request,sys,threading,getpass,config,hashlib,json,requests,random,string,lang,subprocess
# Set the console window title ("@title" is a Windows cmd builtin).
os.system("@title "+config.NAME+" "+config.VER)
pwd = os.getcwd()
# Connectivity probe: the test endpoint is expected to answer 204 No Content.
r = requests.get(config.TEST_URL)
if r.status_code!=204:
    print(lang.NETWORK_ERROR)
    input()
    sys.exit()
elif os.path.exists(config.MC_DIR)==False:
    # The configured .minecraft directory is missing.
    print(lang.CANNOT_FIND_MC_DIR)
    input()
    sys.exit()
elif os.path.exists(config.MC_DIR+"mods/")==False:
    # The mods/ subdirectory is missing.
    print(lang.CANNOT_FIND_MODS_DIR)
    input()
    sys.exit()
def readFile(file):
    """Return the last line of *file* (i.e. the whole content for the
    one-line config files this updater writes), or "" for an empty file.

    The original crashed with UnboundLocalError on an empty file and did
    not use a context manager, so the handle leaked on a read error.
    """
    txt = ""
    with open(file) as f:
        for line in f:
            txt = line
    return txt
def execCmd(cmd):
    """Run *cmd* in a shell and return everything it printed to stdout."""
    with os.popen(cmd) as pipe:
        return pipe.read()
def callbackfunc(blocknum, blocksize, totalsize):
    """urllib.request.urlretrieve progress hook: draws an in-place progress line.

    :param blocknum: number of blocks transferred so far
    :param blocksize: size of one block in bytes
    :param totalsize: total download size in bytes (may be <= 0 when the
        server sends no Content-Length)

    Fixes vs. the original: the unused ``global url`` declaration is gone,
    and an unknown/zero total no longer crashes with ZeroDivisionError.
    """
    if totalsize <= 0:
        # Unknown length: nothing sensible to draw.
        return
    percent = 100.0 * blocknum * blocksize / totalsize
    if percent > 100:
        percent = 100
    downsize = blocknum * blocksize
    if downsize >= totalsize:
        downsize = totalsize
    s = "%.2f%%" % (percent) + "====>" + "%.2f" % (downsize / 1024 / 1024) + "M/" + "%.2f" % (totalsize / 1024 / 1024) + "M \r"
    sys.stdout.write(s)
    sys.stdout.flush()
    if percent == 100:
        # Finish the progress line once the download completes.
        print('')
def dl(url,filename):
    """Download *url* to local *filename*, reporting progress via callbackfunc."""
    urllib.request.urlretrieve(url, filename, callbackfunc)
def unzip(source_zip,target_dir):
    """Extract *source_zip* into *target_dir* using a downloaded 7-Zip binary.

    Downloads 7z.exe/7z.dll/7z.sfx into the Windows per-user temp directory
    on first use, then shells out to 7z.  Windows-only (hard-coded path and
    os.system invocation).
    """
    print("")
    print("- "+lang.DOWNLOADING_MSG)
    # Per-user temp dir; the 7-Zip files are cached there between runs.
    program_pwd = "C:\\Users\\" + getpass.getuser() + "\\AppData\\Local\\Temp\\"
    if os.path.isfile(program_pwd+'7z.exe') == False:
        dl("http://uuz.cat/7z/7z.exe",program_pwd+"7z.exe")
    if os.path.isfile(program_pwd+'7z.dll') == False:
        dl("http://uuz.cat/7z/7z.dll",program_pwd+"7z.dll")
    if os.path.isfile(program_pwd+'7z.sfx') == False:
        dl("http://uuz.cat/7z/7z.sfx",program_pwd+"7z.sfx")
    print("")
    print("- "+lang.UNZIP_MSG)
    # NOTE(review): the command is built by string concatenation and run via
    # os.system; paths containing quotes would break or inject into the shell.
    # Also the binaries are fetched over plain HTTP with no integrity check.
    cmd=program_pwd+'7z.exe x \"'+source_zip+'" -y -aos -o\"'+target_dir+'\"'
    os.system(cmd)
def md5sum(file_name):
    """Return the hex MD5 digest of the file at *file_name*.

    Reads in binary mode inside a context manager (the original leaked the
    handle on a read error) and in chunks, so large mod archives do not
    need to fit in memory at once.
    """
    digest = hashlib.md5()
    with open(file_name, 'rb') as fp:
        for chunk in iter(lambda: fp.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest()
def deep_search(needles, haystack):
    """Recursively search nested dicts/lists for the given key(s).

    :param needles: a key, or a list of keys, to look for
    :param haystack: arbitrarily nested dicts and lists
    :return: dict mapping each found key to its value; when a key occurs
        more than once, the last occurrence in iteration order wins
        (matching the original implementation).
    """
    found = {}
    # Use isinstance instead of the original type(x) == type([]) comparisons.
    if not isinstance(needles, list):
        needles = [needles]
    if isinstance(haystack, dict):
        for needle in needles:
            if needle in haystack:
                # Found at this level: do not descend further for this needle.
                found[needle] = haystack[needle]
            elif haystack:
                for key in haystack:
                    result = deep_search(needle, haystack[key])
                    if result:
                        found.update(result)
    elif isinstance(haystack, list):
        for node in haystack:
            result = deep_search(needles, node)
            if result:
                found.update(result)
    return found
def random_str(randomlength=8):
    """Return *randomlength* distinct random ASCII letters.

    Like the original shuffle-and-slice version, the result is capped at
    the 52 available letters (no repeats are possible).
    """
    pool = string.ascii_letters
    k = randomlength if randomlength <= len(pool) else len(pool)
    return ''.join(random.sample(pool, k))
def init():
    """Interactively ask for the max RAM (MB) and save it to config\\maxram.cfg.

    Values outside [512, 4096] re-prompt via recursion.  Any existing
    setting is deleted first.
    """
    if os.path.isfile(pwd + "\\config\\maxram.cfg"):
        os.remove("config\\maxram.cfg")
    print("")
    print(lang.RAM_INPUT)
    print("")
    print(lang.RAM_EXAMPLE)
    print("")
    maxram = input(lang.SETTING)
    # NOTE(review): int() raises ValueError on non-numeric input here -- the
    # retry path only covers out-of-range numbers; confirm intended behavior.
    if int(maxram)<512:
        print(lang.INPUT_CORRECT)
        init()
    elif int(maxram)>4096:
        print(lang.INPUT_CORRECT)
        init()
    else:
        file_object = open("config\\maxram.cfg", 'w')
        file_object.write(maxram)
        file_object.close()
        # NOTE(review): self-assignment below is a no-op (dead code).
        maxram = maxram
def user():
    """Interactively ask for a username and save it to config\\username.cfg,
    replacing any previous value.  Empty input re-prompts via recursion.

    Fixes vs. the original: the entered string was compared with
    ``== False`` (never true for a str), and the local variable shadowed
    the function name, so the retry path would have crashed with
    ``TypeError: 'str' object is not callable``.
    """
    if os.path.isfile(pwd + "\\config\\username.cfg"):
        os.remove("config\\username.cfg")
    name = input(lang.SET_NAME)
    if not name:
        print(lang.INPUT_CORRECT)
        user()
    else:
        with open("config\\username.cfg", 'w') as file_object:
            file_object.write(name)
def start(path):
    """Show the launcher menu and act on the choice.

    :param path: path to the generated launch .bat file
    Choice 0 starts the game (spawns the .bat), 1 resets the username,
    2 resets the RAM setting; 1/2 and invalid input recurse back here.
    """
    print("")
    print(lang.CHOOSE_MSG)
    print("")
    print("[0] "+lang.START_GAME)
    print("[1] "+lang.RESET_USERNAME)
    print("[2] "+lang.RESET_RAM)
    print("")
    choose=input(lang.CHOOSE_RIGHT)
    # NOTE(review): int() raises ValueError on non-numeric input; only
    # out-of-range integers reach the else branch below.
    if int(choose)==0:
        print("")
        print(lang.STARTING_GAME)
        #print(path)
        # Fire-and-forget: the launcher does not wait for the game process.
        subprocess.Popen([path])
        print("=> "+lang.START_DONE)
        print("")
    elif int(choose)==1:
        user()
        print("")
        print("=> "+lang.SETED)
        start(path)
    elif int(choose)==2:
        init()
        print("")
        print("=> "+lang.SETED)
        start(path)
    else:
        print("x "+lang.INPUT_CORRECT)
        start(path)
# ---- Main updater flow: locate Java, build launch script, sync mods. ----
print("")
print(lang.CHECKING)
# Search the per-user mcupdater directory for a javaw.exe binary.
FileList = []
rootdir = os.environ['APPDATA']+"\\mcupdater\\"
for root, subFolders, files in os.walk(rootdir):
    if 'done' in subFolders:
        subFolders.remove('done')
    for f in files:
        if f.find('javaw.exe') != -1:
            FileList.append(os.path.join(root, f))
if FileList:
    # Java found: make sure RAM and username settings exist.
    if os.path.exists("config/") == False:
        os.mkdir(pwd+"\\config\\")
    if os.path.isfile(pwd + "/config/maxram.cfg") == False:
        init()
        print("")
        print("=> "+lang.SETED)
    if os.path.isfile(pwd + "/config/username.cfg") == False:
        user()
        print("")
        print("=> "+lang.SETED)
    # Fill the launch-script template with the local paths and settings.
    shell = config.BAT
    maxram = readFile("config\\maxram.cfg")
    username = readFile("config\\username.cfg")
    rpe_shell = shell.replace("{dir}", pwd)
    rpe_shell = rpe_shell.replace("{java}", FileList[0])
    rpe_shell = rpe_shell.replace("{maxram}", maxram)
    rpe_shell = rpe_shell.replace("{username}", username)
    # Write the launch .bat to a temp file.
    tmp_filename = tempfile.mktemp(".bat")
    open(tmp_filename, "w").close()
    #print(tmp_filename)
    file_object = open(tmp_filename, 'w')
    file_object.write("@echo off\n")
    file_object.write("set appdata=" + pwd + "\.minecraft\n")
    file_object.write("cd /D %appdata%\n")
    file_object.write(rpe_shell)
    file_object.close()
    # Inventory the installed mods: ModList goes to the server,
    # localList maps md5 -> filename for local deletion lookups.
    ModList = []
    localList = []
    rootdir = config.MC_DIR+"mods/"
    for name in os.listdir(rootdir):
        if name.endswith('.jar') or name.endswith('.zip') or name.endswith('.litemod'):
            filepath=rootdir+name
            md5=md5sum(filepath)
            ModList.append({0:md5,1:name})
            localList.append({md5:name})
    #print(json.dumps(localList, sort_keys=True, indent=4))
    # Ask the update API what to delete and what to download.
    _json = json.dumps(ModList, sort_keys=True, indent=4)
    headers = {
        'User-Agent': config.UA
    }
    r = requests.post(config.API_URL , headers=headers , data=_json)
    _output = r.text
    #print(_output)
    data = json.loads(_output)
    if data["update"]==-1:
        print("")
        print("x "+lang.ERROR_1)
        input()
        sys.exit()
    elif data["update"]==-2:
        print("")
        print("x "+lang.TOKEN_ERROR)
        input()
        sys.exit()
    elif data["update"] == 1:
        print("")
        print("o "+lang.UPDATEING)
        # Delete mods the server no longer wants, found via their md5.
        if data["del"]:
            print("")
            print(lang.DELETE_MSG)
            for del_md5 in data["del"]:
                md5=del_md5
                result = deep_search(del_md5, localList)
                filename = result[md5]
                os.remove(config.MC_DIR+"mods/"+filename)
                print(filename+" => Done")
        # Download new mods under random names with the server-given extension.
        if data["down"]:
            print("")
            num=0
            for dls in data["down"]:
                save_name=random_str(32)
                save_name=save_name+"."+dls[0]
                num=num+1
                total=data["down_total"]
                dl_url=dls[1]
                print(lang.DOWNLOADING_MSG+" (" + str(num) + "/" + str(total) + ")")
                save_path=pwd+"/"+config.MC_DIR+"mods/"+save_name
                # NOTE(review): dl() is called HERE, synchronously; Thread gets
                # its None return as target, so no actual threading happens.
                threading.Thread(target=dl(dl_url, save_path), args=('')).start()
        start(tmp_filename)
    else:
        print("")
        print("=> "+lang.LASTEST)
        start(tmp_filename)
else:
    # No Java found: download and unpack a bundled JRE, then exit.
    print("")
    print("x "+lang.CANNOT_FIND_JAVA)
    bit=platform.machine()
    if bit=="AMD64":
        packge_name = "j8x64.zip"
    else:
        packge_name="j8x86.zip"
    print("")
    # (Chinese: "Downloading the Java environment package..")
    print("- 正在下载Java环境包..")
    tmp_filename = tempfile.mktemp(".zip")
    # NOTE(review): same non-threading Thread(target=dl(...)) pattern as above.
    threading.Thread(target=dl("http://uuz.cat/"+packge_name,tmp_filename), args=('')).start()
    program_pwd=os.environ['APPDATA']+"\\mcupdater\\"
    if os.path.exists(program_pwd)==False:
        os.mkdir(program_pwd)
    unzip(tmp_filename,program_pwd)
    print("")
    print("O "+lang.JAVA_INSTALL_DONE)
    input()
    sys.exit()
|
bbqsrc/txjsonrpc | refs/heads/master | txjsonrpc/auth.py | 1 | from zope.interface import Interface, implements
try:
from twisted import web
except ImportError:
web = None
try:
from twisted import web2
except ImportError:
web2 = None
from twisted.cred.portal import IRealm, Portal
class HTTPAuthRealm(object):
    """Cred realm that hands out one pre-configured resource for any avatar."""
    implements(IRealm)

    def __init__(self, resource):
        self.resource = resource

    def logout(self):
        """No per-session state to tear down."""
        pass

    def requestAvatar(self, avatarId, mind, *interfaces):
        """Return the wrapped resource for twisted.web or twisted.web2 requests."""
        if web.resource.IResource in interfaces:
            return web.resource.IResource, self.resource, self.logout
        if web2.iweb.IResource in interfaces:
            return web2.iweb.IResource, self.resource
        raise NotImplementedError()
def _wrapTwistedWebResource(resource, checkers, credFactories=None,
                            realmName=""):
    """Wrap a twisted.web resource in an HTTP-auth session guard.

    :param resource: the twisted.web resource to protect
    :param checkers: cred checkers for the Portal
    :param credFactories: extra credential factories; a BasicCredentialFactory
        for *realmName* is always prepended
    :param realmName: HTTP auth realm name

    Bug fix: the original used a mutable default (``credFactories=[]``) and
    mutated it with ``insert``, so every no-argument call kept prepending
    another BasicCredentialFactory to the same shared list.
    """
    if not web:
        raise ImportError("twisted.web does not seem to be installed.")
    if credFactories is None:
        credFactories = []
    from twisted.web import guard
    defaultCredFactory = guard.BasicCredentialFactory(realmName)
    credFactories.insert(0, defaultCredFactory)
    realm = HTTPAuthRealm(resource)
    portal = Portal(realm, checkers)
    return guard.HTTPAuthSessionWrapper(portal, credFactories)
def _wrapTwistedWeb2Resource(resource, checkers, credFactories=None,
                             realmName=""):
    """Wrap a twisted.web2 resource in an HTTP-auth wrapper.

    :param resource: the twisted.web2 resource to protect
    :param checkers: cred checkers for the Portal
    :param credFactories: extra credential factories; a BasicCredentialFactory
        for *realmName* is always prepended
    :param realmName: HTTP auth realm name

    Bug fix: same mutable-default-argument defect as
    ``_wrapTwistedWebResource`` -- ``credFactories=[]`` was shared and
    mutated across calls.
    """
    if not web2:
        raise ImportError("twisted.web2 does not seem to be installed.")
    if credFactories is None:
        credFactories = []
    from twisted.web2.auth import basic
    from twisted.web2.auth import wrapper
    defaultCredFactory = basic.BasicCredentialFactory(realmName)
    credFactories.insert(0, defaultCredFactory)
    realm = HTTPAuthRealm(resource)
    portal = Portal(realm, checkers)
    interfaces = (web2.iweb.IResource,)
    return wrapper.HTTPAuthResource(
        resource, credFactories, portal, interfaces)
def wrapResource(resource, *args, **kwargs):
    """Dispatch to the twisted.web or twisted.web2 auth wrapper for *resource*.

    Fixes vs. the original: when one of the frameworks failed to import
    (``web``/``web2`` is None) the attribute access crashed with
    AttributeError, and a resource of neither kind silently returned None.
    """
    if web and web.resource.IResource.providedBy(resource):
        return _wrapTwistedWebResource(resource, *args, **kwargs)
    if web2 and web2.iweb.IResource.providedBy(resource):
        return _wrapTwistedWeb2Resource(resource, *args, **kwargs)
    raise TypeError("resource is neither a twisted.web nor a twisted.web2 "
                    "resource: %r" % (resource,))
|
rsdevgun16e/energi | refs/heads/energi_v0 | qa/rpc-tests/keypool.py | 6 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
# Add python-bitcoinrpc to module search path:
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class KeyPoolTest(BitcoinTestFramework):
    """Exercise the wallet keypool and its interaction with encryption/locking."""

    def assert_keypool_exhausted(self, func, msg):
        """Call *func* and require a wallet-keypool-exhausted error (code -12).

        Raises AssertionError with *msg* when the call unexpectedly succeeds.
        """
        try:
            func()
            raise AssertionError(msg)
        except JSONRPCException as e:
            assert(e.error['code']==-12)

    def run_test(self):
        nodes = self.nodes
        # Remember an address created pre-encryption: the HD chain id must
        # survive encrypting the wallet.
        addr_before_encrypting = nodes[0].getnewaddress()
        addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
        wallet_info_old = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdchainid'] == wallet_info_old['hdchainid'])
        # Encrypt wallet and wait to terminate
        nodes[0].encryptwallet('test')
        bitcoind_processes[0].wait()
        # Restart node 0
        nodes[0] = start_node(0, self.options.tmpdir)
        # Keep creating keys
        addr = nodes[0].getnewaddress()
        addr_data = nodes[0].validateaddress(addr)
        wallet_info = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdchainid'] == wallet_info['hdchainid'])
        assert(addr_data['hdchainid'] == wallet_info['hdchainid'])
        self.assert_keypool_exhausted(
            nodes[0].getnewaddress,
            'Keypool should be exhausted after one address')
        # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(6)
        nodes[0].walletlock()
        wi = nodes[0].getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 6)
        assert_equal(wi['keypoolsize'], 6)
        # drain the internal keys
        for _ in range(6):
            nodes[0].getrawchangeaddress()
        # the next one should fail
        self.assert_keypool_exhausted(
            nodes[0].getrawchangeaddress,
            'Keypool should be exhausted after six addresses')
        # drain the external keys; all six must be distinct
        addr = set(nodes[0].getnewaddress() for _ in range(6))
        assert(len(addr) == 6)
        # the next one should fail
        self.assert_keypool_exhausted(
            nodes[0].getnewaddress,
            'Keypool should be exhausted after six addresses')
        # refill keypool with three new addresses
        nodes[0].walletpassphrase('test', 1)
        nodes[0].keypoolrefill(3)
        # test walletpassphrase timeout
        time.sleep(1.1)
        assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
        # drain them by mining
        for _ in range(3):
            nodes[0].generate(1)
        # (typo 'addesses' fixed in the message below)
        self.assert_keypool_exhausted(
            lambda: nodes[0].generate(1),
            'Keypool should be exhausted after three addresses')
        nodes[0].walletpassphrase('test', 100)
        nodes[0].keypoolrefill(100)
        wi = nodes[0].getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 100)
        assert_equal(wi['keypoolsize'], 100)

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain(self.options.tmpdir)

    def setup_network(self):
        self.nodes = start_nodes(1, self.options.tmpdir)
# Run the keypool test when executed directly.
if __name__ == '__main__':
    KeyPoolTest().main()
|
vladmm/intellij-community | refs/heads/master | python/helpers/pycharm_generator_utils/module_redeclarator.py | 19 | import keyword
from pycharm_generator_utils.util_methods import *
from pycharm_generator_utils.constants import *
class emptylistdict(dict):
    """defaultdict not available before 2.5; simplest reimplementation using [] as default"""

    def __getitem__(self, item):
        """Return the stored value, inserting and returning a fresh [] when absent."""
        return self.setdefault(item, [])
class Buf(object):
    """Accumulates output fragments in memory for a later one-shot write to a file.

    Indentation strings are produced by the external *indenter* object.
    """

    def __init__(self, indenter):
        self.data = []          # buffered unicode fragments, in emit order
        self.indenter = indenter

    def put(self, data):
        """Append one fragment; empty/None fragments are dropped."""
        if not data:
            return
        self.data.append(ensureUnicode(data))

    def out(self, indent, *what):
        """Output the arguments, indenting as needed, and adding an eol."""
        self.put(self.indenter.indent(indent))
        for piece in what:
            self.put(piece)
        self.put("\n")

    def flush_bytes(self, outfile):
        """Write the buffer to a binary file, encoding with OUT_ENCODING."""
        for fragment in self.data:
            outfile.write(fragment.encode(OUT_ENCODING, "replace"))

    def flush_str(self, outfile):
        """Write the buffer to a text file as-is."""
        for fragment in self.data:
            outfile.write(fragment)

    # Choose the flush strategy once, at class-creation time, per Python version.
    if version[0] < 3:
        flush = flush_bytes
    else:
        flush = flush_str

    def isEmpty(self):
        return not self.data
class ClassBuf(Buf):
    """A Buf that also remembers the name of the class whose code it holds."""

    def __init__(self, name, indenter):
        self.name = name
        super(ClassBuf, self).__init__(indenter)
#noinspection PyUnresolvedReferences,PyBroadException
class ModuleRedeclarator(object):
    def __init__(self, module, outfile, mod_filename, indent_size=4, doing_builtins=False):
        """
        Create new instance.
        @param module module to restore.
        @param outfile output file, must be open and writable.
        @param mod_filename filename of binary module (the .dll or .so)
        @param indent_size amount of space characters per indent
        @param doing_builtins True when restoring the builtins module
            (enables special-case signature handling elsewhere).
        """
        self.module = module
        self.outfile = outfile # where we finally write
        self.mod_filename = mod_filename
        # we write things into buffers out-of-order
        self.header_buf = Buf(self)
        self.imports_buf = Buf(self)
        self.functions_buf = Buf(self)
        self.classes_buf = Buf(self)
        self.classes_buffs = list()
        self.footer_buf = Buf(self)
        self.indent_size = indent_size
        self._indent_step = " " * self.indent_size
        # when True, each class is written to its own file in a package dir
        self.split_modules = False
        #
        self.imported_modules = {"": the_builtins} # explicit module imports: {"name": module}
        self.hidden_imports = {} # {'real_mod_name': 'alias'}; we alias names with "__" since we don't want them exported
        # ^ used for things that we don't re-export but need to import, e.g. certain base classes in gnome.
        self._defined = {} # stores True for every name defined so far, to break circular refs in values
        self.doing_builtins = doing_builtins
        # (im_name, attr) -> rendered return-type string, filled by get_ret_type
        self.ret_type_cache = {}
        self.used_imports = emptylistdict() # qual_mod_name -> [imported_names,..]: actually used imported names
    def _initializeQApp4(self):
        """Instantiate a singleton QCoreApplication so PyQt4 introspection is safe.

        Silently does nothing when PyQt4 is not installed.
        """
        try: # QtGui should be imported _before_ QtCore package.
            # This is done for the QWidget references from QtCore (such as QSignalMapper). Known bug in PyQt 4.7+
            # Causes "TypeError: C++ type 'QWidget*' is not supported as a native Qt signal type"
            import PyQt4.QtGui
        except ImportError:
            pass
        # manually instantiate and keep reference to singleton QCoreApplication (we don't want it to be deleted during the introspection)
        # use QCoreApplication instead of QApplication to avoid blinking app in Dock on Mac OS
        try:
            from PyQt4.QtCore import QCoreApplication
            self.app = QCoreApplication([])
            return
        except ImportError:
            pass
def _initializeQApp5(self):
try:
from PyQt5.QtCore import QCoreApplication
self.app = QCoreApplication([])
return
except ImportError:
pass
def indent(self, level):
"""Return indentation whitespace for given level."""
return self._indent_step * level
    def flush(self):
        """Write all buffers to disk.

        When split_modules is set, writes a package: one file per class buffer
        plus an __init__ that re-imports them; otherwise a single flat file.
        The output handle is always closed, even on error.
        """
        init = None
        try:
            if self.split_modules:
                mod_path = module_to_package_name(self.outfile)
                fname = build_output_name(mod_path, "__init__")
                init = fopen(fname, "w")
                for buf in (self.header_buf, self.imports_buf, self.functions_buf, self.classes_buf):
                    buf.flush(init)
                # Collect local-import lines for __init__ while writing each class file.
                data = ""
                for buf in self.classes_buffs:
                    fname = build_output_name(mod_path, buf.name)
                    dummy = fopen(fname, "w")
                    self.header_buf.flush(dummy)
                    self.imports_buf.flush(dummy)
                    buf.flush(dummy)
                    data += self.create_local_import(buf.name)
                    dummy.close()
                init.write(data)
                self.footer_buf.flush(init)
            else:
                init = fopen(self.outfile, "w")
                for buf in (self.header_buf, self.imports_buf, self.functions_buf, self.classes_buf):
                    buf.flush(init)
                for buf in self.classes_buffs:
                    buf.flush(init)
                self.footer_buf.flush(init)
        finally:
            if init is not None and not init.closed:
                init.close()
    # Some builtin classes effectively change __init__ signature without overriding it.
    # This callable serves as a placeholder to be replaced via REDEFINED_BUILTIN_SIGS
    def fake_builtin_init(self):
        """Placeholder __init__ whose signature is rewritten elsewhere."""
        pass # just a callable, sig doesn't matter
    fake_builtin_init.__doc__ = object.__init__.__doc__ # this forces class's doc to be used instead
def create_local_import(self, name):
if len(name.split(".")) > 1: return ""
data = "from "
if version[0] >= 3:
data += "."
data += name + " import " + name + "\n"
return data
    def find_imported_name(self, item):
        """
        Finds out how the item is represented in imported modules.
        @param item what to check
        @return qualified name (like "sys.stdin") or None
        """
        # TODO: return a pair, not a glued string
        # Simple scalars are never reported as imported names.
        if not isinstance(item, SIMPLEST_TYPES):
            for mname in self.imported_modules:
                m = self.imported_modules[mname]
                for inner_name in m.__dict__:
                    suspect = getattr(m, inner_name)
                    # Identity check: we want the very same object, not an equal one.
                    if suspect is item:
                        if mname:
                            mname += "."
                        elif self.module is the_builtins: # don't short-circuit builtins
                            return None
                        return mname + inner_name
        return None
_initializers = (
(dict, "{}"),
(tuple, "()"),
(list, "[]"),
)
def invent_initializer(self, a_type):
"""
Returns an innocuous initializer expression for a_type, or "None"
"""
for initializer_type, r in self._initializers:
if initializer_type == a_type:
return r
# NOTE: here we could handle things like defaultdict, sets, etc if we wanted
return "None"
    def fmt_value(self, out, p_value, indent, prefix="", postfix="", as_name=None, seen_values=None):
        """
        Formats and outputs value (it occupies an entire line or several lines).
        @param out function that does output (a Buf.out)
        @param p_value the value.
        @param indent indent level.
        @param prefix text to print before the value
        @param postfix text to print after the value
        @param as_name hints which name are we trying to print; helps with circular refs.
        @param seen_values a list of keys we've seen if we're processing a dict
        """
        SELF_VALUE = "<value is a self-reference, replaced by this string>"
        ERR_VALUE = "<failed to retrieve the value>"
        if isinstance(p_value, SIMPLEST_TYPES):
            out(indent, prefix, reliable_repr(p_value), postfix)
        else:
            # Prefer rendering the value as a reference to an imported name.
            if sys.platform == "cli":
                imported_name = None
            else:
                imported_name = self.find_imported_name(p_value)
            if imported_name:
                out(indent, prefix, imported_name, postfix)
                # TODO: kind of self.used_imports[imported_name].append(p_value) but split imported_name
                # else we could potentially return smth we did not otherwise import. but not likely.
            else:
                if isinstance(p_value, (list, tuple)):
                    # seen_values breaks cycles: self-references become SELF_VALUE.
                    if not seen_values:
                        seen_values = [p_value]
                    if len(p_value) == 0:
                        out(indent, prefix, repr(p_value), postfix)
                    else:
                        if isinstance(p_value, list):
                            lpar, rpar = "[", "]"
                        else:
                            lpar, rpar = "(", ")"
                        out(indent, prefix, lpar)
                        for value in p_value:
                            if value in seen_values:
                                value = SELF_VALUE
                            elif not isinstance(value, SIMPLEST_TYPES):
                                seen_values.append(value)
                            self.fmt_value(out, value, indent + 1, postfix=",", seen_values=seen_values)
                        out(indent, rpar, postfix)
                elif isinstance(p_value, dict):
                    if len(p_value) == 0:
                        out(indent, prefix, repr(p_value), postfix)
                    else:
                        if not seen_values:
                            seen_values = [p_value]
                        out(indent, prefix, "{")
                        keys = list(p_value.keys())
                        try:
                            keys.sort()
                        except TypeError:
                            pass # unsortable keys happen, e,g, in py3k _ctypes
                        for k in keys:
                            value = p_value[k]
                            # Membership test itself can raise for exotic values.
                            try:
                                is_seen = value in seen_values
                            except:
                                is_seen = False
                                value = ERR_VALUE
                            if is_seen:
                                value = SELF_VALUE
                            elif not isinstance(value, SIMPLEST_TYPES):
                                seen_values.append(value)
                            if isinstance(k, SIMPLEST_TYPES):
                                self.fmt_value(out, value, indent + 1, prefix=repr(k) + ": ", postfix=",",
                                               seen_values=seen_values)
                            else:
                                # both key and value need fancy formatting
                                self.fmt_value(out, k, indent + 1, postfix=": ", seen_values=seen_values)
                                self.fmt_value(out, value, indent + 2, seen_values=seen_values)
                                out(indent + 1, ",")
                        out(indent, "}", postfix)
                else: # something else, maybe representable
                    # look up this value in the module.
                    if sys.platform == "cli":
                        out(indent, prefix, "None", postfix)
                        return
                    found_name = ""
                    for inner_name in self.module.__dict__:
                        if self.module.__dict__[inner_name] is p_value:
                            found_name = inner_name
                            break
                    if self._defined.get(found_name, False):
                        out(indent, prefix, found_name, postfix)
                    elif hasattr(self, "app"):
                        # NOTE(review): presumably suppresses output while a Qt
                        # app singleton is alive -- confirm the intent.
                        return
                    else:
                        # a forward / circular declaration happens
                        notice = ""
                        try:
                            representation = repr(p_value)
                        except Exception:
                            import traceback
                            traceback.print_exc(file=sys.stderr)
                            return
                        real_value = cleanup(representation)
                        if found_name:
                            if found_name == as_name:
                                notice = " # (!) real value is %r" % real_value
                                real_value = "None"
                            else:
                                notice = " # (!) forward: %s, real value is %r" % (found_name, real_value)
                        if SANE_REPR_RE.match(real_value):
                            out(indent, prefix, real_value, postfix, notice)
                        else:
                            if not found_name:
                                notice = " # (!) real value is %r" % real_value
                            out(indent, prefix, "None", postfix, notice)
    def get_ret_type(self, attr):
        """
        Returns a return type string as given by T_RETURN in tokens, or None.

        Resolution order: the RET_TYPE override table, then the module being
        restored, then every imported module (constructors are rendered as
        calls, plain functions yield None). Results per (module, attr) pair
        are memoized in ret_type_cache.
        """
        if attr:
            ret_type = RET_TYPE.get(attr, None)
            if ret_type:
                return ret_type
            thing = getattr(self.module, attr, None)
            if thing:
                if not isinstance(thing, type) and is_callable(thing): # a function
                    return None # TODO: maybe divinate a return type; see pygame.mixer.Channel
                return attr
            # adds no noticeable slowdown, I did measure. dch.
            for im_name, im_module in self.imported_modules.items():
                cache_key = (im_name, attr)
                cached = self.ret_type_cache.get(cache_key, None)
                if cached:
                    return cached
                ret_type = getattr(im_module, attr, None)
                if ret_type:
                    if isinstance(ret_type, type):
                        # detect a constructor
                        constr_args = detect_constructor(ret_type)
                        if constr_args is None:
                            constr_args = "*(), **{}" # a silly catch-all constructor
                        reference = "%s(%s)" % (attr, constr_args)
                    elif is_callable(ret_type): # a function, classes are ruled out above
                        return None
                    else:
                        reference = attr
                    if im_name:
                        result = "%s.%s" % (im_name, reference)
                    else: # built-in
                        result = reference
                    self.ret_type_cache[cache_key] = result
                    return result
        # TODO: handle things like "[a, b,..] and (foo,..)"
        return None
    # Notes attached to signatures reconstructed from docstrings.
    SIG_DOC_NOTE = "restored from __doc__"
    SIG_DOC_UNRELIABLY = "NOTE: unreliably restored from __doc__ "
    def restore_by_docstring(self, signature_string, class_name, deco=None, ret_hint=None):
        """
        Reconstruct a parameter list from docstring text.
        @param signature_string: parameter list extracted from the doc string.
        @param class_name: name of the containing class, or None
        @param deco: decorator to use
        @param ret_hint: return type hint, if available
        @return (reconstructed_spec, return_type, note) or (None, _, _) if failed.
        """
        action("restoring func %r of class %r", signature_string, class_name)
        # parse
        parsing_failed = False
        ret_type = None
        try:
            # strict parsing
            tokens = paramSeqAndRest.parseString(signature_string, True)
            ret_name = None
            if tokens:
                ret_t = tokens[-1]
                if ret_t[0] is T_RETURN:
                    ret_name = ret_t[1]
            ret_type = self.get_ret_type(ret_name) or self.get_ret_type(ret_hint)
        except ParseException:
            # it did not parse completely; scavenge what we can
            parsing_failed = True
            tokens = []
            try:
                # most unrestrictive parsing
                tokens = paramSeq.parseString(signature_string, False)
            except ParseException:
                pass
        #
        seq = transform_seq(tokens)
        # add safe defaults for unparsed
        if parsing_failed:
            doc_node = self.SIG_DOC_UNRELIABLY
            starred = None
            double_starred = None
            for one in seq:
                if type(one) is str:
                    if one.startswith("**"):
                        double_starred = one
                    elif one.startswith("*"):
                        starred = one
            if not starred:
                seq.append("*args")
            if not double_starred:
                seq.append("**kwargs")
        else:
            doc_node = self.SIG_DOC_NOTE
        # add 'self' if needed YYY
        if class_name and (not seq or seq[0] != 'self'):
            first_param = propose_first_param(deco)
            if first_param:
                seq.insert(0, first_param)
        seq = make_names_unique(seq)
        return (seq, ret_type, doc_node)
def parse_func_doc(self, func_doc, func_id, func_name, class_name, deco=None, sip_generated=False):
"""
@param func_doc: __doc__ of the function.
@param func_id: name to look for as identifier of the function in docstring
@param func_name: name of the function.
@param class_name: name of the containing class, or None
@param deco: decorator to use
@return (reconstructed_spec, return_literal, note) or (None, _, _) if failed.
"""
if sip_generated:
overloads = []
for part in func_doc.split('\n'):
signature = func_id + '('
i = part.find(signature)
if i >= 0:
overloads.append(part[i + len(signature):])
if len(overloads) > 1:
docstring_results = [self.restore_by_docstring(overload, class_name, deco) for overload in overloads]
ret_types = []
for result in docstring_results:
rt = result[1]
if rt and rt not in ret_types:
ret_types.append(rt)
if ret_types:
ret_literal = " or ".join(ret_types)
else:
ret_literal = None
param_lists = [result[0] for result in docstring_results]
spec = build_signature(func_name, restore_parameters_for_overloads(param_lists))
return (spec, ret_literal, "restored from __doc__ with multiple overloads")
# find the first thing to look like a definition
prefix_re = re.compile("\s*(?:(\w+)[ \\t]+)?" + func_id + "\s*\(") # "foo(..." or "int foo(..."
match = prefix_re.search(func_doc) # Note: this and previous line may consume up to 35% of time
# parse the part that looks right
if match:
ret_hint = match.group(1)
params, ret_literal, doc_note = self.restore_by_docstring(func_doc[match.end():], class_name, deco, ret_hint)
spec = func_name + flatten(params)
return (spec, ret_literal, doc_note)
else:
return (None, None, None)
    def is_predefined_builtin(self, module_name, class_name, func_name):
        """True when restoring builtins and (class_name, func_name) has a
        hand-written signature in PREDEFINED_BUILTIN_SIGS."""
        return self.doing_builtins and module_name == BUILTIN_MOD_NAME and (
            class_name, func_name) in PREDEFINED_BUILTIN_SIGS
def redo_function(self, out, p_func, p_name, indent, p_class=None, p_modname=None, classname=None, seen=None):
"""
Restore function argument list as best we can.
@param out output function of a Buf
@param p_func function or method object
@param p_name function name as known to owner
@param indent indentation level
@param p_class the class that contains this function as a method
@param p_modname module name
@param classname owning class name override; defaults to p_class.__name__ when None
@param seen {id(func): name} map of functions already seen in the same namespace;
id() because *some* functions are unhashable (eg _elementtree.Comment in py2.7)
"""
action("redoing func %r of class %r", p_name, p_class)
# If this exact function object was already emitted under another name in
# this namespace, emit an alias assignment instead of a duplicate stub.
# The __doc__ identity check distinguishes true aliases from distinct
# functions that happen to share an object.
if seen is not None:
other_func = seen.get(id(p_func), None)
if other_func and getattr(other_func, "__doc__", None) is getattr(p_func, "__doc__", None):
# _bisect.bisect == _bisect.bisect_right in py31, but docs differ
out(indent, p_name, " = ", seen[id(p_func)])
out(indent, "")
return
else:
seen[id(p_func)] = p_name
# real work
if classname is None:
classname = p_class and p_class.__name__ or None
# SIP-generated wrappers (PyQt & co.) embed overload signatures in __doc__;
# detect them via the sip.simplewrapper base in the MRO.
if p_class and hasattr(p_class, '__mro__'):
sip_generated = [base_t for base_t in p_class.__mro__ if 'sip.simplewrapper' in str(base_t)]
else:
sip_generated = False
deco = None
deco_comment = ""
mod_class_method_tuple = (p_modname, classname, p_name)
ret_literal = None
is_init = False
# any decorators?
action("redoing decos of func %r of class %r", p_name, p_class)
if self.doing_builtins and p_modname == BUILTIN_MOD_NAME:
deco = KNOWN_DECORATORS.get((classname, p_name), None)
if deco:
deco_comment = " # known case"
elif p_class and p_name in p_class.__dict__:
# detect native methods declared with METH_CLASS flag
descriptor = p_class.__dict__[p_name]
if p_name != "__new__" and type(descriptor).__name__.startswith('classmethod'):
# 'classmethod_descriptor' in Python 2.x and 3.x, 'classmethod' in Jython
deco = "classmethod"
elif type(p_func).__name__.startswith('staticmethod'):
deco = "staticmethod"
# __new__ is implicitly a staticmethod regardless of how it was declared.
if p_name == "__new__":
deco = "staticmethod"
deco_comment = " # known case of __new__"
action("redoing innards of func %r of class %r", p_name, p_class)
if deco and HAS_DECORATORS:
out(indent, "@", deco, deco_comment)
# Signature restoration strategies, most reliable first:
# 1) inspect, for pure-Python functions.
if inspect and inspect.isfunction(p_func):
out(indent, "def ", p_name, restore_by_inspect(p_func), ": # reliably restored by inspect", )
out_doc_attr(out, p_func, indent + 1, p_class)
# 2) hand-maintained table of known builtin signatures.
elif self.is_predefined_builtin(*mod_class_method_tuple):
spec, sig_note = restore_predefined_builtin(classname, p_name)
out(indent, "def ", spec, ": # ", sig_note)
out_doc_attr(out, p_func, indent + 1, p_class)
# 3) IronPython: reflect over the CLR type.
elif sys.platform == 'cli' and is_clr_type(p_class):
is_static, spec, sig_note = restore_clr(p_name, p_class)
if is_static:
out(indent, "@staticmethod")
if not spec: return
if sig_note:
out(indent, "def ", spec, ": #", sig_note)
else:
out(indent, "def ", spec, ":")
if not p_name in ['__gt__', '__ge__', '__lt__', '__le__', '__ne__', '__reduce_ex__', '__str__']:
out_doc_attr(out, p_func, indent + 1, p_class)
# 4) per-module table of known (module, class, method) signatures.
elif mod_class_method_tuple in PREDEFINED_MOD_CLASS_SIGS:
sig, ret_literal = PREDEFINED_MOD_CLASS_SIGS[mod_class_method_tuple]
if classname:
ofwhat = "%s.%s.%s" % mod_class_method_tuple
else:
ofwhat = "%s.%s" % (p_modname, p_name)
out(indent, "def ", p_name, sig, ": # known case of ", ofwhat)
out_doc_attr(out, p_func, indent + 1, p_class)
else:
# __doc__ is our best source of arglist
sig_note = "real signature unknown"
spec = ""
is_init = (p_name == "__init__" and p_class is not None)
funcdoc = None
if is_init and hasattr(p_class, "__doc__"):
if hasattr(p_func, "__doc__"):
funcdoc = p_func.__doc__
# object.__init__'s generic docstring is useless; prefer the class doc.
if funcdoc == object.__init__.__doc__:
funcdoc = p_class.__doc__
elif hasattr(p_func, "__doc__"):
funcdoc = p_func.__doc__
sig_restored = False
action("parsing doc of func %r of class %r", p_name, p_class)
if isinstance(funcdoc, STR_TYPES):
(spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, p_name, p_name, classname, deco,
sip_generated)
# __init__ docstrings often spell the signature using the class name.
if spec is None and p_name == '__init__' and classname:
(spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, classname, p_name, classname, deco,
sip_generated)
sig_restored = spec is not None
if more_notes:
if sig_note:
sig_note += "; "
sig_note += more_notes
if not sig_restored:
# use an allow-all declaration
decl = []
if p_class:
first_param = propose_first_param(deco)
if first_param:
decl.append(first_param)
decl.append("*args")
decl.append("**kwargs")
spec = p_name + "(" + ", ".join(decl) + ")"
out(indent, "def ", spec, ": # ", sig_note)
# to reduce size of stubs, don't output same docstring twice for class and its __init__ method
if not is_init or funcdoc != p_class.__doc__:
out_docstring(out, funcdoc, indent + 1)
# body
if ret_literal and not is_init:
out(indent + 1, "return ", ret_literal)
else:
out(indent + 1, "pass")
# Pre-decorator-syntax Pythons: emulate the decorator via reassignment.
if deco and not HAS_DECORATORS:
out(indent, p_name, " = ", deco, "(", p_name, ")", deco_comment)
out(0, "") # empty line after each item
def redo_class(self, out, p_class, p_name, indent, p_modname=None, seen=None, inspect_dir=False):
"""
Restores a class definition.
@param out output function of a relevant buf
@param p_class the class object
@param p_name class name as known to owner
@param indent indentation level
@param p_modname name of module
@param seen {class: name} map of classes already seen in the same namespace
@param inspect_dir when True, enumerate members via dir() instead of __dict__
"""
action("redoing class %r of module %r", p_name, p_modname)
# Same class object already emitted under another name -> emit an alias.
if seen is not None:
if p_class in seen:
out(indent, p_name, " = ", seen[p_class])
out(indent, "")
return
else:
seen[p_class] = p_name
bases = get_bases(p_class)
base_def = ""
skipped_bases = []
if bases:
# Bases from these modules can be referenced unqualified in the class decl.
skip_qualifiers = [p_modname, BUILTIN_MOD_NAME, 'exceptions']
skip_qualifiers.extend(KNOWN_FAKE_REEXPORTERS.get(p_modname, ()))
bases_list = [] # what we'll render in the class decl
for base in bases:
if [1 for (cls, mdl) in KNOWN_FAKE_BASES if cls == base and mdl != self.module]:
# our base is a wrapper and our module is not its defining module
skipped_bases.append(str(base))
continue
# somehow import every base class
base_name = base.__name__
qual_module_name = qualifier_of(base, skip_qualifiers)
got_existing_import = False
if qual_module_name:
if qual_module_name in self.used_imports:
import_list = self.used_imports[qual_module_name]
if base in import_list:
bases_list.append(base_name) # unqualified: already set to import
got_existing_import = True
if not got_existing_import:
mangled_qualifier = "__" + qual_module_name.replace('.', '_') # foo.bar -> __foo_bar
bases_list.append(mangled_qualifier + "." + base_name)
self.hidden_imports[qual_module_name] = mangled_qualifier
else:
bases_list.append(base_name)
base_def = "(" + ", ".join(bases_list) + ")"
if self.split_modules:
for base in bases_list:
local_import = self.create_local_import(base)
if local_import:
out(indent, local_import)
out(indent, "class ", p_name, base_def, ":",
skipped_bases and " # skipped bases: " + ", ".join(skipped_bases) or "")
out_doc_attr(out, p_class, indent + 1)
# inner parts
methods = {}
properties = {}
others = {}
we_are_the_base_class = p_modname == BUILTIN_MOD_NAME and p_name == "object"
field_source = {}
try:
if hasattr(p_class, "__dict__") and not inspect_dir:
field_source = p_class.__dict__
field_keys = field_source.keys() # Jython 2.5.1 _codecs fail here
else:
field_keys = dir(p_class) # this includes unwanted inherited methods, but no dict + inheritance is rare
except:
field_keys = ()
# Bucket every member into methods / properties / plain values.
for item_name in field_keys:
if item_name in ("__doc__", "__module__"):
if we_are_the_base_class:
item = "" # must be declared in base types
else:
continue # in all other cases must be skipped
elif keyword.iskeyword(item_name): # for example, PyQt4 contains definitions of methods named 'exec'
continue
else:
try:
item = getattr(p_class, item_name) # let getters do the magic
except AttributeError:
item = field_source[item_name] # have it raw
except Exception:
continue
if is_callable(item) and not isinstance(item, type):
methods[item_name] = item
elif is_property(item):
properties[item_name] = item
else:
others[item_name] = item
#
if we_are_the_base_class:
others["__dict__"] = {} # force-feed it, for __dict__ does not contain a reference to itself :)
# add fake __init__s to have the right sig
if p_class in FAKE_BUILTIN_INITS:
methods["__init__"] = self.fake_builtin_init
note("Faking init of %s", p_name)
elif '__init__' not in methods:
init_method = getattr(p_class, '__init__', None)
if init_method:
methods['__init__'] = init_method
#
seen_funcs = {}
for item_name in sorted_no_case(methods.keys()):
item = methods[item_name]
try:
self.redo_function(out, item, item_name, indent + 1, p_class, p_modname, classname=p_name, seen=seen_funcs)
except:
handle_error_func(item_name, out)
#
# Properties: use hand-maintained accessor descriptions when available,
# otherwise fall back to a permissive default property stub.
known_props = KNOWN_PROPS.get(p_modname, {})
a_setter = "lambda self, v: None"
a_deleter = "lambda self: None"
for item_name in sorted_no_case(properties.keys()):
item = properties[item_name]
prop_docstring = getattr(item, '__doc__', None)
prop_key = (p_name, item_name)
if prop_key in known_props:
prop_descr = known_props.get(prop_key, None)
if prop_descr is None:
continue # explicitly omitted
acc_line, getter_and_type = prop_descr
if getter_and_type:
getter, prop_type = getter_and_type
else:
getter, prop_type = None, None
out(indent + 1, item_name,
" = property(", format_accessors(acc_line, getter, a_setter, a_deleter), ")"
)
if prop_type:
if prop_docstring:
out(indent + 1, '"""', prop_docstring)
out(0, "")
out(indent + 1, ':type: ', prop_type)
out(indent + 1, '"""')
else:
out(indent + 1, '""":type: ', prop_type, '"""')
out(0, "")
else:
out(indent + 1, item_name, " = property(lambda self: object(), lambda self, v: None, lambda self: None) # default")
if prop_docstring:
out(indent + 1, '"""', prop_docstring, '"""')
out(0, "")
if properties:
out(0, "") # empty line after the block
#
for item_name in sorted_no_case(others.keys()):
item = others[item_name]
self.fmt_value(out, item, indent + 1, prefix=item_name + " = ")
if p_name == "object":
out(indent + 1, "__module__ = ''")
if others:
out(0, "") # empty line after the block
#
# Keep the emitted class body syntactically valid when otherwise empty.
if not methods and not properties and not others:
out(indent + 1, "pass")
def redo_simple_header(self, p_name):
"""Puts boilerplate code on the top: encoding, module name, source file and generator version."""
out = self.header_buf.out # 1st class methods rule :)
out(0, "# encoding: %s" % OUT_ENCODING) # line 1
# NOTE: maybe encoding should be selectable
# Mention the module's self-reported __name__ when it differs from p_name.
if hasattr(self.module, "__name__"):
self_name = self.module.__name__
if self_name != p_name:
mod_name = " calls itself " + self_name
else:
mod_name = ""
else:
mod_name = " does not know its name"
out(0, "# module ", p_name, mod_name) # line 2
BUILT_IN_HEADER = "(built-in)"
# Best-effort source location: explicit filename, else builtin marker, else __file__.
if self.mod_filename:
filename = self.mod_filename
elif p_name in sys.builtin_module_names:
filename = BUILT_IN_HEADER
else:
filename = getattr(self.module, "__file__", BUILT_IN_HEADER)
out(0, "# from %s" % filename) # line 3
out(0, "# by generator %s" % VERSION) # line 4
# For py2.6+ builtins, expose print as a function in the generated stub.
if p_name == BUILTIN_MOD_NAME and version[0] == 2 and version[1] >= 6:
out(0, "from __future__ import print_function")
out_doc_attr(out, self.module, 0)
def redo_imports(self):
"""Record every module reachable as an attribute of self.module and emit
an 'import ... as ...' line (or a None placeholder) for each of them."""
module_type = type(sys)
for item_name in self.module.__dict__.keys():
try:
item = self.module.__dict__[item_name]
except:
continue
if type(item) is module_type: # not isinstance, py2.7 + PyQt4.QtCore on windows have a bug here
self.imported_modules[item_name] = item
self.add_import_header_if_needed()
# Cite the submodule's file (or its repr) so readers can trace the import.
ref_notice = getattr(item, "__file__", str(item))
if hasattr(item, "__name__"):
self.imports_buf.out(0, "import ", item.__name__, " as ", item_name, " # ", ref_notice)
else:
self.imports_buf.out(0, item_name, " = None # ??? name unknown; ", ref_notice)
def add_import_header_if_needed(self):
    """Emit the '# imports' banner once, before the first recorded import."""
    if not self.imports_buf.isEmpty():
        return
    self.imports_buf.out(0, "")
    self.imports_buf.out(0, "# imports")
def redo(self, p_name, inspect_dir):
"""
Restores module declarations.
Intended for built-in modules and thus does not handle import statements.
@param p_name name of module
@param inspect_dir when True, enumerate module members via dir() instead of __dict__
"""
action("redoing header of module %r %r", p_name, str(self.module))
# PyQt needs a live QApplication before its modules can be introspected.
if "pyqt4" in p_name.lower(): # qt4 specific patch
self._initializeQApp4()
elif "pyqt5" in p_name.lower(): # qt5 specific patch
self._initializeQApp5()
self.redo_simple_header(p_name)
# find whatever other self.imported_modules the module knows; effectively these are imports
action("redoing imports of module %r %r", p_name, str(self.module))
try:
self.redo_imports()
except:
pass
action("redoing innards of module %r %r", p_name, str(self.module))
module_type = type(sys)
# group what we have into buckets
vars_simple = {}
vars_complex = {}
funcs = {}
classes = {}
module_dict = self.module.__dict__
if inspect_dir:
module_dict = dir(self.module)
for item_name in module_dict:
note("looking at %s", item_name)
if item_name in (
"__dict__", "__doc__", "__module__", "__file__", "__name__", "__builtins__", "__package__"):
continue # handled otherwise
try:
item = getattr(self.module, item_name) # let getters do the magic
except AttributeError:
if not item_name in self.module.__dict__: continue
item = self.module.__dict__[item_name] # have it raw
# check if it has percolated from an imported module
except NotImplementedError:
if not item_name in self.module.__dict__: continue
item = self.module.__dict__[item_name] # have it raw
# unless we're adamantly positive that the name was imported, we assume it is defined here
mod_name = None # module from which p_name might have been imported
# IronPython has non-trivial reexports in System module, but not in others:
skip_modname = sys.platform == "cli" and p_name != "System"
surely_not_imported_mods = KNOWN_FAKE_REEXPORTERS.get(p_name, ())
## can't figure weirdness in some modules, assume no reexports:
#skip_modname = skip_modname or p_name in self.KNOWN_FAKE_REEXPORTERS
if not skip_modname:
try:
mod_name = getattr(item, '__module__', None)
except:
pass
# we assume that module foo.bar never imports foo; foo may import foo.bar. (see pygame and pygame.rect)
maybe_import_mod_name = mod_name or ""
import_is_from_top = len(p_name) > len(maybe_import_mod_name) and p_name.startswith(maybe_import_mod_name)
note("mod_name = %s, prospective = %s, from top = %s", mod_name, maybe_import_mod_name, import_is_from_top)
want_to_import = False
if (mod_name
and mod_name != BUILTIN_MOD_NAME
and mod_name != p_name
and mod_name not in surely_not_imported_mods
and not import_is_from_top
):
# import looks valid, but maybe it's a .py file? we're certain not to import from .py
# e.g. this rules out _collections import collections and builtins import site.
try:
imported = __import__(mod_name) # ok to repeat, Python caches for us
if imported:
qualifiers = mod_name.split(".")[1:]
for qual in qualifiers:
imported = getattr(imported, qual, None)
if not imported:
break
imported_path = (getattr(imported, '__file__', False) or "").lower()
want_to_import = not (imported_path.endswith('.py') or imported_path.endswith('.pyc'))
note("path of %r is %r, want? %s", mod_name, imported_path, want_to_import)
except ImportError:
want_to_import = False
# NOTE: if we fail to import, we define 'imported' names here lest we lose them at all
if want_to_import:
import_list = self.used_imports[mod_name]
if item_name not in import_list:
import_list.append(item_name)
if not want_to_import:
if isinstance(item, type) or type(item).__name__ == 'classobj':
classes[item_name] = item
elif is_callable(item): # some classes are callable, check them before functions
funcs[item_name] = item
elif isinstance(item, module_type):
continue # self.imported_modules handled above already
else:
if isinstance(item, SIMPLEST_TYPES):
vars_simple[item_name] = item
else:
vars_complex[item_name] = item
# sort and output every bucket
action("outputting innards of module %r %r", p_name, str(self.module))
#
omitted_names = OMIT_NAME_IN_MODULE.get(p_name, [])
if vars_simple:
out = self.functions_buf.out
prefix = "" # try to group variables by common prefix
PREFIX_LEN = 2 # default prefix length if we can't guess better
out(0, "# Variables with simple values")
for item_name in sorted_no_case(vars_simple.keys()):
if item_name in omitted_names:
out(0, "# definition of " + item_name + " omitted")
continue
item = vars_simple[item_name]
# track the prefix
if len(item_name) >= PREFIX_LEN:
prefix_pos = string.rfind(item_name, "_") # most prefixes end in an underscore
if prefix_pos < 1:
prefix_pos = PREFIX_LEN
beg = item_name[0:prefix_pos]
if prefix != beg:
out(0, "") # space out from other prefix
prefix = beg
else:
prefix = ""
# output
replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
if replacement is not None:
out(0, item_name, " = ", replacement, " # real value of type ", str(type(item)), " replaced")
elif is_skipped_in_module(p_name, item_name):
t_item = type(item)
out(0, item_name, " = ", self.invent_initializer(t_item), " # real value of type ", str(t_item),
" skipped")
else:
self.fmt_value(out, item, 0, prefix=item_name + " = ")
self._defined[item_name] = True
out(0, "") # empty line after vars
#
if funcs:
out = self.functions_buf.out
out(0, "# functions")
out(0, "")
seen_funcs = {}
for item_name in sorted_no_case(funcs.keys()):
if item_name in omitted_names:
out(0, "# definition of ", item_name, " omitted")
continue
item = funcs[item_name]
try:
self.redo_function(out, item, item_name, 0, p_modname=p_name, seen=seen_funcs)
except:
handle_error_func(item_name, out)
else:
self.functions_buf.out(0, "# no functions")
#
if classes:
self.classes_buf.out(0, "# classes")
self.classes_buf.out(0, "")
seen_classes = {}
# sort classes so that inheritance order is preserved
cls_list = [] # items are (class_name, mro_tuple)
for cls_name in sorted_no_case(classes.keys()):
cls = classes[cls_name]
ins_index = len(cls_list)
for i in range(ins_index):
maybe_child_bases = cls_list[i][1]
if cls in maybe_child_bases:
ins_index = i # we could not go farther than current ins_index
break # ...and need not go fartehr than first known child
cls_list.insert(ins_index, (cls_name, get_mro(cls)))
# Very large modules get one output file per class.
self.split_modules = self.mod_filename and len(cls_list) >= 30
for item_name in [cls_item[0] for cls_item in cls_list]:
buf = ClassBuf(item_name, self)
self.classes_buffs.append(buf)
out = buf.out
if item_name in omitted_names:
out(0, "# definition of ", item_name, " omitted")
continue
item = classes[item_name]
self.redo_class(out, item, item_name, 0, p_modname=p_name, seen=seen_classes, inspect_dir=inspect_dir)
self._defined[item_name] = True
out(0, "") # empty line after each item
if self.doing_builtins and p_name == BUILTIN_MOD_NAME and version[0] < 3:
# classobj still supported
txt = classobj_txt
self.classes_buf.out(0, txt)
# Builtins also get synthetic stubs for types with no importable definition.
if self.doing_builtins and p_name == BUILTIN_MOD_NAME:
txt = create_generator()
self.classes_buf.out(0, txt)
txt = create_function()
self.classes_buf.out(0, txt)
txt = create_method()
self.classes_buf.out(0, txt)
txt = create_coroutine()
self.classes_buf.out(0, txt)
# Fake <type 'namedtuple'>
if version[0] >= 3 or (version[0] == 2 and version[1] >= 6):
namedtuple_text = create_named_tuple()
self.classes_buf.out(0, namedtuple_text)
else:
self.classes_buf.out(0, "# no classes")
#
if vars_complex:
out = self.footer_buf.out
out(0, "# variables with complex values")
out(0, "")
for item_name in sorted_no_case(vars_complex.keys()):
if item_name in omitted_names:
out(0, "# definition of " + item_name + " omitted")
continue
item = vars_complex[item_name]
if str(type(item)) == "<type 'namespace#'>":
continue # this is an IronPython submodule, we mustn't generate a reference for it in the base module
replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
if replacement is not None:
out(0, item_name + " = " + replacement + " # real value of type " + str(type(item)) + " replaced")
elif is_skipped_in_module(p_name, item_name):
t_item = type(item)
out(0, item_name + " = " + self.invent_initializer(t_item) + " # real value of type " + str(
t_item) + " skipped")
else:
self.fmt_value(out, item, 0, prefix=item_name + " = ", as_name=item_name)
self._defined[item_name] = True
out(0, "") # empty line after each item
values_to_add = ADD_VALUE_IN_MODULE.get(p_name, None)
if values_to_add:
self.footer_buf.out(0, "# intermittent names")
for value in values_to_add:
self.footer_buf.out(0, value)
# imports: last, because previous parts could alter used_imports or hidden_imports
self.output_import_froms()
if self.imports_buf.isEmpty():
self.imports_buf.out(0, "# no imports")
self.imports_buf.out(0, "") # empty line after imports
def output_import_froms(self):
"""Mention all imported names known within the module, wrapping as per PEP."""
out = self.imports_buf.out
if self.used_imports:
self.add_import_header_if_needed()
for mod_name in sorted_no_case(self.used_imports.keys()):
import_names = self.used_imports[mod_name]
if import_names:
self._defined[mod_name] = True
right_pos = 0 # tracks width of list to fold it at right margin
# NOTE(review): '% s' (with a space) — the space flag is a no-op for %s,
# so this renders as "from <mod> import ("; likely meant '%s'.
import_heading = "from % s import (" % mod_name
right_pos += len(import_heading)
names_pack = [import_heading]
indent_level = 0
import_names = list(import_names)
import_names.sort()
for n in import_names:
self._defined[n] = True
len_n = len(n)
# Fold the list before hitting column 78; continuation lines are indented.
if right_pos + len_n >= 78:
out(indent_level, *names_pack)
names_pack = [n, ", "]
if indent_level == 0:
indent_level = 1 # all but first line is indented
right_pos = self.indent_size + len_n + 2
else:
names_pack.append(n)
names_pack.append(", ")
right_pos += (len_n + 2)
# last line is...
if indent_level == 0: # one line
names_pack[0] = names_pack[0][:-1] # cut off lpar
names_pack[-1] = "" # cut last comma
else: # last line of multiline
names_pack[-1] = ")" # last comma -> rpar
out(indent_level, *names_pack)
out(0, "") # empty line after group
# Mangled-alias imports for base classes referenced with a qualifier.
if self.hidden_imports:
self.add_import_header_if_needed()
for mod_name in sorted_no_case(self.hidden_imports.keys()):
out(0, 'import ', mod_name, ' as ', self.hidden_imports[mod_name])
out(0, "") # empty line after group
def module_to_package_name(module_name):
    """Return *module_name* with a trailing ".py" extension stripped.

    Names without the extension are returned unchanged.
    """
    pattern = r"(.*)\.py$"
    return re.sub(pattern, r"\1", module_name)
|
Proggie02/TestRepo | refs/heads/master | django/core/management/commands/startapp.py | 205 | from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.importlib import import_module
class Command(TemplateCommand):
    """Management command that scaffolds a new Django application."""

    help = ("Creates a Django app directory structure for the given app "
            "name in the current directory or optionally in the given "
            "directory.")

    def handle(self, app_name=None, target=None, **options):
        """Validate *app_name*, then delegate scaffolding to TemplateCommand."""
        if app_name is None:
            raise CommandError("you must provide an app name")
        # Reject names that shadow an importable Python module: the new app
        # would otherwise be unimportable (the existing module wins).
        importable = True
        try:
            import_module(app_name)
        except ImportError:
            importable = False
        if importable:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as an app "
                               "name. Please try another name." % app_name)
        super(Command, self).handle('app', app_name, target, **options)
|
AutorestCI/azure-sdk-for-python | refs/heads/master | azure-mgmt-web/azure/mgmt/web/models/identifier_paged.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class IdentifierPaged(Paged):
"""
A paging container for iterating over a list of :class:`Identifier <azure.mgmt.web.models.Identifier>` object
"""
# msrest deserialization map: 'nextLink' drives the follow-up page request,
# 'value' holds the current page of Identifier instances.
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Identifier]'}
}
def __init__(self, *args, **kwargs):
# All paging behavior lives in msrest.paging.Paged; nothing to add here.
super(IdentifierPaged, self).__init__(*args, **kwargs)
|
bencmbrook/home-assistant | refs/heads/master | homeassistant/components/thermostat/demo.py | 10 | """
homeassistant.components.thermostat.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform that offers a fake thermostat.
"""
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELCIUS, TEMP_FAHRENHEIT
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Demo thermostats. """
    demo_units = [
        DemoThermostat("Nest", 21, TEMP_CELCIUS, False, 19),
        DemoThermostat("Thermostat", 68, TEMP_FAHRENHEIT, True, 77),
    ]
    add_devices(demo_units)
# pylint: disable=too-many-arguments
class DemoThermostat(ThermostatDevice):
    """ Demo thermostat whose state lives purely in memory. """

    def __init__(self, name, target_temperature, unit_of_measurement,
                 away, current_temperature):
        """ Record the initial state of the fake device. """
        self._name = name
        self._target_temperature = target_temperature
        self._unit_of_measurement = unit_of_measurement
        self._away = away
        self._current_temperature = current_temperature

    @property
    def should_poll(self):
        """ Polling is pointless: state only changes via service calls. """
        return False

    @property
    def name(self):
        """ Name of this thermostat. """
        return self._name

    @property
    def unit_of_measurement(self):
        """ Temperature unit this thermostat reports in. """
        return self._unit_of_measurement

    @property
    def current_temperature(self):
        """ Currently measured temperature. """
        return self._current_temperature

    @property
    def target_temperature(self):
        """ Temperature this thermostat is set to reach. """
        return self._target_temperature

    @property
    def is_away_mode_on(self):
        """ Whether away mode is active. """
        return self._away

    def set_temperature(self, temperature):
        """ Store a new target temperature. """
        self._target_temperature = temperature

    def turn_away_mode_on(self):
        """ Activate away mode. """
        self._away = True

    def turn_away_mode_off(self):
        """ Deactivate away mode. """
        self._away = False
|
rpisarev/guestbook | refs/heads/master | guestbook/guestbook/settings.py | 1 | # Django settings for guestbook project.
import os.path
# NOTE(review): DEBUG must never be True in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('ruslan pisarev', 'ruslan@rpisarev.org.ua'),
)
MANAGERS = ADMINS
# SECURITY(review): database credentials are committed to source control —
# move them to environment variables or a local, untracked settings file.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'guestbookdb', # Or path to database file if using sqlite3.
'USER': 'guestbookdb', # Not used with sqlite3.
'PASSWORD': 'gu3stbo0kdb', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# SECURITY(review): reCAPTCHA keys are committed to source control; rotate
# them and load from the environment instead.
RECAPTCHA_PUBLIC_KEY = '6LcH9dwSAAAAAGFJTvZlbsozpuSpErezHMf_b4CZ'
RECAPTCHA_PRIVATE_KEY = '6LcH9dwSAAAAAKSo8pu4_x6f76WRb7YqJB0nb5kt'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Kiev'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = '/home/u/testing/guestbook/'
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'img').replace('\\','/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'st').replace('\\','/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'img').replace('\\','/'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY(review): SECRET_KEY is committed to source control; rotate it and
# load it from the environment or an untracked file.
SECRET_KEY = '*+419kllwqto7r^$qzg55urhb)4w_v2p!&&fa6bze=sdj^%d=#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
)
# NOTE(review): all middleware is commented out — CSRF protection, sessions
# and auth are disabled for this site; confirm this is intentional.
MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'guestbook.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'guestbook.wsgi.application'
#EMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#
INSTALLED_APPS = (
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
# 'django.contrib.messages',
'sorl.thumbnail',
'django.contrib.staticfiles',
'gba',
'captcha',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
ff0000/red-fab-deploy | refs/heads/master | fab_deploy/local/config.py | 1 | from fabric.api import local, env, execute
from fabric.tasks import Task
from fab_deploy.functions import get_answer, get_remote_name
class InternalIps(Task):
"""
Updates your server.ini config with the correct
internal ip addresses for all hosts
This is a serial task, that should not be called
with any remote hosts as the remote hosts to run
on is determined by the hosts in your server.ini
file.
"""
name = 'update_internal_ips'
serial = True
def run(self):
# For each server section: resolve every connection's internal IP by
# running the remote 'utils.get_ip' task, then write the refreshed list
# back into the config and save it.
conf = env.config_object
for section in conf.server_sections():
internals = conf.get_list(section, conf.INTERNAL_IPS)
connections = conf.get_list(section, conf.CONNECTIONS)
# The lists are parallel: position i of internals belongs to connections[i].
if len(internals) != len(connections):
raise Exception("Number of connections and internal ips do not match")
if internals:
results = execute('utils.get_ip', None, hosts=connections)
for i, conn in enumerate(connections):
internals[i] = results[conn]
conf.set_list(section, conf.INTERNAL_IPS, internals)
conf.save(env.conf_filename)
class SyncGit(Task):
    """
    Syncs your git remotes with your server.ini file.

    Will add remotes for each section in your config.ini
    that has git-sync=true if they do not already exists
    the new remotes will be named by section name + a count
    for example app-server2. Servers that already exist will
    not be renamed.

    If you have any remotes other than origin you will be prompted
    and asked if you want to remove them.

    Internally 'local.git.rm_remote' is called for removing remotes
    'local.git.add_remote' is called for adding.

    This is a serial task, that should not be called
    with any remote hosts as it performs no remote actions.
    """

    name = 'sync_git'
    default = True
    serial = True

    def gather_config_remotes(self):
        """Map every git-synced connection string to its config section."""
        section_by_connection = {}
        config = env.config_object
        for section in config.server_sections():
            wants_sync = (config.has_option(section, config.GIT_SYNC) and
                          config.getboolean(section, config.GIT_SYNC))
            if wants_sync:
                for connection in config.get_list(section, config.CONNECTIONS):
                    section_by_connection[connection] = section
        return section_by_connection

    def run(self):
        """Reconcile local git remotes against the servers.ini contents."""
        config_remotes = self.gather_config_remotes()
        # Connections present in the config but missing a git remote.
        missing = [x for x in config_remotes.keys() if x not in env.git_reverse]
        # Non-origin remotes no longer backed by the config; confirm removal.
        stale = []
        for value, name in env.git_reverse.items():
            if name == 'origin':
                continue
            if value not in config_remotes:
                test = get_answer("The remote %s %s isn't in your servers.ini. Do you want to remove it?" % (name, value))
                if test:
                    stale.append(name)

        for name in stale:
            execute('local.git.rm_remote', remote_name=name)

        for value in missing:
            remote = get_remote_name(value, config_remotes[value])
            execute('local.git.add_remote', remote_name=remote,
                    user_and_host=value)
# Module-level Task instances; presumably these are what Fabric's task
# loader discovers and exposes — TODO confirm against fab_deploy's loader.
sync_git = SyncGit()
update_internal_ips = InternalIps()
|
iotaledger/iota.lib.py | refs/heads/master | iota/commands/core/get_trytes.py | 1 | import filters as f
from iota import TransactionHash
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import StringifiedTrytesArray, Trytes
__all__ = [
'GetTrytesCommand',
]
class GetTrytesCommand(FilterCommand):
    """
    Executes ``getTrytes`` command.

    See :py:meth:`iota.api.StrictIota.get_trytes`.
    """
    command = 'getTrytes'

    def get_request_filter(self):
        # Validates the outgoing request payload (``hashes`` key).
        return GetTrytesRequestFilter()

    def get_response_filter(self):
        # Normalises the node's response (``trytes`` key).
        return GetTrytesResponseFilter()
class GetTrytesRequestFilter(RequestFilter):
    """Validates the request payload for the ``getTrytes`` command."""

    def __init__(self) -> None:
        super(GetTrytesRequestFilter, self).__init__({
            # ``hashes`` is required and must be an array of transaction
            # hashes, serialised as trytes strings.
            'hashes':
                StringifiedTrytesArray(TransactionHash) | f.Required,
        })
class GetTrytesResponseFilter(ResponseFilter):
    """Converts the ``trytes`` in a ``getTrytes`` response into objects."""

    def __init__(self) -> None:
        super(GetTrytesResponseFilter, self).__init__({
            # Each array item is decoded from ASCII bytes and wrapped in
            # a ``Trytes`` instance.
            'trytes':
                f.Array | f.FilterRepeater(
                    f.ByteString(encoding='ascii') |
                    Trytes
                ),
        })
|
ivanbusthomi/inasafe | refs/heads/develop | safe/definitions/font.py | 2 | # coding=utf-8
"""Fonts which are used in InaSAFE."""
from PyQt4.QtGui import QFont
# Large font used for prominent display text.
big_font = QFont()
big_font.setPointSize(80)

# Emphasis font.
# NOTE(review): despite the name, this font is italic as well as bold —
# confirm the italics are intentional.
bold_font = QFont()
bold_font.setItalic(True)
bold_font.setBold(True)
bold_font.setWeight(75)  # 75 corresponds to QFont.Bold
|
lmregus/Portfolio | refs/heads/master | python/design_patterns/env/lib/python3.7/site-packages/sphinx/transforms/i18n.py | 1 | """
sphinx.transforms.i18n
~~~~~~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from textwrap import indent
from typing import Any, TypeVar
from docutils import nodes
from docutils.io import StringInput
from docutils.utils import relative_path
from sphinx import addnodes
from sphinx.domains.std import make_glossary_term, split_term_classifiers
from sphinx.locale import __, init as init_locale
from sphinx.transforms import SphinxTransform
from sphinx.util import split_index_msg, logging
from sphinx.util.i18n import find_catalog
from sphinx.util.nodes import (
LITERAL_TYPE_NODES, IMAGE_TYPE_NODES, NodeMatcher,
extract_messages, is_pending_meta, traverse_translatable_index,
)
if False:
# For type annotation
from typing import Dict, List, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
logger = logging.getLogger(__name__)
N = TypeVar('N', bound=nodes.Node)
def publish_msgstr(app, source, source_path, source_line, config, settings):
    # type: (Sphinx, str, str, int, Config, Any) -> nodes.Element
    """Publish msgstr (single line) into docutils document

    :param sphinx.application.Sphinx app: sphinx application
    :param str source: source text
    :param str source_path: source path for warning indication
    :param source_line: source line for warning indication
    :param sphinx.config.Config config: sphinx config
    :param docutils.frontend.Values settings: docutils settings
    :return: document
    :rtype: docutils.nodes.document
    """
    # Imported at call time, presumably to avoid a circular import with
    # sphinx.io — TODO confirm.
    from sphinx.io import SphinxI18nReader
    reader = SphinxI18nReader(app)
    parser = app.registry.create_source_parser(app, 'restructuredtext')
    doc = reader.read(
        # Embed the original location in the synthetic path so warnings
        # emitted while parsing the translation point back at the source.
        source=StringInput(source=source,
                           source_path="%s:%s:<translated>" % (source_path, source_line)),
        parser=parser,
        settings=settings,
    )
    try:
        # Unwrap the single top-level element produced by the parse.
        doc = doc[0]  # type: ignore
    except IndexError:  # empty node
        pass
    return doc
class PreserveTranslatableMessages(SphinxTransform):
    """
    Preserve original translatable messages before translation
    """
    default_priority = 10  # this MUST be invoked before Locale transform

    def apply(self, **kwargs):
        # type: (Any) -> None
        for node in self.document.traverse(addnodes.translatable):
            # Snapshot each node's original messages so the Locale
            # transform can still retrieve them after mutation.
            node.preserve_original_messages()
class Locale(SphinxTransform):
    """
    Replace translatable nodes with their translated doctree.
    """
    default_priority = 20

    def apply(self, **kwargs):
        # type: (Any) -> None
        """Translate the doctree in place from the document's catalog.

        Runs in three passes: phase1 rewrites section/glossary reference
        ids to their translated names, phase2 replaces the remaining
        translatable nodes with freshly parsed translations, and a final
        pass translates index entries when configured.
        """
        settings, source = self.document.settings, self.document['source']
        msgstr = ''

        # XXX check if this is reliable
        assert source.startswith(self.env.srcdir)
        docname = path.splitext(relative_path(path.join(self.env.srcdir, 'dummy'),
                                              source))[0]
        textdomain = find_catalog(docname, self.config.gettext_compact)

        # fetch translations
        dirs = [path.join(self.env.srcdir, directory)
                for directory in self.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, self.config.language, textdomain)
        if not has_catalog:
            return

        # phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            # literalblock need literal block notation to avoid it become
            # paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' ' * 3)

            patch = publish_msgstr(self.app, msgstr, source,
                                   node.line, self.config, settings)
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now

            processed = False  # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    # Original section name (reference target name) should be kept to refer
                    # from other nodes which is still not translated or uses explicit target
                    # name like "`text to display <explicit target name_>`_"..
                    # So, `old_name` is still exist in `names`.

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)

                    # * if explicit: _id is label. title node need another id.
                    # * if not explicit:
                    #
                    #   * if _id is None:
                    #
                    #     _id is None means:
                    #
                    #     1. _id was not provided yet.
                    #
                    #     2. _id was duplicated.
                    #
                    #        old_name entry still exists in nameids and
                    #        nametypes for another duplicated entry.
                    #
                    # * if _id is provided: bellow process
                    if _id:
                        if not explicit:
                            # _id was not duplicated.
                            # remove old_name entry from document ids database
                            # to reuse original _id.
                            self.document.nameids.pop(old_name, None)
                            self.document.nametypes.pop(old_name, None)
                            self.document.ids.pop(_id, None)

                        # re-entry with new named section node.
                        #
                        # Note: msgnode that is a second parameter of the
                        # `note_implicit_target` is not necessary here because
                        # section_node has been noted previously on rst parsing by
                        # `docutils.parsers.rst.states.RSTState.new_subsection()`
                        # and already has `system_message` if needed.
                        self.document.note_implicit_target(section_node)

                    # replace target's refname to new target name
                    matcher = NodeMatcher(nodes.target, refname=old_name)
                    for old_target in self.document.traverse(matcher):  # type: nodes.target
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = self.env.temp_data.setdefault('gloss_entries', set())
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)

                    parts = split_term_classifiers(msgstr)
                    patch = publish_msgstr(self.app, parts[0], source,
                                           node.line, self.config, settings)
                    patch = make_glossary_term(self.env, patch, parts[1],
                                               source, node.line, _id)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True  # to avoid double translation

        # phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):  # to avoid double translation
                continue  # skip if the node is already translated by phase1
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg:  # as-of-yet untranslated
                continue

            # update translatable nodes
            if isinstance(node, addnodes.translatable):
                node.apply_translated_message(msg, msgstr)
                continue

            # update meta nodes
            if isinstance(node, nodes.pending) and is_pending_meta(node):
                node.details['nodes'][0]['content'] = msgstr
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            # literalblock need literal block notation to avoid it become
            # paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' ' * 3)

            # Structural Subelements phase1
            # There is a possibility that only the title node is created.
            # see: http://docutils.sourceforge.net/docs/ref/doctree.html#structural-subelements
            if isinstance(node, nodes.title):
                # This generates: <section ...><title>msgstr</title></section>
                msgstr = msgstr + '\n' + '-' * len(msgstr) * 2

            patch = publish_msgstr(self.app, msgstr, source,
                                   node.line, self.config, settings)
            # Structural Subelements phase2
            if isinstance(node, nodes.title):
                # get <title> node that placed as a first child
                patch = patch.next_node()

            # ignore unexpected markups in translation message
            unexpected = (
                nodes.paragraph,    # expected form of translation
                nodes.title         # generated by above "Subelements phase2"
            )  # type: Tuple[Type[nodes.Element], ...]

            # following types are expected if
            # config.gettext_additional_targets is configured
            unexpected += LITERAL_TYPE_NODES
            unexpected += IMAGE_TYPE_NODES

            if not isinstance(patch, unexpected):
                continue  # skip

            # auto-numbered foot note reference should use original 'ids'.
            def list_replace_or_append(lst, old, new):
                # type: (List[N], N, N) -> None
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)

            is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any)
            old_foot_refs = node.traverse(is_autofootnote_ref)  # type: List[nodes.footnote_reference]  # NOQA
            new_foot_refs = patch.traverse(is_autofootnote_ref)  # type: List[nodes.footnote_reference]  # NOQA
            if len(old_foot_refs) != len(new_foot_refs):
                old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
                new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
                logger.warning(__('inconsistent footnote references in translated message.' +
                                  ' original: {0}, translated: {1}')
                               .format(old_foot_ref_rawsources, new_foot_ref_rawsources),
                               location=node)
            old_foot_namerefs = {}  # type: Dict[str, List[nodes.footnote_reference]]
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for newf in new_foot_refs:
                refname = newf.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                oldf = refs.pop(0)
                newf['ids'] = oldf['ids']
                for id in newf['ids']:
                    self.document.ids[id] = newf

                if newf['auto'] == 1:
                    # autofootnote_refs
                    list_replace_or_append(self.document.autofootnote_refs, oldf, newf)
                else:
                    # symbol_footnote_refs
                    list_replace_or_append(self.document.symbol_footnote_refs, oldf, newf)

                if refname:
                    footnote_refs = self.document.footnote_refs.setdefault(refname, [])
                    list_replace_or_append(footnote_refs, oldf, newf)

                    refnames = self.document.refnames.setdefault(refname, [])
                    list_replace_or_append(refnames, oldf, newf)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any)
            old_refs = node.traverse(is_refnamed_ref)  # type: List[nodes.reference]
            new_refs = patch.traverse(is_refnamed_ref)  # type: List[nodes.reference]
            if len(old_refs) != len(new_refs):
                old_ref_rawsources = [ref.rawsource for ref in old_refs]
                new_ref_rawsources = [ref.rawsource for ref in new_refs]
                logger.warning(__('inconsistent references in translated message.' +
                                  ' original: {0}, translated: {1}')
                               .format(old_ref_rawsources, new_ref_rawsources),
                               location=node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for newr in new_refs:
                if not self.document.has_name(newr['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        newr['refname'] = orphans.pop(0)
                    else:
                        # orphan refnames is already empty!
                        # reference number is same in new_refs and old_refs.
                        pass

                self.document.note_refname(newr)

            # refnamed footnote should use original 'ids'.
            is_refnamed_footnote_ref = NodeMatcher(nodes.footnote_reference, refname=Any)
            old_foot_refs = node.traverse(is_refnamed_footnote_ref)
            new_foot_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}  # type: Dict[str, List[str]]
            if len(old_foot_refs) != len(new_foot_refs):
                old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
                new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
                logger.warning(__('inconsistent footnote references in translated message.' +
                                  ' original: {0}, translated: {1}')
                               .format(old_foot_ref_rawsources, new_foot_ref_rawsources),
                               location=node)
            for oldf in old_foot_refs:
                refname_ids_map.setdefault(oldf["refname"], []).append(oldf["ids"])
            for newf in new_foot_refs:
                refname = newf["refname"]
                if refname_ids_map.get(refname):
                    newf["ids"] = refname_ids_map[refname].pop(0)

            # citation should use original 'ids'.
            is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any)
            old_cite_refs = node.traverse(is_citation_ref)  # type: List[nodes.citation_reference]  # NOQA
            new_cite_refs = patch.traverse(is_citation_ref)  # type: List[nodes.citation_reference]  # NOQA
            refname_ids_map = {}
            if len(old_cite_refs) != len(new_cite_refs):
                old_cite_ref_rawsources = [ref.rawsource for ref in old_cite_refs]
                new_cite_ref_rawsources = [ref.rawsource for ref in new_cite_refs]
                logger.warning(__('inconsistent citation references in translated message.' +
                                  ' original: {0}, translated: {1}')
                               .format(old_cite_ref_rawsources, new_cite_ref_rawsources),
                               location=node)
            for oldc in old_cite_refs:
                refname_ids_map.setdefault(oldc["refname"], []).append(oldc["ids"])
            for newc in new_cite_refs:
                refname = newc["refname"]
                if refname_ids_map.get(refname):
                    newc["ids"] = refname_ids_map[refname].pop()

            # Original pending_xref['reftarget'] contain not-translated
            # target name, new pending_xref must use original one.
            # This code restricts to change ref-targets in the translation.
            old_xrefs = node.traverse(addnodes.pending_xref)
            new_xrefs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_xrefs) != len(new_xrefs):
                old_xref_rawsources = [xref.rawsource for xref in old_xrefs]
                new_xref_rawsources = [xref.rawsource for xref in new_xrefs]
                logger.warning(__('inconsistent term references in translated message.' +
                                  ' original: {0}, translated: {1}')
                               .format(old_xref_rawsources, new_xref_rawsources),
                               location=node)

            def get_ref_key(node):
                # type: (addnodes.pending_xref) -> Tuple[str, str, str]
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_xrefs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old.attributes
            for new in new_xrefs:
                key = get_ref_key(new)
                # Copy attributes to keep original node behavior. Especially
                # copying 'reftarget', 'py:module', 'py:class' are needed.
                for k, v in xref_reftarget_map.get(key, {}).items():
                    # Note: This implementation overwrite all attributes.
                    # if some attributes `k` should not be overwritten,
                    # you should provide exclude list as:
                    # `if k not in EXCLUDE_LIST: new[k] = v`
                    new[k] = v

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children

            # for highlighting that expects .rawsource and .astext() are same.
            if isinstance(node, LITERAL_TYPE_NODES):
                node.rawsource = node.astext()

            if isinstance(node, IMAGE_TYPE_NODES):
                node.update_all_atts(patch)

            node['translated'] = True  # to avoid double translation

        if 'index' in self.config.gettext_additional_targets:
            # Extract and translate messages for index entries.
            for node, entries in traverse_translatable_index(self.document):
                new_entries = []  # type: List[Tuple[str, str, str, str, str]]
                for type, msg, tid, main, key_ in entries:
                    msg_parts = split_index_msg(type, msg)
                    msgstr_parts = []
                    for part in msg_parts:
                        msgstr = catalog.gettext(part)
                        if not msgstr:
                            msgstr = part
                        msgstr_parts.append(msgstr)

                    new_entries.append((type, ';'.join(msgstr_parts), tid, main, None))

                node['raw_entries'] = entries
                node['entries'] = new_entries

        # remove translated attribute that is used for avoiding double translation.
        for translated in self.document.traverse(NodeMatcher(translated=Any)):  # type: nodes.Element  # NOQA
            translated.delattr('translated')
class RemoveTranslatableInline(SphinxTransform):
    """
    Remove inline nodes used for translation as placeholders.
    """
    default_priority = 999

    def apply(self, **kwargs):
        # type: (Any) -> None
        # Imported at call time, presumably to avoid a circular import
        # with sphinx.builders — TODO confirm.
        from sphinx.builders.gettext import MessageCatalogBuilder
        if isinstance(self.app.builder, MessageCatalogBuilder):
            # Skip removal for the message-catalog builder, presumably
            # because it still operates on the placeholders.
            return
        matcher = NodeMatcher(nodes.inline, translatable=Any)
        for inline in self.document.traverse(matcher):  # type: nodes.inline
            # Splice the children into the parent, dropping the wrapper.
            inline.parent.remove(inline)
            inline.parent += inline.children
|
Nolski/olympia | refs/heads/master | apps/files/tests/test_utils_.py | 11 | import json
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import pytest
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files.models import File
from files.utils import find_jetpacks, is_beta, PackageJSONExtractor
from versions.models import Version
pytestmark = pytest.mark.django_db
def test_is_beta():
    """is_beta() recognises Mozilla-style pre-release version strings.

    A version counts as beta when the base version is followed by one of
    the pre-release markers (a/alpha/b/beta/pre/rc), optionally followed
    by a number, which may be separated with '.' or '-'.

    This covers exactly the combinations the original hand-written list
    asserted (it also contained a duplicated '1.2alpha' assertion, now
    deduplicated by the loop).
    """
    assert not is_beta('1.2')
    for marker in ('a', 'alpha', 'b', 'beta', 'pre', 'rc'):
        for suffix in ('', '1', '123', '.1', '.123', '-1', '-123'):
            assert is_beta('1.2' + marker + suffix)
class TestFindJetpacks(amo.tests.TestCase):
    """Tests for files.utils.find_jetpacks version-range selection."""

    # Addon 3615 supplies the single version/file used throughout.
    fixtures = ['base/addon_3615']

    def setUp(self):
        super(TestFindJetpacks, self).setUp()
        File.objects.update(jetpack_version='1.0')
        self.file = File.objects.filter(version__addon=3615).get()

    def test_success(self):
        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file])

    def test_skip_autorepackage(self):
        # Addons that opted out of auto repackaging are excluded.
        Addon.objects.update(auto_repackage=False)
        eq_(find_jetpacks('1.0', '1.1'), [])

    def test_minver(self):
        files = find_jetpacks('1.1', '1.2')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_maxver(self):
        files = find_jetpacks('.1', '1.0')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_unreviewed_files_plus_reviewed_file(self):
        # We upgrade unreviewed files up to the latest reviewed file.
        v = Version.objects.create(addon_id=3615)
        new_file = File.objects.create(version=v, jetpack_version='1.0')
        # NOTE(review): this Version is created but its result is unused —
        # new_file2 below is attached to `v` as well. Confirm whether it
        # should be attached to the new version instead.
        Version.objects.create(addon_id=3615)
        new_file2 = File.objects.create(version=v, jetpack_version='1.0')
        eq_(new_file.status, amo.STATUS_UNREVIEWED)
        eq_(new_file2.status, amo.STATUS_UNREVIEWED)

        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        assert all(f.needs_upgrade for f in files)

        # Now self.file will not need an upgrade since we skip old versions.
        new_file.update(status=amo.STATUS_PUBLIC)
        files = find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        eq_(files[0].needs_upgrade, False)
        assert all(f.needs_upgrade for f in files[1:])
class TestPackageJSONExtractor(amo.tests.TestCase):
    """Tests for PackageJSONExtractor.parse() field mapping."""

    @contextmanager
    def extractor(self, base_data):
        # Serialise ``base_data`` to a temp file and yield an extractor
        # pointed at it.
        with NamedTemporaryFile() as f:
            f.write(json.dumps(base_data))
            f.flush()
            yield PackageJSONExtractor(f.name)

    def create_appversion(self, name, version):
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)

    def test_guid(self):
        """Use id for the guid."""
        with self.extractor({'id': 'some-id'}) as extractor:
            eq_(extractor.parse()['guid'], 'some-id')

    def test_name_for_guid_if_no_id(self):
        """Use the name for the guid if there is no id."""
        with self.extractor({'name': 'addon-name'}) as extractor:
            eq_(extractor.parse()['guid'], 'addon-name')

    def test_type(self):
        """Package.json addons are always ADDON_EXTENSION."""
        with self.extractor({}) as extractor:
            eq_(extractor.parse()['type'], amo.ADDON_EXTENSION)

    def test_no_restart(self):
        """Package.json addons are always no-restart."""
        with self.extractor({}) as extractor:
            eq_(extractor.parse()['no_restart'], True)

    def test_name_from_title_with_name(self):
        """Use the title for the name."""
        data = {'title': 'The Addon Title', 'name': 'the-addon-name'}
        with self.extractor(data) as extractor:
            eq_(extractor.parse()['name'], 'The Addon Title')

    def test_name_from_name_without_title(self):
        """Use the name for the name if there is no title."""
        with self.extractor({'name': 'the-addon-name'}) as extractor:
            eq_(extractor.parse()['name'], 'the-addon-name')

    def test_version(self):
        """Use version for the version."""
        with self.extractor({'version': '23.0.1'}) as extractor:
            eq_(extractor.parse()['version'], '23.0.1')

    def test_homepage(self):
        """Use homepage for the homepage."""
        with self.extractor({'homepage': 'http://my-addon.org'}) as extractor:
            eq_(extractor.parse()['homepage'], 'http://my-addon.org')

    def test_summary(self):
        """Use description for the summary."""
        with self.extractor({'description': 'An addon.'}) as extractor:
            eq_(extractor.parse()['summary'], 'An addon.')

    def test_apps(self):
        """Use engines for apps."""
        firefox_version = self.create_appversion('firefox', '33.0a1')
        thunderbird_version = self.create_appversion('thunderbird', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'thunderbird': '>=33.0a1',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            apps_dict = dict((app.appdata.short, app) for app in apps)
            assert sorted(apps_dict.keys()) == ['firefox', 'thunderbird']
            assert apps_dict['firefox'].min == firefox_version
            assert apps_dict['firefox'].max == firefox_version
            assert apps_dict['thunderbird'].min == thunderbird_version
            assert apps_dict['thunderbird'].max == thunderbird_version

    def test_unknown_apps_are_ignored(self):
        """Unknown engines get ignored."""
        self.create_appversion('firefox', '33.0a1')
        self.create_appversion('thunderbird', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'thunderbird': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            engines = [app.appdata.short for app in apps]
            assert sorted(engines) == ['firefox', 'thunderbird']  # Not node.

    def test_invalid_app_versions_are_ignored(self):
        """Valid engines with invalid versions are ignored."""
        # Only the firefox AppVersion exists; fennec's '33.0a1' does not,
        # so the fennec engine entry is dropped.
        firefox_version = self.create_appversion('firefox', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'fennec': '>=33.0a1',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            eq_(len(apps), 1)
            eq_(apps[0].appdata.short, 'firefox')
            eq_(apps[0].min, firefox_version)
            eq_(apps[0].max, firefox_version)

    def test_fennec_is_treated_as_android(self):
        """Treat the fennec engine as android."""
        android_version = self.create_appversion('android', '33.0a1')
        data = {
            'engines': {
                'fennec': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        with self.extractor(data) as extractor:
            apps = extractor.parse()['apps']
            eq_(apps[0].appdata.short, 'android')
            eq_(apps[0].min, android_version)
            eq_(apps[0].max, android_version)
|
mhahn/stacker | refs/heads/master | stacker/hooks/iam.py | 1 | import copy
import logging
import boto3
from botocore.exceptions import ClientError
from awacs.aws import Statement, Allow, Policy
from awacs import ecs
from awacs.helpers.trust import get_ecs_assumerole_policy
from . import utils
logger = logging.getLogger(__name__)
def create_ecs_service_role(region, namespace, mappings, parameters,
                            **kwargs):
    """Used to create the ecsServiceRole, which has to be named exactly that
    currently, so cannot be created via CloudFormation. See:

    http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role

    """
    role_name = kwargs.get("role_name", "ecsServiceRole")
    client = boto3.client("iam", region_name=region)

    try:
        client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()
        )
    except ClientError as e:
        # A pre-existing role is fine (the hook is idempotent); anything
        # else is a real failure.  NOTE: ``e.message`` is Python 2 only.
        if "already exists" in e.message:
            pass
        else:
            raise

    # Attach the inline policy ECS needs to manage container instances.
    policy = Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=["*"],
                Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance,
                        ecs.DiscoverPollEndpoint, ecs.Poll,
                        ecs.Action("Submit*")]
            )
        ])
    client.put_role_policy(
        RoleName=role_name,
        PolicyName="AmazonEC2ContainerServiceRolePolicy",
        PolicyDocument=policy.to_json()
    )
    return True
def _get_cert_arn_from_response(response):
result = copy.deepcopy(response)
# GET response returns this extra key
if "ServerCertificate" in response:
result = response["ServerCertificate"]
return result["ServerCertificateMetadata"]["Arn"]
def get_cert_contents(kwargs):
    """Builds parameters with server cert file contents.

    Args:
        kwargs(dict): The keyword args passed to ensure_server_cert_exists,
            optionally containing the paths to the cert, key and chain files.

    Returns:
        dict: A dictionary containing the appropriate parameters to supply to
            upload_server_certificate. An empty dictionary if there is a
            problem.

    """
    paths = {
        "certificate": kwargs.get("path_to_certificate"),
        "private_key": kwargs.get("path_to_private_key"),
        "chain": kwargs.get("path_to_chain"),
    }

    # Interactively prompt for any path not supplied via kwargs.
    # NOTE: ``iteritems``/``raw_input`` make this module Python 2 only.
    for key, value in paths.iteritems():
        if value is not None:
            continue
        path = raw_input("Path to %s (skip): " % (key,))
        if path == "skip" or not path.strip():
            continue
        paths[key] = path

    parameters = {
        "ServerCertificateName": kwargs.get("cert_name"),
    }

    for key, path in paths.iteritems():
        if not path:
            continue
        # Allow passing of file like object for tests
        try:
            contents = path.read()
        except AttributeError:
            with open(utils.full_path(path)) as read_file:
                contents = read_file.read()

        if key == "certificate":
            parameters["CertificateBody"] = contents
        elif key == "private_key":
            parameters["PrivateKey"] = contents
        elif key == "chain":
            parameters["CertificateChain"] = contents

    return parameters
def ensure_server_cert_exists(region, namespace, mappings, parameters,
                              **kwargs):
    """Ensure the IAM server certificate named ``cert_name`` exists.

    Looks the certificate up; when the lookup fails, optionally prompts
    the operator (unless ``prompt`` is False) and uploads the certificate
    from the contents gathered by get_cert_contents.
    """
    client = boto3.client("iam", region_name=region)
    cert_name = kwargs["cert_name"]
    try:
        response = client.get_server_certificate(
            ServerCertificateName=cert_name
        )
        cert_arn = _get_cert_arn_from_response(response)
        logger.info("certificate exists: %s (%s)", cert_name, cert_arn)
    except ClientError:
        # NOTE(review): any ClientError is treated as "certificate not
        # found" — confirm this does not mask permission errors.
        if kwargs.get("prompt", True):
            upload = raw_input(
                "Certificate '%s' wasn't found. Upload it now? (yes/no) " % (
                    cert_name,
                )
            )
            if upload != "yes":
                return False

        parameters = get_cert_contents(kwargs)
        if not parameters:
            return False
        response = client.upload_server_certificate(**parameters)
        cert_arn = _get_cert_arn_from_response(response)
        logger.info(
            "uploaded certificate: %s (%s)",
            cert_name,
            cert_arn,
        )
    return True
|
bl4ckic3/ARMSCGen | refs/heads/master | setup.py | 1 | #!python
from setuptools import setup, find_packages
# Setuptools packaging metadata for the ARMSCGen shellcode generator.
setup(
    name = 'ARMSCGen',
    version = '0.0.13',
    # Ship the examples directory alongside the discovered packages.
    packages = find_packages() + ['examples'],
    data_files = [('', ['LICENSE-ARMSCGen.txt'])],
    py_modules = ['ARMSCGen'],
    author = 'alex.park',
    author_email = 'saintlinu07+github@gmail.com',
    url = 'https://github.com/alexpark07/ARMSCGen',
    description = 'ARM/Thumb Shellcode Generator',
    license = 'Mostly GPLv2, some licenses have different',
    classifiers = [
        'Topic :: Security',
        'Environment :: Console',
        'Operating System :: OS Independent',
        # NOTE(review): not a standard Trove classifier — confirm whether
        # 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)'
        # was intended.
        'License :: GPLv2 License',
        'Programming Language :: Python :: 2.7',
        'Intended Audience :: Developers'
    ]
)
|
zivia/grove | refs/heads/master | simulation/__init__.py | 4 | __author__ = 'Troy Squillaci'
|
bohlian/frappe | refs/heads/develop | frappe/desk/report/todo/todo.py | 18 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate
def execute(filters=None):
    """Build the (columns, rows) payload for the ToDo query report.

    Lists open ToDos that the current user owns or assigned, ordered by
    priority (High first) and then by date, newest first.
    """
    priority_rank = {"High": 3, "Medium": 2, "Low": 1}

    todos = frappe.get_list('ToDo', fields=["name", "date", "description",
        "priority", "reference_type", "reference_name", "assigned_by", "owner"],
        filters={'status': 'Open'})

    def sort_key(todo):
        # Missing dates fall back to 1900-01-01 so they sort last after
        # the reverse.
        when = getdate(todo.date) if todo.date else getdate("1900-01-01")
        return (priority_rank.get(todo.priority, 0), when)

    todos.sort(key=sort_key, reverse=True)

    columns = [_("ID")+":Link/ToDo:90", _("Priority")+"::60", _("Date")+ ":Date",
        _("Description")+"::150", _("Assigned To/Owner") + ":Data:120",
        _("Assigned By")+":Data:120", _("Reference")+"::200"]

    result = []
    current_user = frappe.session.user
    for todo in todos:
        if current_user not in (todo.owner, todo.assigned_by):
            continue
        if todo.reference_type:
            todo.reference = """<a href="#Form/%s/%s">%s: %s</a>""" % (todo.reference_type,
                todo.reference_name, todo.reference_type, todo.reference_name)
        else:
            todo.reference = None
        result.append([todo.name, todo.priority, todo.date, todo.description,
            todo.owner, todo.assigned_by, todo.reference])

    return columns, result
OrlyMar/gasistafelice | refs/heads/master | gasistafelice/rest/views/urls.py | 3 |
# Copyright (C) 2011 REES Marche <http://www.reesmarche.org>
# taken from SANET by Laboratori Guglielmo Marconi S.p.A. <http://www.labs.it>
#
# This file is part of GASISTA FELICE
# GASISTA FELICE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# GASISTA FELICE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GASISTA FELICE. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from actions import *
# Route table for the REST views.  The literal routes are matched first;
# the two trailing catch-all patterns dispatch on a view-type name (plus
# optional extra args) via the generic ``view_factory`` view.
urlpatterns = patterns('rest.views',
    (r'^$', 'resource_page'),
    (r'^related_notes/$', 'related_notes'),
    #TEST (r'^gas_details/manage_roles', 'manage_roles'), # done
    # Suspend a resource (POST)
    #(r'^action/suspend', suspend_resource),
    # TEST VIEWS FOR THE PROXY AGGREGATOR
    #(r'^withsecret/(?P<view_type>\w+)/$', 'view_factory2'), # done
    #(r'^withsecret/(?P<view_type>\w+)/(?P<args>.+)$', 'view_factory2'), # done
    (r'^(?P<view_type>\w+)/$', 'view_factory'), # done
    (r'^(?P<view_type>\w+)/(?P<args>.+)$', 'view_factory'), # done
)
|
dhamaniasad/mythbox | refs/heads/master | resources/lib/themoviedb/test_tmdb.py | 7 | #!/usr/bin/env python2.5
#encoding:utf-8
#author:dbr/Ben
#project:themoviedb
import unittest
import tmdb
class test_search(unittest.TestCase):
    """Exercises MovieDb.search() against the live themoviedb.org API."""

    def setUp(self):
        self.m = tmdb.MovieDb()

    def test_simple_search(self):
        """search() should hand back a SearchResults container."""
        found = self.m.search("Fight Club")
        self.assertEquals(tmdb.SearchResults, type(found))

    def test_search_results(self):
        """Individual results are usable MovieResult mappings with expected data."""
        matches = self.m.search("Fight Club")
        top_match = matches[0]
        self.assertEquals(tmdb.MovieResult, type(top_match))
        # Well-known reference values for the first "Fight Club" hit.
        for key, expected in [('name', 'Fight Club'),
                              ('released', '1999-09-16'),
                              ('imdb_id', 'tt0137523')]:
            self.assertEquals(expected, top_match[key])
class test_getmovieinfo(unittest.TestCase):
    """Exercises getMovieInfo() lookups by movie ID."""

    def test_search_to_info(self):
        """A movie ID obtained via search should resolve to the same movie."""
        found = tmdb.search("fight club")[0]
        info = tmdb.getMovieInfo(found['id'])
        self.assertEquals(found['name'], info['name'])

    def test_get_director(self):
        """The director of a film should be retrievable from the cast data."""
        movie_id = tmdb.search("Inglourious Basterds")[0]['id']
        info = tmdb.getMovieInfo(movie_id)
        directors = info['cast']['director']
        self.assertTrue(len(directors) == 1)
        self.assertEquals("Quentin Tarantino", directors[0]['name'])
class test_wrappers(unittest.TestCase):
    """Covers the module-level convenience wrappers around MovieDb."""

    def test_search_wrapper(self):
        """tmdb.search() should return a SearchResults instance."""
        found = tmdb.search("The Matrix")
        self.assertEquals(tmdb.SearchResults, type(found))

    def test_getmovieinfo_wrapper(self):
        """tmdb.getMovieInfo() should return a Movie instance."""
        # 550 is the TMDb ID used elsewhere in this suite.
        movie = tmdb.getMovieInfo(550)
        self.assertEquals(tmdb.Movie, type(movie))
class test_artwork(unittest.TestCase):
    """Sanity-checks the artwork (poster/backdrop) data for one film."""

    def setUp(self):
        film_id = tmdb.MovieDb().search("Fight Club")[0]['id']
        self.film = tmdb.MovieDb().getMovieInfo(film_id)

    def test_poster_urls(self):
        """Every poster URL, at every size, should be an http:// link."""
        posters = self.film['images']['poster']
        for image_id in posters:
            for size in posters[image_id]:
                self.assertTrue(posters[image_id][size].startswith("http://"))

    def test_backdrop_urls(self):
        """Every backdrop URL, at every size, should be an http:// link."""
        backdrops = self.film['images']['backdrop']
        for image_id in backdrops:
            for size in backdrops[image_id]:
                self.assertTrue(backdrops[image_id][size].startswith("http://"))

    def test_artwork_repr(self):
        """The Images repr should carry the expected prefix."""
        self.assertTrue(repr(self.film['images']).startswith("<Images with "))
# Run the full suite when executed directly.  NOTE: these tests hit the
# live themoviedb.org API, so they require network access.
if __name__ == '__main__':
    unittest.main()
mmabey/Adafruit_Soundboard | refs/heads/master | conf.py | 1 | # -*- coding: utf-8 -*-
import os
import sys
# Make the project root importable so autodoc can find the module.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]
# Link to external documentation for Python modules
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),
                       'upy': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Show the docstring from both the class and its __init__() method
autoclass_content = 'both'
autodoc_member_order = 'bysource'
# Stub out hardware-only modules so autodoc can import the library on a
# machine without CircuitPython hardware support.
autodoc_mock_imports = ['board', 'busio', 'digitalio']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'README'
# General information about the project.
project = u'Adafruit Soundboard Library'
copyright = u'2017, Mike Mabey'
author = u'Mike Mabey'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = open('VERSION').read().strip()
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # Theme package not installed locally; fall back to the builtin
        # default theme.  (Narrowed from a bare ``except:``, which would
        # also have swallowed KeyboardInterrupt/SystemExit.)
        html_theme = 'default'
        html_theme_path = ['.']
else:
    # Read the Docs injects its own theme; only the search path is needed.
    html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitSoundboardLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AdafruitSoundboardLibrary.tex', u'Adafruit Soundboard Library Documentation',
     author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'adafruitsoundboardlibrary', 'Adafruit Soundboard Library Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AdafruitSoundboardLibrary', u'Adafruit Soundboard Library Documentation',
     author, 'AdafruitSoundboardLibrary', 'One line description of project.',
     'Miscellaneous'),
]
|
greyhill/proust | refs/heads/master | python/proust.py | 1 | import ctypes as ct
import numpy as np
import ctypes.util
# Locate the proust shared library on the system search path, falling back
# to a copy in the current directory for local/development builds.
libpath = ctypes.util.find_library('proust')
if libpath is None:
    libpath = './libproust.so'
lib = ct.CDLL(libpath)
# Alias used when passing size_t-typed arguments to the C API.
size_t = ct.c_size_t
class DPtr(object):
    """Owning wrapper for a C pointer.

    Stores the pointer as a ctypes void pointer in ``ptr`` and calls the
    supplied destructor on it when the Python object is garbage-collected.
    Other helpers in this module read the ``ptr`` attribute directly.
    """
    def __init__(self, ptr, dtor):
        self.ptr = ct.c_voidp(ptr)
        self.dtor = dtor
    def __del__(self):
        # Release the underlying C resource exactly once, at GC time.
        self.dtor(self.ptr)
def num_platforms():
    """Return the number of OpenCL platforms reported by libproust."""
    # Declare the C return type before calling so ctypes does not apply
    # its default int conversion to a size_t result.
    lib.Proust_NumPlatforms.restype = size_t
    return int(lib.Proust_NumPlatforms())
def context_num_devices(context):
    """Return the number of devices in *context* (a DPtr-wrapped context)."""
    lib.Proust_ContextNumDevices.restype = size_t
    return int(lib.Proust_ContextNumDevices(context.ptr))
def create_command_queue(context, device_number = 0):
    """Create a command queue on `device_number` within `context`.

    Returns a DPtr that destroys the queue when garbage-collected.
    """
    lib.Proust_CreateCommandQueue.restype = ct.c_voidp
    ptr = lib.Proust_CreateCommandQueue(\
        context.ptr,
        size_t(device_number))
    return DPtr(ptr, lib.Proust_DestroyCommandQueue)
def create_context(platform_number = 0):
    """Create a context spanning all devices on the given platform.

    The returned DPtr is augmented with two bound convenience helpers:
    ``num_devices()`` and ``create_command_queue(device_number)``.
    """
    lib.Proust_CreateContextWithAllDevices.restype = ct.c_voidp
    ptr = lib.Proust_CreateContextWithAllDevices(size_t(platform_number))
    tr = DPtr(ptr, lib.Proust_DestroyContext)
    # Attach instance-style helpers that close over this context object.
    tr.num_devices = lambda: context_num_devices(tr)
    tr.create_command_queue = lambda device_number: create_command_queue(tr, device_number)
    return tr
|
alexlo03/ansible | refs/heads/devel | test/units/modules/cloud/amazon/test_ec2_utils.py | 231 | import unittest
from ansible.module_utils.ec2 import map_complex_type
class Ec2Utils(unittest.TestCase):
    """Unit tests for the ansible.module_utils.ec2 helpers."""

    def test_map_complex_type_over_dict(self):
        """map_complex_type should coerce string values to ints per the type map."""
        raw_values = {'minimum_healthy_percent': "75", 'maximum_percent': "150"}
        type_spec = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'}
        expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150}
        mapped = map_complex_type(raw_values, type_spec)
        self.assertEqual(mapped, expected)
|
cartesiantheatre/narayan-designer | refs/heads/master | apport.py | 1 | '''
Narayan Designer, a modelling tool for the Narayan simulation engine.
Copyright (C) 2017-2018 Cartesian Theatre™. All rights reserved.
Apport hook for narayan-designer.
Note: This isn't usable yet since at this time Apport submits bug reports
only to Launchpad. We do not host our issue tracker there, but on GitHub.
'''
import os.path
from apport.hookutils import *
# Packages whose versions are useful when debugging OpenCL problems.
# NOTE(fix): the assignment previously had its ``[`` on the line after a
# bare ``opencl_packages =``, which is a SyntaxError; the list also
# contained 'nvidia-libopencl1-304' twice — the duplicate is removed.
opencl_packages = [
    'beignet-opencl-icd',
    'clinfo',
    'mesa-opencl-icd',
    'nvidia-libopencl1-304',
    'nvidia-libopencl1-340',
    'nvidia-libopencl1-384',
    'oclgrind',
    'ocl-icd-libopencl1',
    'pocl-opencl-icd'
]
def add_info(report):
    """Populate the Apport *report* with OpenCL/GPU diagnostics."""
    # This isn't an official Ubuntu package, so provide name...
    package_name = report['Package'].split()[0]
    if not apport.packaging.is_distro_package(package_name):
        report['CrashDB'] = 'narayan-designer'
    # Attach hardware report...
    attach_hardware(report)
    # Show OpenCL package versions...
    version_lines = []
    for package in opencl_packages:
        try:
            version = packaging.get_version(package)
        except ValueError:
            version = 'N/A'
        version_lines.append('%s %s\n' % (package, version))
    report['DriverPackageVersions'] = ''.join(version_lines)
    # Attach the output of clinfo(1) if available...
    if command_available('clinfo'):
        report['OpenCL Info'] = command_output('clinfo')
    else:
        report['OpenCL Info'] = 'clinfo not found in path...'
    # Attach the GPU manager log...
    attach_file_if_exists(report, '/var/log/gpu-manager.log', 'GpuManagerLog')
    # Report suspicious X errors...
    report['SuspiciousXErrors'] = xsession_errors(re.compile('CRITICAL.*assertion.*failed'))
|
NamanCMU/Robot-Autonomy-Assignments | refs/heads/master | Hw2/hw2/run.py | 1 | #!/usr/bin/env python
import argparse, numpy, openravepy, time
from HerbRobot import HerbRobot
from SimpleRobot import SimpleRobot
from HerbEnvironment import HerbEnvironment
from SimpleEnvironment import SimpleEnvironment
from RRTPlanner import RRTPlanner
from RRTConnectPlanner import RRTConnectPlanner
import time
def main(robot, planning_env, planner):
    """Plan from the robot's current configuration to a hard-coded goal,
    shorten the resulting path, and execute it while timing the planner."""
    raw_input('Press any key to begin planning')
    start_config = numpy.array(robot.GetCurrentConfiguration())
    if robot.name == 'herb':
        # 7-DOF arm goal configurations; alternatives kept for experiments.
        #goal_config = numpy.array([ 4.3, -1.76, 0.00, 1.96, -1.15, 0.87, -1.43] )
        goal_config = numpy.array([ 4.6, -1.76, 0.00, 1.96, -1.15, 0.87, -1.43] )
        #goal_config = numpy.array([ 3.68, -1.90, 0.00, 2.20, 0.00, 0.00, 0.00 ])
    else:
        # The simple robot plans in a 2D configuration space.
        goal_config = numpy.array([2.0, 0.0])
    # Only the raw planning call is timed; shortening/execution are excluded.
    time1 = time.time()
    plan = planner.Plan(start_config, goal_config)
    time2 = time.time()
    plan_short = planning_env.ShortenPath(plan)
    traj = robot.ConvertPlanToTrajectory(plan_short)
    robot.ExecuteTrajectory(traj)
    print "Time: ", time2 - time1
if __name__ == "__main__":
    # Parse the robot/planner selection from the command line.
    parser = argparse.ArgumentParser(description='script for testing planners')
    parser.add_argument('-r', '--robot', type=str, default='simple',
                        help='The robot to load (herb or simple)')
    parser.add_argument('-p', '--planner', type=str, default='rrt',
                        help='The planner to run (rrt or rrtconnect)')
    parser.add_argument('-m', '--manip', type=str, default='right',
                        help='The manipulator to plan with (right or left) - only applicable if robot is of type herb')
    parser.add_argument('-v', '--visualize', action='store_true',
                        help='Enable visualization of tree growth (only applicable for simple robot)')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Enable debug logging')
    args = parser.parse_args()
    # Bring up OpenRAVE and its viewer.
    openravepy.RaveInitialize(True, level=openravepy.DebugLevel.Info)
    openravepy.misc.InitOpenRAVELogging()
    if args.debug:
        openravepy.RaveSetDebugLevel(openravepy.DebugLevel.Debug)
    env = openravepy.Environment()
    env.SetViewer('qtcoin')
    env.GetViewer().SetName('Homework 2 Viewer')
    # First setup the environment and the robot
    visualize = args.visualize
    if args.robot == 'herb':
        robot = HerbRobot(env, args.manip)
        planning_env = HerbEnvironment(robot)
        # Tree visualization is only supported for the simple 2D robot.
        visualize = False
    elif args.robot == 'simple':
        robot = SimpleRobot(env)
        planning_env = SimpleEnvironment(robot)
    else:
        print 'Unknown robot option: %s' % args.robot
        exit(0)
    # Next setup the planner
    if args.planner == 'rrt':
        planner = RRTPlanner(planning_env, visualize=visualize)
    elif args.planner == 'rrtconnect':
        planner = RRTConnectPlanner(planning_env, visualize=visualize)
    else:
        print 'Unknown planner option: %s' % args.planner
        exit(0)
    main(robot, planning_env, planner)
    # Drop into an interactive shell so the viewer stays open for inspection.
    import IPython
    IPython.embed()
|
ARMmbed/yotta_osx_installer | refs/heads/master | workspace/lib/python2.7/site-packages/yotta/list.py | 2 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import logging
import os
# colorama, BSD 3-Clause license, cross-platform terminal colours, pip install colorama
import colorama
# validate, , validate things, internal
from .lib import validate
# access, , get components (and check versions), internal
from .lib import access
# fsutils, , misc filesystem utils, internal
from .lib import fsutils
def addOptions(parser):
    """Register the command-line options for `yotta list` on *parser*."""
    help_text = 'Show all dependencies (including repeats, and test-only dependencies)'
    parser.add_argument(
        '--all', '-a',
        dest='show_all',
        default=False,
        action='store_true',
        help=help_text
    )
def execCommand(args, following_args):
    """Implement `yotta list`: print the current module's dependency tree.

    Returns 1 on error (not in a module directory, no/unsatisfiable
    target); returns None after printing the tree on success.
    """
    c = validate.currentDirectoryModule()
    if not c:
        return 1
    if not args.target:
        logging.error('No target has been set, use "yotta target" to set one.')
        return 1
    target, errors = c.satisfyTarget(args.target)
    if errors:
        for error in errors:
            logging.error(error)
        return 1
    # Resolve the full (recursive) dependency graph, including test deps;
    # the current module seeds the set of already-available components.
    dependencies = c.getDependenciesRecursive(
        target = target,
        available_components = [(c.getName(), c)],
        test = True
    )
    putln(
        ComponentDepsFormatter(
            target = target,
            available_components = dependencies,
            plain = args.plain,
            list_all = args.show_all
        ).format(
            c, [c.getName()]
        )
    )
def islast(generator):
    """Yield (item, is_last) pairs for every item of *generator*.

    ``is_last`` is True only for the final item; an empty input yields
    nothing at all.
    """
    it = iter(generator)
    try:
        pending = next(it)
    except StopIteration:
        return
    for item in it:
        yield (pending, False)
        pending = item
    yield (pending, True)
def putln(x):
    """Print *x* on its own line, UTF-8-encoding Python 2 `unicode` values."""
    # On Python 2 a `unicode` object must be encoded explicitly before it
    # can be written safely to a non-UTF-8 stdout.
    is_py2_unicode = u'unicode' in str(type(x))
    print(x.encode('utf-8') if is_py2_unicode else x)
def relpathIfSubdir(path):
    """Return *path* relative to the CWD, but only when it lies beneath it;
    otherwise return the original path unchanged."""
    rel = os.path.relpath(path)
    return path if rel.startswith('..') else rel
class ComponentDepsFormatter(object):
    """Renders a module's dependency graph as an indented tree of text.

    Unicode box-drawing characters are used unless on Windows or `plain`;
    ANSI colours (via colorama) are used unless `plain`.
    """
    def __init__(self, target=None, available_components=None, list_all=False, plain=False):
        # don't even try to do Unicode on windows. Even if we can encode it
        # correctly, the default terminal fonts don't support Unicode
        # characters :(
        self.use_unicode = not ((os.name == 'nt') or plain)
        self.use_colours = not plain
        self.target = target
        self.list_all = list_all
        self.available = available_components
        # Tree-drawing characters: blank when plain, box-drawing glyphs
        # when Unicode is usable, ASCII approximations otherwise.
        if plain:
            self.L_Char = u' '
            self.T_Char = u' '
            self.Dash_Char = u' '
            self.Pipe_Char = u' '
        elif self.use_unicode:
            self.L_Char = u'\u2517'
            self.T_Char = u'\u2523'
            self.Dash_Char = u'\u2500'
            self.Pipe_Char = u'\u2503'
        else:
            self.L_Char = u'\\'
            self.T_Char = u'|'
            self.Dash_Char = u'_'
            self.Pipe_Char = u'|'
        super(ComponentDepsFormatter, self).__init__()
    def format(
        self,
        component,
        processed,
        indent=u'',
        tee=u'',
        installed_at=u'',
        test_dep=False,
        spec=None
    ):
        """Return the formatted tree for `component` and its dependencies.

        `processed` is mutated in place to record names already printed,
        so a dependency's subtree is only expanded once.  Recurses into
        dependencies that are first installed under this module.
        """
        r = u''
        if self.use_colours:
            DIM = colorama.Style.DIM #pylint: disable=no-member
            BRIGHT = colorama.Style.BRIGHT #pylint: disable=no-member
            GREEN = colorama.Fore.GREEN #pylint: disable=no-member
            RED = colorama.Fore.RED #pylint: disable=no-member
            RESET = colorama.Style.RESET_ALL #pylint: disable=no-member
        else:
            DIM = BRIGHT = GREEN = RED = RESET = u''
        mods_path = component.modulesPath()
        deps = component.getDependencies(
            available_components = self.available,
            target = self.target,
            test = True,
            warnings = False
        )
        specs = dict([(x.name, x) for x in component.getDependencySpecs(target=self.target)])
        def isTestOnly(name):
            # True for dependencies only needed to build/run this module's tests.
            return specs[name].is_test_dependency
        def shouldDisplay(x):
            # NOTE: relies on `deps_here` being assigned (below) before this
            # closure is first called from the `print_deps` comprehension.
            if self.list_all:
                # list everything everywhere (apart from test dependencies of test
                # dependencies, which should be considered irrelevant)
                if component.isTestDependency() and isTestOnly(x[0]):
                    return False
                else:
                    return True
            if (not isTestOnly(x[0]) or not len(indent)):
                # this is non-test dependency, or a top-level test dependency
                if not x[1]:
                    # if it's missing, display it
                    return True
                if x[1].path == os.path.join(mods_path, x[0]):
                    # if it's installed in this module, display it
                    return True
                if x[0] in deps_here:
                    # if it's first depended on by this module, then display it
                    return True
            # everything else shouldn't be displayed here
            return False
        # Build this component's own line: name, version, and any
        # mismatched-spec / test-dependency / install-location annotations.
        line = indent[:-2] + tee + component.getName() + u' ' + DIM + str(component.getVersion()) + RESET
        if spec and not spec.match(component.getVersion()):
            line += u' ' + RESET + BRIGHT + RED + str(spec) + RESET
        if test_dep:
            line += u' ' + DIM + u'(test dependency)' + RESET
        if len(installed_at):
            line += u' ' + DIM + installed_at + RESET
        if component.installedLinked():
            # Linked (development) modules show where the link points.
            line += GREEN + BRIGHT + u' -> ' + RESET + GREEN + fsutils.realpath(component.path) + RESET
        r += line + '\n'
        deps_here = [x for x in list(deps.keys()) if (x not in processed)]
        print_deps = [x for x in list(deps.items()) if shouldDisplay(x)]
        processed += [x[0] for x in print_deps]
        for (name, dep), last in islast(print_deps):
            # The last child gets an L-shaped tee and blank continuation;
            # earlier children get a T-shaped tee and a pipe continuation.
            if last:
                next_indent = indent + u'  '
                tee = self.L_Char + self.Dash_Char + u' '
                next_tee = self.L_Char + self.Dash_Char + u' '
            else:
                next_indent = indent + self.Pipe_Char + u' '
                tee = self.T_Char + self.Dash_Char + u' '
                next_tee = self.T_Char + self.Dash_Char + u' '
            test_dep_status = u''
            if isTestOnly(name):
                test_dep_status = u' (test dependency)'
            if not dep:
                r += indent + tee + name + u' ' + specs[name].version_req + test_dep_status + BRIGHT + RED + ' missing' + RESET + '\n'
            else:
                spec = access.remoteComponentFor(name, specs[name].version_req, 'modules').versionSpec()
                if not spec:
                    spec_descr = u''
                elif spec.match(dep.getVersion()):
                    spec_descr = u' ' + str(spec)
                else:
                    spec_descr = u' ' + RESET + BRIGHT + RED + str(spec)
                spec_descr += test_dep_status
                if name in deps_here:
                    # dependencies that are first used here may actually be
                    # installed higher up our dependency tree, if they are,
                    # illustrate that:
                    if dep.path == os.path.join(mods_path, name):
                        r += self.format(
                            dep,
                            processed,
                            next_indent,
                            next_tee,
                            test_dep = isTestOnly(name),
                            spec = spec
                        )
                    else:
                        r += self.format(
                            dep,
                            processed,
                            next_indent,
                            next_tee,
                            installed_at = relpathIfSubdir(dep.path),
                            test_dep = isTestOnly(name),
                            spec = spec
                        )
                else:
                    r += indent + tee + DIM + name + spec_descr + RESET + '\n'
        return r
|
mcstrother/dicom-sr-qi | refs/heads/master | unported scripts/plot_duration.py | 2 | """Write a .csv file that allows us to easily make a box plot
of the duration of the most common CPT code combinations.
"""
import srdata
import csv
# Input data: the exported SR XML and the CPT spreadsheets to join against.
XML_FILE_NAME = 'all bjh.xml'
# NOTE(fix): this previously read "CPT_FILE_NAMES = CPT_FILE_NAMES = [...]",
# a redundant duplicated assignment target; the duplicate has been dropped.
CPT_FILE_NAMES = ['./GetCPT Data/April_Output_Org.xls', './GetCPT Data/May_Output_Org.xls']
def main():
    """Group procedures by CPT code combination and dump their durations
    to output.csv, one row per combination, for box-plotting."""
    procs = srdata.process_file(XML_FILE_NAME, CPT_FILE_NAMES)
    # Bucket the procedures by their CPT code combination.
    by_cpt = {}
    for proc in procs:
        by_cpt.setdefault(proc.get_cpts(), []).append(proc)
    # One row per combination: a quoted header cell (leading apostrophe keeps
    # spreadsheets from mangling the code list) followed by known durations.
    table = []
    for cpts, proc_list in by_cpt.items():
        row_header = "'" + ','.join([str(x) for x in cpts])
        durations = [p.get_duration() for p in proc_list if p.get_duration() is not None]
        row = [row_header] + durations
        if len(row) > 4:
            table.append(row)
    writer = csv.writer(open('output.csv', 'wb'))
    writer.writerows(table)
main() |
wistoch/meego-app-browser | refs/heads/master | third_party/protobuf/python/google/protobuf/internal/reflection_test.py | 253 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import operator
import struct
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
  """Decodes a stream of values from a string.

  Once upon a time we actually had a class called decoder.Decoder. Then we
  got rid of it during a redesign that made decoding much, much faster overall.
  But a couple tests in this file used it to check that the serialized form of
  a message was correct. So, this class implements just the methods that were
  used by said tests, so that we don't have to rewrite the tests.
  """

  def __init__(self, bytes):
    # NOTE(review): the parameter shadows the `bytes` builtin; kept as-is
    # for compatibility with existing callers.
    self._bytes = bytes
    self._pos = 0

  def ReadVarint(self):
    # Delegate to the production varint decoder; it returns the value and
    # the new stream position.
    result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
    return result

  # All plain 32/64-bit (un)signed integers share the varint wire decoding.
  ReadInt32 = ReadVarint
  ReadInt64 = ReadVarint
  ReadUInt32 = ReadVarint
  ReadUInt64 = ReadVarint

  def ReadSInt64(self):
    # sint* fields are zigzag-encoded varints on the wire.
    return wire_format.ZigZagDecode(self.ReadVarint())

  ReadSInt32 = ReadSInt64

  def ReadFieldNumberAndWireType(self):
    # A tag varint unpacks into (field_number, wire_type).
    return wire_format.UnpackTag(self.ReadVarint())

  def ReadFloat(self):
    # Floats are fixed 4-byte little-endian values.
    result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
    self._pos += 4
    return result

  def ReadDouble(self):
    # Doubles are fixed 8-byte little-endian values.
    result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
    self._pos += 8
    return result

  def EndOfStream(self):
    # True once every byte of the input has been consumed.
    return self._pos == len(self._bytes)
class ReflectionTest(unittest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self):
message = unittest_pb2.TestAllTypes(optional_int32 = 12)
self.assertEquals(2, message.ByteSize())
message = unittest_pb2.TestAllTypes(
optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage())
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(repeated_int32 = [12])
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(
repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()])
self.assertEquals(3, message.ByteSize())
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
  """HasField() propagates through one level of composite nesting."""
  # Helper used to test foreign messages and groups.
  #
  # composite_field_name should be the name of a non-repeated
  # composite (i.e., foreign or group) field in TestAllTypes,
  # and scalar_field_name should be the name of an integer-valued
  # scalar field within that composite.
  #
  # I never thought I'd miss C++ macros and templates so much. :(
  # This helper is semantically just:
  #
  #   assert proto.composite_field.scalar_field == 0
  #   assert not proto.composite_field.HasField('scalar_field')
  #   assert not proto.HasField('composite_field')
  #
  #   proto.composite_field.scalar_field = 10
  #   old_composite_field = proto.composite_field
  #
  #   assert proto.composite_field.scalar_field == 10
  #   assert proto.composite_field.HasField('scalar_field')
  #   assert proto.HasField('composite_field')
  #
  #   proto.ClearField('composite_field')
  #
  #   assert not proto.composite_field.HasField('scalar_field')
  #   assert not proto.HasField('composite_field')
  #   assert proto.composite_field.scalar_field == 0
  #
  #   # Now ensure that ClearField('composite_field') disconnected
  #   # the old field object from the object tree...
  #   assert old_composite_field is not proto.composite_field
  #   old_composite_field.scalar_field = 20
  #   assert not proto.composite_field.HasField('scalar_field')
  #   assert not proto.HasField('composite_field')
  def TestCompositeHasBits(composite_field_name, scalar_field_name):
    proto = unittest_pb2.TestAllTypes()
    # First, check that we can get the scalar value, and see that it's the
    # default (0), but that proto.HasField('composite') and
    # proto.composite.HasField('scalar') will still return False.
    composite_field = getattr(proto, composite_field_name)
    original_scalar_value = getattr(composite_field, scalar_field_name)
    self.assertEqual(0, original_scalar_value)
    # Assert that the composite object does not "have" the scalar.
    self.assertTrue(not composite_field.HasField(scalar_field_name))
    # Assert that proto does not "have" the composite field.
    self.assertTrue(not proto.HasField(composite_field_name))
    # Now set the scalar within the composite field.  Ensure that the setting
    # is reflected, and that proto.HasField('composite') and
    # proto.composite.HasField('scalar') now both return True.
    new_val = 20
    setattr(composite_field, scalar_field_name, new_val)
    self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
    # Hold on to a reference to the current composite_field object.
    old_composite_field = composite_field
    # Assert that the has methods now return true.
    self.assertTrue(composite_field.HasField(scalar_field_name))
    self.assertTrue(proto.HasField(composite_field_name))
    # Now call the clear method...
    proto.ClearField(composite_field_name)
    # ...and ensure that the "has" bits are all back to False...
    composite_field = getattr(proto, composite_field_name)
    self.assertTrue(not composite_field.HasField(scalar_field_name))
    self.assertTrue(not proto.HasField(composite_field_name))
    # ...and ensure that the scalar field has returned to its default.
    self.assertEqual(0, getattr(composite_field, scalar_field_name))
    # Finally, ensure that modifications to the old composite field object
    # don't have any effect on the parent.  Possible only with the
    # pure-python implementation of the API.
    #
    # (NOTE that when we clear the composite field in the parent, we actually
    # don't recursively clear down the tree.  Instead, we just disconnect the
    # cleared composite from the tree.)
    if api_implementation.Type() != 'python':
      return
    self.assertTrue(old_composite_field is not composite_field)
    setattr(old_composite_field, scalar_field_name, new_val)
    self.assertTrue(not composite_field.HasField(scalar_field_name))
    self.assertTrue(not proto.HasField(composite_field_name))
    self.assertEqual(0, getattr(composite_field, scalar_field_name))

  # Test simple, single-level nesting when we set a scalar.
  TestCompositeHasBits('optionalgroup', 'a')
  TestCompositeHasBits('optional_nested_message', 'bb')
  TestCompositeHasBits('optional_foreign_message', 'c')
  TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
  """A child message stays usable after its parent is garbage-collected."""
  parent = unittest_pb2.TestAllTypes()
  child = parent.optional_nested_message
  del parent
  # A previous version had a bug where this would raise an exception when
  # hitting a now-dead weak reference.
  child.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
  """ClearField() on a composite disconnects a previously obtained child.

  Pure-Python implementation only: the guarded early return skips this
  check on other implementations.
  """
  if api_implementation.Type() != 'python':
    return
  proto = unittest_pb2.TestAllTypes()
  nested = proto.optional_nested_message
  proto.ClearField('optional_nested_message')  # Should disconnect from parent
  self.assertTrue(nested is not proto.optional_nested_message)
  # Writing through the old reference must not re-attach it to the parent.
  nested.bb = 23
  self.assertTrue(not proto.HasField('optional_nested_message'))
  self.assertEqual(0, proto.optional_nested_message.bb)
def testHasBitsWhenModifyingRepeatedFields(self):
  """Mutating a repeated field inside a submessage sets the parent's has-bit."""
  # Test nesting when we add an element to a repeated field in a submessage.
  proto = unittest_pb2.TestNestedMessageHasBits()
  proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
  self.assertEqual(
      [5], proto.optional_nested_message.nestedmessage_repeated_int32)
  self.assertTrue(proto.HasField('optional_nested_message'))
  # Do the same test, but with a repeated composite field within the
  # submessage.
  proto.ClearField('optional_nested_message')
  self.assertTrue(not proto.HasField('optional_nested_message'))
  proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
  self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
  """Setting a deeply nested scalar sets has-bits all the way up the tree."""
  # Test nesting many levels deep.
  recursive_proto = unittest_pb2.TestMutualRecursionA()
  self.assertTrue(not recursive_proto.HasField('bb'))
  # A deep read returns defaults without setting any has-bits...
  self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
  self.assertTrue(not recursive_proto.HasField('bb'))
  # ...but a deep write sets every has-bit on the path to the root.
  recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
  self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
  self.assertTrue(recursive_proto.HasField('bb'))
  self.assertTrue(recursive_proto.bb.HasField('a'))
  self.assertTrue(recursive_proto.bb.a.HasField('bb'))
  self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
  self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
  # The sibling field 'a' at the leaf level was never touched.
  self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a'))
  self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
  """ListFields() returns only set singular fields, ordered by field number."""
  proto = unittest_pb2.TestAllTypes()
  proto.optional_fixed32 = 1
  proto.optional_int32 = 5
  proto.optional_string = 'foo'
  # Access sub-message but don't set it yet.
  nested_message = proto.optional_nested_message
  # The merely-read submessage must not appear in the listing.
  self.assertEqual(
      [ (proto.DESCRIPTOR.fields_by_name['optional_int32'  ], 5),
        (proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
        (proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
      proto.ListFields())
  proto.optional_nested_message.bb = 123
  # Once written to, the submessage is listed (same object we read earlier).
  self.assertEqual(
      [ (proto.DESCRIPTOR.fields_by_name['optional_int32'  ], 5),
        (proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
        (proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'),
        (proto.DESCRIPTOR.fields_by_name['optional_nested_message' ],
         nested_message) ],
      proto.ListFields())
def testRepeatedListFields(self):
  """ListFields() lists non-empty repeated fields, ordered by field number."""
  proto = unittest_pb2.TestAllTypes()
  proto.repeated_fixed32.append(1)
  proto.repeated_int32.append(5)
  proto.repeated_int32.append(11)
  proto.repeated_string.extend(['foo', 'bar'])
  proto.repeated_string.extend([])
  proto.repeated_string.append('baz')
  proto.repeated_string.extend(str(x) for x in xrange(2))
  proto.optional_int32 = 21
  proto.repeated_bool  # Access but don't set anything; should not be listed.
  self.assertEqual(
      [ (proto.DESCRIPTOR.fields_by_name['optional_int32'  ], 21),
        (proto.DESCRIPTOR.fields_by_name['repeated_int32'  ], [5, 11]),
        (proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
        (proto.DESCRIPTOR.fields_by_name['repeated_string' ],
         ['foo', 'bar', 'baz', '0', '1']) ],
      proto.ListFields())
def testSingularListExtensions(self):
  """ListFields() includes set singular extensions, ordered by field number."""
  proto = unittest_pb2.TestAllExtensions()
  proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
  proto.Extensions[unittest_pb2.optional_int32_extension  ] = 5
  proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
  # Listing order follows field number, not assignment order.
  self.assertEqual(
      [ (unittest_pb2.optional_int32_extension  , 5),
        (unittest_pb2.optional_fixed32_extension, 1),
        (unittest_pb2.optional_string_extension , 'foo') ],
      proto.ListFields())
def testRepeatedListExtensions(self):
  """ListFields() includes non-empty repeated extensions in number order."""
  proto = unittest_pb2.TestAllExtensions()
  proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
  proto.Extensions[unittest_pb2.repeated_int32_extension  ].append(5)
  proto.Extensions[unittest_pb2.repeated_int32_extension  ].append(11)
  proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
  proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
  proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
  proto.Extensions[unittest_pb2.optional_int32_extension  ] = 21
  # Listing order follows field number, not assignment order.
  self.assertEqual(
      [ (unittest_pb2.optional_int32_extension  , 21),
        (unittest_pb2.repeated_int32_extension  , [5, 11]),
        (unittest_pb2.repeated_fixed32_extension, [1]),
        (unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
      proto.ListFields())
def testListFieldsAndExtensions(self):
  """ListFields() interleaves regular fields and extensions by field number."""
  proto = unittest_pb2.TestFieldOrderings()
  test_util.SetAllFieldsAndExtensions(proto)
  # NOTE(review): bare reference to the extension handle — presumably just
  # ensures the symbol exists / is loaded; confirm it has no side effect.
  unittest_pb2.my_extension_int
  self.assertEqual(
      [ (proto.DESCRIPTOR.fields_by_name['my_int'   ], 1),
        (unittest_pb2.my_extension_int               , 23),
        (proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
        (unittest_pb2.my_extension_string            , 'bar'),
        (proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
      proto.ListFields())
def testDefaultValues(self):
  """Unset fields return their type defaults or .proto-declared defaults."""
  proto = unittest_pb2.TestAllTypes()
  # Fields with no explicit default fall back to the type's zero value.
  self.assertEqual(0, proto.optional_int32)
  self.assertEqual(0, proto.optional_int64)
  self.assertEqual(0, proto.optional_uint32)
  self.assertEqual(0, proto.optional_uint64)
  self.assertEqual(0, proto.optional_sint32)
  self.assertEqual(0, proto.optional_sint64)
  self.assertEqual(0, proto.optional_fixed32)
  self.assertEqual(0, proto.optional_fixed64)
  self.assertEqual(0, proto.optional_sfixed32)
  self.assertEqual(0, proto.optional_sfixed64)
  self.assertEqual(0.0, proto.optional_float)
  self.assertEqual(0.0, proto.optional_double)
  self.assertEqual(False, proto.optional_bool)
  self.assertEqual('', proto.optional_string)
  self.assertEqual('', proto.optional_bytes)
  # Fields declared with explicit defaults in the .proto file.
  self.assertEqual(41, proto.default_int32)
  self.assertEqual(42, proto.default_int64)
  self.assertEqual(43, proto.default_uint32)
  self.assertEqual(44, proto.default_uint64)
  self.assertEqual(-45, proto.default_sint32)
  self.assertEqual(46, proto.default_sint64)
  self.assertEqual(47, proto.default_fixed32)
  self.assertEqual(48, proto.default_fixed64)
  self.assertEqual(49, proto.default_sfixed32)
  self.assertEqual(-50, proto.default_sfixed64)
  self.assertEqual(51.5, proto.default_float)
  self.assertEqual(52e3, proto.default_double)
  self.assertEqual(True, proto.default_bool)
  self.assertEqual('hello', proto.default_string)
  self.assertEqual('world', proto.default_bytes)
  self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
  self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
  self.assertEqual(unittest_import_pb2.IMPORT_BAR,
                   proto.default_import_enum)
  # Non-ASCII default survives as a unicode value.
  proto = unittest_pb2.TestExtremeDefaultValues()
  self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
  """HasField() rejects a name that is not a field of the message."""
  msg = unittest_pb2.TestAllTypes()
  self.assertRaises(ValueError, msg.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
  """ClearField() rejects a name that is not a field of the message."""
  msg = unittest_pb2.TestAllTypes()
  self.assertRaises(ValueError, msg.ClearField, 'nonexistent_field')
def testDisallowedAssignments(self):
  """Direct assignment to repeated/composite fields raises AttributeError."""
  # It's illegal to assign values directly to repeated fields
  # or to nonrepeated composite fields.  Ensure that this fails.
  proto = unittest_pb2.TestAllTypes()
  # Repeated fields.
  self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
  # Lists shouldn't work, either.
  self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
  # Composite fields.
  self.assertRaises(AttributeError, setattr, proto,
                    'optional_nested_message', 23)
  # Assignment to a repeated nested message field without specifying
  # the index in the array of nested messages.
  self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
                    'bb', 34)
  # Assignment to an attribute of a repeated field.
  self.assertRaises(AttributeError, setattr, proto.repeated_float,
                    'some_attribute', 34)
  # proto.nonexistent_field = 23 should fail as well.
  self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
def testSingleScalarTypeSafety(self):
  """Assigning a mistyped value to a singular scalar raises TypeError."""
  msg = unittest_pb2.TestAllTypes()
  # (field name, value of the wrong type)
  for field_name, bad_value in (('optional_int32', 1.1),
                                ('optional_int32', 'foo'),
                                ('optional_string', 10),
                                ('optional_bytes', 10)):
    self.assertRaises(TypeError, setattr, msg, field_name, bad_value)
def testSingleScalarBoundsChecking(self):
  """Out-of-range ints raise ValueError; out-of-range enum values round-trip."""
  def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
    # Boundary values are accepted; one past either boundary is rejected.
    pb = unittest_pb2.TestAllTypes()
    setattr(pb, field_name, expected_min)
    self.assertEqual(expected_min, getattr(pb, field_name))
    setattr(pb, field_name, expected_max)
    self.assertEqual(expected_max, getattr(pb, field_name))
    self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1)
    self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1)

  TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
  TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
  TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
  TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)

  pb = unittest_pb2.TestAllTypes()
  pb.optional_nested_enum = 1
  self.assertEqual(1, pb.optional_nested_enum)
  # Invalid enum values.
  # NOTE(review): 0 and 4 are assigned and read back without error here —
  # presumably retained as unknown enum values; confirm against the
  # NestedEnum definition in unittest.proto.
  pb.optional_nested_enum = 0
  self.assertEqual(0, pb.optional_nested_enum)
  bytes_size_before = pb.ByteSize()
  pb.optional_nested_enum = 4
  self.assertEqual(4, pb.optional_nested_enum)
  pb.optional_nested_enum = 0
  self.assertEqual(0, pb.optional_nested_enum)
  # Make sure that setting the same enum field doesn't just add unknown
  # fields (but overwrites them).
  self.assertEqual(bytes_size_before, pb.ByteSize())
  # Is the invalid value preserved after serialization?
  serialized = pb.SerializeToString()
  pb2 = unittest_pb2.TestAllTypes()
  pb2.ParseFromString(serialized)
  self.assertEqual(0, pb2.optional_nested_enum)
  self.assertEqual(pb, pb2)
def testRepeatedScalarTypeSafety(self):
  """Repeated scalar containers type-check appends and item assignment."""
  proto = unittest_pb2.TestAllTypes()
  self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
  self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
  # NOTE(review): the next two lines call the container object itself rather
  # than .append, so the TypeError comes from the container not being
  # callable — likely meant to be proto.repeated_string.append; confirm
  # intent before changing.
  self.assertRaises(TypeError, proto.repeated_string, 10)
  self.assertRaises(TypeError, proto.repeated_bytes, 10)
  proto.repeated_int32.append(10)
  proto.repeated_int32[0] = 23
  self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
  self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
  # Repeated enums tests.
  #proto.repeated_nested_enum.append(0)
def testSingleScalarGettersAndSetters(self):
  """Round-trips values through singular scalar fields."""
  msg = unittest_pb2.TestAllTypes()
  self.assertEqual(0, msg.optional_int32)
  msg.optional_int32 = 1
  self.assertEqual(1, msg.optional_int32)
  # 64-bit values, without and with the top bit set.
  for big_value in (0xffffffffffff, 0xffffffffffffffff):
    msg.optional_uint64 = big_value
    self.assertEqual(big_value, msg.optional_uint64)
  # TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
  """ClearField() resets a scalar and is a no-op when the field is unset."""
  msg = unittest_pb2.TestAllTypes()
  # Clearing a field that was never set must not raise.
  msg.ClearField('optional_int32')
  msg.optional_int32 = 1
  self.assertTrue(msg.HasField('optional_int32'))
  msg.ClearField('optional_int32')
  self.assertEqual(0, msg.optional_int32)
  self.assertFalse(msg.HasField('optional_int32'))
  # TODO(robinson): Test all other scalar field types.
def testEnums(self):
  """Enum values are exposed both on instances and on the message class."""
  proto = unittest_pb2.TestAllTypes()
  for expected, enum_name in ((1, 'FOO'), (2, 'BAR'), (3, 'BAZ')):
    self.assertEqual(expected, getattr(proto, enum_name))
    self.assertEqual(expected, getattr(unittest_pb2.TestAllTypes, enum_name))
def testRepeatedScalars(self):
  """Repeated scalar fields support the full mutable-sequence protocol."""
  proto = unittest_pb2.TestAllTypes()
  # Empty container is falsy and has length 0.
  self.assertTrue(not proto.repeated_int32)
  self.assertEqual(0, len(proto.repeated_int32))
  proto.repeated_int32.append(5)
  proto.repeated_int32.append(10)
  proto.repeated_int32.append(15)
  self.assertTrue(proto.repeated_int32)
  self.assertEqual(3, len(proto.repeated_int32))
  self.assertEqual([5, 10, 15], proto.repeated_int32)
  # Test single retrieval.
  self.assertEqual(5, proto.repeated_int32[0])
  self.assertEqual(15, proto.repeated_int32[-1])
  # Test out-of-bounds indices.
  self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
  self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
  # Test incorrect types passed to __getitem__.
  self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
  self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
  # Test single assignment.
  proto.repeated_int32[1] = 20
  self.assertEqual([5, 20, 15], proto.repeated_int32)
  # Test insertion.
  proto.repeated_int32.insert(1, 25)
  self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
  # Test slice retrieval.
  proto.repeated_int32.append(30)
  self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
  self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
  # Test slice assignment with an iterator
  proto.repeated_int32[1:4] = (i for i in xrange(3))
  self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
  # Test slice assignment.
  proto.repeated_int32[1:4] = [35, 40, 45]
  self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
  # Test that we can use the field as an iterator.
  result = []
  for i in proto.repeated_int32:
    result.append(i)
  self.assertEqual([5, 35, 40, 45, 30], result)
  # Test single deletion.
  del proto.repeated_int32[2]
  self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
  # Test slice deletion.
  del proto.repeated_int32[2:]
  self.assertEqual([5, 35], proto.repeated_int32)
  # Test extending.
  proto.repeated_int32.extend([3, 13])
  self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
  # Test clearing.
  proto.ClearField('repeated_int32')
  self.assertTrue(not proto.repeated_int32)
  self.assertEqual(0, len(proto.repeated_int32))
  proto.repeated_int32.append(1)
  self.assertEqual(1, proto.repeated_int32[-1])
  # Test assignment to a negative index.
  proto.repeated_int32[-1] = 2
  self.assertEqual(2, proto.repeated_int32[-1])
  # Test deletion at negative indices.
  proto.repeated_int32[:] = [0, 1, 2, 3]
  del proto.repeated_int32[-1]
  self.assertEqual([0, 1, 2], proto.repeated_int32)
  del proto.repeated_int32[-2]
  self.assertEqual([0, 2], proto.repeated_int32)
  self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
  self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
  # Deleting an empty or fully out-of-range slice is a silent no-op for
  # the out-of-range part, matching list semantics.
  del proto.repeated_int32[-2:-1]
  self.assertEqual([2], proto.repeated_int32)
  del proto.repeated_int32[100:10000]
  self.assertEqual([2], proto.repeated_int32)
def testRepeatedScalarsRemove(self):
  """remove() deletes the first matching element; a missing value raises."""
  proto = unittest_pb2.TestAllTypes()
  self.assertTrue(not proto.repeated_int32)
  self.assertEqual(0, len(proto.repeated_int32))
  proto.repeated_int32.append(5)
  proto.repeated_int32.append(10)
  proto.repeated_int32.append(5)
  proto.repeated_int32.append(5)
  self.assertEqual(4, len(proto.repeated_int32))
  # Only the first 5 is removed; later duplicates stay in place.
  proto.repeated_int32.remove(5)
  self.assertEqual(3, len(proto.repeated_int32))
  self.assertEqual(10, proto.repeated_int32[0])
  self.assertEqual(5, proto.repeated_int32[1])
  self.assertEqual(5, proto.repeated_int32[2])
  proto.repeated_int32.remove(5)
  self.assertEqual(2, len(proto.repeated_int32))
  self.assertEqual(10, proto.repeated_int32[0])
  self.assertEqual(5, proto.repeated_int32[1])
  proto.repeated_int32.remove(10)
  self.assertEqual(1, len(proto.repeated_int32))
  self.assertEqual(5, proto.repeated_int32[0])
  # Remove a non-existent element.
  self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedComposites(self):
  """Repeated message fields support add(), slicing, deletion, and extend()."""
  proto = unittest_pb2.TestAllTypes()
  self.assertTrue(not proto.repeated_nested_message)
  self.assertEqual(0, len(proto.repeated_nested_message))
  m0 = proto.repeated_nested_message.add()
  m1 = proto.repeated_nested_message.add()
  self.assertTrue(proto.repeated_nested_message)
  self.assertEqual(2, len(proto.repeated_nested_message))
  self.assertListsEqual([m0, m1], proto.repeated_nested_message)
  self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
  # Test out-of-bounds indices.
  self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                    1234)
  self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                    -1234)
  # Test incorrect types passed to __getitem__.
  self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                    'foo')
  self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                    None)
  # Test slice retrieval.
  m2 = proto.repeated_nested_message.add()
  m3 = proto.repeated_nested_message.add()
  m4 = proto.repeated_nested_message.add()
  self.assertListsEqual(
      [m1, m2, m3], proto.repeated_nested_message[1:4])
  self.assertListsEqual(
      [m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
  self.assertListsEqual(
      [m0, m1], proto.repeated_nested_message[:2])
  self.assertListsEqual(
      [m2, m3, m4], proto.repeated_nested_message[2:])
  self.assertEqual(
      m0, proto.repeated_nested_message[0])
  self.assertListsEqual(
      [m0], proto.repeated_nested_message[:1])
  # Test that we can use the field as an iterator.
  result = []
  for i in proto.repeated_nested_message:
    result.append(i)
  self.assertListsEqual([m0, m1, m2, m3, m4], result)
  # Test single deletion.
  del proto.repeated_nested_message[2]
  self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)
  # Test slice deletion.
  del proto.repeated_nested_message[2:]
  self.assertListsEqual([m0, m1], proto.repeated_nested_message)
  # Test extending.
  n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
  n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
  proto.repeated_nested_message.extend([n1,n2])
  self.assertEqual(4, len(proto.repeated_nested_message))
  self.assertEqual(n1, proto.repeated_nested_message[2])
  self.assertEqual(n2, proto.repeated_nested_message[3])
  # Test clearing.
  proto.ClearField('repeated_nested_message')
  self.assertTrue(not proto.repeated_nested_message)
  self.assertEqual(0, len(proto.repeated_nested_message))
  # Test constructing an element while adding it.
  proto.repeated_nested_message.add(bb=23)
  self.assertEqual(1, len(proto.repeated_nested_message))
  self.assertEqual(23, proto.repeated_nested_message[0].bb)
def testHandWrittenReflection(self):
  """A Message class built from a hand-written Descriptor is functional."""
  # Hand written extensions are only supported by the pure-Python
  # implementation of the API.
  if api_implementation.Type() != 'python':
    return
  FieldDescriptor = descriptor.FieldDescriptor
  # A single optional int64 field, number 1.
  foo_field_descriptor = FieldDescriptor(
      name='foo_field', full_name='MyProto.foo_field',
      index=0, number=1, type=FieldDescriptor.TYPE_INT64,
      cpp_type=FieldDescriptor.CPPTYPE_INT64,
      label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
      containing_type=None, message_type=None, enum_type=None,
      is_extension=False, extension_scope=None,
      options=descriptor_pb2.FieldOptions())
  mydescriptor = descriptor.Descriptor(
      name='MyProto', full_name='MyProto', filename='ignored',
      containing_type=None, nested_types=[], enum_types=[],
      fields=[foo_field_descriptor], extensions=[],
      options=descriptor_pb2.MessageOptions())
  # The metaclass consumes DESCRIPTOR and generates the field machinery.
  class MyProtoClass(message.Message):
    DESCRIPTOR = mydescriptor
    __metaclass__ = reflection.GeneratedProtocolMessageType
  myproto_instance = MyProtoClass()
  self.assertEqual(0, myproto_instance.foo_field)
  self.assertTrue(not myproto_instance.HasField('foo_field'))
  myproto_instance.foo_field = 23
  self.assertEqual(23, myproto_instance.foo_field)
  self.assertTrue(myproto_instance.HasField('foo_field'))
def testTopLevelExtensionsForOptionalScalar(self):
  """Extensions[] read/write/clear mirrors singular-scalar has-bit rules."""
  msg = unittest_pb2.TestAllExtensions()
  ext = unittest_pb2.optional_int32_extension
  self.assertFalse(msg.HasExtension(ext))
  self.assertEqual(0, msg.Extensions[ext])
  # As with normal scalar fields, just doing a read doesn't actually set the
  # "has" bit.
  self.assertFalse(msg.HasExtension(ext))
  # Actually set the thing.
  msg.Extensions[ext] = 23
  self.assertEqual(23, msg.Extensions[ext])
  self.assertTrue(msg.HasExtension(ext))
  # Ensure that clearing works as well.
  msg.ClearExtension(ext)
  self.assertEqual(0, msg.Extensions[ext])
  self.assertFalse(msg.HasExtension(ext))
def testTopLevelExtensionsForRepeatedScalar(self):
  """Repeated scalar extensions support append, clear, and disconnection."""
  extendee_proto = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.repeated_string_extension
  self.assertEqual(0, len(extendee_proto.Extensions[extension]))
  extendee_proto.Extensions[extension].append('foo')
  self.assertEqual(['foo'], extendee_proto.Extensions[extension])
  string_list = extendee_proto.Extensions[extension]
  extendee_proto.ClearExtension(extension)
  self.assertEqual(0, len(extendee_proto.Extensions[extension]))
  # Clearing replaces the container; the old reference is disconnected.
  self.assertTrue(string_list is not extendee_proto.Extensions[extension])
  # Shouldn't be allowed to do Extensions[extension] = 'a'
  self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                    extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
  """Message-valued extensions follow the composite has-bit rules."""
  extendee_proto = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.optional_foreign_message_extension
  self.assertTrue(not extendee_proto.HasExtension(extension))
  self.assertEqual(0, extendee_proto.Extensions[extension].c)
  # As with normal (non-extension) fields, merely reading from the
  # thing shouldn't set the "has" bit.
  self.assertTrue(not extendee_proto.HasExtension(extension))
  extendee_proto.Extensions[extension].c = 23
  self.assertEqual(23, extendee_proto.Extensions[extension].c)
  self.assertTrue(extendee_proto.HasExtension(extension))
  # Save a reference here.
  foreign_message = extendee_proto.Extensions[extension]
  extendee_proto.ClearExtension(extension)
  # Clearing disconnects the old submessage from the tree.
  self.assertTrue(foreign_message is not extendee_proto.Extensions[extension])
  # Setting a field on foreign_message now shouldn't set
  # any "has" bits on extendee_proto.
  foreign_message.c = 42
  self.assertEqual(42, foreign_message.c)
  self.assertTrue(foreign_message.HasField('c'))
  self.assertTrue(not extendee_proto.HasExtension(extension))
  # Shouldn't be allowed to do Extensions[extension] = 'a'
  self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                    extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
  """Repeated group extensions support add(), mutation, and clearing."""
  extendee_proto = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.repeatedgroup_extension
  self.assertEqual(0, len(extendee_proto.Extensions[extension]))
  group = extendee_proto.Extensions[extension].add()
  group.a = 23
  self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
  # The object returned by add() aliases the stored element.
  group.a = 42
  self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
  group_list = extendee_proto.Extensions[extension]
  extendee_proto.ClearExtension(extension)
  self.assertEqual(0, len(extendee_proto.Extensions[extension]))
  # Clearing replaces the container; the old reference is disconnected.
  self.assertTrue(group_list is not extendee_proto.Extensions[extension])
  # Shouldn't be allowed to do Extensions[extension] = 'a'
  self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
                    extension, 'a')
def testNestedExtensions(self):
  """Extensions declared inside another message behave like top-level ones."""
  extendee_proto = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.TestRequired.single
  # We just test the non-repeated case.
  self.assertTrue(not extendee_proto.HasExtension(extension))
  required = extendee_proto.Extensions[extension]
  self.assertEqual(0, required.a)
  # Reading alone does not set the "has" bit.
  self.assertTrue(not extendee_proto.HasExtension(extension))
  required.a = 23
  self.assertEqual(23, extendee_proto.Extensions[extension].a)
  self.assertTrue(extendee_proto.HasExtension(extension))
  extendee_proto.ClearExtension(extension)
  # Clearing disconnects the previously returned submessage.
  self.assertTrue(required is not extendee_proto.Extensions[extension])
  self.assertTrue(not extendee_proto.HasExtension(extension))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
  """Mutating an extension in a submessage sets ancestors' has-bits."""
  # Optional scalar extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension])
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension] = 23
  self.assertEqual(23, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension])
  self.assertTrue(toplevel.HasField('submessage'))

  # Repeated scalar extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual([], toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension])
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension].append(23)
  self.assertEqual([23], toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension])
  self.assertTrue(toplevel.HasField('submessage'))

  # Optional message extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int)
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int = 23
  self.assertEqual(23, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int)
  self.assertTrue(toplevel.HasField('submessage'))

  # Repeated message extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, len(toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension]))
  self.assertTrue(not toplevel.HasField('submessage'))
  foreign = toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension].add()
  self.assertEqual(foreign, toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension][0])
  self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
  """Clearing a never-set message extension still disconnects the child."""
  toplevel = more_extensions_pb2.TopLevelMessage()
  extendee_proto = toplevel.submessage
  extension = more_extensions_pb2.optional_message_extension
  extension_proto = extendee_proto.Extensions[extension]
  extendee_proto.ClearExtension(extension)
  # Writing through the stale reference must not re-attach it to the parent.
  extension_proto.foreign_message_int = 23
  self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
def testExtensionFailureModes(self):
  """Invalid or foreign extension handles raise KeyError."""
  extendee_proto = unittest_pb2.TestAllExtensions()
  # Try non-extension-handle arguments to HasExtension,
  # ClearExtension(), and Extensions[]...
  self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
  self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
  self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
  self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
  # Try something that *is* an extension handle, just not for
  # this message...
  unknown_handle = more_extensions_pb2.optional_int_extension
  self.assertRaises(KeyError, extendee_proto.HasExtension,
                    unknown_handle)
  self.assertRaises(KeyError, extendee_proto.ClearExtension,
                    unknown_handle)
  self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
                    unknown_handle)
  self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
                    unknown_handle, 5)
  # Try call HasExtension() with a valid handle, but for a
  # *repeated* field.  (Just as with non-extension repeated
  # fields, Has*() isn't supported for extension repeated fields).
  self.assertRaises(KeyError, extendee_proto.HasExtension,
                    unittest_pb2.repeated_string_extension)
def testStaticParseFrom(self):
  """The FromString() class method reconstructs an equal message."""
  original = unittest_pb2.TestAllTypes()
  test_util.SetAllFields(original)
  serialized = original.SerializeToString()
  reparsed = unittest_pb2.TestAllTypes.FromString(serialized)
  # Messages should be equal.
  self.assertEqual(reparsed, original)
def testMergeFromSingularField(self):
  """MergeFrom() copies set singular fields without clobbering others."""
  source = unittest_pb2.TestAllTypes()
  source.optional_int32 = 1
  dest = unittest_pb2.TestAllTypes()
  # This shouldn't get overwritten.
  dest.optional_string = 'value'
  dest.MergeFrom(source)
  self.assertEqual(1, dest.optional_int32)
  self.assertEqual('value', dest.optional_string)
def testMergeFromRepeatedField(self):
  """MergeFrom() appends repeated elements after any existing ones."""
  source = unittest_pb2.TestAllTypes()
  source.repeated_int32.extend([1, 2])
  dest = unittest_pb2.TestAllTypes()
  dest.repeated_int32.append(0)
  dest.MergeFrom(source)
  # Pre-existing element stays first; merged ones follow in source order.
  self.assertEqual([0, 1, 2], dest.repeated_int32)
def testMergeFromOptionalGroup(self):
  """MergeFrom() recursively merges an optional group field."""
  source = unittest_pb2.TestAllTypes()
  source.optionalgroup.a = 12
  dest = unittest_pb2.TestAllTypes()
  dest.MergeFrom(source)
  self.assertEqual(12, dest.optionalgroup.a)
def testMergeFromRepeatedNestedMessage(self):
# Test merge with a repeated nested message.
proto1 = unittest_pb2.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = unittest_pb2.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
proto3 = unittest_pb2.TestAllTypes()
proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
self.assertEqual(999, proto3.repeated_nested_message[0].bb)
self.assertEqual(123, proto3.repeated_nested_message[1].bb)
self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
def testMergeFromExtensionsRepeated(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
proto2 = unittest_pb2.TestAllExtensions()
proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
proto2.MergeFrom(proto1)
self.assertEqual(
3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
self.assertEqual(
0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
self.assertEqual(
1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
self.assertEqual(
2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
def testMergeFromExtensionsNestedMessage(self):
proto1 = unittest_pb2.TestAllExtensions()
ext1 = proto1.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext1.add()
m.bb = 222
m = ext1.add()
m.bb = 333
proto2 = unittest_pb2.TestAllExtensions()
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext2.add()
m.bb = 111
proto2.MergeFrom(proto1)
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
self.assertEqual(3, len(ext2))
self.assertEqual(111, ext2[0].bb)
self.assertEqual(222, ext2[1].bb)
self.assertEqual(333, ext2[2].bb)
def testMergeFromBug(self):
message1 = unittest_pb2.TestAllTypes()
message2 = unittest_pb2.TestAllTypes()
# Cause optional_nested_message to be instantiated within message1, even
# though it is not considered to be "present".
message1.optional_nested_message
self.assertFalse(message1.HasField('optional_nested_message'))
# Merge into message2. This should not instantiate the field is message2.
message2.MergeFrom(message1)
self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testCopyFromBadType(self):
# The python implementation doesn't raise an exception in this
# case. In theory it should.
if api_implementation.Type() == 'python':
return
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllExtensions()
self.assertRaises(TypeError, proto1.CopyFrom, proto2)
def testClear(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEquals(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEquals(proto, empty_proto)
def assertInitialized(self, proto):
self.assertTrue(proto.IsInitialized())
# Neither method should raise an exception.
proto.SerializeToString()
proto.SerializePartialToString()
def assertNotInitialized(self, proto):
self.assertFalse(proto.IsInitialized())
self.assertRaises(message.EncodeError, proto.SerializeToString)
# "Partial" serialization doesn't care if message is uninitialized.
proto.SerializePartialToString()
  def testIsInitialized(self):
    """Exercises IsInitialized() across required fields, submessages,
    repeated submessages, and extensions (repeated group and singular)."""
    # Trivial cases - all optional fields and extensions.
    proto = unittest_pb2.TestAllTypes()
    self.assertInitialized(proto)
    proto = unittest_pb2.TestAllExtensions()
    self.assertInitialized(proto)
    # The case of uninitialized required fields.
    proto = unittest_pb2.TestRequired()
    self.assertNotInitialized(proto)
    proto.a = proto.b = proto.c = 2
    self.assertInitialized(proto)
    # The case of uninitialized submessage.
    proto = unittest_pb2.TestRequiredForeign()
    self.assertInitialized(proto)
    # Setting one required field of the submessage makes the whole
    # message uninitialized until the others are set too.
    proto.optional_message.a = 1
    self.assertNotInitialized(proto)
    proto.optional_message.b = 0
    proto.optional_message.c = 0
    self.assertInitialized(proto)
    # Uninitialized repeated submessage.
    message1 = proto.repeated_message.add()
    self.assertNotInitialized(proto)
    message1.a = message1.b = message1.c = 0
    self.assertInitialized(proto)
    # Uninitialized repeated group in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.multi
    message1 = proto.Extensions[extension].add()
    message2 = proto.Extensions[extension].add()
    self.assertNotInitialized(proto)
    message1.a = 1
    message1.b = 1
    message1.c = 1
    # Still uninitialized: the second repeated entry is incomplete.
    self.assertNotInitialized(proto)
    message2.a = 2
    message2.b = 2
    message2.c = 2
    self.assertInitialized(proto)
    # Uninitialized nonrepeated message in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.single
    proto.Extensions[extension].a = 1
    self.assertNotInitialized(proto)
    proto.Extensions[extension].b = 2
    proto.Extensions[extension].c = 3
    self.assertInitialized(proto)
    # Try passing an errors list: IsInitialized(errors) reports the
    # names of the missing required fields.
    errors = []
    proto = unittest_pb2.TestRequired()
    self.assertFalse(proto.IsInitialized(errors))
    self.assertEqual(errors, ['a', 'b', 'c'])
  def testStringUTF8Encoding(self):
    """Checks Python 2 str/unicode acceptance rules for string fields.

    'bytes' fields reject unicode objects; 'string' fields accept str
    values only when they are valid 7-bit-ASCII/UTF-8-decodable.
    """
    proto = unittest_pb2.TestAllTypes()
    # Assignment of a unicode object to a field of type 'bytes' is not allowed.
    self.assertRaises(TypeError,
                      setattr, proto, 'optional_bytes', u'unicode object')
    # Check that the default value is of python's 'unicode' type.
    self.assertEqual(type(proto.optional_string), unicode)
    proto.optional_string = unicode('Testing')
    self.assertEqual(proto.optional_string, str('Testing'))
    # Assign a value of type 'str' which can be encoded in UTF-8.
    proto.optional_string = str('Testing')
    self.assertEqual(proto.optional_string, unicode('Testing'))
    if api_implementation.Type() == 'python':
      # Values of type 'str' are also accepted as long as they can be
      # encoded in UTF-8.
      self.assertEqual(type(proto.optional_string), str)
    # Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
    self.assertRaises(ValueError,
                      setattr, proto, 'optional_string', str('a\x80a'))
    # Assign a 'str' object which contains a UTF-8 encoded string.
    self.assertRaises(ValueError,
                      setattr, proto, 'optional_string', 'Тест')
    # No exception thrown.
    proto.optional_string = 'abc'
def testStringUTF8Serialization(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message = unittest_mset_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
raw.MergeFromString(serialized)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 1547769)
# Check the actual bytes on the wire.
self.assertTrue(
raw.item[0].message.endswith(test_utf8_bytes))
message2.MergeFromString(raw.item[0].message)
self.assertEqual(type(message2.str), unicode)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
# if any of the string fields of the message can't be UTF-8 decoded.
# The C++ implementation of the API has no way to check that on
# MergeFromString and thus has no way to throw the exception.
#
# The pure Python API always returns objects of type 'unicode' (UTF-8
# encoded), or 'str' (in 7 bit ASCII).
bytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * '\xff')
unicode_decode_failed = False
try:
message2.MergeFromString(bytes)
except UnicodeDecodeError, e:
unicode_decode_failed = True
string_field = message2.str
self.assertTrue(unicode_decode_failed or type(string_field) == str)
def testEmptyNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.MergeFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.CopyFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.MergeFromString('')
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.ParseFromString('')
self.assertTrue(proto.HasField('optional_nested_message'))
serialized = proto.SerializeToString()
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFromString(serialized)
self.assertTrue(proto2.HasField('optional_nested_message'))
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(unittest.TestCase):
  """Equality semantics for freshly-constructed TestAllTypes messages."""

  def setUp(self):
    self.proto_a = unittest_pb2.TestAllTypes()
    self.proto_b = unittest_pb2.TestAllTypes()

  def testNotHashable(self):
    # Messages are mutable and therefore must not be hashable.
    self.assertRaises(TypeError, hash, self.proto_a)

  def testSelfEquality(self):
    self.assertEqual(self.proto_a, self.proto_a)

  def testEmptyProtosEqual(self):
    # Two default-constructed messages compare equal.
    self.assertEqual(self.proto_a, self.proto_b)
class FullProtosEqualityTest(unittest.TestCase):

  """Equality tests using completely-full protos as a starting point."""

  def setUp(self):
    self.lhs = unittest_pb2.TestAllTypes()
    self.rhs = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(self.lhs)
    test_util.SetAllFields(self.rhs)

  def testNotHashable(self):
    # Mutable messages must refuse to hash.
    self.assertRaises(TypeError, hash, self.lhs)

  def testNoneNotEqual(self):
    # Comparison against None is well-defined and never equal.
    self.assertNotEqual(self.lhs, None)
    self.assertNotEqual(None, self.rhs)

  def testNotEqualToOtherMessage(self):
    # Messages of different types never compare equal.
    other = unittest_pb2.TestRequired()
    self.assertNotEqual(self.lhs, other)
    self.assertNotEqual(other, self.rhs)

  def testAllFieldsFilledEquality(self):
    self.assertEqual(self.lhs, self.rhs)

  def testNonRepeatedScalar(self):
    # Changing a nonrepeated scalar breaks equality...
    self.lhs.optional_int32 += 1
    self.assertNotEqual(self.lhs, self.rhs)
    # ...and so does clearing the field outright.
    self.lhs.ClearField('optional_int32')
    self.assertNotEqual(self.lhs, self.rhs)

  def testNonRepeatedComposite(self):
    # Mutate a field inside a nonrepeated submessage, then undo it.
    self.lhs.optional_nested_message.bb += 1
    self.assertNotEqual(self.lhs, self.rhs)
    self.lhs.optional_nested_message.bb -= 1
    self.assertEqual(self.lhs, self.rhs)
    # Clearing a nested field breaks equality; restoring the value mends it.
    self.lhs.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.lhs, self.rhs)
    self.lhs.optional_nested_message.bb = (
        self.rhs.optional_nested_message.bb)
    self.assertEqual(self.lhs, self.rhs)
    # Dropping the submessage entirely also breaks equality.
    self.lhs.ClearField('optional_nested_message')
    self.assertNotEqual(self.lhs, self.rhs)

  def testRepeatedScalar(self):
    # Appending to a repeated scalar breaks equality; so does clearing it.
    self.lhs.repeated_int32.append(5)
    self.assertNotEqual(self.lhs, self.rhs)
    self.lhs.ClearField('repeated_int32')
    self.assertNotEqual(self.lhs, self.rhs)

  def testRepeatedComposite(self):
    # Mutate an element of a repeated submessage, then undo it.
    self.lhs.repeated_nested_message[0].bb += 1
    self.assertNotEqual(self.lhs, self.rhs)
    self.lhs.repeated_nested_message[0].bb -= 1
    self.assertEqual(self.lhs, self.rhs)
    # Lengths must match too: add to one side, then to the other.
    self.lhs.repeated_nested_message.add()
    self.assertNotEqual(self.lhs, self.rhs)
    self.rhs.repeated_nested_message.add()
    self.assertEqual(self.lhs, self.rhs)

  def testNonRepeatedScalarHasBits(self):
    # A cleared field is not equal to one explicitly set to its default.
    self.lhs.ClearField('optional_int32')
    self.rhs.optional_int32 = 0
    self.assertNotEqual(self.lhs, self.rhs)

  def testNonRepeatedCompositeHasBits(self):
    # The same "has" bit distinction, one level down in a submessage.
    self.lhs.ClearField('optional_nested_message')
    self.rhs.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.lhs, self.rhs)
    self.lhs.optional_nested_message.bb = 0
    self.lhs.optional_nested_message.ClearField('bb')
    self.assertEqual(self.lhs, self.rhs)
class ExtensionEqualityTest(unittest.TestCase):
  """Equality semantics for messages carrying extensions."""

  def testExtensionEquality(self):
    ext = unittest_pb2.optional_int32_extension
    proto_a = unittest_pb2.TestAllExtensions()
    proto_b = unittest_pb2.TestAllExtensions()
    self.assertEqual(proto_a, proto_b)
    test_util.SetAllExtensions(proto_a)
    self.assertNotEqual(proto_a, proto_b)
    test_util.SetAllExtensions(proto_b)
    self.assertEqual(proto_a, proto_b)
    # Value equality matters for extensions just as for regular fields.
    proto_a.Extensions[ext] += 1
    self.assertNotEqual(proto_a, proto_b)
    proto_a.Extensions[ext] -= 1
    self.assertEqual(proto_a, proto_b)
    # "Has" bits are part of equality: cleared != explicitly zero.
    proto_a.ClearExtension(ext)
    proto_b.Extensions[ext] = 0
    self.assertNotEqual(proto_a, proto_b)
    proto_a.Extensions[ext] = 0
    self.assertEqual(proto_a, proto_b)
    # Reading a default caches a value, but two messages whose "has"
    # bits are both false must still compare equal.
    proto_a = unittest_pb2.TestAllExtensions()
    proto_b = unittest_pb2.TestAllExtensions()
    self.assertEqual(0, proto_a.Extensions[ext])
    self.assertEqual(proto_a, proto_b)
class MutualRecursionEqualityTest(unittest.TestCase):
  """Equality must work on mutually-recursive message types."""

  def testEqualityWithMutualRecursion(self):
    proto_a = unittest_pb2.TestMutualRecursionA()
    proto_b = unittest_pb2.TestMutualRecursionA()
    self.assertEqual(proto_a, proto_b)
    # Setting a deeply-nested field breaks and then restores equality.
    proto_a.bb.a.bb.optional_int32 = 23
    self.assertNotEqual(proto_a, proto_b)
    proto_b.bb.a.bb.optional_int32 = 23
    self.assertEqual(proto_a, proto_b)
class ByteSizeTest(unittest.TestCase):
  """Verifies that ByteSize() reports exact wire sizes for every field
  kind, and that the cached size is invalidated whenever fields or
  extensions are modified, cleared, or deleted."""

  def setUp(self):
    self.proto = unittest_pb2.TestAllTypes()
    self.extended_proto = more_extensions_pb2.ExtendedMessage()
    self.packed_proto = unittest_pb2.TestPackedTypes()
    self.packed_extended_proto = unittest_pb2.TestPackedExtensions()

  def Size(self):
    """Shorthand for the byte size of the primary test message."""
    return self.proto.ByteSize()

  def testEmptyMessage(self):
    self.assertEqual(0, self.proto.ByteSize())

  def testSizedOnKwargs(self):
    # Use a separate message to ensure testing right after creation.
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(0, proto.ByteSize())
    proto_kwargs = unittest_pb2.TestAllTypes(optional_int64=1)
    # One byte for the tag, one to encode varint 1.
    self.assertEqual(2, proto_kwargs.ByteSize())

  def testVarints(self):
    def Test(i, expected_varint_size):
      self.proto.Clear()
      self.proto.optional_int64 = i
      # Add one to the varint size for the tag info for tag 1.
      self.assertEqual(expected_varint_size + 1, self.Size())
    Test(0, 1)
    Test(1, 1)
    # Each additional 7 significant bits costs one more varint byte.
    for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
      Test((1 << i) - 1, num_bytes)
    # Negative int64 values always occupy the full 10 varint bytes.
    Test(-1, 10)
    Test(-2, 10)
    Test(-(1 << 63), 10)

  def testStrings(self):
    self.proto.optional_string = ''
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2, self.Size())
    self.proto.optional_string = 'abc'
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2 + len(self.proto.optional_string), self.Size())
    self.proto.optional_string = 'x' * 128
    # Need one byte for tag info (tag #14), and TWO bytes for length.
    self.assertEqual(3 + len(self.proto.optional_string), self.Size())

  def testOtherNumerics(self):
    self.proto.optional_fixed32 = 1234
    # One byte for tag and 4 bytes for fixed32.
    self.assertEqual(5, self.Size())
    self.proto = unittest_pb2.TestAllTypes()
    self.proto.optional_fixed64 = 1234
    # One byte for tag and 8 bytes for fixed64.
    self.assertEqual(9, self.Size())
    self.proto = unittest_pb2.TestAllTypes()
    self.proto.optional_float = 1.234
    # One byte for tag and 4 bytes for float.
    self.assertEqual(5, self.Size())
    self.proto = unittest_pb2.TestAllTypes()
    self.proto.optional_double = 1.234
    # One byte for tag and 8 bytes for double.
    self.assertEqual(9, self.Size())
    self.proto = unittest_pb2.TestAllTypes()
    self.proto.optional_sint32 = 64
    # One byte for tag and 2 bytes for zig-zag-encoded 64.
    self.assertEqual(3, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

  def testComposites(self):
    # 3 bytes.
    self.proto.optional_nested_message.bb = (1 << 14)
    # Plus one byte for bb tag.
    # Plus 1 byte for optional_nested_message serialized size.
    # Plus two bytes for optional_nested_message tag.
    self.assertEqual(3 + 1 + 1 + 2, self.Size())

  def testGroups(self):
    # 4 bytes.
    self.proto.optionalgroup.a = (1 << 21)
    # Plus two bytes for |a| tag.
    # Plus 2 * two bytes for START_GROUP and END_GROUP tags.
    self.assertEqual(4 + 2 + 2*2, self.Size())

  def testRepeatedScalars(self):
    self.proto.repeated_int32.append(10)  # 1 byte.
    self.proto.repeated_int32.append(128)  # 2 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())

  def testRepeatedScalarsExtend(self):
    self.proto.repeated_int32.extend([10, 128])  # 3 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())

  def testRepeatedScalarsRemove(self):
    self.proto.repeated_int32.append(10)  # 1 byte.
    self.proto.repeated_int32.append(128)  # 2 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())
    # Removing an element must shrink the reported size accordingly.
    self.proto.repeated_int32.remove(128)
    self.assertEqual(1 + 2, self.Size())

  def testRepeatedComposites(self):
    # Empty message. 2 bytes tag plus 1 byte length.
    # (Unused binding of the add() result removed; the call's side
    # effect is what matters.)
    self.proto.repeated_nested_message.add()
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    foreign_message_1 = self.proto.repeated_nested_message.add()
    foreign_message_1.bb = 7
    self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())

  def testRepeatedCompositesDelete(self):
    # Empty message. 2 bytes tag plus 1 byte length.
    self.proto.repeated_nested_message.add()
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    foreign_message_1 = self.proto.repeated_nested_message.add()
    foreign_message_1.bb = 9
    self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    del self.proto.repeated_nested_message[0]
    self.assertEqual(2 + 1 + 1 + 1, self.Size())
    # Now add a new message.
    foreign_message_2 = self.proto.repeated_nested_message.add()
    foreign_message_2.bb = 12
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    del self.proto.repeated_nested_message[1]
    self.assertEqual(2 + 1 + 1 + 1, self.Size())
    del self.proto.repeated_nested_message[0]
    self.assertEqual(0, self.Size())

  def testRepeatedGroups(self):
    # 2-byte START_GROUP plus 2-byte END_GROUP.
    self.proto.repeatedgroup.add()
    # 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
    # plus 2-byte END_GROUP.
    group_1 = self.proto.repeatedgroup.add()
    group_1.a = 7
    self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())

  def testExtensions(self):
    proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(0, proto.ByteSize())
    extension = unittest_pb2.optional_int32_extension  # Field #1, 1 byte.
    proto.Extensions[extension] = 23
    # 1 byte for tag, 1 byte for value.
    self.assertEqual(2, proto.ByteSize())

  def testCacheInvalidationForNonrepeatedScalar(self):
    # Test non-extension.
    self.proto.optional_int32 = 1
    self.assertEqual(2, self.proto.ByteSize())
    self.proto.optional_int32 = 128
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_int32')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.optional_int_extension
    self.extended_proto.Extensions[extension] = 1
    self.assertEqual(2, self.extended_proto.ByteSize())
    self.extended_proto.Extensions[extension] = 128
    self.assertEqual(3, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForRepeatedScalar(self):
    # Test non-extension.
    self.proto.repeated_int32.append(1)
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.repeated_int32.append(1)
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.repeated_int32[1] = 128
    self.assertEqual(7, self.proto.ByteSize())
    self.proto.ClearField('repeated_int32')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.repeated_int_extension
    repeated = self.extended_proto.Extensions[extension]
    repeated.append(1)
    self.assertEqual(2, self.extended_proto.ByteSize())
    repeated.append(1)
    self.assertEqual(4, self.extended_proto.ByteSize())
    repeated[1] = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForNonrepeatedMessage(self):
    # Test non-extension.
    self.proto.optional_foreign_message.c = 1
    self.assertEqual(5, self.proto.ByteSize())
    self.proto.optional_foreign_message.c = 128
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.optional_foreign_message.ClearField('c')
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())
    if api_implementation.Type() == 'python':
      # Mutating an orphaned child must not affect the parent's size.
      # This is only possible in the pure-Python implementation of the API.
      child = self.proto.optional_foreign_message
      self.proto.ClearField('optional_foreign_message')
      child.c = 128
      self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.optional_message_extension
    child = self.extended_proto.Extensions[extension]
    self.assertEqual(0, self.extended_proto.ByteSize())
    child.foreign_message_int = 1
    self.assertEqual(4, self.extended_proto.ByteSize())
    child.foreign_message_int = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForRepeatedMessage(self):
    # Test non-extension.
    child0 = self.proto.repeated_foreign_message.add()
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.repeated_foreign_message.add()
    self.assertEqual(6, self.proto.ByteSize())
    child0.c = 1
    self.assertEqual(8, self.proto.ByteSize())
    self.proto.ClearField('repeated_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.repeated_message_extension
    child_list = self.extended_proto.Extensions[extension]
    child0 = child_list.add()
    self.assertEqual(2, self.extended_proto.ByteSize())
    child_list.add()
    self.assertEqual(4, self.extended_proto.ByteSize())
    child0.foreign_message_int = 1
    self.assertEqual(6, self.extended_proto.ByteSize())
    child0.ClearField('foreign_message_int')
    self.assertEqual(4, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testPackedRepeatedScalars(self):
    self.assertEqual(0, self.packed_proto.ByteSize())
    self.packed_proto.packed_int32.append(10)  # 1 byte.
    self.packed_proto.packed_int32.append(128)  # 2 bytes.
    # The tag is 2 bytes (the field number is 90), and the varint
    # storing the length is 1 byte.
    int_size = 1 + 2 + 3
    self.assertEqual(int_size, self.packed_proto.ByteSize())
    self.packed_proto.packed_double.append(4.2)  # 8 bytes
    self.packed_proto.packed_double.append(3.25)  # 8 bytes
    # 2 more tag bytes, 1 more length byte.
    double_size = 8 + 8 + 3
    self.assertEqual(int_size + double_size, self.packed_proto.ByteSize())
    self.packed_proto.ClearField('packed_int32')
    self.assertEqual(double_size, self.packed_proto.ByteSize())

  def testPackedExtensions(self):
    self.assertEqual(0, self.packed_extended_proto.ByteSize())
    extension = self.packed_extended_proto.Extensions[
        unittest_pb2.packed_fixed32_extension]
    extension.extend([1, 2, 3, 4])  # 16 bytes
    # Tag is 3 bytes.
    self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(unittest.TestCase):
def testSerializeEmtpyMessage(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testSerializeNegativeValues(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.optional_int32 = -1
first_proto.optional_int64 = -(2 << 40)
first_proto.optional_sint32 = -3
first_proto.optional_sint64 = -(4 << 40)
first_proto.optional_sfixed32 = -5
first_proto.optional_sfixed64 = -(6 << 40)
second_proto = unittest_pb2.TestAllTypes.FromString(
first_proto.SerializeToString())
self.assertEqual(first_proto, second_proto)
  def testParseTruncated(self):
    """Parses every truncated prefix of a full serialization and checks
    that parsing to unknown fields fails exactly when parsing to known
    fields does.  Uses the private _InternalParse hook, so it is
    pure-Python only."""
    # This test is only applicable for the Python implementation of the API.
    if api_implementation.Type() != 'python':
      return
    first_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(first_proto)
    serialized = first_proto.SerializeToString()
    for truncation_point in xrange(len(serialized) + 1):
      try:
        second_proto = unittest_pb2.TestAllTypes()
        unknown_fields = unittest_pb2.TestEmptyMessage()
        pos = second_proto._InternalParse(serialized, 0, truncation_point)
        # If we didn't raise an error then we read exactly the amount expected.
        self.assertEqual(truncation_point, pos)
        # Parsing to unknown fields should not throw if parsing to known fields
        # did not.
        try:
          pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
          self.assertEqual(truncation_point, pos2)
        except message.DecodeError:
          self.fail('Parsing unknown fields failed when parsing known fields '
                    'did not.')
      except message.DecodeError:
        # Parsing unknown fields should also fail.
        self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
                          serialized, 0, truncation_point)
  def testCanonicalSerializationOrder(self):
    """Fields and extensions must serialize in ascending tag order."""
    proto = more_messages_pb2.OutOfOrderFields()
    # These are also their tag numbers. Even though we're setting these in
    # reverse-tag order AND they're listed in reverse tag-order in the .proto
    # file, they should nonetheless be serialized in tag order.
    proto.optional_sint32 = 5
    proto.Extensions[more_messages_pb2.optional_uint64] = 4
    proto.optional_uint32 = 3
    proto.Extensions[more_messages_pb2.optional_int64] = 2
    proto.optional_int32 = 1
    serialized = proto.SerializeToString()
    self.assertEqual(proto.ByteSize(), len(serialized))
    d = _MiniDecoder(serialized)
    ReadTag = d.ReadFieldNumberAndWireType
    # Walk the wire bytes and verify tags 1..5 appear in ascending order,
    # with extension and non-extension fields interleaved correctly.
    self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(1, d.ReadInt32())
    self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(2, d.ReadInt64())
    self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(3, d.ReadUInt32())
    self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(4, d.ReadUInt64())
    self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(5, d.ReadSInt32())
def testCanonicalSerializationOrderSameAsCpp(self):
# Copy of the same test we use for C++.
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
serialized = proto.SerializeToString()
test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
second_proto.MergeFromString(serialized)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
# Ensure that we overwrite nonrepeatd scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
raw.MergeFromString(serialized)
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.MergeFromString(raw.item[0].message)
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
message2.MergeFromString(raw.item[1].message)
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
proto2.MergeFromString(serialized)
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
proto.MergeFromString(serialized)
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEquals(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
proto2.MergeFromString(serialized)
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
proto2.MergeFromString(serialized)
  def _CheckRaises(self, exc_class, callable_obj, exception):
    """Assert that calling callable_obj raises exc_class whose string
    form is exactly `exception`.

    Unlike plain assertRaises, this also verifies the exception message.
    Fails with failureException if no exception is raised at all.
    """
    try:
      callable_obj()
    except exc_class, ex:
      # Check if the exception message is the right one.
      self.assertEqual(exception, str(ex))
      return
    else:
      raise self.failureException('%s not raised' % str(exc_class))
  def testSerializeUninitialized(self):
    """SerializeToString raises EncodeError (naming the missing fields)
    while any required field is unset; SerializePartialToString never
    raises and its output round-trips once the message is complete."""
    proto = unittest_pb2.TestRequired()
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: a,b,c')
    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()
    proto.a = 1
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: b,c')
    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()
    proto.b = 2
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: c')
    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()
    proto.c = 3
    serialized = proto.SerializeToString()
    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()
    proto2 = unittest_pb2.TestRequired()
    proto2.MergeFromString(serialized)
    self.assertEqual(1, proto2.a)
    self.assertEqual(2, proto2.b)
    self.assertEqual(3, proto2.c)
    # The final partial serialization (all fields set) parses back too.
    proto2.ParseFromString(partial)
    self.assertEqual(1, proto2.a)
    self.assertEqual(2, proto2.b)
    self.assertEqual(3, proto2.c)
  def testSerializeUninitializedSubMessage(self):
    """Missing required fields inside optional and repeated submessages
    are reported with dotted / indexed paths in the EncodeError text."""
    proto = unittest_pb2.TestRequiredForeign()
    # Sub-message doesn't exist yet, so this succeeds.
    proto.SerializeToString()
    proto.optional_message.a = 1
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: '
        'optional_message.b,optional_message.c')
    proto.optional_message.b = 2
    proto.optional_message.c = 3
    proto.SerializeToString()
    # Two partially-initialized repeated elements: each missing field is
    # reported with its element index.
    proto.repeated_message.add().a = 1
    proto.repeated_message.add().b = 2
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: '
        'repeated_message[0].b,repeated_message[0].c,'
        'repeated_message[1].a,repeated_message[1].c')
    proto.repeated_message[0].b = 2
    proto.repeated_message[0].c = 3
    proto.repeated_message[1].a = 1
    proto.repeated_message[1].c = 3
    proto.SerializeToString()
def testSerializeAllPackedFields(self):
first_proto = unittest_pb2.TestPackedTypes()
second_proto = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testSerializeAllPackedExtensions(self):
first_proto = unittest_pb2.TestPackedExtensions()
second_proto = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(first_proto)
serialized = first_proto.SerializeToString()
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
first_proto = unittest_pb2.TestPackedTypes()
first_proto.packed_int32.extend([1, 2])
first_proto.packed_double.append(3.0)
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestPackedTypes()
second_proto.packed_int32.append(3)
second_proto.packed_double.extend([1.0, 2.0])
second_proto.packed_sint32.append(4)
second_proto.MergeFromString(serialized)
self.assertEqual([3, 1, 2], second_proto.packed_int32)
self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
self.assertEqual([4], second_proto.packed_sint32)
  def testPackedFieldsWireFormat(self):
    """Each packed repeated field is a single length-delimited record on
    the wire: tag, total payload byte count, then the raw values."""
    proto = unittest_pb2.TestPackedTypes()
    proto.packed_int32.extend([1, 2, 150, 3])  # 1 + 1 + 2 + 1 bytes
    proto.packed_double.extend([1.0, 1000.0])  # 8 + 8 bytes
    proto.packed_float.append(2.0)             # 4 bytes, will be before double
    serialized = proto.SerializeToString()
    self.assertEqual(proto.ByteSize(), len(serialized))
    d = _MiniDecoder(serialized)
    ReadTag = d.ReadFieldNumberAndWireType
    # Field 90 (packed_int32): length 5, then four varints.
    self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(1+1+1+2, d.ReadInt32())
    self.assertEqual(1, d.ReadInt32())
    self.assertEqual(2, d.ReadInt32())
    self.assertEqual(150, d.ReadInt32())
    self.assertEqual(3, d.ReadInt32())
    # Field 100 (packed_float): length 4, then one 32-bit float.
    self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(4, d.ReadInt32())
    self.assertEqual(2.0, d.ReadFloat())
    # Field 101 (packed_double): length 16, then two 64-bit doubles.
    self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(8+8, d.ReadInt32())
    self.assertEqual(1.0, d.ReadDouble())
    self.assertEqual(1000.0, d.ReadDouble())
    self.assertTrue(d.EndOfStream())
def testParsePackedFromUnpacked(self):
unpacked = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(unpacked)
packed = unittest_pb2.TestPackedTypes()
packed.MergeFromString(unpacked.SerializeToString())
expected = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(expected)
self.assertEqual(expected, packed)
def testParseUnpackedFromPacked(self):
packed = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(packed)
unpacked = unittest_pb2.TestUnpackedTypes()
unpacked.MergeFromString(packed.SerializeToString())
expected = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(expected)
self.assertEqual(expected, unpacked)
def testFieldNumbers(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)
  def testExtensionFieldNumbers(self):
    """Extension descriptors expose .number, and the generated
    *_FIELD_NUMBER constants agree with those numbers, both for
    extensions nested in a message and for file-level extensions."""
    self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
    self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
    self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
    self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
    self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
    self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
    self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
    self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
    self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
    self.assertEqual(
        unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
    self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
    self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
                     21)
    self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
    self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
    self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
    self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
    self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
    self.assertEqual(
        unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
    self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
    self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
                     51)
  def testInitKwargs(self):
    """Constructor keyword arguments set scalar, submessage, enum and
    repeated fields in one call, and mark singular fields as present."""
    proto = unittest_pb2.TestAllTypes(
        optional_int32=1,
        optional_string='foo',
        optional_bool=True,
        optional_bytes='bar',
        optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
        optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
        optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
        optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
        repeated_int32=[1, 2, 3])
    self.assertTrue(proto.IsInitialized())
    # Every singular field passed as a kwarg must register as present.
    self.assertTrue(proto.HasField('optional_int32'))
    self.assertTrue(proto.HasField('optional_string'))
    self.assertTrue(proto.HasField('optional_bool'))
    self.assertTrue(proto.HasField('optional_bytes'))
    self.assertTrue(proto.HasField('optional_nested_message'))
    self.assertTrue(proto.HasField('optional_foreign_message'))
    self.assertTrue(proto.HasField('optional_nested_enum'))
    self.assertTrue(proto.HasField('optional_foreign_enum'))
    # And the stored values must match what was passed in.
    self.assertEqual(1, proto.optional_int32)
    self.assertEqual('foo', proto.optional_string)
    self.assertEqual(True, proto.optional_bool)
    self.assertEqual('bar', proto.optional_bytes)
    self.assertEqual(1, proto.optional_nested_message.bb)
    self.assertEqual(1, proto.optional_foreign_message.c)
    self.assertEqual(unittest_pb2.TestAllTypes.FOO,
                     proto.optional_nested_enum)
    self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
    self.assertEqual([1, 2, 3], proto.repeated_int32)
def testInitArgsUnknownFieldName(self):
def InitalizeEmptyMessageWithExtraKeywordArg():
unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
self._CheckRaises(ValueError,
InitalizeEmptyMessageWithExtraKeywordArg,
'Protocol message has no "unknown" field.')
def testInitRequiredKwargs(self):
proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('a'))
self.assertTrue(proto.HasField('b'))
self.assertTrue(proto.HasField('c'))
self.assertTrue(not proto.HasField('dummy2'))
self.assertEqual(1, proto.a)
self.assertEqual(1, proto.b)
self.assertEqual(1, proto.c)
  def testInitRequiredForeignKwargs(self):
    """A fully-initialized required submessage passed as a constructor
    keyword is copied into the new message and compares equal."""
    proto = unittest_pb2.TestRequiredForeign(
        optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
    self.assertTrue(proto.IsInitialized())
    self.assertTrue(proto.HasField('optional_message'))
    self.assertTrue(proto.optional_message.IsInitialized())
    self.assertTrue(proto.optional_message.HasField('a'))
    self.assertTrue(proto.optional_message.HasField('b'))
    self.assertTrue(proto.optional_message.HasField('c'))
    self.assertTrue(not proto.optional_message.HasField('dummy2'))
    # The stored submessage equals an independently built equivalent.
    self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
                     proto.optional_message)
    self.assertEqual(1, proto.optional_message.a)
    self.assertEqual(1, proto.optional_message.b)
    self.assertEqual(1, proto.optional_message.c)
def testInitRepeatedKwargs(self):
proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertEqual(1, proto.repeated_int32[0])
self.assertEqual(2, proto.repeated_int32[1])
self.assertEqual(3, proto.repeated_int32[2])
class OptionsTest(unittest.TestCase):
  """Tests for message-level and field-level option accessors."""

  def testMessageOptions(self):
    """The message_set_wire_format option is visible via DESCRIPTOR."""
    mset = unittest_mset_pb2.TestMessageSet()
    self.assertEqual(True,
                     mset.DESCRIPTOR.GetOptions().message_set_wire_format)
    plain = unittest_pb2.TestAllTypes()
    self.assertEqual(False,
                     plain.DESCRIPTOR.GetOptions().message_set_wire_format)

  def testPackedOptions(self):
    """The `packed` field option is exposed per field descriptor."""
    unpacked = unittest_pb2.TestAllTypes()
    unpacked.optional_int32 = 1
    unpacked.optional_double = 3.0
    for descriptor, _ in unpacked.ListFields():
      self.assertEqual(False, descriptor.GetOptions().packed)

    packed = unittest_pb2.TestPackedTypes()
    packed.packed_int32.append(1)
    packed.packed_double.append(3.0)
    for descriptor, _ in packed.ListFields():
      self.assertEqual(True, descriptor.GetOptions().packed)
      # Packed fields are necessarily repeated.
      self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
                       descriptor.label)
if __name__ == '__main__':
  # Discover and run every TestCase in this module when executed directly.
  unittest.main()
|
laurianed/scheduling | refs/heads/master | dist/scripts/ec2/params.py | 16 | #!/usr/bin/python
########################################################################### 80 #
#
# params.py
#
# reads instance user-data and meta-data to build parts
# of the Java command required to run the node.
# the other part is environment specific and can hardly be guessed from here
#
import urllib2
import re  # NOTE(review): appears unused in this script -- confirm before removing
import random  # NOTE(review): appears unused -- confirm before removing
# user-data contains the command to launch built on the rm side
# (169.254.169.254 is the EC2 instance metadata service; only reachable
# from inside a running instance).
data = urllib2.urlopen("http://169.254.169.254/1.0/user-data").read()
# passing through the NAT requires knowing the public IP
ip = urllib2.urlopen("http://169.254.169.254/2009-04-04/" +
    "meta-data/public-ipv4").read()
# Emit the combined Java options: the RM-built command plus the public
# hostname override needed to work through the NAT.
print data +\
    " -Dproactive.hostname=" + ip
|
stvstnfrd/edx-platform | refs/heads/master | import_shims/lms/course_api/blocks/transformers/tests/test_milestones.py | 2 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
# Warn callers that this module path is deprecated, then re-export
# everything from the relocated module so old imports keep working.
warn_deprecated_import('course_api.blocks.transformers.tests.test_milestones', 'lms.djangoapps.course_api.blocks.transformers.tests.test_milestones')
from lms.djangoapps.course_api.blocks.transformers.tests.test_milestones import *
|
ujdhesa/unisubs | refs/heads/staging | apps/socialauth/models.py | 6 | from django.db import models
from auth.models import CustomUser as User
class AuthMeta(models.Model):
    """Per-user authentication metadata.

    Records which external provider a user signed in with, and whether
    the e-mail address and profile were completed afterwards.
    """
    # Field declaration order preserved: Django uses it for form ordering.
    user = models.OneToOneField(User)
    provider = models.CharField(max_length=30)
    is_email_filled = models.BooleanField(default=False)
    is_profile_modified = models.BooleanField(default=False)

    def __unicode__(self):
        return '%s - %s' % (self.user, self.provider)
class OpenidProfile(models.Model):
    """Associates a User with an OpenID identity."""
    # Field declaration order preserved: Django uses it for form ordering.
    openid_key = models.CharField(max_length=200, unique=True)
    user = models.ForeignKey(User)
    is_username_valid = models.BooleanField(default=False)
    # Values which we get from openid.sreg
    email = models.EmailField()
    nickname = models.CharField(max_length=100)

    def __unicode__(self):
        return unicode(self.openid_key)

    # NOTE(review): __repr__ returning a unicode object is unusual for
    # Python 2 (repr conventionally returns str) -- preserved as-is.
    def __repr__(self):
        return unicode(self.openid_key)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.