gt
stringclasses
1 value
context
stringlengths
2.49k
119k
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.polling.base_polling import LROBasePolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, IO, Iterable, List, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class ScenesOperations(object): """ScenesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.agrifood.farming.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, farmer_id, # type: str boundary_id, # type: str provider="Microsoft", # type: str source="Sentinel_2_L2A", # type: Optional[str] start_date_time=None, # type: Optional[datetime.datetime] end_date_time=None, # type: Optional[datetime.datetime] max_cloud_coverage_percentage=100, # type: Optional[float] max_dark_pixel_coverage_percentage=100, # type: Optional[float] image_names=None, # type: Optional[List[str]] image_resolutions=None, # type: Optional[List[float]] image_formats=None, # type: Optional[List[str]] max_page_size=50, # type: Optional[int] skip_token=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> Iterable["_models.SceneListResponse"] """Returns a paginated list of scene resources. :param farmer_id: FarmerId. :type farmer_id: str :param boundary_id: BoundaryId. :type boundary_id: str :param provider: Provider name of scene data. :type provider: str :param source: Source name of scene data, default value Sentinel_2_L2A (Sentinel 2 L2A). :type source: str :param start_date_time: Scene start UTC datetime (inclusive), sample format: yyyy-MM-ddThh:mm:ssZ. :type start_date_time: ~datetime.datetime :param end_date_time: Scene end UTC datetime (inclusive), sample format: yyyy-MM-dThh:mm:ssZ. :type end_date_time: ~datetime.datetime :param max_cloud_coverage_percentage: Filter scenes with cloud coverage percentage less than max value. Range [0 to 100.0]. :type max_cloud_coverage_percentage: float :param max_dark_pixel_coverage_percentage: Filter scenes with dark pixel coverage percentage less than max value. Range [0 to 100.0]. :type max_dark_pixel_coverage_percentage: float :param image_names: List of image names to be filtered. :type image_names: list[str] :param image_resolutions: List of image resolutions in meters to be filtered. 
:type image_resolutions: list[float] :param image_formats: List of image formats to be filtered. :type image_formats: list[str] :param max_page_size: Maximum number of items needed (inclusive). Minimum = 10, Maximum = 1000, Default value = 50. :type max_page_size: int :param skip_token: Skip token for getting next set of results. :type skip_token: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SceneListResponse or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.SceneListResponse] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SceneListResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-03-31-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['provider'] = self._serialize.query("provider", provider, 'str') query_parameters['farmerId'] = self._serialize.query("farmer_id", farmer_id, 'str') query_parameters['boundaryId'] = self._serialize.query("boundary_id", boundary_id, 'str') if source is not None: query_parameters['source'] = self._serialize.query("source", source, 'str') if start_date_time is not None: query_parameters['startDateTime'] = self._serialize.query("start_date_time", start_date_time, 'iso-8601') if end_date_time is 
not None: query_parameters['endDateTime'] = self._serialize.query("end_date_time", end_date_time, 'iso-8601') if max_cloud_coverage_percentage is not None: query_parameters['maxCloudCoveragePercentage'] = self._serialize.query("max_cloud_coverage_percentage", max_cloud_coverage_percentage, 'float', maximum=100, minimum=0) if max_dark_pixel_coverage_percentage is not None: query_parameters['maxDarkPixelCoveragePercentage'] = self._serialize.query("max_dark_pixel_coverage_percentage", max_dark_pixel_coverage_percentage, 'float', maximum=100, minimum=0) if image_names is not None: query_parameters['imageNames'] = [self._serialize.query("image_names", q, 'str') if q is not None else '' for q in image_names] if image_resolutions is not None: query_parameters['imageResolutions'] = [self._serialize.query("image_resolutions", q, 'float') if q is not None else '' for q in image_resolutions] if image_formats is not None: query_parameters['imageFormats'] = [self._serialize.query("image_formats", q, 'str') if q is not None else '' for q in image_formats] if max_page_size is not None: query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10) if skip_token is not None: query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('SceneListResponse', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = 
cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/scenes'} # type: ignore def _create_satellite_data_ingestion_job_initial( self, job_id, # type: str job=None, # type: Optional["_models.SatelliteDataIngestionJob"] **kwargs # type: Any ): # type: (...) -> "_models.SatelliteDataIngestionJob" cls = kwargs.pop('cls', None) # type: ClsType["_models.SatelliteDataIngestionJob"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-03-31-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_satellite_data_ingestion_job_initial.metadata['url'] # type: ignore path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: 
Dict[str, Any] if job is not None: body_content = self._serialize.body(job, 'SatelliteDataIngestionJob') else: body_content = None body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SatelliteDataIngestionJob', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_satellite_data_ingestion_job_initial.metadata = {'url': '/scenes/satellite/ingest-data/{jobId}'} # type: ignore def begin_create_satellite_data_ingestion_job( self, job_id, # type: str job=None, # type: Optional["_models.SatelliteDataIngestionJob"] **kwargs # type: Any ): # type: (...) -> LROPoller["_models.SatelliteDataIngestionJob"] """Create a satellite data ingestion job. :param job_id: JobId provided by user. :type job_id: str :param job: Job parameters supplied by user. :type job: ~azure.agrifood.farming.models.SatelliteDataIngestionJob :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be LROBasePolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either SatelliteDataIngestionJob or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.agrifood.farming.models.SatelliteDataIngestionJob] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.SatelliteDataIngestionJob"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_satellite_data_ingestion_job_initial( job_id=job_id, job=job, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('SatelliteDataIngestionJob', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), } if polling is True: polling_method = LROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_satellite_data_ingestion_job.metadata = {'url': '/scenes/satellite/ingest-data/{jobId}'} # type: ignore def get_satellite_data_ingestion_job_details( self, job_id, # type: str **kwargs # type: Any ): # type: (...) -> "_models.SatelliteDataIngestionJob" """Get a satellite data ingestion job. 
:param job_id: ID of the job. :type job_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SatelliteDataIngestionJob, or the result of cls(response) :rtype: ~azure.agrifood.farming.models.SatelliteDataIngestionJob :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SatelliteDataIngestionJob"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-03-31-preview" accept = "application/json" # Construct URL url = self.get_satellite_data_ingestion_job_details.metadata['url'] # type: ignore path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SatelliteDataIngestionJob', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_satellite_data_ingestion_job_details.metadata = {'url': '/scenes/satellite/ingest-data/{jobId}'} # type: ignore def 
download( self, file_path, # type: str **kwargs # type: Any ): # type: (...) -> IO """Downloads and returns file stream as response for the given input filePath. :param file_path: cloud storage path of scene file. :type file_path: str :keyword callable cls: A custom type or function that will be passed the direct response :return: IO, or the result of cls(response) :rtype: IO :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[IO] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-03-31-preview" accept = "application/octet-stream, application/json" # Construct URL url = self.download.metadata['url'] # type: ignore path_format_arguments = { 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['filePath'] = self._serialize.query("file_path", file_path, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error) deserialized = response.stream_download(self._client._pipeline) if cls: return cls(pipeline_response, deserialized, {}) return deserialized download.metadata = {'url': 
'/scenes/downloadFiles'} # type: ignore
""" .. todo:: WRITEME """ import cPickle import pickle import logging import numpy as np import os import time import warnings import sys from pylearn2.utils.string_utils import preprocess from pylearn2.utils.mem import improve_memory_error_message from cPickle import BadPickleGet io = None hdf_reader = None import struct from pylearn2.utils.string_utils import match import shutil logger = logging.getLogger(__name__) def raise_cannot_open(path): """ .. todo:: WRITEME """ pieces = path.split('/') for i in xrange(1,len(pieces)+1): so_far = '/'.join(pieces[0:i]) if not os.path.exists(so_far): if i == 1: if so_far == '': continue raise IOError('Cannot open '+path+' ('+so_far+' does not exist)') parent = '/'.join(pieces[0:i-1]) bad = pieces[i-1] if not os.path.isdir(parent): raise IOError("Cannot open "+path+" because "+parent+" is not a directory.") candidates = os.listdir(parent) if len(candidates) == 0: raise IOError("Cannot open "+path+" because "+parent+" is empty.") if len(candidates) > 100: # Don't attempt to guess the right name if the directory is huge raise IOError("Cannot open "+path+" but can open "+parent+".") if os.path.islink(path): raise IOError(path + " appears to be a symlink to a non-existent file") raise IOError("Cannot open "+path+" but can open "+parent+". Did you mean "+match(bad,candidates)+" instead of "+bad+"?") # end if # end for assert False def load(filepath, recurse_depth=0, retry=True): """ .. todo:: WRITEME .. todo:: Refactor to hide recurse_depth from end users Parameters ---------- filepath : str A path to a file to load. Should be a pickle, Matlab, or NumPy file; or a .txt or .amat file that numpy.loadtxt can load. recurse_depth : int, optional End users should not use this argument. It is used by the function itself to implement the `retry` option recursively. retry : bool, optional If True, will make a handful of attempts to load the file before giving up. 
This can be useful if you are for example calling show_weights.py on a file that is actively being written to by a training script--sometimes the load attempt might fail if the training script writes at the same time show_weights tries to read, but if you try again after a few seconds you should be able to open the file. Returns ------- loaded_object : object The object that was stored in the file. """ try: import joblib joblib_available = True except ImportError: joblib_available = False if recurse_depth == 0: filepath = preprocess(filepath) if filepath.endswith('.npy') or filepath.endswith('.npz'): return np.load(filepath) if filepath.endswith('.amat') or filepath.endswith('txt'): try: return np.loadtxt(filepath) except Exception: logger.exception("{0} cannot be loaded by serial.load (trying to" " use np.loadtxt)".format(filepath)) raise if filepath.endswith('.mat'): global io if io is None: import scipy.io io = scipy.io try: return io.loadmat(filepath) except NotImplementedError, nei: if str(nei).find('HDF reader') != -1: global hdf_reader if hdf_reader is None: import h5py hdf_reader = h5py return hdf_reader.File(filepath, 'r') else: raise #this code should never be reached assert False def exponential_backoff(): if recurse_depth > 9: logger.info('Max number of tries exceeded while trying to open ' '{0}'.format(filepath)) logger.info('attempting to open via reading string') f = open(filepath, 'rb') lines = f.readlines() f.close() content = ''.join(lines) return cPickle.loads(content) else: nsec = 0.5 * (2.0 ** float(recurse_depth)) logger.info("Waiting {0} seconds and trying again".format(nsec)) time.sleep(nsec) return load(filepath, recurse_depth + 1, retry) try: if not joblib_available: with open(filepath, 'rb') as f: obj = cPickle.load(f) else: try: obj = joblib.load(filepath) except Exception, e: if os.path.exists(filepath) and not os.path.isdir(filepath): raise raise_cannot_open(filepath) except MemoryError as e: # We want to explicitly catch this 
exception because for MemoryError # __str__ returns the empty string, so some of our default printouts # below don't make a lot of sense. # Also, a lot of users assume any exception is a bug in the library, # so we can cut down on mail to pylearn-users by adding a message # that makes it clear this exception is caused by their machine not # meeting requirements. if os.path.splitext(filepath)[1] == ".pkl": improve_memory_error_message(e, "You do not have enough memory to open %s \n" " + Try using numpy.{save,load} (file with extension '.npy') " "to save your file. It uses less memory when reading and " "writing files than pickled files." % filepath) else: improve_memory_error_message(e, "You do not have enough memory to open %s" % filepath) except BadPickleGet, e: logger.exception('Failed to open {0} due to BadPickleGet ' 'with exception string {1}'.format(filepath, e)) if not retry: raise obj = exponential_backoff() except EOFError, e: logger.exception('Failed to open {0} due to EOFError ' 'with exception string {1}'.format(filepath, e)) if not retry: raise obj = exponential_backoff() except ValueError, e: logger.exception('Failed to open {0} due to ValueError ' 'with string {1}'.format(filepath, e)) if not retry: raise obj = exponential_backoff() except Exception, e: #assert False exc_str = str(e) if len(exc_str) > 0: import pdb tb = pdb.traceback.format_exc() raise Exception("Couldn't open '" + str(filepath) + "' due to: " + str(type(e)) + ', ' + str(e) + ". Orig traceback:\n" + tb) else: logger.exception("Couldn't open '{0}' and exception has no string." "Opening it again outside the try/catch " "so you can see whatever error it prints " "on its own.".format(filepath)) f = open(filepath, 'rb') obj = cPickle.load(f) f.close() #if the object has no yaml_src, we give it one that just says it #came from this file. 
could cause trouble if you save obj again #to a different location if not hasattr(obj,'yaml_src'): try: obj.yaml_src = '!pkl: "'+os.path.abspath(filepath)+'"' except: pass return obj def save(filepath, obj, on_overwrite = 'ignore'): """ Serialize `object` to a file denoted by `filepath`. Parameters ---------- filepath : str A filename. If the suffix is `.joblib` and joblib can be imported, `joblib.dump` is used in place of the regular pickling mechanisms; this results in much faster saves by saving arrays as separate .npy files on disk. If the file suffix is `.npy` than `numpy.save` is attempted on `obj`. Otherwise, (c)pickle is used. obj : object A Python object to be serialized. on_overwrite : str, optional A string specifying what to do if the file already exists. Possible values include: - "ignore" : Just overwrite the existing file. - "backup" : Make a backup copy of the file (<filepath>.bak). Save the new copy. Then delete the backup copy. This allows recovery of the old version of the file if saving the new one fails. """ filepath = preprocess(filepath) if os.path.exists(filepath): if on_overwrite == 'backup': backup = filepath + '.bak' shutil.move(filepath, backup) save(filepath, obj) try: os.remove(backup) except Exception, e: warnings.warn("Got an error while traing to remove "+backup+":"+str(e)) return else: assert on_overwrite == 'ignore' try: _save(filepath, obj) except RuntimeError, e: """ Sometimes for large theano graphs, pickle/cPickle exceed the maximum recursion depth. This seems to me like a fundamental design flaw in pickle/cPickle. The workaround I employ here is the one recommended to someone who had a similar problem on stackexchange: http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle Obviously this does not scale and could cause a crash but I don't see another solution short of writing our own implementation of pickle. 
""" if str(e).find('recursion') != -1: logger.warning('pylearn2.utils.save encountered the following ' 'error: ' + str(e) + '\nAttempting to resolve this error by calling ' + 'sys.setrecusionlimit and retrying') old_limit = sys.getrecursionlimit() try: sys.setrecursionlimit(50000) _save(filepath, obj) finally: sys.setrecursionlimit(old_limit) def get_pickle_protocol(): """ Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate. """ try: protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL'] except KeyError: # If not defined, we default to 0 because this is the default # protocol used by cPickle.dump (and because it results in # maximum portability) protocol_str = '0' if protocol_str == 'pickle.HIGHEST_PROTOCOL': return pickle.HIGHEST_PROTOCOL return int(protocol_str) def _save(filepath, obj): """ .. todo:: WRITEME """ try: import joblib joblib_available = True except ImportError: joblib_available = False if filepath.endswith('.npy'): np.save(filepath, obj) return # This is dumb # assert filepath.endswith('.pkl') save_dir = os.path.dirname(filepath) # Handle current working directory case. if save_dir == '': save_dir = '.' if not os.path.exists(save_dir): os.makedirs(save_dir) if os.path.exists(save_dir) and not os.path.isdir(save_dir): raise IOError("save path %s exists, not a directory" % save_dir) elif not os.access(save_dir, os.W_OK): raise IOError("permission error creating %s" % filepath) try: if joblib_available and filepath.endswith('.joblib'): joblib.dump(obj, filepath) else: if filepath.endswith('.joblib'): warnings.warn('Warning: .joblib suffix specified but joblib ' 'unavailable. 
Using ordinary pickle.') with open(filepath, 'wb') as filehandle: cPickle.dump(obj, filehandle, get_pickle_protocol()) except Exception, e: logger.exception("cPickle has failed to write an object to " "{0}".format(filepath)) if str(e).find('maximum recursion depth exceeded') != -1: raise try: logger.info('retrying with pickle') with open(filepath, "wb") as f: pickle.dump(obj, f) except Exception, e2: if str(e) == '' and str(e2) == '': logger.exception('neither cPickle nor pickle could write to ' '{0}'.format(filepath)) logger.exception( 'moreover, neither of them raised an exception that ' 'can be converted to a string' ) logger.exception( 'now re-attempting to write with cPickle outside the ' 'try/catch loop so you can see if it prints anything ' 'when it dies' ) with open(filepath, 'wb') as f: cPickle.dump(obj, f, get_pickle_protocol()) logger.info('Somehow or other, the file write worked once ' 'we quit using the try/catch.') else: if str(e2) == 'env': raise import pdb tb = pdb.traceback.format_exc() raise IOError(str(obj) + ' could not be written to '+ str(filepath) + ' by cPickle due to ' + str(e) + ' nor by pickle due to ' + str(e2) + '. \nTraceback '+ tb) logger.warning('{0} was written by pickle instead of cPickle, due to ' '{1} (perhaps your object' ' is really big?)'.format(filepath, e)) def clone_via_serialize(obj): """ .. todo:: WRITEME """ s = cPickle.dumps(obj, get_pickle_protocol()) return cPickle.loads(s) def to_string(obj): """ .. todo:: WRITEME """ return cPickle.dumps(obj, get_pickle_protocol()) def from_string(s): """ .. todo:: WRITEME """ return cPickle.loads(s) def mkdir(filepath): """ Make a directory. Should succeed even if it needs to make more than one directory and nest subdirectories to do so. Raises an error if the directory can't be made. Does not raise an error if the directory already exists. 
Parameters ---------- filepath : WRITEME """ try: os.makedirs(filepath) except: if not os.path.isdir(filepath): raise def read_int( fin, n = 1): """ .. todo:: WRITEME """ if n == 1: s = fin.read(4) if len(s) != 4: raise ValueError('fin did not contain 4 bytes') return struct.unpack('i', s)[0] else: rval = [] for i in xrange(n): rval.append(read_int(fin)) return rval #dictionary to convert lush binary matrix magic numbers #to dtypes lush_magic = { 507333717 : 'uint8', 507333716 : 'int32', 507333713 : 'float32', 507333715 : 'float64' } def read_bin_lush_matrix(filepath): """ .. todo:: WRITEME """ f = open(filepath,'rb') try: magic = read_int(f) except ValueError: raise ValueError("Couldn't read magic number") ndim = read_int(f) if ndim == 0: shape = () else: shape = read_int(f, max(3, ndim)) total_elems = 1 for dim in shape: total_elems *= dim try: dtype = lush_magic[magic] except KeyError: raise ValueError('Unrecognized lush magic number '+str(magic)) rval = np.fromfile(file = f, dtype = dtype, count = total_elems) excess = f.read(-1) if excess != '': raise ValueError(str(len(excess))+' extra bytes found at end of file.' ' This indicates mismatch between header and content') rval = rval.reshape(*shape) f.close() return rval def load_train_file(config_file_path, environ=None): """ Loads and parses a yaml file for a Train object. Publishes the relevant training environment variables Parameters ---------- config_file_path : str Path to a config file containing a YAML string describing a pylearn2.train.Train object environ : dict, optional A dictionary used for ${FOO} substitutions in addition to environment variables when parsing the YAML file. If a key appears both in `os.environ` and this dictionary, the value in this dictionary is used. 
Returns ------- Object described by the YAML string stored in the config file """ from pylearn2.config import yaml_parse suffix_to_strip = '.yaml' # Publish environment variables related to file name if config_file_path.endswith(suffix_to_strip): config_file_full_stem = config_file_path[0:-len(suffix_to_strip)] else: config_file_full_stem = config_file_path os.environ["PYLEARN2_TRAIN_FILE_FULL_STEM"] = config_file_full_stem directory = config_file_path.split('/')[:-1] directory = '/'.join(directory) if directory != '': directory += '/' os.environ["PYLEARN2_TRAIN_DIR"] = directory os.environ["PYLEARN2_TRAIN_BASE_NAME"] = config_file_path.split('/')[-1] os.environ["PYLEARN2_TRAIN_FILE_STEM"] = config_file_full_stem.split('/')[-1] return yaml_parse.load_path(config_file_path, environ=environ)
"""fxproc.py @ https://github.com/coderand/pyfxproc Direct3D .fx file interface for GPU based data processing. Created by Dmitry "AND" Andreev 2013-2021. License Creative Commons Zero v1.0 Universal. """ __version__ = '0.1.9' __all__ = ["Effect"] import os import sys import atexit import ctypes from ctypes import WINFUNCTYPE, Structure from ctypes.wintypes import * HRESULT = DWORD # Direct3D9 constants D3D_SDK_VERSION = 32 D3DADAPTER_DEFAULT = 0 D3DDEVTYPE_HAL = 1 D3DDEVTYPE_REF = 2 D3DCREATE_MULTITHREADED = 0x00000004 D3DCREATE_SOFTWARE_VERTEXPROCESSING = 0x00000020 D3DCREATE_HARDWARE_VERTEXPROCESSING = 0x00000040 D3DCREATE_MIXED_VERTEXPROCESSING = 0x00000080 D3DPT_TRIANGLELIST = 4 D3DPT_TRIANGLESTRIP = 5 D3DSWAPEFFECT = UINT D3DSWAPEFFECT_DISCARD = 1 D3DX_DEFAULT = UINT(-1) D3DX_DEFAULT_NONPOW2 = UINT(-2) D3DXFX_NOT_CLONEABLE = (1 << 11) D3DXSHADER_SKIPOPTIMIZATION = (1 << 2) D3DPOOL = UINT D3DPOOL_DEFAULT = 0 D3DPOOL_MANAGED = 1 D3DPOOL_SYSTEMMEM = 2 D3DUSAGE_RENDERTARGET = 0x00000001 D3DUSAGE_DEPTHSTENCIL = 0x00000002 D3DUSAGE_DYNAMIC = 0x00000200 D3DCLEAR_TARGET = 0x00000001 D3DCUBEMAP_FACE_POSITIVE_X = 0 D3DCUBEMAP_FACE_NEGATIVE_X = 1 D3DCUBEMAP_FACE_POSITIVE_Y = 2 D3DCUBEMAP_FACE_NEGATIVE_Y = 3 D3DCUBEMAP_FACE_POSITIVE_Z = 4 D3DCUBEMAP_FACE_NEGATIVE_Z = 5 D3DRESOURCETYPE = UINT D3DRTYPE_SURFACE = 1 D3DRTYPE_VOLUME = 2 D3DRTYPE_TEXTURE = 3 D3DRTYPE_VOLUMETEXTURE = 4 D3DRTYPE_CUBETEXTURE = 5 D3DRTYPE_VERTEXBUFFER = 6 D3DRTYPE_INDEXBUFFER = 7 D3DQUERYTYPE_TIMESTAMP = 10 D3DISSUE_END = (1 << 0) D3DGETDATA_FLUSH = (1 << 0) class D3DFORMAT : values = [ ("UNKNOWN", 0), ("R8G8B8", 20), ("A8R8G8B8", 21), ("X8R8G8B8", 22), ("R5G6B5", 23), ("X1R5G5B5", 24), ("A1R5G5B5", 25), ("A4R4G4B4", 26), ("R3G3B2", 27), ("A8", 28), ("A8R3G3B2", 29), ("X4R4G4B4", 30), ("A2B10G10R10", 31), ("A8B8G8R8", 32), ("X8B8G8R8", 33), ("G16R16", 34), ("A2R10G10B10", 35), ("A16B16G16R16", 36), ("A8P8", 40), ("P8", 41), ("L8", 50), ("A8L8", 51), ("A4L4", 52), ("V8U8", 60), ("L6V5U5", 61), 
("X8L8V8U8", 62), ("Q8W8V8U8", 63), ("V16U16", 64), ("A2W10V10U10", 67), ("L16", 81), ("DXT1", 0x31545844), ("DXT2", 0x32545844), ("DXT3", 0x33545844), ("DXT4", 0x34545844), ("DXT5", 0x35545844), # Floating point surface formats # s10e5 formats (16-bits per channel) ("R16F", 111), ("G16R16F", 112), ("A16B16G16R16F", 113), # IEEE s23e8 formats (32-bits per channel) ("R32F", 114), ("G32R32F", 115), ("A32B32G32R32F", 116), ] by_num = {} by_str = {} for x in values : by_num[x[1]] = x[0] by_str[x[0]] = x[1] class D3DXIMAGE_FILEFORMAT : values = [ ("BMP", 0), ("JPG", 1), ("TGA", 2), ("PNG", 3), ("DDS", 4), ("PPM", 5), ("DIB", 6), ("HDR", 7), ("PFM", 8), ] by_num = {} by_str = {} for x in values : name = x[0].lower() value = x[1] by_num[value] = name by_str[name] = value D3DMULTISAMPLE_TYPE = UINT class D3DPRESENT_PARAMETERS(Structure): _fields_ = [ ('BackBufferWidth', UINT), ('BackBufferHeight', UINT), ('BackBufferFormat', UINT), # D3DFORMAT ('BackBufferCount', UINT), ('MultiSampleType', D3DMULTISAMPLE_TYPE), ('MultiSampleQuality', DWORD), ('SwapEffect', D3DSWAPEFFECT), ('hDeviceWindow', HWND), ('Windowed', BOOL), ('EnableAutoDepthStencil', BOOL), ('AutoDepthStencilFormat', UINT), # D3DFORMAT ('Flags', DWORD), ('FullScreen_RefreshRateInHz', UINT), ('PresentationInterval', UINT), ] class D3DXIMAGE_INFO(Structure): _fields_ = [ ('Width', UINT), ('Height', UINT), ('Depth', UINT), ('MipLevels', UINT), ('Format', UINT), # D3DFORMAT ('ResourceType', D3DRESOURCETYPE), ('ImageFileFormat', UINT), # D3DXIMAGE_FILEFORMAT ] class D3DSURFACE_DESC(Structure): _fields_ = [ ('Format', UINT), # D3DFORMAT ('Type', D3DRESOURCETYPE), ('Usage', DWORD), ('Pool', D3DPOOL), ('MultiSampleType', D3DMULTISAMPLE_TYPE), ('MultiSampleQuality', DWORD), ('Width', UINT), ('Height', UINT), ] class D3DVOLUME_DESC(Structure): _fields_ = [ ('Format', UINT), # D3DFORMAT ('Type', D3DRESOURCETYPE), ('Usage', DWORD), ('Pool', D3DPOOL), ('Width', UINT), ('Height', UINT), ('Depth', UINT), ] class 
D3DXVECTOR4(Structure): _fields_ = [ ('x', FLOAT), ('y', FLOAT), ('z', FLOAT), ('w', FLOAT), ] class TRI_VTX(Structure): FVF = 0x00000104 # D3DFVF_XYZRHW | D3DFVF_TEXCOORDSIZE2( 0 ) | D3DFVF_TEX1 _fields_ = [ ('x0', FLOAT), ('y0', FLOAT), ('z0', FLOAT), ('w0', FLOAT), ('u0', FLOAT), ('v0', FLOAT), ('x1', FLOAT), ('y1', FLOAT), ('z1', FLOAT), ('w1', FLOAT), ('u1', FLOAT), ('v1', FLOAT), ('x2', FLOAT), ('y2', FLOAT), ('z2', FLOAT), ('w2', FLOAT), ('u2', FLOAT), ('v2', FLOAT), ] class QUAD_VTX(Structure): FVF = 0x00000104 # D3DFVF_XYZRHW | D3DFVF_TEXCOORDSIZE2( 0 ) | D3DFVF_TEX1 _fields_ = [ ('x0', FLOAT), ('y0', FLOAT), ('z0', FLOAT), ('w0', FLOAT), ('u0', FLOAT), ('v0', FLOAT), ('x1', FLOAT), ('y1', FLOAT), ('z1', FLOAT), ('w1', FLOAT), ('u1', FLOAT), ('v1', FLOAT), ('x2', FLOAT), ('y2', FLOAT), ('z2', FLOAT), ('w2', FLOAT), ('u2', FLOAT), ('v2', FLOAT), ('x3', FLOAT), ('y3', FLOAT), ('z3', FLOAT), ('w3', FLOAT), ('u3', FLOAT), ('v3', FLOAT), ] # D3D9 Function Prototypes COM_Release = WINFUNCTYPE(UINT)(2, "COM_Release") D3D9_CreateDevice = WINFUNCTYPE(HRESULT, UINT, UINT, HWND, DWORD, LPVOID, LPVOID)(16, "D3D9_CreateDevice") IDirect3DDevice9_CreateTexture = WINFUNCTYPE(HRESULT, UINT, UINT, UINT, DWORD, UINT, UINT, LPVOID, LPVOID)(23, "IDirect3DDevice9_CreateTexture") IDirect3DDevice9_CreateVolumeTexture = WINFUNCTYPE(HRESULT, UINT, UINT, UINT, UINT, DWORD, UINT, UINT, LPVOID, LPVOID)(24, "IDirect3DDevice9_CreateVolumeTexture") IDirect3DDevice9_CreateCubeTexture = WINFUNCTYPE(HRESULT, UINT, UINT, DWORD, UINT, UINT, LPVOID, LPVOID)(25, "IDirect3DDevice9_CreateCubeTexture") IDirect3DDevice9_SetRenderTarget = WINFUNCTYPE(HRESULT, DWORD, LPVOID)(37, "IDirect3DDevice9_SetRenderTarget") IDirect3DDevice9_BeginScene = WINFUNCTYPE(HRESULT)(41, "IDirect3DDevice9_BeginScene") IDirect3DDevice9_EndScene = WINFUNCTYPE(HRESULT)(42, "IDirect3DDevice9_EndScene") IDirect3DDevice9_Clear = WINFUNCTYPE(HRESULT, DWORD, LPVOID, DWORD, DWORD, FLOAT, DWORD)(43, "IDirect3DDevice9_Clear") 
# More vtable-index COM method prototypes (see note above the first batch):
# IDirect3DDevice9 drawing/query methods, IDirect3DQuery9, base-texture and
# per-type texture accessors, ID3DXBuffer and ID3DXEffect methods.
IDirect3DDevice9_DrawPrimitiveUP = WINFUNCTYPE(HRESULT, UINT, UINT, LPVOID, UINT)(83, "IDirect3DDevice9_DrawPrimitiveUP")
IDirect3DDevice9_SetFVF = WINFUNCTYPE(HRESULT, DWORD)(89, "IDirect3DDevice9_SetFVF")
IDirect3DDevice9_CreateQuery = WINFUNCTYPE(HRESULT, DWORD, LPVOID)(118, "IDirect3DDevice9_CreateQuery")
IDirect3DQuery9_Issue = WINFUNCTYPE(HRESULT, DWORD)(6, "IDirect3DQuery9_Issue")
IDirect3DQuery9_GetData = WINFUNCTYPE(HRESULT, LPVOID, DWORD, DWORD)(7, "IDirect3DQuery9_GetData")
Direct3DBaseTexture9_GetType = WINFUNCTYPE(DWORD)(10, "Direct3DBaseTexture9_GetType")
Direct3DBaseTexture9_GetLevelCount = WINFUNCTYPE(DWORD)(13, "Direct3DBaseTexture9_GetLevelCount")
IDirect3DTexture9_GetLevelDesc = WINFUNCTYPE(DWORD, UINT, LPVOID)(17, "IDirect3DTexture9_GetLevelDesc")
IDirect3DTexture9_GetSurfaceLevel = WINFUNCTYPE(DWORD, UINT, LPVOID)(18, "IDirect3DTexture9_GetSurfaceLevel")
# Cube/volume texture interfaces share the same vtable slots (17/18) for their
# per-level accessors; only the argument lists differ.
IDirect3DCubeTexture9_GetLevelDesc = WINFUNCTYPE(DWORD, UINT, LPVOID)(17, "IDirect3DCubeTexture9_GetLevelDesc")
IDirect3DCubeTexture9_GetCubeMapSurface = WINFUNCTYPE(DWORD, UINT, UINT, LPVOID)(18, "IDirect3DCubeTexture9_GetCubeMapSurface")
IDirect3DVolumeTexture9_GetLevelDesc = WINFUNCTYPE(DWORD, UINT, LPVOID)(17, "IDirect3DVolumeTexture9_GetLevelDesc")
D3DXBUFFER_GetBufferPointer = WINFUNCTYPE(LPVOID)(3, "D3DXBUFFER_GetBufferPointer")
D3DXBUFFER_GetBufferSize = WINFUNCTYPE(DWORD)(4, "D3DXBUFFER_GetBufferSize")
ID3DXEffect_SetFloat = WINFUNCTYPE(HRESULT, LPCSTR, FLOAT)(30, "ID3DXEffect_SetFloat")
ID3DXEffect_SetVector = WINFUNCTYPE(HRESULT, LPCSTR, LPVOID)(34, "ID3DXEffect_SetVector")
ID3DXEffect_SetTexture = WINFUNCTYPE(HRESULT, LPCSTR, LPVOID)(52, "ID3DXEffect_SetTexture")
ID3DXEffect_SetTechnique = WINFUNCTYPE(HRESULT, LPCSTR)(58, "ID3DXEffect_SetTechnique")
ID3DXEffect_Begin = WINFUNCTYPE(HRESULT, LPVOID, DWORD)(63, "ID3DXEffect_Begin")
ID3DXEffect_BeginPass = WINFUNCTYPE(HRESULT, UINT)(64, "ID3DXEffect_BeginPass")
ID3DXEffect_EndPass = WINFUNCTYPE(HRESULT)(66, "ID3DXEffect_EndPass")
ID3DXEffect_End = WINFUNCTYPE(HRESULT)(67, "ID3DXEffect_End")

# Windows constants
CreateWindowEx = ctypes.windll.user32.CreateWindowExA
CreateWindowEx.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, UINT, UINT, UINT, UINT, HWND, HMENU, HINSTANCE, LPVOID]
CreateWindowEx.restype = HWND
WS_OVERLAPPEDWINDOW = 0x00CF0000

# Load DLLs and import functions
d3d9_dll = ctypes.windll.LoadLibrary('d3d9.dll')
d3dx9_43_dll = None
d3dx9_43_warning = False
# D3DX ships as versioned DLLs; probe from d3dx9_43.dll downward and take the
# newest one present on the system.
for d3dx_version in range(43, 31, -1):
    try:
        d3dx9_43_dll = ctypes.windll.LoadLibrary('d3dx9_%d.dll' % (d3dx_version))
        break
    except WindowsError:
        d3dx9_43_warning = True

if not d3dx9_43_dll :
    raise Exception("Failed to find d3dx9_*.dll")

if d3dx9_43_warning :
    print("WARNING: d3dx9_43.dll not found, falling back to lower version")

# Plain (non-COM) exported functions; the ANSI ("...A") variants are used, so
# all string arguments below must be byte strings.
Direct3DCreate9 = getattr(d3d9_dll, 'Direct3DCreate9')
Direct3DCreate9.restype = LPVOID

D3DXCreateEffectFromFile = getattr(d3dx9_43_dll, 'D3DXCreateEffectFromFileA')
D3DXCreateEffectFromFile.argtypes = [LPVOID, LPCSTR, LPVOID, LPVOID, DWORD, LPVOID, LPVOID, LPVOID]
D3DXCreateEffectFromFile.restype = HRESULT

D3DXCreateEffect = getattr(d3dx9_43_dll, 'D3DXCreateEffect')
D3DXCreateEffect.argtypes = [LPVOID, LPCSTR, UINT, LPVOID, LPVOID, DWORD, LPVOID, LPVOID, LPVOID]
D3DXCreateEffect.restype = HRESULT

D3DXGetImageInfoFromFile = getattr(d3dx9_43_dll, 'D3DXGetImageInfoFromFileA')
D3DXGetImageInfoFromFile.argtypes = [LPCSTR, LPVOID]
D3DXGetImageInfoFromFile.restype = HRESULT

D3DXCreateTextureFromFileEx = getattr(d3dx9_43_dll, 'D3DXCreateTextureFromFileExA')
D3DXCreateTextureFromFileEx.argtypes = [LPVOID, LPCSTR, UINT, UINT, UINT, DWORD, UINT, UINT, DWORD, DWORD, UINT, LPVOID, LPVOID, LPVOID]
D3DXCreateTextureFromFileEx.restype = HRESULT

D3DXCreateCubeTextureFromFileEx = getattr(d3dx9_43_dll, 'D3DXCreateCubeTextureFromFileExA')
D3DXCreateCubeTextureFromFileEx.argtypes = [LPVOID, LPCSTR, UINT, UINT, DWORD, UINT, UINT, DWORD, DWORD, UINT, LPVOID, LPVOID, LPVOID]
D3DXCreateCubeTextureFromFileEx.restype = HRESULT
D3DXSaveTextureToFile = getattr(d3dx9_43_dll, 'D3DXSaveTextureToFileA')
D3DXSaveTextureToFile.argtypes = [LPCSTR, UINT, LPVOID, LPVOID]
D3DXSaveTextureToFile.restype = HRESULT

# Initialize Direct3D
# Note: this runs at import time — importing the module creates a hidden
# window and a D3D9 device as module-global state.
lpD3D9 = LPVOID(Direct3DCreate9(D3D_SDK_VERSION))
if not lpD3D9:
    raise Exception("Failed to create D3D")

# Invisible dummy window; a device needs a window handle even for off-screen work.
hWnd = CreateWindowEx(0, "STATIC".encode("ascii"), "fxproc_window".encode("ascii"), WS_OVERLAPPEDWINDOW, 0, 0, 100, 100, 0, 0, 0, 0)
if hWnd == 0:
    raise Exception("Failed to create window")

NULL = LPVOID(0)
lpDevice = LPVOID(0)
d3dpp = D3DPRESENT_PARAMETERS(Windowed=1, SwapEffect=D3DSWAPEFFECT_DISCARD)

try:
    D3D9_CreateDevice(lpD3D9, D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_MULTITHREADED | D3DCREATE_HARDWARE_VERTEXPROCESSING, ctypes.byref(d3dpp), ctypes.byref(lpDevice))
    #:TODO: Try different configurations when one fails
    #D3D9_CreateDevice(lpD3D9, D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_SOFTWARE_VERTEXPROCESSING, ctypes.byref(d3dpp), ctypes.byref(lpDevice))
    #D3D9_CreateDevice(lpD3D9, D3DADAPTER_DEFAULT, D3DDEVTYPE_REF, hWnd, D3DCREATE_HARDWARE_VERTEXPROCESSING, ctypes.byref(d3dpp), ctypes.byref(lpDevice))
except:
    raise Exception("Failed to create D3D device")

# Timestamp query used by Effect.flush() to force the GPU command buffer to
# drain; best-effort — a failure here just disables flushing.
lpFlushQuery = LPVOID(0)
try:
    IDirect3DDevice9_CreateQuery(lpDevice, D3DQUERYTYPE_TIMESTAMP, ctypes.byref(lpFlushQuery))
except:
    pass


class Texture :
    """Thin wrapper around an IDirect3D*Texture9 pointer.

    Caches width/height/format/level-count (and depth for volume textures)
    from the native surface description, and registers the raw pointer in
    ``Texture.all_textures`` so ``_cleanup`` can release it at exit.
    """

    # Raw texture pointers still owned by the module (released at exit).
    all_textures = []

    def __init__(self, d3d_texture, name = ""):
        # d3d_texture: LPVOID COM pointer to a 2D, cube, or volume texture.
        assert(d3d_texture)
        desc = D3DSURFACE_DESC()
        slices = 0  # non-zero only for volume textures
        ttype = Direct3DBaseTexture9_GetType(d3d_texture)
        if ttype == D3DRTYPE_TEXTURE:
            IDirect3DTexture9_GetLevelDesc(d3d_texture, 0, ctypes.byref(desc))
        elif ttype == D3DRTYPE_CUBETEXTURE:
            IDirect3DCubeTexture9_GetLevelDesc(d3d_texture, 0, ctypes.byref(desc))
        elif ttype == D3DRTYPE_VOLUMETEXTURE:
            # Volume textures describe themselves via D3DVOLUME_DESC; copy the
            # shared fields into the surface desc so the code below is uniform.
            volume_desc = D3DVOLUME_DESC()
            IDirect3DVolumeTexture9_GetLevelDesc(d3d_texture, 0, ctypes.byref(volume_desc))
            slices = volume_desc.Depth
            desc.Width = volume_desc.Width
            desc.Height = volume_desc.Height
            desc.Format = volume_desc.Format
        else:
            raise TypeError("Unknown resource type")

        format_name = D3DFORMAT.by_num[desc.Format]

        self.d3d_texture = d3d_texture  # raw COM pointer
        self.format = format_name       # format name string, e.g. "A8R8G8B8"
        self.width = desc.Width
        self.height = desc.Height
        self.levels = Direct3DBaseTexture9_GetLevelCount(d3d_texture)
        self.slices = slices
        self.name = name
        Texture.all_textures.append(d3d_texture)

    def __del__(self):
        # Release the COM reference unless _cleanup already did.
        if self.d3d_texture and (self.d3d_texture in Texture.all_textures):
            COM_Release(self.d3d_texture)
            Texture.all_textures.remove(self.d3d_texture)

    def __str__(self):
        return (
            "width=" + str(self.width) +
            " height=" + str(self.height) +
            " format=" + self.format +
            " levels=" + str(self.levels) +
            " slices=" + str(self.slices) +
            " d3d_texture=" + hex(self.d3d_texture.value) +
            " name=" + '"' + self.name + '"'
            )

    @staticmethod
    def check_type_of(obj):
        # Guard used by Effect methods that accept texture arguments.
        assert isinstance(obj, Texture), "object %r is not a texture" % (obj)


class Effect :
    """Essential bindings for Effect manipulation

    - open ( file_name )
    - fromstring ( text )
    - createRenderTarget ( width, height, format_str, levels = 1 )
    - createRenderTargetCube ( size, format_str, levels = 1 )
    - createVolumeTexture ( width, height, format_str, levels = 1, slices = 1 )
    - loadTexture ( file_name, levels = 0 )
    - saveTexture ( texture_or_render_target, file_name )
    - setRenderTarget ( render_target, level = 0, face = 0 )
    - clear ( r_byte, g_byte, b_byte, a_byte )
    - drawQuad ( technique_name )
    - createTris ( tri_count )
    - drawTris ( tris, technique_name )
    - copyLevelToVolumeSlice ( source, destination_volume, slice )
    - flush ()
    - setFloat ( name, x )
    - setFloat4 ( name, x, y, z, w )
    - setTexture ( name, texture_or_render_target )
    """

    # Raw effect pointers still owned by the module (released at exit).
    all_effects = []
    # (width, height) of the current render target, set by setRenderTarget();
    # used to size the full-screen quad and the vTargetSize shader constant.
    curr_target_size = (0, 0)
    # True while inside a BeginScene that has not been ended yet.
    begin_called = False

    def __init__(self, d3d_effect, name = ""):
        # d3d_effect: LPVOID COM pointer to an ID3DXEffect.
        assert(d3d_effect)
        self.d3d_effect = d3d_effect
        self.name = name
        Effect.all_effects.append(d3d_effect)

    def __del__(self):
        # Release the COM reference unless _cleanup already did.
        if self.d3d_effect and (self.d3d_effect in Effect.all_effects):
            COM_Release(self.d3d_effect)
            Effect.all_effects.remove(self.d3d_effect)

    @staticmethod
    def open(fx_name):
        """Compile and return an Effect from a .fx file on disk.

        Raises IOError on failure, after printing the compiler's error buffer.
        """
        errors = LPVOID(0)
        d3d_effect = LPVOID(0)
        try:
            D3DXCreateEffectFromFile(
                lpDevice, fx_name.encode('ascii'), NULL, NULL,
                D3DXFX_NOT_CLONEABLE | D3DXSHADER_SKIPOPTIMIZATION,
                NULL, ctypes.byref(d3d_effect), ctypes.byref(errors)
                )
        except WindowsError:
            Effect.__printD3DXBuffer(errors)
            raise IOError('Can\'t load effect file "%s"' % (fx_name))

        return Effect(d3d_effect, name=fx_name)

    @staticmethod
    def fromstring(text):
        """Compile and return an Effect from .fx source text.

        Raises IOError on failure, after printing the compiler's error buffer.
        """
        errors = LPVOID(0)
        d3d_effect = LPVOID(0)
        try:
            D3DXCreateEffect(
                lpDevice, text.encode('ascii'), len(text), NULL, NULL,
                D3DXFX_NOT_CLONEABLE | D3DXSHADER_SKIPOPTIMIZATION,
                NULL, ctypes.byref(d3d_effect), ctypes.byref(errors)
                )
        except WindowsError:
            Effect.__printD3DXBuffer(errors)
            raise IOError('Can\'t create effect')

        return Effect(d3d_effect, name="<string>")

    @staticmethod
    def __printD3DXBuffer(d3dxbuffer):
        # Print the contents of an ID3DXBuffer (shader compiler diagnostics).
        if d3dxbuffer:
            sz = D3DXBUFFER_GetBufferSize(d3dxbuffer)
            ptr = D3DXBUFFER_GetBufferPointer(d3dxbuffer)
            if sz > 0:
                # sz - 1: drop the trailing NUL terminator.
                text = ctypes.string_at(LPVOID(ptr), sz - 1)
                print("")
                print(text.rstrip())

    @staticmethod
    def loadTexture(file_name, levels=0):
        """Load a 2D or cube texture from an image file.

        levels=0 lets D3DX build a full mip chain. Raises IOError when the
        file cannot be loaded; raises TypeError for unsupported resource
        types (e.g. volume textures on disk).
        """
        texture = LPVOID(0)
        info = D3DXIMAGE_INFO()
        try:
            # Probe the file first so the matching creator can be used.
            D3DXGetImageInfoFromFile(file_name.encode('ascii'), ctypes.byref(info))
            if info.ResourceType == D3DRTYPE_CUBETEXTURE:
                D3DXCreateCubeTextureFromFileEx(
                    lpDevice, file_name.encode('ascii'),
                    D3DX_DEFAULT_NONPOW2, int(levels), 0, info.Format, D3DPOOL_MANAGED,
                    D3DX_DEFAULT, D3DX_DEFAULT, 0, NULL, NULL,
                    ctypes.byref(texture)
                    )
            elif info.ResourceType == D3DRTYPE_TEXTURE:
                D3DXCreateTextureFromFileEx(
                    lpDevice, file_name.encode('ascii'),
                    D3DX_DEFAULT_NONPOW2, D3DX_DEFAULT_NONPOW2, int(levels), 0, info.Format, D3DPOOL_MANAGED,
                    D3DX_DEFAULT, D3DX_DEFAULT, 0, NULL, NULL,
                    ctypes.byref(texture)
                    )
            else:
                raise TypeError("Unsupported resource")

            return Texture(texture, name=file_name)
        except WindowsError:
            raise IOError("Can't load texture " '"%s"' % (file_name))

        return None

    @staticmethod
    def saveTexture(pyobj, file_name):
        """Save a Texture to disk; the format is chosen from the file extension."""
        Texture.check_type_of(pyobj)
        ext = os.path.splitext(file_name)[1]
        ext = ext[1:].lower()
        # KeyError here means the extension is not a supported D3DX format.
        format = D3DXIMAGE_FILEFORMAT.by_str[ext]
        try:
            D3DXSaveTextureToFile(file_name.encode('ascii'), format, pyobj.d3d_texture, NULL)
        except:
            raise IOError("Can't save texture " '"%s"' % (file_name))

    @staticmethod
    def createRenderTarget(width, height, format_str, levels=1):
        """Create a 2D render-target texture (D3DPOOL_DEFAULT)."""
        format = D3DFORMAT.by_str[format_str]
        texture = LPVOID(0)
        try:
            IDirect3DDevice9_CreateTexture(lpDevice, width, height, levels, D3DUSAGE_RENDERTARGET, format, D3DPOOL_DEFAULT, ctypes.byref(texture), NULL)
        except:
            raise Exception("Can't create render target")

        return Texture(texture, name="<renderTarget>")

    @staticmethod
    def createRenderTargetCube(size, format_str, levels=1):
        """Create a cube-map render-target texture (D3DPOOL_DEFAULT)."""
        format = D3DFORMAT.by_str[format_str]
        texture = LPVOID(0)
        try:
            IDirect3DDevice9_CreateCubeTexture(lpDevice, size, levels, D3DUSAGE_RENDERTARGET, format, D3DPOOL_DEFAULT, ctypes.byref(texture), NULL)
        except:
            raise Exception("Can't create render target cube")

        return Texture(texture, name="<renderTargetCube>")

    @staticmethod
    def createVolumeTexture(width, height, format_str, levels=1, slices=1):
        """Create a managed-pool volume texture (not a render target)."""
        format = D3DFORMAT.by_str[format_str]
        texture = LPVOID(0)
        try:
            IDirect3DDevice9_CreateVolumeTexture(lpDevice, width, height, slices, levels, 0, format, D3DPOOL_MANAGED, ctypes.byref(texture), NULL)
        except:
            raise Exception("Can't create volume texture")

        return Texture(texture, name="<volumeTexture>")

    @staticmethod
    def copyLevelToVolumeSlice(src_pyobj, dest_pyobj, slice_index):
        # Placeholder: validates its arguments, then fails — not yet ported.
        Texture.check_type_of(src_pyobj)
        Texture.check_type_of(dest_pyobj)
        raise NotImplementedError("Not yet ported")

    @staticmethod
    def setRenderTarget(pyobj, level=0, face=0):
        """Bind mip ``level`` (and cube ``face``, 0..5 = +X,-X,+Y,-Y,+Z,-Z)
        of a render-target texture as render target 0.

        Also records the bound level's size in Effect.curr_target_size.
        """
        Texture.check_type_of(pyobj)
        surface = LPVOID(0)
        ttype = Direct3DBaseTexture9_GetType(pyobj.d3d_texture)
        if ttype == D3DRTYPE_TEXTURE:
            IDirect3DTexture9_GetSurfaceLevel(pyobj.d3d_texture, level, ctypes.byref(surface))
        elif ttype == D3DRTYPE_CUBETEXTURE:
            faces = [
                D3DCUBEMAP_FACE_POSITIVE_X,
                D3DCUBEMAP_FACE_NEGATIVE_X,
                D3DCUBEMAP_FACE_POSITIVE_Y,
                D3DCUBEMAP_FACE_NEGATIVE_Y,
                D3DCUBEMAP_FACE_POSITIVE_Z,
                D3DCUBEMAP_FACE_NEGATIVE_Z,
                ]
            dxface = faces[face]
            IDirect3DCubeTexture9_GetCubeMapSurface(pyobj.d3d_texture, dxface, level, ctypes.byref(surface))
        else:
            raise TypeError("Incorrect render target type")

        # Each mip level halves the size (>> level).
        Effect.curr_target_size = (float(int(pyobj.width) >> level), float(int(pyobj.height) >> level))
        IDirect3DDevice9_SetRenderTarget(lpDevice, 0, surface)
        # GetSurfaceLevel/GetCubeMapSurface add a reference; drop it now that
        # the device holds its own.
        COM_Release(surface)

    @staticmethod
    def clear(r=0, g=0, b=0, a=0):
        """Clear the current render target to the given byte color (0..255 each)."""
        ir = min(max(int(r), 0), 255)
        ig = min(max(int(g), 0), 255)
        ib = min(max(int(b), 0), 255)
        ia = min(max(int(a), 0), 255)
        argb = UINT((ia << 24) | (ir << 16) | (ig << 8) | ib)
        IDirect3DDevice9_Clear(lpDevice, 0, NULL, D3DCLEAR_TARGET, argb, 1.0, 0)

    def __beginScene(self, technique_name):
        # Begin the scene once (flag-guarded), publish the render-target size
        # to the optional vTargetSize shader constant, and select the technique.
        w = Effect.curr_target_size[0]
        h = Effect.curr_target_size[1]

        if not Effect.begin_called:
            IDirect3DDevice9_BeginScene(lpDevice)
            Effect.begin_called = True

        vec = D3DXVECTOR4(w, h, 1.0 / w, 1.0 / h)
        try:
            # Best-effort: effects that do not declare vTargetSize are fine.
            ID3DXEffect_SetVector(self.d3d_effect, b"vTargetSize", ctypes.byref(vec))
        except:
            pass

        try:
            ID3DXEffect_SetTechnique(self.d3d_effect, technique_name.encode('ascii'))
        except WindowsError:
            raise ValueError('Can\'t set technique "%s"' % (technique_name))

    def drawQuad(self, technique_name, do_flush=True):
        """Draw a render-target-sized quad with every pass of ``technique_name``.

        The -0.5 offset aligns pre-transformed vertices with texel centers.
        """
        x = -0.5
        y = -0.5
        w = Effect.curr_target_size[0]
        h = Effect.curr_target_size[1]

        q = QUAD_VTX(
            x    , y    , 0, 1, 0, 0,
            x + w, y    , 0, 1, 1, 0,
            x    , y + h, 0, 1, 0, 1,
            x + w, y + h, 0, 1, 1, 1,
            )

        self.__beginScene(technique_name)
        IDirect3DDevice9_SetFVF(lpDevice, QUAD_VTX.FVF)

        pass_count = UINT(0)
        ID3DXEffect_Begin(self.d3d_effect, ctypes.byref(pass_count), 0)
        for p in range(pass_count.value):
            ID3DXEffect_BeginPass(self.d3d_effect, p)
            # Stride = sizeof(QUAD_VTX) / 4 vertices.
            IDirect3DDevice9_DrawPrimitiveUP(lpDevice, D3DPT_TRIANGLESTRIP, 2, ctypes.byref(q), int(ctypes.sizeof(QUAD_VTX) / 4))
            ID3DXEffect_EndPass(self.d3d_effect)
        ID3DXEffect_End(self.d3d_effect)

        if do_flush:
            IDirect3DDevice9_EndScene(lpDevice)
            Effect.begin_called = False
            self.flush()

    @staticmethod
    def createTris(tri_count):
        """Return a zero-initialized ctypes array of ``tri_count`` TRI_VTX."""
        return (TRI_VTX * tri_count)()

    def drawTris(self, tri_list, technique_name, do_flush=True):
        """Draw a TRI_VTX array (as created by createTris) with every pass of
        ``technique_name``."""
        assert isinstance(tri_list, ctypes.Array) and TRI_VTX == tri_list._type_, "object %r is not an array of TRI_VTX" % (tri_list)

        self.__beginScene(technique_name)
        IDirect3DDevice9_SetFVF(lpDevice, TRI_VTX.FVF)

        pass_count = UINT(0)
        ID3DXEffect_Begin(self.d3d_effect, ctypes.byref(pass_count), 0)
        for p in range(pass_count.value):
            ID3DXEffect_BeginPass(self.d3d_effect, p)
            # Stride = sizeof(TRI_VTX) / 3 vertices.
            IDirect3DDevice9_DrawPrimitiveUP(lpDevice, D3DPT_TRIANGLELIST, len(tri_list), ctypes.byref(tri_list), int(ctypes.sizeof(TRI_VTX) / 3))
            ID3DXEffect_EndPass(self.d3d_effect)
        ID3DXEffect_End(self.d3d_effect)

        if do_flush:
            IDirect3DDevice9_EndScene(lpDevice)
            Effect.begin_called = False
            self.flush()

    @staticmethod
    def flush():
        # Issue the timestamp query and read it back with D3DGETDATA_FLUSH,
        # which pushes the command buffer to the GPU. Best-effort.
        try:
            IDirect3DQuery9_Issue(lpFlushQuery, D3DISSUE_END)
            IDirect3DQuery9_GetData(lpFlushQuery, NULL, 0, D3DGETDATA_FLUSH)
        except:
            pass

    def setFloat(self, name, x):
        """Set a float effect parameter by name."""
        try:
            ID3DXEffect_SetFloat(self.d3d_effect, name.encode('ascii'), x)
        except WindowsError:
            raise ValueError('Can\'t set float "%s"' % (name))

    def setFloat4(self, name, x, y=0.0, z=0.0, w=0.0):
        """Set a float4 effect parameter by name; y/z/w default to 0."""
        vec = D3DXVECTOR4(x, y, z, w)
        try:
            ID3DXEffect_SetVector(self.d3d_effect, name.encode('ascii'), ctypes.byref(vec))
        except WindowsError:
            raise ValueError('Can\'t set vector "%s"' % (name))

    def setTexture(self, name, pyobj):
        """Bind a Texture to a sampler/texture effect parameter by name."""
        Texture.check_type_of(pyobj)
        try:
            ID3DXEffect_SetTexture(self.d3d_effect, name.encode('ascii'), pyobj.d3d_texture)
        except WindowsError:
            raise ValueError('Can\'t set texture "%s"' % (name))


def _cleanup():
    # atexit hook: release every effect and texture still alive, then the
    # flush query, the device, and the D3D9 object. A non-zero final
    # refcount from the last two Releases indicates something leaked.
    for p in Effect.all_effects:
        COM_Release(p)
    Effect.all_effects = []

    for p in Texture.all_textures:
        COM_Release(p)
    Texture.all_textures = []

    if lpFlushQuery:
        COM_Release(lpFlushQuery)

    ref = 0
    if lpDevice:
        ref += COM_Release(lpDevice)
    if lpD3D9:
        ref += COM_Release(lpD3D9)
    if ref != 0:
        print("WARNING: leaking D3D resources")

atexit.register(_cleanup)
# utils/update_checkout.py - Utility to update local checkouts --*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors

from __future__ import print_function

import argparse
import json
import os
import re
import sys
import traceback
from functools import reduce
from multiprocessing import freeze_support

from swift_build_support.swift_build_support import shell
from swift_build_support.swift_build_support.SwiftBuildSupport import \
    SWIFT_SOURCE_ROOT

SCRIPT_FILE = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_FILE)


def confirm_tag_in_repo(tag, repo_name):
    """Return ``tag`` if it exists on origin for the current repo, else None.

    Must be called with the repository as the current working directory
    (callers wrap this in shell.pushd).
    """
    tag_exists = shell.capture(['git', 'ls-remote', '--tags',
                                'origin', tag], echo=False)
    if not tag_exists:
        print("Tag '" + tag + "' does not exist for '" +
              repo_name + "', just updating regularly")
        tag = None
    return tag


def find_rev_by_timestamp(timestamp, repo_name, refspec):
    """Return the hash of the newest commit on ``refspec`` not after
    ``timestamp``, or raise RuntimeError if there is none."""
    base_args = ["git", "log", "-1", "--format=%H", '--before=' + timestamp]
    # Prefer the most-recent change _made by swift-ci_ before the timestamp,
    # falling back to most-recent in general if there is none by swift-ci.
    rev = shell.capture(base_args + ['--author', 'swift-ci', refspec]).strip()
    if rev:
        return rev
    rev = shell.capture(base_args + [refspec]).strip()
    if rev:
        return rev
    else:
        raise RuntimeError('No rev in %s before timestamp %s' %
                           (repo_name, timestamp))


def get_branch_for_repo(config, repo_name, scheme_name, scheme_map,
                        cross_repos_pr):
    """Resolve the branch to check out for ``repo_name``.

    Returns (branch, cross_repo). When the repo has an entry in
    ``cross_repos_pr``, the corresponding pull request is fetched into a
    local ``ci_pr_<id>`` branch and that branch is returned with
    cross_repo=True. Must run inside the repository directory.
    """
    cross_repo = False
    repo_branch = scheme_name
    if scheme_map:
        scheme_branch = scheme_map[repo_name]
        repo_branch = scheme_branch
        remote_repo_id = config['repos'][repo_name]['remote']['id']
        if remote_repo_id in cross_repos_pr:
            cross_repo = True
            pr_id = cross_repos_pr[remote_repo_id]
            repo_branch = "ci_pr_{0}".format(pr_id)
            shell.run(["git", "checkout", scheme_branch],
                      echo=True)
            # Delete any stale PR branch from a previous run; non-zero exit
            # (branch absent) is expected and allowed.
            shell.capture(["git", "branch", "-D", repo_branch],
                          echo=True, allow_non_zero_exit=True)
            shell.run(["git", "fetch", "origin",
                       "pull/{0}/merge:{1}".format(pr_id, repo_branch)],
                      echo=True)
    return repo_branch, cross_repo


def update_single_repository(args):
    """Worker for one repository (run in parallel via shell.run_parallel).

    ``args`` is the packed list built in update_all_repositories:
    [config, repo_name, scheme_name, scheme_map, tag, timestamp,
    reset_to_remote, should_clean, cross_repos_pr].
    Returns None on success or the exception value on failure (the parallel
    driver collects these).
    """
    config, repo_name, scheme_name, scheme_map, tag, timestamp, \
        reset_to_remote, should_clean, cross_repos_pr = args
    repo_path = os.path.join(SWIFT_SOURCE_ROOT, repo_name)
    if not os.path.isdir(repo_path):
        # Repository was never cloned; nothing to update.
        return

    try:
        print("Updating '" + repo_path + "'")

        with shell.pushd(repo_path, dry_run=False, echo=False):
            cross_repo = False
            checkout_target = None
            if tag:
                checkout_target = confirm_tag_in_repo(tag, repo_name)
            elif scheme_name:
                checkout_target, cross_repo = get_branch_for_repo(
                    config, repo_name, scheme_name, scheme_map,
                    cross_repos_pr)
                if timestamp:
                    checkout_target = find_rev_by_timestamp(timestamp,
                                                            repo_name,
                                                            checkout_target)
            elif timestamp:
                checkout_target = find_rev_by_timestamp(timestamp,
                                                        repo_name, "HEAD")

            # The clean option restores a repository to pristine condition.
            if should_clean:
                shell.run(['git', 'clean', '-fdx'],
                          echo=True)
                shell.run(['git', 'submodule', 'foreach', '--recursive',
                           'git', 'clean', '-fdx'],
                          echo=True)
                shell.run(['git', 'submodule', 'foreach', '--recursive',
                           'git', 'reset', '--hard', 'HEAD'],
                          echo=True)
                shell.run(['git', 'reset', '--hard', 'HEAD'],
                          echo=True)
                # It is possible to reset --hard and still be mid-rebase.
                try:
                    shell.run(['git', 'rebase', '--abort'], echo=True)
                except Exception:
                    pass

            if checkout_target:
                shell.run(['git', 'status', '--porcelain', '-uno'],
                          echo=False)
                shell.run(['git', 'checkout', checkout_target], echo=True)

            # It's important that we checkout, fetch, and rebase, in order.
            # .git/FETCH_HEAD updates the not-for-merge attributes based on
            # which branch was checked out during the fetch.
            shell.run(["git", "fetch", "--recurse-submodules=yes"], echo=True)

            # If we were asked to reset to the specified branch, do the hard
            # reset and return.
            if checkout_target and reset_to_remote and not cross_repo:
                shell.run(['git', 'reset', '--hard',
                           "origin/%s" % checkout_target], echo=True)
                return

            # Query whether we have a "detached HEAD", which will mean that
            # we previously checked out a tag rather than a branch.
            detached_head = False
            try:
                # This git command returns error code 1 if HEAD is detached.
                # Otherwise there was some other error, and we need to handle
                # it like other command errors.
                shell.run(["git", "symbolic-ref", "-q", "HEAD"], echo=False)
            except Exception as e:
                if e.ret == 1:
                    detached_head = True
                else:
                    raise  # Pass this error up the chain.

            # If we have a detached HEAD in this repository, we don't want
            # to rebase. With a detached HEAD, the fetch will have marked
            # all the branches in FETCH_HEAD as not-for-merge, and the
            # "git rebase FETCH_HEAD" will try to rebase the tree from the
            # default branch's current head, making a mess.

            # Prior to Git 2.6, this is the way to do a "git pull
            # --rebase" that respects rebase.autostash.  See
            # http://stackoverflow.com/a/30209750/125349
            if not cross_repo and not detached_head:
                shell.run(["git", "rebase", "FETCH_HEAD"], echo=True)
            elif detached_head:
                print(repo_path,
                      "\nDetached HEAD; probably checked out a tag. No need "
                      "to rebase.\n")

            shell.run(["git", "submodule", "update", "--recursive"],
                      echo=True)
    except Exception:
        # NOTE(review): `type` shadows the builtin here; only `value` is used.
        (type, value, tb) = sys.exc_info()
        print('Error on repo "%s": %s' % (repo_path, traceback.format_exc()))
        return value


def get_timestamp_to_match(args):
    """Return the committer timestamp of the current 'swift' checkout (ISO
    format), or None when --match-timestamp was not requested."""
    if not args.match_timestamp:
        return None
    with shell.pushd(os.path.join(SWIFT_SOURCE_ROOT, "swift"),
                     dry_run=False, echo=False):
        return shell.capture(["git", "log", "-1", "--format=%cI"],
                             echo=False).strip()


def update_all_repositories(args, config, scheme_name, cross_repos_pr):
    """Update every configured repository in parallel.

    Returns the result list from shell.run_parallel (checked later by
    shell.check_parallel_results).
    """
    scheme_map = None
    if scheme_name:
        # This loop is only correct, since we know that each alias set has
        # unique contents. This is checked by validate_config. Thus the first
        # branch scheme data that has scheme_name as one of its aliases is
        # the only possible correct answer.
        for v in config['branch-schemes'].values():
            if scheme_name in v['aliases']:
                scheme_map = v['repos']
                break
    pool_args = []
    timestamp = get_timestamp_to_match(args)
    for repo_name in config['repos'].keys():
        if repo_name in args.skip_repository_list:
            print("Skipping update of '" + repo_name + "', requested by user")
            continue
        my_args = [config,
                   repo_name,
                   scheme_name,
                   scheme_map,
                   args.tag,
                   timestamp,
                   args.reset_to_remote,
                   args.clean,
                   cross_repos_pr]
        pool_args.append(my_args)

    return shell.run_parallel(update_single_repository, pool_args,
                              args.n_processes)


def obtain_additional_swift_sources(pool_args):
    """Worker that clones one repository (run in parallel).

    ``pool_args`` is the packed list built in
    obtain_all_additional_swift_sources.
    """
    (args, repo_name, repo_info, repo_branch, remote, with_ssh, scheme_name,
     skip_history, skip_repository_list) = pool_args

    with shell.pushd(SWIFT_SOURCE_ROOT, dry_run=False, echo=False):
        print("Cloning '" + repo_name + "'")

        if skip_history:
            # Shallow clone: only the tip commit of the default branch.
            shell.run(['git', 'clone', '--recursive', '--depth', '1',
                       remote, repo_name],
                      echo=True)
        else:
            shell.run(['git', 'clone', '--recursive', remote,
                       repo_name], echo=True)
        if scheme_name:
            src_path = os.path.join(SWIFT_SOURCE_ROOT, repo_name, ".git")
            shell.run(['git', '--git-dir',
                       src_path, '--work-tree',
                       os.path.join(SWIFT_SOURCE_ROOT, repo_name),
                       'checkout', repo_branch], echo=False)
        with shell.pushd(os.path.join(SWIFT_SOURCE_ROOT, repo_name),
                         dry_run=False, echo=False):
            shell.run(["git", "submodule",
                       "update", "--recursive"], echo=False)


def obtain_all_additional_swift_sources(args, config, with_ssh, scheme_name,
                                        skip_history, skip_repository_list):
    """Clone every configured repository that is not already present.

    Returns the result list from shell.run_parallel, or None when there was
    nothing to clone.
    """
    pool_args = []
    with shell.pushd(SWIFT_SOURCE_ROOT, dry_run=False, echo=False):
        for repo_name, repo_info in config['repos'].items():
            if repo_name in skip_repository_list:
                print("Skipping clone of '" + repo_name + "', requested by "
                      "user")
                continue

            if os.path.isdir(os.path.join(repo_name, ".git")):
                print("Skipping clone of '" + repo_name + "', directory "
                      "already exists")
                continue

            # If we have a url override, use that url instead of
            # interpolating.
            remote_repo_info = repo_info['remote']
            if 'url' in remote_repo_info:
                remote = remote_repo_info['url']
            else:
                remote_repo_id = remote_repo_info['id']
                if with_ssh is True or 'https-clone-pattern' not in config:
                    remote = config['ssh-clone-pattern'] % remote_repo_id
                else:
                    remote = config['https-clone-pattern'] % remote_repo_id

            repo_branch = None
            if scheme_name:
                for v in config['branch-schemes'].values():
                    if scheme_name not in v['aliases']:
                        continue
                    repo_branch = v['repos'][repo_name]
                    break
                else:
                    # No scheme matched: fall back to the scheme name itself.
                    repo_branch = scheme_name

            pool_args.append([args, repo_name, repo_info, repo_branch,
                              remote, with_ssh, scheme_name, skip_history,
                              skip_repository_list])

    if not pool_args:
        print("Not cloning any repositories.")
        return

    return shell.run_parallel(obtain_additional_swift_sources, pool_args,
                              args.n_processes)


def dump_repo_hashes(config):
    """Print a name-aligned table of each repo's current HEAD (one-line log)."""
    max_len = reduce(lambda acc, x: max(acc, len(x)),
                     config['repos'].keys(), 0)
    fmt = "{:<%r}{}" % (max_len + 5)
    for repo_name, repo_info in sorted(config['repos'].items(),
                                       key=lambda x: x[0]):
        with shell.pushd(os.path.join(SWIFT_SOURCE_ROOT, repo_name),
                         dry_run=False, echo=False):
            h = shell.capture(["git", "log", "--oneline", "-n", "1"],
                              echo=False).strip()
            print(fmt.format(repo_name, h))


def dump_hashes_config(args, config):
    """Print (as JSON) a new config whose single branch-scheme pins every
    repository to its current HEAD hash."""
    branch_scheme_name = args.dump_hashes_config
    new_config = {}
    config_copy_keys = ['ssh-clone-pattern', 'https-clone-pattern', 'repos']
    for config_copy_key in config_copy_keys:
        new_config[config_copy_key] = config[config_copy_key]
    repos = {}
    branch_scheme = {'aliases': [branch_scheme_name], 'repos': repos}
    new_config['branch-schemes'] = {args.dump_hashes_config: branch_scheme}
    for repo_name, repo_info in sorted(config['repos'].items(),
                                       key=lambda x: x[0]):
        with shell.pushd(os.path.join(SWIFT_SOURCE_ROOT, repo_name),
                         dry_run=False, echo=False):
            h = shell.capture(["git", "rev-parse", "HEAD"],
                              echo=False).strip()
            repos[repo_name] = str(h)
    print(json.dumps(new_config, indent=4))


def validate_config(config):
    """Sanity-check the branch-scheme section of the config; raise
    RuntimeError on duplicates or a scheme whose name is not an alias."""
    # Make sure that our branch-names are unique.
    scheme_names = config['branch-schemes'].keys()
    if len(scheme_names) != len(set(scheme_names)):
        raise RuntimeError('Configuration file has duplicate schemes?!')

    # Ensure the branch-scheme name is also an alias
    # This guarantees sensible behavior of update_repository_to_scheme when
    # the branch-scheme is passed as the scheme name
    for scheme_name in config['branch-schemes'].keys():
        if scheme_name not in config['branch-schemes'][scheme_name]['aliases']:
            raise RuntimeError('branch-scheme name: "{0}" must be an alias '
                               'too.'.format(scheme_name))

    # Then make sure the alias names used by our branches are unique.
    #
    # We do this by constructing a list consisting of len(names),
    # set(names). Then we reduce over that list summing the counts and taking
    # the union of the sets. We have uniqueness if the length of the union
    # equals the length of the sum of the counts.
    data = [(len(v['aliases']), set(v['aliases']))
            for v in config['branch-schemes'].values()]
    result = reduce(lambda acc, x: (acc[0] + x[0], acc[1] | x[1]), data,
                    (0, set([])))
    if result[0] == len(result[1]):
        return
    raise RuntimeError('Configuration file has schemes with duplicate '
                       'aliases?!')


def main():
    """Entry point: parse arguments, optionally clone missing repositories,
    then update all checkouts; exits non-zero on any per-repo failure."""
    freeze_support()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
repositories.

By default, updates your checkouts of Swift, SourceKit, LLDB, and SwiftPM.""")
    parser.add_argument(
        "--clone",
        help="Obtain Sources for Swift and Related Projects",
        action="store_true")
    parser.add_argument(
        "--clone-with-ssh",
        help="Obtain Sources for Swift and Related Projects via SSH",
        action="store_true")
    parser.add_argument(
        "--skip-history",
        help="Skip histories when obtaining sources",
        action="store_true")
    parser.add_argument(
        "--skip-repository",
        metavar="DIRECTORY",
        default=[],
        help="Skip the specified repository",
        dest='skip_repository_list',
        action="append")
    parser.add_argument(
        "--scheme",
        help='Use branches from the specified branch-scheme. A "branch-scheme"'
        ' is a list of (repo, branch) pairs.',
        metavar='BRANCH-SCHEME',
        dest='scheme')
    parser.add_argument(
        '--reset-to-remote',
        help='Reset each branch to the remote state.',
        action='store_true')
    parser.add_argument(
        '--clean',
        help='Clean unrelated files from each repository.',
        action='store_true')
    parser.add_argument(
        "--config",
        default=os.path.join(SCRIPT_DIR, "update-checkout-config.json"),
        help="Configuration file to use")
    parser.add_argument(
        "--github-comment",
        help="""Check out related pull requests referenced in the given
        free-form GitHub-style comment.""",
        metavar='GITHUB-COMMENT',
        dest='github_comment')
    parser.add_argument(
        '--dump-hashes',
        action='store_true',
        help='Dump the git hashes of all repositories being tracked')
    parser.add_argument(
        '--dump-hashes-config',
        help='Dump the git hashes of all repositories packaged into '
             'update-checkout-config.json',
        metavar='BRANCH-SCHEME-NAME')
    parser.add_argument(
        "--tag",
        help="""Check out each repository to the specified tag.""",
        metavar='TAG-NAME')
    parser.add_argument(
        "--match-timestamp",
        help='Check out adjacent repositories to match timestamp of '
        ' current swift checkout.',
        action='store_true')
    parser.add_argument(
        "-j", "--jobs",
        type=int,
        help="Number of threads to run at once",
        default=0,
        dest="n_processes")
    args = parser.parse_args()

    if args.reset_to_remote and not args.scheme:
        print("update-checkout usage error: --reset-to-remote must specify "
              "--scheme=foo")
        sys.exit(1)

    clone = args.clone
    clone_with_ssh = args.clone_with_ssh
    skip_history = args.skip_history
    scheme = args.scheme
    github_comment = args.github_comment

    with open(args.config) as f:
        config = json.load(f)
    validate_config(config)

    if args.dump_hashes:
        dump_repo_hashes(config)
        return (None, None)

    if args.dump_hashes_config:
        dump_hashes_config(args, config)
        return (None, None)

    cross_repos_pr = {}
    if github_comment:
        # Accept both "apple/<repo>/pull/<n>" and "apple/<repo>#<n>" forms.
        regex_pr = r'(apple/[-a-zA-Z0-9_]+/pull/\d+|apple/[-a-zA-Z0-9_]+#\d+)'
        repos_with_pr = re.findall(regex_pr, github_comment)
        print("Found related pull requests:", str(repos_with_pr))
        repos_with_pr = [pr.replace('/pull/', '#') for pr in repos_with_pr]
        cross_repos_pr = dict(pr.split('#') for pr in repos_with_pr)

    clone_results = None
    if clone or clone_with_ssh:
        # If branch is None, default to using the default branch alias
        # specified by our configuration file.
        if scheme is None:
            scheme = config['default-branch-scheme']

        skip_repo_list = args.skip_repository_list
        clone_results = obtain_all_additional_swift_sources(args, config,
                                                            clone_with_ssh,
                                                            scheme,
                                                            skip_history,
                                                            skip_repo_list)

    update_results = update_all_repositories(args, config, scheme,
                                             cross_repos_pr)

    fail_count = 0
    fail_count += shell.check_parallel_results(clone_results, "CLONE")
    fail_count += shell.check_parallel_results(update_results, "UPDATE")
    if fail_count > 0:
        print("update-checkout failed, fix errors and try again")
    sys.exit(fail_count)


if __name__ == "__main__":
    main()
# Copyright 2000-2004 Michael Hudson-Doyle <micahel@gmail.com>
#
#                        All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""History support for pyrepl.

Defines the keymap that is active while an incremental search is in
progress, the command objects implementing history navigation and
i-search, and ``HistoricalReader``, a ``Reader`` subclass that wires
them together.
"""

from pyrepl import reader, commands
from pyrepl.reader import Reader as R

# Keymap in effect during an incremental search: printable characters
# extend the search term, a handful of control keys drive the search,
# and every other key ends the search (and is then handled normally).
isearch_keymap = tuple(
    [('\\%03o' % c, 'isearch-end') for c in range(256) if chr(c) != '\\'] +
    [(c, 'isearch-add-character')
     for c in map(chr, range(32, 127)) if c != '\\'] +
    [('\\%03o' % c, 'isearch-add-character')
     for c in range(256) if chr(c).isalpha() and chr(c) != '\\'] +
    [('\\\\', 'self-insert'),
     (r'\C-r', 'isearch-backwards'),
     (r'\C-s', 'isearch-forwards'),
     (r'\C-c', 'isearch-cancel'),
     (r'\C-g', 'isearch-cancel'),
     (r'\<backspace>', 'isearch-backspace')])

# Under Python 2 the comprehension variable leaked into module scope;
# clean it up if present (harmless no-op on Python 3).
if 'c' in globals():
    del c

ISEARCH_DIRECTION_NONE = ''
ISEARCH_DIRECTION_BACKWARDS = 'r'
ISEARCH_DIRECTION_FORWARDS = 'f'


class next_history(commands.Command):
    """Move to the next (more recent) history entry."""

    def do(self):
        r = self.reader
        if r.historyi == len(r.history):
            r.error("end of history list")
            return
        r.select_item(r.historyi + 1)


class previous_history(commands.Command):
    """Move to the previous (older) history entry."""

    def do(self):
        r = self.reader
        if r.historyi == 0:
            r.error("start of history list")
            return
        r.select_item(r.historyi - 1)


class restore_history(commands.Command):
    """Discard edits to the current history entry, restoring the original."""

    def do(self):
        r = self.reader
        if r.historyi != len(r.history):
            if r.get_unicode() != r.history[r.historyi]:
                r.buffer = list(r.history[r.historyi])
                r.pos = len(r.buffer)
                r.dirty = 1


class first_history(commands.Command):
    """Jump to the oldest history entry."""

    def do(self):
        self.reader.select_item(0)


class last_history(commands.Command):
    """Jump past the newest history entry (the in-progress line)."""

    def do(self):
        self.reader.select_item(len(self.reader.history))


class operate_and_get_next(commands.FinishCommand):
    """Accept the current line and queue up the following history entry."""

    def do(self):
        self.reader.next_history = self.reader.historyi + 1


class yank_arg(commands.Command):
    """Insert an argument word from a previous history entry at point.

    Repeated invocations walk further back through history, replacing
    the previously yanked word.
    """

    def do(self):
        r = self.reader
        if r.last_command is self.__class__:
            r.yank_arg_i += 1
        else:
            r.yank_arg_i = 0
        if r.historyi < r.yank_arg_i:
            r.error("beginning of history list")
            return
        a = r.get_arg(-1)
        # XXX how to split?
        words = r.get_item(r.historyi - r.yank_arg_i - 1).split()
        if a < -len(words) or a >= len(words):
            r.error("no such arg")
            return
        w = words[a]
        b = r.buffer
        # When repeating, overwrite the word yanked by the previous call.
        if r.yank_arg_i > 0:
            o = len(r.yank_arg_yanked)
        else:
            o = 0
        b[r.pos - o:r.pos] = list(w)
        r.yank_arg_yanked = w
        r.pos += len(w) - o
        r.dirty = 1


class forward_history_isearch(commands.Command):
    """Begin an incremental search forwards through history."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_FORWARDS
        r.isearch_start = r.historyi, r.pos
        r.isearch_term = ''
        r.dirty = 1
        r.push_input_trans(r.isearch_trans)


class reverse_history_isearch(commands.Command):
    """Begin an incremental search backwards through history."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS
        r.dirty = 1
        r.isearch_term = ''
        r.push_input_trans(r.isearch_trans)
        r.isearch_start = r.historyi, r.pos


class isearch_cancel(commands.Command):
    """Abort the search and return to where it started."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_NONE
        r.pop_input_trans()
        r.select_item(r.isearch_start[0])
        r.pos = r.isearch_start[1]
        r.dirty = 1


class isearch_add_character(commands.Command):
    """Append a character to the search term and advance if needed."""

    def do(self):
        r = self.reader
        b = r.buffer
        r.isearch_term += self.event[-1]
        r.dirty = 1
        p = r.pos + len(r.isearch_term) - 1
        # Only search again if the current match no longer fits.
        if b[p:p+1] != [r.isearch_term[-1]]:
            r.isearch_next()


class isearch_backspace(commands.Command):
    """Remove the last character from the search term."""

    def do(self):
        r = self.reader
        if len(r.isearch_term) > 0:
            r.isearch_term = r.isearch_term[:-1]
            r.dirty = 1
        else:
            r.error("nothing to rubout")


class isearch_forwards(commands.Command):
    """Find the next match in the forwards direction."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_FORWARDS
        r.isearch_next()


class isearch_backwards(commands.Command):
    """Find the next match in the backwards direction."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_BACKWARDS
        r.isearch_next()


class isearch_end(commands.Command):
    """Terminate the search, leaving the cursor on the found entry."""

    def do(self):
        r = self.reader
        r.isearch_direction = ISEARCH_DIRECTION_NONE
        # The key that ended the search must still be processed normally.
        r.console.forgetinput()
        r.pop_input_trans()
        r.dirty = 1


class HistoricalReader(R):
    """Adds history support (with incremental history searching) to the
    Reader class.

    Adds the following instance variables:
      * history:
        a list of strings
      * historyi:
      * transient_history:
      * next_history:
      * isearch_direction, isearch_term, isearch_start:
      * yank_arg_i, yank_arg_yanked:
        used by the yank-arg command; not actually manipulated by any
        HistoricalReader instance methods.
    """

    def collect_keymap(self):
        return super(HistoricalReader, self).collect_keymap() + (
            (r'\C-n', 'next-history'),
            (r'\C-p', 'previous-history'),
            (r'\C-o', 'operate-and-get-next'),
            (r'\C-r', 'reverse-history-isearch'),
            (r'\C-s', 'forward-history-isearch'),
            (r'\M-r', 'restore-history'),
            (r'\M-.', 'yank-arg'),
            (r'\<page down>', 'last-history'),
            (r'\<page up>', 'first-history'))

    def __init__(self, console):
        super(HistoricalReader, self).__init__(console)
        self.history = []
        self.historyi = 0
        self.transient_history = {}
        self.next_history = None
        self.isearch_direction = ISEARCH_DIRECTION_NONE
        # Register every history command under both its underscore name
        # and its dash-separated keymap name.
        # BUG FIX: isearch_add_character previously appeared twice in
        # this list; the duplicate registration was a harmless no-op and
        # has been removed.
        for c in [next_history, previous_history, restore_history,
                  first_history, last_history, yank_arg,
                  forward_history_isearch, reverse_history_isearch,
                  isearch_end, isearch_add_character, isearch_cancel,
                  isearch_backspace, isearch_forwards, isearch_backwards,
                  operate_and_get_next]:
            self.commands[c.__name__] = c
            self.commands[c.__name__.replace('_', '-')] = c
        from pyrepl import input
        self.isearch_trans = input.KeymapTranslator(
            isearch_keymap, invalid_cls=isearch_end,
            character_cls=isearch_add_character)

    def select_item(self, i):
        """Make history entry *i* the current buffer, saving any edits
        to the entry being left in transient_history."""
        self.transient_history[self.historyi] = self.get_unicode()
        buf = self.transient_history.get(i)
        if buf is None:
            buf = self.history[i]
        self.buffer = list(buf)
        self.historyi = i
        self.pos = len(self.buffer)
        self.dirty = 1

    def get_item(self, i):
        """Return history entry *i*, preferring any transient edit of it."""
        if i != len(self.history):
            return self.transient_history.get(i, self.history[i])
        else:
            return self.transient_history.get(i, self.get_unicode())

    def prepare(self):
        super(HistoricalReader, self).prepare()
        try:
            self.transient_history = {}
            if self.next_history is not None \
                   and self.next_history < len(self.history):
                self.historyi = self.next_history
                self.buffer[:] = list(self.history[self.next_history])
                self.pos = len(self.buffer)
                self.transient_history[len(self.history)] = ''
            else:
                self.historyi = len(self.history)
            self.next_history = None
        except:
            # Deliberately broad: restore the console state on any
            # failure, then let the original exception propagate.
            self.restore()
            raise

    def get_prompt(self, lineno, cursor_on_line):
        if cursor_on_line and self.isearch_direction != ISEARCH_DIRECTION_NONE:
            d = 'rf'[self.isearch_direction == ISEARCH_DIRECTION_FORWARDS]
            return "(%s-search `%s') "%(d, self.isearch_term)
        else:
            return super(HistoricalReader, self).get_prompt(
                lineno, cursor_on_line)

    def isearch_next(self):
        """Advance to the next history entry matching the search term."""
        st = self.isearch_term
        p = self.pos
        i = self.historyi
        s = self.get_unicode()
        forwards = self.isearch_direction == ISEARCH_DIRECTION_FORWARDS
        while 1:
            if forwards:
                p = s.find(st, p + 1)
            else:
                p = s.rfind(st, 0, p + len(st) - 1)
            if p != -1:
                self.select_item(i)
                self.pos = p
                return
            elif ((forwards and i == len(self.history) - 1)
                  or (not forwards and i == 0)):
                self.error("not found")
                return
            else:
                if forwards:
                    i += 1
                    s = self.get_item(i)
                    p = -1
                else:
                    i -= 1
                    s = self.get_item(i)
                    p = len(s)

    def finish(self):
        super(HistoricalReader, self).finish()
        ret = self.get_unicode()
        # Commit in-place edits of old entries, then append the new line.
        for i, t in self.transient_history.items():
            if i < len(self.history) and i != self.historyi:
                self.history[i] = t
        if ret:
            self.history.append(ret)


def test():
    """Interactive smoke test: read lines until an empty one."""
    from pyrepl.unix_console import UnixConsole
    reader = HistoricalReader(UnixConsole())
    reader.ps1 = "h**> "
    reader.ps2 = "h/*> "
    reader.ps3 = "h|*> "
    # Raw string: "\*" is not a recognised escape, so the runtime value
    # is unchanged but the invalid-escape DeprecationWarning is gone.
    reader.ps4 = r"h\*> "
    while reader.readline():
        pass

if __name__=='__main__':
    test()
"""The tests for the Home Assistant HTTP component."""
import asyncio

import requests

from homeassistant import setup, const
import homeassistant.components.http as http

from tests.common import get_test_instance_port, get_test_home_assistant

API_PASSWORD = 'test1234'
SERVER_PORT = get_test_instance_port()
HTTP_BASE = '127.0.0.1:{}'.format(SERVER_PORT)
HTTP_BASE_URL = 'http://{}'.format(HTTP_BASE)
HA_HEADERS = {
    const.HTTP_HEADER_HA_AUTH: API_PASSWORD,
    const.HTTP_HEADER_CONTENT_TYPE: const.CONTENT_TYPE_JSON,
}
CORS_ORIGINS = [HTTP_BASE_URL, HTTP_BASE]

# Module-level Home Assistant instance created by setUpModule and used
# by the TestCors class below.  The coroutine tests further down take a
# `hass` *parameter* (a pytest fixture) which shadows this global.
hass = None


def _url(path=''):
    """Helper method to generate URLs."""
    return HTTP_BASE_URL + path


# pylint: disable=invalid-name
def setUpModule():
    """Initialize a Home Assistant server with CORS enabled."""
    global hass

    hass = get_test_home_assistant()

    setup.setup_component(
        hass, http.DOMAIN, {
            http.DOMAIN: {
                http.CONF_API_PASSWORD: API_PASSWORD,
                http.CONF_SERVER_PORT: SERVER_PORT,
                http.CONF_CORS_ORIGINS: CORS_ORIGINS,
            }
        }
    )

    setup.setup_component(hass, 'api')

    # Registering static path as it caused CORS to blow up
    hass.http.register_static_path(
        '/custom_components', hass.config.path('custom_components'))

    hass.start()


# pylint: disable=invalid-name
def tearDownModule():
    """Stop the Home Assistant server."""
    hass.stop()


class TestCors:
    """Test cross origin resource sharing behavior of the HTTP component."""

    def test_cors_allowed_with_password_in_url(self):
        """Test cross origin resource sharing with password in url."""
        req = requests.get(_url(const.URL_API),
                           params={'api_password': API_PASSWORD},
                           headers={const.HTTP_HEADER_ORIGIN: HTTP_BASE_URL})

        allow_origin = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN

        assert req.status_code == 200
        assert req.headers.get(allow_origin) == HTTP_BASE_URL

    def test_cors_allowed_with_password_in_header(self):
        """Test cross origin resource sharing with password in header."""
        headers = {
            const.HTTP_HEADER_HA_AUTH: API_PASSWORD,
            const.HTTP_HEADER_ORIGIN: HTTP_BASE_URL
        }
        req = requests.get(_url(const.URL_API), headers=headers)

        allow_origin = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN

        assert req.status_code == 200
        assert req.headers.get(allow_origin) == HTTP_BASE_URL

    def test_cors_denied_without_origin_header(self):
        """Test that CORS headers are absent when no Origin header is sent."""
        headers = {
            const.HTTP_HEADER_HA_AUTH: API_PASSWORD
        }
        req = requests.get(_url(const.URL_API), headers=headers)

        allow_origin = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN
        allow_headers = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS

        # Request succeeds but must not carry any CORS response headers.
        assert req.status_code == 200
        assert allow_origin not in req.headers
        assert allow_headers not in req.headers

    def test_cors_preflight_allowed(self):
        """Test cross origin resource sharing preflight (OPTIONS) request."""
        headers = {
            const.HTTP_HEADER_ORIGIN: HTTP_BASE_URL,
            'Access-Control-Request-Method': 'GET',
            'Access-Control-Request-Headers': 'x-ha-access'
        }
        req = requests.options(_url(const.URL_API), headers=headers)

        allow_origin = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN
        allow_headers = const.HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS

        assert req.status_code == 200
        assert req.headers.get(allow_origin) == HTTP_BASE_URL
        assert req.headers.get(allow_headers) == \
            const.HTTP_HEADER_HA_AUTH.upper()


class TestView(http.HomeAssistantView):
    """Test the HTTP views."""

    name = 'test'
    url = '/hello'

    @asyncio.coroutine
    def get(self, request):
        """Return a get request."""
        return 'hello'


@asyncio.coroutine
def test_registering_view_while_running(hass, test_client):
    """Test that we can register a view while the server is running."""
    yield from setup.async_setup_component(
        hass, http.DOMAIN, {
            http.DOMAIN: {
                http.CONF_SERVER_PORT: get_test_instance_port(),
            }
        }
    )

    yield from setup.async_setup_component(hass, 'api')

    yield from hass.async_start()
    # NOTE(review): view is registered only after the server has started.
    yield from hass.async_block_till_done()

    hass.http.register_view(TestView)

    client = yield from test_client(hass.http.app)

    resp = yield from client.get('/hello')
    assert resp.status == 200

    text = yield from resp.text()
    assert text == 'hello'


@asyncio.coroutine
def test_api_base_url_with_domain(hass):
    """Test setting api url."""
    result = yield from setup.async_setup_component(hass, 'http', {
        'http': {
            'base_url': 'example.com'
        }
    })
    assert result
    assert hass.config.api.base_url == 'http://example.com'


@asyncio.coroutine
def test_api_base_url_with_ip(hass):
    """Test setting api url."""
    result = yield from setup.async_setup_component(hass, 'http', {
        'http': {
            'server_host': '1.1.1.1'
        }
    })
    assert result
    assert hass.config.api.base_url == 'http://1.1.1.1:8123'


@asyncio.coroutine
def test_api_base_url_with_ip_port(hass):
    """Test setting api url."""
    result = yield from setup.async_setup_component(hass, 'http', {
        'http': {
            'base_url': '1.1.1.1:8124'
        }
    })
    assert result
    assert hass.config.api.base_url == 'http://1.1.1.1:8124'


@asyncio.coroutine
def test_api_no_base_url(hass):
    """Test setting api url."""
    result = yield from setup.async_setup_component(hass, 'http', {
        'http': {
        }
    })
    assert result
    assert hass.config.api.base_url == 'http://127.0.0.1:8123'
import mock
from nose.tools import *  # noqa:

from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf_tests.factories import (
    AuthUserFactory,
    NodeFactory,
    RegistrationFactory,
)
from tests.base import ApiTestCase, get_default_metaschema


class LinkedRegistrationsTestCase(ApiTestCase):
    """Shared fixture for linked-registrations API tests.

    Builds one public and one private registration, each of which links
    (points) to one public and one private registration, with four users
    of differing permission levels (admin, read/write, read-only, and
    non-contributor).  Archiving is mocked out so register_node runs
    synchronously in tests.
    """

    def setUp(self):
        super(LinkedRegistrationsTestCase, self).setUp()
        self.mock_archive = mock.patch('website.archiver.tasks.archive')
        self.non_contributor = AuthUserFactory()
        self.read_contributor = AuthUserFactory()
        self.rw_contributor = AuthUserFactory()
        self.admin_contributor = AuthUserFactory()

        self.public_linked_registration = RegistrationFactory(
            is_public=True, creator=self.rw_contributor)
        self.private_linked_registration = RegistrationFactory(
            is_public=False, creator=self.rw_contributor)

        # Patch archiving before register_node is called below.
        self.mock_archive.start()

        public_node = NodeFactory(
            creator=self.admin_contributor, is_public=True)
        public_node.add_contributor(
            self.rw_contributor, auth=Auth(self.admin_contributor))
        public_node.add_contributor(
            self.read_contributor,
            permissions=['read'],
            auth=Auth(self.admin_contributor))
        public_node.add_pointer(
            self.public_linked_registration,
            auth=Auth(self.admin_contributor))
        public_node.add_pointer(
            self.private_linked_registration, auth=Auth(self.rw_contributor))
        public_node.save()
        self.public_registration = public_node.register_node(
            get_default_metaschema(), Auth(self.admin_contributor), '', None)
        self.public_registration.is_public = True
        self.public_registration.save()

        private_node = NodeFactory(creator=self.admin_contributor)
        private_node.add_contributor(
            self.rw_contributor, auth=Auth(self.admin_contributor))
        private_node.add_contributor(
            self.read_contributor,
            permissions=['read'],
            auth=Auth(self.admin_contributor))
        private_node.add_pointer(
            self.public_linked_registration,
            auth=Auth(self.admin_contributor))
        private_node.add_pointer(
            self.private_linked_registration, auth=Auth(self.rw_contributor))
        private_node.save()
        self.private_registration = private_node.register_node(
            get_default_metaschema(), Auth(self.admin_contributor), '', None)

    def tearDown(self):
        super(LinkedRegistrationsTestCase, self).tearDown()
        self.mock_archive.stop()


class TestRegistrationLinkedRegistrationsList(LinkedRegistrationsTestCase):
    """Permission matrix for GET /registrations/{id}/linked_registrations/.

    Note: the redundant setUp that only delegated to super() was removed;
    the inherited fixture is used unchanged.
    """

    def make_request(
            self, registration_id=None, auth=None, expect_errors=False):
        """GET the linked_registrations list, optionally authenticated."""
        url = '/{}registrations/{}/linked_registrations/'.format(
            API_BASE, registration_id)
        if auth:
            return self.app.get(url, auth=auth, expect_errors=expect_errors)
        return self.app.get(url, expect_errors=expect_errors)

    def test_unauthenticated_can_view_public_registration_linked_registrations(
            self):
        res = self.make_request(registration_id=self.public_registration._id)
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_admin_can_view_private_registration_linked_registrations(self):
        # Even the admin only sees linked registrations that are public.
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.admin_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_rw_contributor_can_view_private_registration_linked_registrations(
            self):
        # The rw contributor created the private linked registration, so
        # it is visible to them in addition to the public one.
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.rw_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_read_only_contributor_can_view_private_registration_linked_registrations(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.read_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_non_contributor_cannot_view_private_registration_linked_registrations(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.non_contributor.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')

    def test_unauthenticated_cannot_view_private_registration_linked_registrations(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            expect_errors=True
        )
        assert_equal(res.status_code, 401)
        assert_equal(
            res.json['errors'][0]['detail'],
            'Authentication credentials were not provided.')


class TestRegistrationsLinkedRegistrationsRelationship(
        LinkedRegistrationsTestCase):
    """Permission and method tests for the relationship endpoint
    /registrations/{id}/relationships/linked_registrations/."""

    def setUp(self):
        super(TestRegistrationsLinkedRegistrationsRelationship, self).setUp()
        self.public_url = '/{}registrations/{}/relationships/linked_registrations/'.format(
            API_BASE, self.public_registration._id)

    def make_request(
            self, registration_id=None, auth=None,
            expect_errors=False, version=None):
        """GET the relationship endpoint, optionally pinning an API version."""
        url = '/{}registrations/{}/relationships/linked_registrations/'.format(
            API_BASE, registration_id)
        if version:
            url = '{}?version={}'.format(url, version)
        if auth:
            return self.app.get(url, auth=auth, expect_errors=expect_errors)
        return self.app.get(url, expect_errors=expect_errors)

    def test_public_registration_unauthenticated_user_can_view_linked_registrations_relationship(
            self):
        res = self.make_request(registration_id=self.public_registration._id)
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)
        assert res.json['data'][0]['type'] == 'linked_registrations'

    def test_public_registration_unauthenticated_user_can_view_linked_registrations_relationship_2_13(
            self):
        # API version 2.13 renames the resource type to 'registrations'.
        res = self.make_request(registration_id=self.public_registration._id,
                                version='2.13')
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)
        assert res.json['data'][0]['type'] == 'registrations'

    def test_private_registration_admin_contributor_can_view_linked_registrations_relationship(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.admin_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_private_registration_rw_contributor_can_view_linked_registrations_relationship(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.rw_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_private_registration_read_contributor_can_view_linked_registrations_relationship(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.read_contributor.auth
        )
        assert_equal(res.status_code, 200)
        linked_registration_ids = [r['id'] for r in res.json['data']]
        assert_in(self.public_linked_registration._id, linked_registration_ids)
        assert_not_in(
            self.private_linked_registration._id,
            linked_registration_ids)

    def test_private_registration_non_contributor_cannot_view_linked_registrations_relationship(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            auth=self.non_contributor.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')

    def test_private_registration_unauthenticated_user_cannot_view_linked_registrations_relationship(
            self):
        res = self.make_request(
            registration_id=self.private_registration._id,
            expect_errors=True
        )
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'],
                     'Authentication credentials were not provided.')

    def test_cannot_create_linked_registrations_relationship(self):
        res = self.app.post_json_api(
            self.public_url, {},
            auth=self.admin_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 405)

    def test_cannot_update_linked_registrations_relationship(self):
        res = self.app.put_json_api(
            self.public_url, {},
            auth=self.admin_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 405)

    def test_cannot_delete_linked_registrations_relationship(self):
        res = self.app.delete_json_api(
            self.public_url, {},
            auth=self.admin_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 405)
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
"""
# import datetime
import socket
import socketserver
import struct
import threading
# from adynaton.constants import *
from adynaton.utility import Utility, Formatted_Packet


class DomainNameSystem(object):
    """Base object for DNS: shared header format, defaults and the
    packet field structure used by both client and server."""

    def __init__(self):
        # DNS header: id (H), two flag bytes (2B), four counts (4H).
        self.DNS_header_format = '!H 2B 4H'
        self.DNS_nameserver = "8.8.4.4"
        self.DNS_TCP_port = 53
        self.DNS_UDP_port = 53
        self.DNS_packet = {
            'id': 1234,
            'qr': 0,
            'opcode': 0,
            'aa': 0,
            'tc': 0,
            'rd': 1,
            'ra': 0,
            'z': 0,
            'rcode': 0,
            'qdcount': 1,
            'ancount': 0,
            'nscount': 0,
            'arcount': 0,
            'qname': 0,
            'qtype': 1,
            'qclass': 1,
            'qdata': '',
        }
        self.DNS_timeout = 5  # seconds, used for the client socket
        self.packet = False

    @staticmethod
    def default_packet():
        """
        structure to hold packet info for lifecycle, framework for other
        services

        When the values are found things like the size of qname bits can
        be populated printing pretty output can become a shared library
        that just consumes this structure
        """
        packet = [
            {'RFC_name': 'DNS', 'bits': 16, 'order': 0,
             'name': 'Domain Name System', 'value': 0},
            {'RFC_name': 'id', 'bits': 16, 'order': 1,
             'name': 'Identifier', 'value': 1234},
            {'RFC_name': 'qr', 'bits': 1, 'order': 2,
             'name': 'Query or Response', 'value': 0},
            {'RFC_name': 'opcode', 'bits': 4, 'order': 3,
             'name': 'Operation Code', 'value': 0},
            {'RFC_name': 'aa', 'bits': 1, 'order': 4,
             'name': 'Authoritative Answer', 'value': 0},
            {'RFC_name': 'tc', 'bits': 1, 'order': 5,
             'name': 'TrunCation', 'value': 0},
            {'RFC_name': 'rd', 'bits': 1, 'order': 6,
             'name': 'Recursion Desired', 'value': 1},
            {'RFC_name': 'ra', 'bits': 1, 'order': 7,
             'name': 'Recursion Available', 'value': 0},
            # BUG FIX: the Z field is reserved and must be zero per
            # RFC 1035 (the DNS_packet dict above already used 0; this
            # previously defaulted to 1).
            {'RFC_name': 'z', 'bits': 3, 'order': 8,
             'name': 'Zero (reserved)', 'value': 0},
            {'RFC_name': 'rcode', 'bits': 4, 'order': 9,
             'name': 'Response Code', 'value': 0},
            {'RFC_name': 'qdcount', 'bits': 16, 'order': 10,
             'name': 'Query Domain Count', 'value': 1},
            {'RFC_name': 'ancount', 'bits': 16, 'order': 11,
             'name': 'Answer Count', 'value': 0},
            {'RFC_name': 'nscount', 'bits': 16, 'order': 12,
             'name': 'Name Server Count', 'value': 0},
            {'RFC_name': 'arcount', 'bits': 16, 'order': 13,
             'name': 'Additional Records Count', 'value': 0},
            {'RFC_name': 'qname', 'bits': 8, 'order': 14,
             'name': 'Query Name', 'value': 0},
            {'RFC_name': 'qtype', 'bits': 16, 'order': 15,
             'name': 'Query Type', 'value': 1},
            {'RFC_name': 'qclass', 'bits': 16, 'order': 16,
             'name': 'Query Class', 'value': 1},
        ]
        return packet

    @staticmethod
    def packet_answers():
        """Field structure for a resource record in the answer section."""
        packet = [
            {'RFC_name': 'name', 'bits': 0, 'order': '',
             'name': 'Resource Record Name', 'value': 0},
            {'RFC_name': 'type', 'bits': 16, 'order': '',
             'name': 'Resource Record Type', 'value': 0},
            {'RFC_name': 'class', 'bits': 16, 'order': '',
             'name': 'Resource Record Class', 'value': 0},
            {'RFC_name': 'ttl', 'bits': 16, 'order': '',
             'name': 'Resource Record Time To Live', 'value': 0}
        ]
        return packet


class DomainNameSystemClient(DomainNameSystem):
    """
    DNS Client

    Usage
        import adynaton
        dns_client = adynaton.DomainNameSystemClient()
        dns_client.DNS_nameserver = "8.8.4.4"
        dns_client.DNS_query = "www.google.com"
        dns_client.send_query()
        print(dns_client.pretty_result())

    Note: Defaults should be removed so always set nameserver.
    """

    def __init__(self, nameserver=None, query=None):
        DomainNameSystem.__init__(self)
        if nameserver:
            self.DNS_nameserver = nameserver
        self.DNS_qname_bytes = b''
        if query:
            self.DNS_query = query
        self.DNS_header = b''
        self.DNS_query_bytes = None
        self.DNS_response = None

    def validate_query(self):
        """
        TODO add idna or punny code check/conversion
        """
        self.trim_query()
        return self.DNS_query

    def set_query(self, query=None):
        """Set the name to query; returns True."""
        if query:
            self.DNS_query = query
        return True

    def trim_query(self):
        """
        Clean up user input that could contain non-domainname artifacts
        """
        if self.DNS_query.endswith('.'):
            self.DNS_query = self.DNS_query[:-1]
        if '://' in self.DNS_query:
            self.DNS_query = self.DNS_query.split('://')[1]
        if '/' in self.DNS_query:
            self.DNS_query = self.DNS_query.split('/')[0]
        if ':' in self.DNS_query:
            self.DNS_query = self.DNS_query.split(':')[0]
        if '@' in self.DNS_query:
            # This could be too far, clean up your uri yourself
            self.DNS_query = self.DNS_query.split('@')[1]
        return self.DNS_query

    def create_header(self):
        """
        Create a header or the first twelve bytes of a DNS packet using
        the objects values so a user can set each option and fire off a
        packet with custom values for debugging, testing and the like
        """
        self.packet = self.default_packet()
        self.DNS_header = struct.pack(
            self.DNS_header_format,
            self.packet[1]['value'],
            # Flag byte 1: QR(1) Opcode(4) AA(1) TC(1) RD(1)
            (self.packet[2]['value'] << 7 |
             self.packet[3]['value'] << 3 |
             self.packet[4]['value'] << 2 |
             self.packet[5]['value'] << 1 |
             self.packet[6]['value']),
            # Flag byte 2: RA(1) Z(3) RCODE(4)
            (self.packet[7]['value'] << 7 |
             self.packet[8]['value'] << 4 |
             self.packet[9]['value']),
            self.packet[10]['value'],
            self.packet[11]['value'],
            self.packet[12]['value'],
            self.packet[13]['value'],
        )
        return self.DNS_header

    def create_qname(self):
        """
        still a bit rough, under development
        """
        self.validate_query()
        qname = []
        # Each label is length-prefixed; the name ends with a zero octet.
        for label in self.DNS_query.split("."):
            qname.append(len(label))
            for letter in bytes(label, 'utf-8'):
                qname.append(letter)
        qname.append(0)
        qname_format = "!%dB" % len(qname)
        self.DNS_qname_bytes = struct.pack(qname_format, *qname)
        return self.DNS_qname_bytes

    def create_query(self):
        """Assemble header + qname + qtype/qclass into the query bytes."""
        self.create_header()
        self.create_qname()
        query_footer = struct.pack(
            '!2H', self.packet[15]['value'], self.packet[16]['value'])
        self.DNS_query_bytes = self.DNS_header
        self.DNS_query_bytes += self.DNS_qname_bytes + query_footer
        return self.DNS_query_bytes

    def send_query(self):
        """Send the query over UDP and parse the response."""
        self.create_query()
        # BUG FIX: use the socket as a context manager so it is closed
        # even when recvfrom raises socket.timeout (it used to leak).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
                as client_socket:
            client_socket.settimeout(self.DNS_timeout)
            client_socket.connect((self.DNS_nameserver, self.DNS_UDP_port))
            client_socket.send(self.DNS_query_bytes)
            self.DNS_response = client_socket.recvfrom(512)[0]
            # logic for tc and TCP switchover here
        self.parse_data()

    def parse_data(self):
        """Unpack the raw DNS response back into the packet structure."""
        header_size = 12
        header_format = self.DNS_header_format
        response = struct.unpack(
            header_format, self.DNS_response[:header_size])
        self.packet[1]['value'] = response[0]
        # Flag byte 1: QR(1) Opcode(4) AA(1) TC(1) RD(1)
        self.packet[2]['value'] = response[1] >> 7
        self.packet[3]['value'] = (response[1] & 0x7F) >> 3
        # BUG FIX: '>>' binds tighter than '&' in Python, so the original
        # 'response[1] & 0x07 >> 2' evaluated as 'response[1] & 1' (the
        # RD bit) — and likewise for TC.  Shift first, then mask.
        self.packet[4]['value'] = (response[1] >> 2) & 0x01
        self.packet[5]['value'] = (response[1] >> 1) & 0x01
        self.packet[6]['value'] = response[1] & 0x01
        # Flag byte 2: RA(1) Z(3) RCODE(4)
        self.packet[7]['value'] = response[2] >> 7
        self.packet[8]['value'] = (response[2] & 0x7F) >> 4
        self.packet[9]['value'] = response[2] & 0x0F
        self.packet[10]['value'] = response[3]
        self.packet[11]['value'] = response[4]
        self.packet[12]['value'] = response[5]
        self.packet[13]['value'] = response[6]
        response_size = len(self.DNS_response) - header_size
        response_format = "!%dB" % (response_size)
        response = struct.unpack(
            response_format, self.DNS_response[header_size:])
        self.packet[14]['value'] = []
        # BUG FIX: default so a malformed, unterminated qname cannot
        # leave qname_size unbound (NameError).
        qname_size = response_size
        for value in response:
            self.packet[14]['value'].append(value)
            # BUG FIX: was 'value is 0' — identity comparison with an int
            # literal is a CPython implementation detail (SyntaxWarning).
            if value == 0:
                qname_size = len(self.packet[14]['value'])
                break
        response_size = response_size - qname_size
        response_format = "!2H"
        response_start = header_size + qname_size
        response_end = header_size + qname_size + 4
        response = struct.unpack(
            response_format,
            self.DNS_response[response_start:response_end])
        self.packet[15]['value'] = response[0]
        self.packet[16]['value'] = response[1]
        response_size = len(self.DNS_response) - header_size - qname_size - 4
        response_format = "!%dB" % (response_size)
        # Answer section: unpacked but not yet interpreted.
        response = struct.unpack(
            response_format,
            self.DNS_response[header_size + qname_size + 4:])

    def pretty_result(self):
        """
        For debugging and or educational usage.
        """
        tab = "\t"
        newline = "\n"
        pipe = "|"
        tabpipe = tab + pipe
        end = "|\n"
        numbers, separator = Utility().packet_documentation()
        packet = self.packet
        output = newline
        output += tab + str('DNS Packet').center(49) + newline
        output += tab + str('Name queried: ' + self.DNS_query).center(49)
        output += newline
        output += tab + str('Name server: ' + self.DNS_nameserver).center(49)
        output += newline
        output += newline
        output += tab + "Bits".center(49) + newline
        output += tab + numbers
        output += tab + separator
        output += tabpipe + str(packet[1]['value']).center(47) + end
        output += tab + separator
        output += tabpipe + str(packet[2]['value']).rjust(2)
        output += pipe + str(packet[3]['value']).center(11)
        output += pipe + str(packet[4]['value']).rjust(2)
        output += pipe + str(packet[5]['value']).rjust(2)
        output += pipe + str(packet[6]['value']).rjust(2)
        output += pipe + str(packet[7]['value']).rjust(2)
        output += pipe + str(packet[8]['value']).center(8)
        output += pipe + str(packet[9]['value']).center(11) + end
        output += tab + separator
        output += tabpipe + str(packet[10]['value']).center(47) + end
        output += tab + separator
        output += tabpipe + str(packet[11]['value']).center(47) + end
        output += tab + separator
        output += tabpipe + str(packet[12]['value']).center(47) + end
        output += tab + separator
        output += tabpipe + str(packet[13]['value']).center(47) + end
        output += tab + separator
        # qname octets are rendered two per table row.
        left = True
        for item in self.packet[14]['value']:
            v = str(item)
            if left:
                output += tabpipe + v.center(23)
                left = False
            else:
                output += pipe + v.center(23) + end
                output += tab + separator
                left = True
        odd = False
        if not left:
            # Odd number of octets: pad the half-filled row.
            odd = True
            output += pipe + str('**').center(23, ' ') + end
            output += tab + separator
        output += tabpipe + str(packet[15]['value']).center(47) + end
        output += tab + separator
        output += tabpipe + str(packet[16]['value']).center(47) + end
        output += tab + separator
        if odd:
            output += newline + tab + "** Odd Length qname, table altered for"
            output += " readability. Actual packet has no padding here."
        output += newline
        for item in range(1, len(packet)):
            if isinstance(packet[item]['value'], list):
                v = 'List: '
                for subitem in packet[item]['value']:
                    v += str(subitem) + " "
            else:
                v = packet[item]['value']
            output += tab + "%s: %s\n" % (
                packet[item]['name'], v)
        return output

    def detailed_packet(self):
        """Return a Formatted_Packet view of the current packet."""
        text = [self.packet[0]['name'],
                'Name queried: ' + self.DNS_query,
                'Name server: ' + self.DNS_nameserver]
        output = Formatted_Packet(header=text, packet=self.packet)
        return output


class DomainNameSystemServer(DomainNameSystem):
    """Threaded TCP/UDP DNS server wrapper."""

    def __init__(self):
        DomainNameSystem.__init__(self)
        self.DNS_bind_address = '0.0.0.0'
        self.DNS_TTL = 300
        self.DNS_data_file = '/srv/adynaton/DNSDATA'
        self.DNS_status = "Off"
        self.DNS_status_message = "DNS server is currently: "
        self.log_level = 0
        self.TCPserver = False
        self.TCPserverthread = False
        self.DNS_status_TCP = False
        self.UDPserver = False
        self.UDPserverthread = False
        self.DNS_status_UDP = False

    def status(self):
        """Return a human-readable status line."""
        return self.DNS_status_message + self.DNS_status

    def start_TCP(self):
        """Start the threaded TCP listener as a daemon thread."""
        self.TCPserver = DomainNameSystemServerTCPThreading(
            (self.DNS_bind_address, self.DNS_TCP_port),
            DomainNameSystemServerTCPHandler)
        self.TCPserverthread = threading.Thread(
            target=self.TCPserver.serve_forever)
        # setDaemon() is deprecated; assign the attribute instead.
        self.TCPserverthread.daemon = True
        self.TCPserverthread.start()
        self.DNS_status_TCP = "\tTCP is running\n"
        return True

    def start_UDP(self):
        """Start the threaded UDP listener as a daemon thread."""
        self.UDPserver = DomainNameSystemServerUDPThreading(
            (self.DNS_bind_address, self.DNS_UDP_port),
            DomainNameSystemServerUDPHandler)
        self.UDPserverthread = threading.Thread(
            target=self.UDPserver.serve_forever)
        # setDaemon() is deprecated; assign the attribute instead.
        self.UDPserverthread.daemon = True
        self.UDPserverthread.start()
        self.DNS_status_UDP = "\tUDP is Running\n"
        return True

    def stop(self):
        """Shut down both listeners and join their threads."""
        self.TCPserver.shutdown()
        self.TCPserver.server_close()
        self.TCPserverthread.join()
        self.TCPserverthread = None
        self.DNS_status_TCP = "TCP is off"
        self.UDPserver.shutdown()
        self.UDPserver.server_close()
        self.UDPserverthread.join()
        self.UDPserverthread = None
        self.DNS_status_UDP = "UDP is off"
        return True


class DomainNameSystemServerTCPHandler(socketserver.BaseRequestHandler):
    """Handler for TCP DNS requests (currently just logs them)."""

    def handle(self):
        print(self.request.recv(1024).strip())
        print(str(self.client_address[0]))


class DomainNameSystemServerUDPHandler(socketserver.BaseRequestHandler):
    """Handler for UDP DNS requests (currently just logs them)."""

    def handle(self):
        print(self.request[0])
        print(self.client_address[0])


class DomainNameSystemServerTCPThreading(
        socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server for DNS."""
    pass


class DomainNameSystemServerUDPThreading(
        socketserver.ThreadingMixIn, socketserver.UDPServer):
    """Threaded UDP server for DNS."""
    pass
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#

"""tests for the new, mixin-based property/descriptor system"""

# NOTE(review): this suite is Python-2 only — it uses unittest2,
# types.NoneType, types.StringType, xrange and assertRegexpMatches/
# assertRaisesRegexp, all of which are gone or renamed in Python 3.

from __future__ import absolute_import

import re
import types
import unittest2

from normalize import RecordList
from normalize.coll import ListCollection
import normalize.exc as exc
from normalize.identity import record_id
from normalize.record import Record
from normalize.property import LazyProperty
from normalize.property import LazySafeProperty
from normalize.property import make_property_type
from normalize.property import Property
from normalize.property import ROLazyProperty
from normalize.property import ROProperty
from normalize.property import SafeProperty
from normalize.property.coll import ListProperty
from normalize.property.meta import _merge_camel_case_names
from normalize.property.meta import MetaProperty


class TestProperties(unittest2.TestCase):
    """Test that the new data descriptor classes work"""

    def test_0_property(self):
        """Test that unbound Property objects can be created successfully"""
        # A bare Property is unbound until attached to a Record class.
        prop = Property()
        self.assertIsNotNone(prop)
        self.assertIsInstance(prop, Property)
        self.assertIsInstance(type(prop), MetaProperty)
        self.assertRegexpMatches(str(prop), r".*unbound.*", re.I)

        # traits=['ro'] mixes the read-only trait into the returned type.
        roprop = Property(traits=['ro'])
        self.assertIsNotNone(roprop)
        self.assertIsInstance(roprop, ROProperty)
        self.assertIsInstance(type(prop), MetaProperty)

        roprop = ROProperty()
        self.assertIsNotNone(roprop)
        self.assertIsInstance(roprop, ROProperty)

        lazyprop = Property(lazy=True)
        self.assertIsInstance(lazyprop, LazyProperty)
        self.assertFalse(isinstance(lazyprop, SafeProperty))

        # lazy + isa should combine the lazy and safe traits.
        safelazyprop = Property(lazy=True, isa=str)
        self.assertIsInstance(safelazyprop, LazyProperty)
        self.assertIsInstance(safelazyprop, SafeProperty)

        self.assertRaises(exc.LazyIsFalse, Property, lazy=False)
        self.assertRaises(exc.CoerceWithoutType, Property, coerce=lambda x: 1)

    def test_1_basic(self):
        """Test that basic Properties can be defined and used"""
        class BasicRecord(Record):
            name = Property()
            defaulted = Property(default=lambda: [])
            default_none = Property(default=None)

        # test Property.__repr__ includes class & attribute name
        self.assertRegexpMatches(
            str(BasicRecord.__dict__['name']),
            r".*Property.*BasicRecord\.name.*", re.I,
        )

        br = BasicRecord()
        self.assertIsInstance(br, BasicRecord)
        self.assertIsInstance(br.defaulted, list)
        br.defaulted.append("foo")
        self.assertEqual(br.defaulted[0], "foo")
        with self.assertRaisesRegexp(AttributeError, r'BasicRecord.name'):
            br.name
        self.assertEqual(br.default_none, None)

        br = BasicRecord(name="Bromine")
        self.assertEqual(br.name, "Bromine")
        self.assertFalse(br.defaulted)

    def test_2_ro(self):
        """Test Attributes which don't allow being set"""
        class TrivialRecord(Record):
            id = ROProperty()
            name = Property()
        self.assertRegexpMatches(
            str(TrivialRecord.__dict__['id']),
            r".*ROProperty.*TrivialRecord\.id.*", re.I
        )

        tr = TrivialRecord(id=123)
        self.assertEqual(tr.id, 123)
        with self.assertRaisesRegexp(
            AttributeError, r'TrivialRecord.id.*read-only',
        ):
            tr.id = 124

        tr.name = "Travel Guides"
        self.assertEqual(tr.name, "Travel Guides")

    def test_3_lazy(self):
        """Test Attributes which are build-once"""
        # Shared counter so each _seq() call yields the next integer.
        _seq_num = [0]

        def _seq():
            _seq_num[0] += 1
            return _seq_num[0]

        def _func_with_default_args(plus=5):
            return _seq() + plus

        class TrapDoorRecord(Record):
            def _shoot(self):
                projectile = self.chamber
                self.chamber = "empty"
                return projectile
            chamber = Property()
            fired = LazyProperty(default=_shoot)
            ask = LazyProperty(default=_seq)
            plus = LazyProperty(default=_func_with_default_args)

        tdr = TrapDoorRecord(chamber="bolt")
        # Lazy values must not exist in __dict__ before first access...
        self.assertNotIn(
            "fired", tdr.__dict__, "peek into lazy object's dict"
        )
        self.assertNotIn("ask", tdr.__dict__)
        # ...and must be computed exactly once, then cached.
        self.assertEqual(tdr.fired, "bolt")
        self.assertEqual(tdr.chamber, "empty")
        self.assertEqual(tdr.fired, "bolt")
        self.assertEqual(tdr.ask, 1)
        self.assertEqual(tdr.ask, 1)
        self.assertEqual(tdr.plus, 7)

        # lazy properties may be assigned
        tdr.fired = None
        self.assertEqual(tdr.fired, None)
        self.assertEqual(TrapDoorRecord.fired.__get__(tdr), None)

        # delete and start again!
        tdr.chamber = "bullet"
        del tdr.fired
        self.assertEqual(tdr.fired, "bullet")

    def test_4_required_check(self):
        """Test Attributes which are marked as required"""
        class FussyRecord(Record):
            id = Property(required=True, isa=int)
            natural = SafeProperty(check=lambda i: i > 0)
            must = SafeProperty(required=True)
            rbn = SafeProperty(required=True, isa=(str, types.NoneType))

        with self.assertRaises(ValueError):
            fr = FussyRecord()

        fr = FussyRecord(id=123, must="sugary", rbn="Hello")
        self.assertIn("Hello", str(fr))
        self.assertEqual(fr, eval(repr(fr)))

        # required attributes cannot be deleted or set to None...
        with self.assertRaises(ValueError):
            del fr.must
        with self.assertRaises(ValueError):
            fr.must = None
        fr.must = "barmy"

        # ...unless None is an allowed type, in which case only deletion
        # is forbidden.
        with self.assertRaises(ValueError):
            del fr.rbn
        fr.rbn = None

        fr.natural = 7
        with self.assertRaises(ValueError):
            fr.natural = 0

    def test_5_raisins_of_etre(self):
        """Check that property types which are mixed-in combinations of
        types work as expected"""
        num = [0]

        def seq():
            num[0] += 1
            return num[0]

        class VariedRecord(Record):
            def _lazy(self):
                return "%s.%d" % (self.must, self.id)
            id = ROLazyProperty(
                required=True, check=lambda i: i > 0, default=seq,
            )
            must = SafeProperty(required=True)
            lazy = LazySafeProperty(
                check=lambda i: re.match(r'\w+\.\d+$', i),
                default=_lazy,
            )

        vr = VariedRecord(must="horn")
        self.assertEqual(vr.lazy, "horn.1")
        self.assertEqual(
            vr.lazy, "horn.1",
            "lazy, safe attribute not re-computed",
        )
        vr.lazy = "belly.5"
        with self.assertRaises(ValueError):
            vr.lazy = "dog collar.3"

        # changing 'must' must not re-trigger the cached lazy value.
        vr.must = "snout"
        self.assertEqual(vr.lazy, "belly.5")
        with self.assertRaises(AttributeError):
            vr.id = 2
        with self.assertRaises(ValueError):
            vr.must = None

        num[0] = -1
        vr = VariedRecord(must="ears")
        with self.assertRaises(ValueError):
            # test RO lazy value is computed late, and the result is
            # type checked
            vr.id

    def test_list_properties(self):
        """Test that List Properties can be created which are iterable"""
        class Item(Record):
            name = Property()

        class GroupingRecord(Record):
            members = ListProperty(of=Item)

        gr = GroupingRecord(members=[Item(name="bob"), Item(name="bill")])

        self.assertIsInstance(gr.members, ListCollection)
        self.assertIsInstance(gr.members[0], Item)
        members = list(gr.members)
        self.assertEqual(members[0].name, "bob")
        self.assertEqual(members[1].name, "bill")

        # Redefining Item shadows the first class; reusing the ambiguous
        # name for a new ListProperty must raise.
        class Item(Record):
            age = Property()

        with self.assertRaises(exc.PropertyNotUnique):
            class GR2(Record):
                members = ListProperty(of=Item)

    def test_customized_list_properties(self):
        """Test that list properties with custom collection behavior
        invoke such correctly"""
        class Eyetem(Record):
            name = Property()

        class CustomColl(ListCollection):
            @classmethod
            def coll_to_tuples(cls, values):
                # Custom hook: a comma-separated string becomes a series
                # of {'name': ...} dicts; anything else defers to the base.
                if isinstance(values, types.StringType):
                    values = values.split(',')
                    for i, v in zip(xrange(0, len(values)), values):
                        yield i, {'name': v}
                else:
                    for x in super(CustomColl, cls).coll_to_tuples(values):
                        yield x

        class GroupingRecord(Record):
            members = ListProperty(coll=CustomColl, of=Eyetem)

        # Instantiating with Python objects should still work...
        gr = GroupingRecord(members=[Eyetem(name="bob"), Eyetem(name="bill")])
        self.assertIsInstance(gr.members, ListCollection)
        self.assertIsInstance(gr.members[0], Eyetem)
        members = list(gr.members)
        self.assertEqual(members[0].name, "bob")
        self.assertEqual(members[1].name, "bill")

        # Instantiating from the dict should work as well, with custom
        # behavior
        gr = GroupingRecord({'members': 'bob,bill'})
        self.assertIsInstance(gr.members, ListCollection)
        self.assertIsInstance(gr.members[0], Eyetem)
        members = list(gr.members)
        self.assertEqual(members[0].name, "bob")
        self.assertEqual(members[1].name, "bill")

    def test_list_records(self):
        """Test that RecordList works"""
        class SingleThing(Record):
            name = Property()

        class ManyThingsRecord(RecordList):
            itemtype = SingleThing

        # note: must pass pre-coerced members to constructor.
        mtr = ManyThingsRecord(
            (SingleThing(name="bert"), SingleThing(name="phil"))
        )
        self.assertEqual(record_id(mtr[0]), ("bert",))
        self.assertEqual(record_id(mtr), (("bert",), ("phil",)))
        self.assertTrue(mtr.__getitem__)

        self.assertIsInstance(mtr, ManyThingsRecord)

        # test construction from generators
        def generator(seq):
            for x in seq:
                yield x

        ManyThingsRecord(generator(mtr))

        # ...iterators...
        ManyThingsRecord(mtr)

    def test_subclassing(self):
        """Test that Record subclasses work"""
        class Thing(Record):
            id = Property()

        class NamedThing(Thing):
            name = Property()

        NamedThing(id=123, name="adam")

    def test_property_meta_names(self):
        """Test the property metaclass creates new property names OK"""
        self.assertEqual(
            _merge_camel_case_names("MetaProperty", "SafeProperty"),
            "SafeMetaProperty",
        )
        self.assertEqual(
            _merge_camel_case_names("LazyListProperty", "SafeJsonProperty"),
            "SafeJsonLazyListProperty",
        )

    def test_property_mixin_ok(self):
        """Test that properties can be mixed in automagically"""
        class MyLittleProperty(Property):
            __trait__ = "mylittle"

            def __init__(self, pony_name=None, **kwargs):
                super(MyLittleProperty, self).__init__(**kwargs)

        # Passing pony_name= should auto-select the MyLittleProperty mixin.
        mlp = Property(pony_name="Applejack", isa=str)
        self.assertIsInstance(mlp, MyLittleProperty)
        self.assertIsInstance(mlp, SafeProperty)
        self.assertEqual(type(mlp).traits, ("mylittle", "safe"))

        lazypony = Property(pony_name="Persnickety", lazy=lambda: "x")
        self.assertEqual(type(lazypony).traits, ("lazy", "mylittle"))
        self.assertIsInstance(lazypony, MyLittleProperty)
        self.assertIsInstance(lazypony, LazyProperty)

    def test_property_mixin_exc(self):
        """Test that bad property mixes raise the right exceptions"""
        class SuperProperty(SafeProperty):
            __trait__ = "pony"

            def __init__(self, hero_name=None, **kwargs):
                super(SuperProperty, self).__init__(**kwargs)

        Property(hero_name="Bruce Wayne")
        # 'unsafe' conflicts with SuperProperty's SafeProperty base.
        with self.assertRaises(exc.PropertyTypeMixinNotPossible):
            Property(hero_name="Bruce Wayne", traits=['unsafe'])

    def test_make_property_type(self):
        """Test that make_property_type can morph types"""
        SimpleStrProperty = make_property_type(
            "FooProperty", isa=str,
        )
        ssp = SimpleStrProperty()
        self.assertEqual(ssp.valuetype, str)

    def test_isa_coerce_required(self):
        """Test various combinations of isa=, coerce=, required="""
        # should later add more tests for combinations including check= as
        # well

        def positive_int_or_none(x):
            return int(x) if int(x) > 0 else None

        class Mixed(Record):
            id = Property(required=True, isa=int, coerce=positive_int_or_none)
            num = Property(isa=int, coerce=positive_int_or_none)

            def get_what(self):
                return "I'm Mixed %d" % self.id
            what = Property(
                default=get_what, isa=int, lazy=True, required=True,
            )

            def get_hmm(self):
                return positive_int_or_none(self.what)
            hmm = Property(isa=int, required=True, lazy=True, default=get_hmm)

            def get_huh(self):
                return str(self.what)
            huh = Property(isa=int, required=True, lazy=True,
                           coerce=positive_int_or_none, default=get_huh)

        with self.assertRaisesRegexp(exc.ValueCoercionError, r'Mixed.id'):
            mixer = Mixed(id="-1")

        mixer = Mixed(id="1", num="-6")
        with self.assertRaises(AttributeError):
            mixer.num
        with self.assertRaises(TypeError):
            mixer.num = "-2"
        with self.assertRaises(TypeError):
            mixer.id = "-3"

        # a required lazy default that returns the wrong type raises on
        # every access (twice, to show the failure is not cached).
        for i in 1, 2:
            with self.assertRaises(TypeError):
                mixer.what
        mixer.what = 2
        self.assertEqual(mixer.what, 2)

        mixer.num = "3"
        self.assertEqual(mixer.num, 3)
        with self.assertRaises(TypeError):
            mixer.num = "-4"

        mixer.what = -5
        with self.assertRaises(TypeError):
            mixer.hmm
        with self.assertRaises(TypeError):
            mixer.huh

        mixer.what = 4
        self.assertEqual(mixer.hmm, 4)
        self.assertEqual(mixer.huh, 4)

    def test_list_of(self):
        # list_of= on plain Property should behave like ListProperty(of=).
        class Person(Record):
            name = Property()

        class Warfare(Record):
            proleteriat = Property(list_of=Person)
            bourgeois = ListProperty(of=Person)

        society = Warfare(
            proleteriat=[{"name": "Joe Bloggs"}],
            bourgeois=[{"name": "Richard B'stard"}],
        )
        self.assertIsInstance(society.proleteriat[0], Person)
        self.assertIsInstance(society.bourgeois[0], Person)

    def test_list_safety(self):
        """Test that ListProperty implies SafeProperty"""
        with self.assertRaises(exc.ListOfWhat):
            self.assertIsInstance(ListProperty(), SafeProperty)
        self.assertIsInstance(ListProperty(of=str), SafeProperty)
        self.assertIsInstance(ListProperty(of=Record), SafeProperty)

    def test_unknown_kwarg(self):
        # Unknown keyword arguments must name the offending key.
        with self.assertRaisesRegexp(TypeError, r"'yo_momma' of Property"):
            Property(yo_momma="so fat, when she sits around the house, "
                     "she really SITS AROUND THE HOUSE")
import mlp
import sys

# DEFINE CONSTANTS
###################
TEST_SEEDS = 0
TEST_FUNCTION = 1
VAL_INFINITY = 99999
TOLERANCE = 0.09
LEARNING_RATE = 0.1
# > Learning rate is multiplied by decay after each training step
LEARNING_DECAY = 1.0
MOMENTUM = 0.1
DATASET_TRAINING_RATE = 0.8
RANDOMIZE_INPUTS = True
VERBOSE = True
###################

# SCRIPT PARAMS
###################
TEST_TYPE = TEST_SEEDS
LAYERS = []
MAX_ITERATIONS = 100
if(TEST_TYPE == TEST_SEEDS):
    NET_INPUT = 7
    OUTPUT_NEURONS = 3
    NUM_TESTS = 210
    LAYERS = [2, 2, 3]
elif(TEST_TYPE == TEST_FUNCTION):
    NET_INPUT = 14
    OUTPUT_NEURONS = 1
    NUM_TESTS = 464
    LAYERS = [2, 4, 1]
###################


###################
# NORMALIZATION AND ARRAY MANIPULATION
###################
def norm_min_max(ds):
    """Min-max normalize every input column (and, for the function-fitting
    task, the target) of dataset ds == [inputs, targets] into [0, 1]."""
    min_val = [VAL_INFINITY] * len(ds[0][0])
    max_val = [-VAL_INFINITY] * len(ds[0][0])
    min_targ = VAL_INFINITY
    max_targ = -VAL_INFINITY
    output_dataset = [[], []]
    # First pass: find per-column min/max (and target min/max if regression).
    for index in range(0, len(ds[0])):
        inp = ds[0][index]
        targ = ds[1][index]
        for i in range(0, len(inp)):
            if(max_val[i] < inp[i]):
                max_val[i] = inp[i]
            if(min_val[i] > inp[i]):
                min_val[i] = inp[i]
        if(TEST_TYPE == TEST_FUNCTION):
            if(max_targ < targ):
                max_targ = targ
            if(min_targ > targ):
                min_targ = targ
    # Second pass: rescale every value to (x - min) / (max - min).
    for index in range(0, len(ds[0])):
        inp = ds[0][index]
        targ = ds[1][index]
        inputs = [0] * len(inp)
        for i in range(0, len(inp)):
            inputs[i] = (inp[i] - min_val[i]) / (max_val[i] - min_val[i])
        if(TEST_TYPE == TEST_FUNCTION):
            targ = (targ - min_targ) / (max_targ - min_targ)
        output_dataset[0].append(inputs)
        output_dataset[1].append(targ)
    return output_dataset


def canonical_array(arr):
    """Winner-take-all: in place, map the maximum element of arr to 1 and
    every smaller element to 0 (assumes positive activations)."""
    max_val = max(arr)
    for i in range(0, len(arr)):
        arr[i] = int(arr[i] / max_val)
    return arr


###################
# DATASET MANIPULATION
###################
def get_dataset():
    """Read NUM_TESTS whitespace-separated sample lines from stdin.

    Returns [inputs, labels]; the last field of each line is the label
    (one-hot encoded for the seeds classification task).
    """
    dataset = [[], []]
    for i in range(0, NUM_TESTS):
        # > Input processing
        line = raw_input()
        inputs = line.split()
        # BUG FIX: the original reused `i` here, clobbering the outer test
        # counter so the progress message below reported a wrong count.
        for j in range(0, len(inputs)):
            inputs[j] = float(inputs[j])
        if(TEST_TYPE == TEST_SEEDS):
            # .. The last element is the label of the given input
            label = int(inputs[-1])
            if(label == 1):
                label = [1, 0, 0]
            elif(label == 2):
                label = [0, 1, 0]
            elif(label == 3):
                label = [0, 0, 1]
        else:
            # .. The last element is the label of the given input
            label = float(inputs[-1])
        inputs = inputs[:-1]  # .. Remove the last element (the label)
        # > Split the dataset in training and test sets
        dataset[0].append(inputs)
        dataset[1].append(label)
        # > Debug Output
        sys.stdout.write("\r* Read: %d from %d inputs" % (i + 1, NUM_TESTS))
        sys.stdout.flush()
    print
    print("* Dataset size: %d" % (len(dataset[0])))
    return dataset


# > Return true if they are equal; false otherwise.
def compare_labels(l1, l2):
    """Element-wise comparison of two equally sized label arrays."""
    for i in range(0, len(l1)):
        if(l1[i] != l2[i]):
            return False
    return True


def can2class(canonical_array):
    """Return the index of the 1 in a one-hot label (the class number)."""
    for i in range(0, len(canonical_array)):
        if(canonical_array[i] == 1):
            return i


# > Finds the number of elements of each class on the dataset and splits
# .. them in proportionally with the given training rate.
def split_dataset(training_rate, dataset):
    """Split dataset into (training, test) sets.

    For classification the split is stratified per class; for regression
    it is a simple head/tail split at DATASET_TRAINING_RATE.
    """
    tr_dataset = [[], []]
    ts_dataset = [[], []]
    # > Gets the number of inputs of each class to split proportionally
    # .. to each class
    n_classes = OUTPUT_NEURONS
    class_count = [0] * n_classes
    if(TEST_TYPE == TEST_SEEDS):
        for index in range(0, len(dataset[0])):
            inpt = dataset[0][index]
            targ = dataset[1][index]
            # > The target class is the position of the first 1
            classification = can2class(targ)
            class_count[classification] += 1
        # > Split
        inserted_train_count = [0] * n_classes
        for index in range(0, len(dataset[0])):
            inpt = dataset[0][index]
            targ = dataset[1][index]
            # > The target class is the position of the first 1
            classification = can2class(targ)
            if(inserted_train_count[classification] <
                    (DATASET_TRAINING_RATE * class_count[classification])):
                tr_dataset[0].append(inpt)
                tr_dataset[1].append(targ)
                inserted_train_count[classification] += 1
            else:
                ts_dataset[0].append(inpt)
                ts_dataset[1].append(targ)
    else:
        inserted_train_count = 0
        size = len(dataset[0])
        for index in range(0, len(dataset[0])):
            inpt = dataset[0][index]
            targ = dataset[1][index]
            if(inserted_train_count < DATASET_TRAINING_RATE * size):
                tr_dataset[0].append(inpt)
                tr_dataset[1].append(targ)
                inserted_train_count += 1
            else:
                ts_dataset[0].append(inpt)
                ts_dataset[1].append(targ)
    # FIX: "lenght" typo in the user-facing message.
    print("* Train dataset length: %d \n* Test dataset length: %d" %
          (len(tr_dataset[0]), len(ts_dataset[0])))
    return tr_dataset, ts_dataset


###################
# MAIN FLOW
###################
def main():
    """Read, normalize and split the dataset, train the MLP, then report
    accuracy (classification) or average squared error (regression)."""
    net = mlp.MLP(NET_INPUT, LAYERS, LEARNING_RATE)

    print("=> Dataset reading")
    dataset = get_dataset()
    print

    print("=> Normalization step")
    dataset = norm_min_max(dataset)
    print

    print("=> Splitting dataset into training data and test data")
    training_dataset, test_dataset = split_dataset(
        DATASET_TRAINING_RATE, dataset)
    print

    print("=> Training")
    net.set_training_epochs(MAX_ITERATIONS)
    net.set_shuffle(RANDOMIZE_INPUTS)
    net.set_tolerance_error(TOLERANCE)
    net.set_verbose(VERBOSE)
    net.train_set(training_dataset[0], training_dataset[1])
    print("\n* Trained %d inputs" % len(training_dataset[0]))
    print

    print("=> Classification")
    k = 1
    success = 0
    error_list = []
    acum_error = 0
    for index in range(0, len(test_dataset[0])):
        inpt = test_dataset[0][index]
        targ = test_dataset[1][index]
        result = net.classify(inpt)
        if(TEST_TYPE == TEST_SEEDS):
            result = canonical_array(result)
            if(compare_labels(targ, result)):
                success += 1
            else:
                error_list.append(k)
        else:
            error = (targ - result[0]) ** 2
            acum_error += error
        k += 1
    print("* Tested %d inputs" % len(test_dataset[0]))
    if(TEST_TYPE == TEST_SEEDS):
        # BUG FIX: accuracy was divided by NUM_TESTS (the whole dataset)
        # although only the test split is classified, understating it.
        success_rate = (100 * success) / len(test_dataset[0])
        print("* Accuracy: %d%%" % success_rate)
    else:
        acum_error = acum_error / len(test_dataset[0])
        print("* Average Squared Error: %f" % acum_error)
    print
    return 0


###################
main()
# -*- coding: utf-8 -*-
"""
A set of convenient utilities for numerical work.

Most of this module requires Numerical Python or is meant to be used
with it. See http://www.pfdubois.com/numpy for details.

$Id: numutils.py 958 2005-12-27 23:17:51Z fperez $"""

#*****************************************************************************
#       Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#*****************************************************************************

# NOTE(review): Python-2-only, pre-NumPy code — it depends on the legacy
# `Numeric` package and uses `long` and the `2L` literal.

from IPython import Release
__author__ = '%s <%s>' % Release.authors['Fernando']
__license__ = Release.license

__all__ = ['sum_flat', 'mean_flat', 'rms_flat', 'base_repr', 'binary_repr',
           'amin', 'amax', 'amap', 'zeros_like', 'empty_like',
           'frange', 'diagonal_matrix', 'identity',
           'fromfunction_kw', 'log2', 'ispower2',
           'norm', 'l1norm', 'l2norm', 'exp_safe',
           'inf', 'infty', 'Infinity',
           'Numeric']

#****************************************************************************
# required modules
import __main__
import math
import operator
import sys

import Numeric

from Numeric import *

#*****************************************************************************
# Globals

# useful for testing infinities in results of array divisions (which don't
# raise an exception)

# Python, LaTeX and Mathematica names.
# Produced by deliberately dividing an array by zero (Numeric returns inf
# rather than raising) and taking the scalar out.
inf = infty = Infinity = (array([1])/0.0)[0]

#****************************************************************************
# function definitions

# Bounds beyond which math.exp under/overflows for IEEE-754 doubles.
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308


def exp_safe(x):
    """Compute exponentials which safely underflow to zero.

    Slow but convenient to use. Note that NumArray will introduce proper
    floating point exception handling with access to the underlying
    hardware."""
    if type(x) is ArrayType:
        # Clip the exponent into the representable range before exp().
        return exp(clip(x, exp_safe_MIN, exp_safe_MAX))
    else:
        return math.exp(x)


def amap(fn, *args):
    """amap(function, sequence[, sequence, ...]) -> array.

    Works like map(), but it returns an array. This is just a convenient
    shorthand for Numeric.array(map(...))"""
    return array(map(fn, *args))


def amin(m, axis=0):
    """amin(m,axis=0) returns the minimum of m along dimension axis.
    """
    return minimum.reduce(asarray(m), axis)


def amax(m, axis=0):
    """amax(m,axis=0) returns the maximum of m along dimension axis.
    """
    return maximum.reduce(asarray(m), axis)


def zeros_like(a):
    """Return an array of zeros of the shape and typecode of a.

    If you don't explicitly need the array to be zeroed, you should instead
    use empty_like(), which is faster as it only allocates memory."""
    return zeros(a.shape, a.typecode())


def empty_like(a):
    """Return an empty (uninitialized) array of the shape and typecode of a.

    Note that this does NOT initialize the returned array.  If you require
    your array to be initialized, you should use zeros_like().

    This requires Numeric.empty(), which appeared in Numeric 23.7."""
    return empty(a.shape, a.typecode())


def sum_flat(a):
    """Return the sum of all the elements of a, flattened out.

    It uses a.flat, and if a is not contiguous, a call to ravel(a) is made."""
    if a.iscontiguous():
        return Numeric.sum(a.flat)
    else:
        return Numeric.sum(ravel(a))


def mean_flat(a):
    """Return the mean of all the elements of a, flattened out."""
    return sum_flat(a)/float(size(a))


def rms_flat(a):
    """Return the root mean square of all the elements of a, flattened out."""
    return math.sqrt(sum_flat(absolute(a)**2)/float(size(a)))


def l1norm(a):
    """Return the l1 norm of a, flattened out.

    Implemented as a separate function (not a call to norm() for speed).

    Ref: http://mathworld.wolfram.com/L1-Norm.html"""
    return sum_flat(absolute(a))


def l2norm(a):
    """Return the l2 norm of a, flattened out.

    Implemented as a separate function (not a call to norm() for speed).

    Ref: http://mathworld.wolfram.com/L2-Norm.html"""
    return math.sqrt(sum_flat(absolute(a)**2))


def norm(a, p=2):
    """norm(a,p=2) -> l-p norm of a.flat

    Return the l-p norm of a, considered as a flat array.  This is NOT a true
    matrix norm, since arrays of arbitrary rank are always flattened.

    p can be a number or one of the strings ('inf','Infinity') to get the
    L-infinity norm.

    Ref: http://mathworld.wolfram.com/VectorNorm.html
         http://mathworld.wolfram.com/L-Infinity-Norm.html"""
    if p in ('inf', 'Infinity'):
        return max(absolute(a).flat)
    else:
        return (sum_flat(absolute(a)**p))**(1.0/p)


def frange(xini, xfin=None, delta=None, **kw):
    """frange([start,] stop[, step, keywords]) -> array of floats

    Return a Numeric array() containing a progression of floats. Similar to
    arange(), but defaults to a closed interval.

    frange(x0, x1) returns [x0, x0+1, x0+2, ..., x1]; start defaults to 0, and
    the endpoint *is included*. This behavior is different from that of
    range() and arange(). This is deliberate, since frange will probably be
    more useful for generating lists of points for function evaluation, and
    endpoints are often desired in this use. The usual behavior of range() can
    be obtained by setting the keyword 'closed=0', in this case frange()
    basically becomes arange().

    When step is given, it specifies the increment (or decrement). All
    arguments can be floating point numbers.

    frange(x0,x1,d) returns [x0,x0+d,x0+2d,...,xfin] where xfin<=x1.

    frange can also be called with the keyword 'npts'. This sets the number of
    points the list should contain (and overrides the value 'step' might have
    been given). arange() doesn't offer this option.

    Examples:
    >>> frange(3)
    array([ 0.,  1.,  2.,  3.])
    >>> frange(3,closed=0)
    array([ 0.,  1.,  2.])
    >>> frange(1,6,2)
    array([1, 3, 5])
    >>> frange(1,6.5,npts=5)
    array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    #defaults
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0

    # funny logic to allow the *first* argument to be optional (like range())
    # This was modified with a simpler version from a similar frange() found
    # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    # NOTE(review): `== None` should be `is None`; left as-is in this
    # documentation-only pass.
    if xfin == None:
        xfin = xini + 0.0
        xini = 0.0

    if delta == None:
        delta = 1.0

    # compute # of points, spacing and return final list
    try:
        npts = kw['npts']
        delta = (xfin-xini)/float(npts-endpoint)
    except KeyError:
        # round() gets npts right even with the vagaries of floating point.
        npts = int(round((xfin-xini)/delta+endpoint))

    return arange(npts)*delta+xini


def diagonal_matrix(diag):
    """Return square diagonal matrix whose non-zero elements are given by the
    input array."""
    return diag*identity(len(diag))


def identity(n, rank=2, typecode='l'):
    """identity(n,r) returns the identity matrix of shape (n,n,...,n) (rank r).

    For ranks higher than 2, this object is simply a multi-index Kronecker
    delta:
                        /  1  if i0=i1=...=iR,
    id[i0,i1,...,iR] = -|
                        \  0  otherwise.

    Optionally a typecode may be given (it defaults to 'l').

    Since rank defaults to 2, this function behaves in the default case (when
    only n is given) like the Numeric identity function."""
    iden = zeros((n,)*rank, typecode=typecode)
    for i in range(n):
        # Set every diagonal entry (i, i, ..., i) to 1.
        idx = (i,)*rank
        iden[idx] = 1
    return iden


def base_repr(number, base=2, padding=0):
    """Return the representation of a number in any given base."""
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number)/math.log(base))
    max_power = long(base) ** max_exponent
    lead_digit = int(number/max_power)
    # Peel off the leading digit and recurse on the remainder.
    return chars[lead_digit] + \
        base_repr(number - max_power * lead_digit, base,
                  max(padding - 1, max_exponent))


def binary_repr(number, max_length=1025):
    """Return the binary representation of the input number as a string.

    This is more efficient than using base_repr with base 2.

    Increase the value of max_length for very large numbers. Note that on
    32-bit machines, 2**1023 is the largest integer power of 2 which can be
    converted to a Python float."""
    assert number < 2L << max_length
    shifts = map(operator.rshift, max_length * [number],
                 range(max_length - 1, -1, -1))
    digits = map(operator.mod, shifts, max_length * [2])
    if not digits.count(1):
        # NOTE(review): returns the int 0 while every other path returns a
        # string — callers relying on the AssertionError/TypeError fallback
        # in log2() mask this; confirm before changing.
        return 0
    digits = digits[digits.index(1):]
    return ''.join(map(repr, digits)).replace('L', '')


def log2(x, ln2=math.log(2.0)):
    """Return the log(x) in base 2.

    This is a _slow_ function but which is guaranteed to return the correct
    integer value if the input is an integer exact power of 2."""
    try:
        bin_n = binary_repr(x)[1:]
    except (AssertionError, TypeError):
        return math.log(x)/ln2
    else:
        if '1' in bin_n:
            # Not an exact power of 2: fall back to floating point.
            return math.log(x)/ln2
        else:
            return len(bin_n)


def ispower2(n):
    """Returns the log base 2 of n if n is a power of 2, zero otherwise.

    Note the potential ambiguity if n==1: 2**0==1, interpret accordingly."""
    bin_n = binary_repr(n)[1:]
    if '1' in bin_n:
        return 0
    else:
        return len(bin_n)


def fromfunction_kw(function, dimensions, **kwargs):
    """Drop-in replacement for fromfunction() from Numerical Python.

    Allows passing keyword arguments to the desired function.

    Call it as (keywords are optional):
    fromfunction_kw(MyFunction, dimensions, keywords)

    The function MyFunction() is responsible for handling the dictionary of
    keywords it will receive."""
    return function(tuple(indices(dimensions)), **kwargs)

#**************************** end file <numutils.py> ************************
from __future__ import unicode_literals

import os.path
import optparse
import shlex
import sys

from .downloader.external import list_external_downloaders
from .compat import (
    compat_expanduser,
    compat_get_terminal_size,
    compat_getenv,
    compat_kwargs,
)
from .utils import (
    write_string,
)
from .version import __version__


def parseOpts(overrideArguments=None):
    """Build the youtube-dl command-line parser and parse the options.

    When overrideArguments is not None, that list is parsed instead of the
    combination of /etc/youtube-dl.conf, the user configuration file and
    sys.argv.  Returns a (parser, opts, args) tuple.
    """

    # Read one options file and split it shlex-style into an argv-like list.
    # Missing files are skipped silently by returning `default`.
    # NOTE(review): the mutable default=[] appears deliberate here — callers
    # pass default=None explicitly to detect a missing file.
    def _readOptions(filename_bytes, default=[]):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return default  # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    # Locate and read the per-user configuration, trying (in order) the
    # XDG config dir, ~/.config, %APPDATA% and finally ~/youtube-dl.conf*.
    def _readUserConf():
        xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
        if xdg_config_home:
            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
        else:
            userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
        userConf = _readOptions(userConfFile, None)

        if userConf is None:
            appdata_dir = compat_getenv('appdata')
            if appdata_dir:
                userConf = _readOptions(
                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
                    default=None)
                if userConf is None:
                    userConf = _readOptions(
                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
                        default=None)

        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
                default=None)
        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
                default=None)

        if userConf is None:
            userConf = []

        return userConf

    # Custom optparse formatter hook: render both the short and long form
    # of each option in the help output.
    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value():
            opts.append(' %s' % option.metavar)

        return "".join(opts)

    # optparse callback: store a comma-separated value as a list.
    def _comma_separated_values_options_callback(option, opt_str, value, parser):
        setattr(parser.values, option.dest, value.split(','))

    # Replace credential values with 'PRIVATE' before the argv list is
    # echoed in --verbose debug output.
    def _hide_login_info(opts):
        opts = list(opts)
        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
            try:
                i = opts.index(private_opt)
                opts[i + 1] = 'PRIVATE'
            except ValueError:
                pass
        return opts

    # No need to wrap help messages if we're on a wide console
    columns = compat_get_terminal_size().columns
    max_width = columns if columns else 80
    max_help_position = 80

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version': __version__,
        'formatter': fmt,
        'usage': '%prog [OPTIONS] URL [URL...]',
        'conflict_handler': 'resolve',
    }

    # conflict_handler='resolve' lets later groups redefine flags such as
    # -v (version vs. verbose) without optparse raising an error.
    parser = optparse.OptionParser(**compat_kwargs(kw))

    # ---- General options ----
    general = optparse.OptionGroup(parser, 'General Options')
    general.add_option(
        '-h', '--help',
        action='help',
        help='print this help text and exit')
    general.add_option(
        '-v', '--version',
        action='version',
        help='print program version and exit')
    general.add_option(
        '-U', '--update',
        action='store_true', dest='update_self',
        help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
    general.add_option(
        '-i', '--ignore-errors',
        action='store_true', dest='ignoreerrors', default=False,
        help='continue on download errors, for example to skip unavailable videos in a playlist')
    general.add_option(
        '--abort-on-error',
        action='store_false', dest='ignoreerrors',
        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
    general.add_option(
        '--dump-user-agent',
        action='store_true', dest='dump_user_agent', default=False,
        help='display the current browser identification')
    general.add_option(
        '--list-extractors',
        action='store_true', dest='list_extractors', default=False,
        help='List all supported extractors and the URLs they would handle')
    general.add_option(
        '--extractor-descriptions',
        action='store_true', dest='list_extractor_descriptions', default=False,
        help='Output descriptions of all supported extractors')
    general.add_option(
        '--default-search',
        dest='default_search', metavar='PREFIX',
        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
    general.add_option(
        '--ignore-config',
        action='store_true',
        help='Do not read configuration files. '
        'When given in the global configuration file /etc/youtube-dl.conf: '
        'Do not read the user configuration in ~/.config/youtube-dl/config '
        '(%APPDATA%/youtube-dl/config.txt on Windows)')
    general.add_option(
        '--flat-playlist',
        action='store_const', dest='extract_flat', const='in_playlist',
        default=False,
        help='Do not extract the videos of a playlist, only list them.')
    general.add_option(
        '--no-color', '--no-colors',
        action='store_true', dest='no_color',
        default=False,
        help='Do not emit color codes in output.')

    # ---- Network options ----
    network = optparse.OptionGroup(parser, 'Network Options')
    network.add_option(
        '--proxy', dest='proxy',
        default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
    network.add_option(
        '--socket-timeout',
        dest='socket_timeout', type=float, default=None, metavar='SECONDS',
        help='Time to wait before giving up, in seconds')
    network.add_option(
        '--source-address',
        metavar='IP', dest='source_address', default=None,
        help='Client-side IP address to bind to (experimental)',
    )
    network.add_option(
        '-4', '--force-ipv4',
        action='store_const', const='0.0.0.0', dest='source_address',
        help='Make all connections via IPv4 (experimental)',
    )
    network.add_option(
        '-6', '--force-ipv6',
        action='store_const', const='::', dest='source_address',
        help='Make all connections via IPv6 (experimental)',
    )
    network.add_option(
        '--cn-verification-proxy',
        dest='cn_verification_proxy', default=None, metavar='URL',
        help='Use this proxy to verify the IP address for some Chinese sites. '
        'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading. (experimental)'
    )

    # ---- Video selection options ----
    selection = optparse.OptionGroup(parser, 'Video Selection')
    selection.add_option(
        '--playlist-start',
        dest='playliststart', metavar='NUMBER', default=1, type=int,
        help='playlist video to start at (default is %default)')
    selection.add_option(
        '--playlist-end',
        dest='playlistend', metavar='NUMBER', default=None, type=int,
        help='playlist video to end at (default is last)')
    selection.add_option(
        '--playlist-items',
        dest='playlist_items', metavar='ITEM_SPEC', default=None,
        help='playlist video items to download. Specify indices of the videos in the playlist seperated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
    selection.add_option(
        '--match-title',
        dest='matchtitle', metavar='REGEX',
        help='download only matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--reject-title',
        dest='rejecttitle', metavar='REGEX',
        help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--max-downloads',
        dest='max_downloads', metavar='NUMBER', type=int, default=None,
        help='Abort after downloading NUMBER files')
    selection.add_option(
        '--min-filesize',
        metavar='SIZE', dest='min_filesize', default=None,
        help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--max-filesize',
        metavar='SIZE', dest='max_filesize', default=None,
        help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--date',
        metavar='DATE', dest='date', default=None,
        help='download only videos uploaded in this date')
    selection.add_option(
        '--datebefore',
        metavar='DATE', dest='datebefore', default=None,
        help='download only videos uploaded on or before this date (i.e. inclusive)')
    selection.add_option(
        '--dateafter',
        metavar='DATE', dest='dateafter', default=None,
        help='download only videos uploaded on or after this date (i.e. inclusive)')
    selection.add_option(
        '--min-views',
        metavar='COUNT', dest='min_views', default=None, type=int,
        help='Do not download any videos with less than COUNT views',)
    selection.add_option(
        '--max-views',
        metavar='COUNT', dest='max_views', default=None, type=int,
        help='Do not download any videos with more than COUNT views')
    selection.add_option(
        '--match-filter',
        metavar='FILTER', dest='match_filter', default=None,
        help=(
            '(Experimental) Generic video filter. '
            'Specify any key (see help for -o for a list of available keys) to'
            ' match if the key is present, '
            '!key to check if the key is not present,'
            'key > NUMBER (like "comment_count > 12", also works with '
            '>=, <, <=, !=, =) to compare against a number, and '
            '& to require multiple matches. '
            'Values which are not known are excluded unless you'
            ' put a question mark (?) after the operator.'
            'For example, to only match videos that have been liked more than '
            '100 times and disliked less than 50 times (or the dislike '
            'functionality is not available at the given service), but who '
            'also have a description, use --match-filter '
            '"like_count > 100 & dislike_count <? 50 & description" .'
        ))
    selection.add_option(
        '--no-playlist',
        action='store_true', dest='noplaylist', default=False,
        help='If the URL refers to a video and a playlist, download only the video.')
    selection.add_option(
        '--yes-playlist',
        action='store_false', dest='noplaylist', default=False,
        help='If the URL refers to a video and a playlist, download the playlist.')
    selection.add_option(
        '--age-limit',
        metavar='YEARS', dest='age_limit', default=None, type=int,
        help='download only videos suitable for the given age')
    selection.add_option(
        '--download-archive', metavar='FILE',
        dest='download_archive',
        help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
    selection.add_option(
        '--include-ads',
        dest='include_ads', action='store_true',
        help='Download advertisements as well (experimental)')

    # ---- Authentication options ----
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    authentication.add_option(
        '-u', '--username',
        dest='username', metavar='USERNAME',
        help='login with this account ID')
    authentication.add_option(
        '-p', '--password',
        dest='password', metavar='PASSWORD',
        help='account password. If this option is left out, youtube-dl will ask interactively.')
    authentication.add_option(
        '-2', '--twofactor',
        dest='twofactor', metavar='TWOFACTOR',
        help='two-factor auth code')
    authentication.add_option(
        '-n', '--netrc',
        action='store_true', dest='usenetrc', default=False,
        help='use .netrc authentication data')
    authentication.add_option(
        '--video-password',
        dest='videopassword', metavar='PASSWORD',
        help='video password (vimeo, smotri)')

    # ---- Format selection options ----
    video_format = optparse.OptionGroup(parser, 'Video Format Options')
    video_format.add_option(
        '-f', '--format',
        action='store', dest='format', metavar='FORMAT', default=None,
        help=(
            'video format code, specify the order of preference using'
            ' slashes, as in -f 22/17/18 . '
            ' Instead of format codes, you can select by extension for the '
            'extensions aac, m4a, mp3, mp4, ogg, wav, webm. '
            'You can also use the special names "best",'
            ' "bestvideo", "bestaudio", "worst". '
            ' You can filter the video results by putting a condition in'
            ' brackets, as in -f "best[height=720]"'
            ' (or -f "[filesize>10M]"). '
            ' This works for filesize, height, width, tbr, abr, vbr, asr, and fps'
            ' and the comparisons <, <=, >, >=, =, !='
            ' and for ext, acodec, vcodec, container, and protocol'
            ' and the comparisons =, != .'
            ' Formats for which the value is not known are excluded unless you'
            ' put a question mark (?) after the operator.'
            ' You can combine format filters, so '
            '-f "[height <=? 720][tbr>500]" '
            'selects up to 720p videos (or videos where the height is not '
            'known) with a bitrate of at least 500 KBit/s.'
            ' By default, youtube-dl will pick the best quality.'
            ' Use commas to download multiple audio formats, such as'
            ' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
            ' You can merge the video and audio of two formats into a single'
            ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
            ' avconv), for example -f bestvideo+bestaudio.'))
    video_format.add_option(
        '--all-formats',
        action='store_const', dest='format', const='all',
        help='download all available video formats')
    video_format.add_option(
        '--prefer-free-formats',
        action='store_true', dest='prefer_free_formats', default=False,
        help='prefer free video formats unless a specific one is requested')
    video_format.add_option(
        '--max-quality',
        action='store', dest='format_limit', metavar='FORMAT',
        help='highest quality format to download')
    video_format.add_option(
        '-F', '--list-formats',
        action='store_true', dest='listformats',
        help='list all available formats')
    video_format.add_option(
        '--youtube-include-dash-manifest',
        action='store_true', dest='youtube_include_dash_manifest', default=True,
        help=optparse.SUPPRESS_HELP)
    video_format.add_option(
        '--youtube-skip-dash-manifest',
        action='store_false', dest='youtube_include_dash_manifest',
        help='Do not download the DASH manifest on YouTube videos')
    video_format.add_option(
        '--merge-output-format',
        action='store', dest='merge_output_format', metavar='FORMAT', default=None,
        help=(
            'If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv.'
            'Ignored if no merge is required'))

    # ---- Subtitle options ----
    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
    subtitles.add_option(
        '--write-sub', '--write-srt',
        action='store_true', dest='writesubtitles', default=False,
        help='write subtitle file')
    subtitles.add_option(
        '--write-auto-sub', '--write-automatic-sub',
        action='store_true', dest='writeautomaticsub', default=False,
        help='write automatic subtitle file (youtube only)')
    subtitles.add_option(
        '--all-subs',
        action='store_true', dest='allsubtitles', default=False,
        help='downloads all the available subtitles of the video')
    subtitles.add_option(
        '--list-subs',
        action='store_true', dest='listsubtitles', default=False,
        help='lists all available subtitles for the video')
    subtitles.add_option(
        '--sub-format',
        action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
        help='subtitle format, accepts formats preference, for example: "ass/srt/best"')
    subtitles.add_option(
        '--sub-lang', '--sub-langs', '--srt-lang',
        action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
        default=[], callback=_comma_separated_values_options_callback,
        help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')

    # ---- Download options ----
    downloader = optparse.OptionGroup(parser, 'Download Options')
    downloader.add_option(
        '-r', '--rate-limit',
        dest='ratelimit', metavar='LIMIT',
        help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
    downloader.add_option(
        '-R', '--retries',
        dest='retries', metavar='RETRIES', default=10,
        help='number of retries (default is %default), or "infinite".')
    downloader.add_option(
        '--buffer-size',
        dest='buffersize', metavar='SIZE', default='1024',
        help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
    downloader.add_option(
        '--no-resize-buffer',
        action='store_true', dest='noresizebuffer', default=False,
        help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
    downloader.add_option(
        '--test',
        action='store_true', dest='test', default=False,
        help=optparse.SUPPRESS_HELP)
    downloader.add_option(
        '--playlist-reverse',
        action='store_true',
        help='Download playlist videos in reverse order')
    downloader.add_option(
        '--xattr-set-filesize',
        dest='xattr_set_filesize', action='store_true',
        help='(experimental) set file xattribute ytdl.filesize with expected filesize')
    downloader.add_option(
        '--hls-prefer-native',
        dest='hls_prefer_native', action='store_true',
        help='(experimental) Use the native HLS downloader instead of ffmpeg.')
    downloader.add_option(
        '--external-downloader',
        dest='external_downloader', metavar='COMMAND',
        help='Use the specified external downloader. '
             'Currently supports %s' % ','.join(list_external_downloaders()))
    downloader.add_option(
        '--external-downloader-args',
        dest='external_downloader_args', metavar='ARGS',
        help='Give these arguments to the external downloader.')

    # ---- Workaround options ----
    workarounds = optparse.OptionGroup(parser, 'Workarounds')
    workarounds.add_option(
        '--encoding',
        dest='encoding', metavar='ENCODING',
        help='Force the specified encoding (experimental)')
    workarounds.add_option(
        '--no-check-certificate',
        action='store_true', dest='no_check_certificate', default=False,
        help='Suppress HTTPS certificate validation.')
    workarounds.add_option(
        '--prefer-insecure',
        '--prefer-unsecure', action='store_true', dest='prefer_insecure',
        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
    workarounds.add_option(
        '--user-agent',
        metavar='UA', dest='user_agent',
        help='specify a custom user agent')
    workarounds.add_option(
        '--referer',
        metavar='URL', dest='referer', default=None,
        help='specify a custom referer, use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header',
        metavar='FIELD:VALUE', dest='headers', action='append',
        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
    )
    workarounds.add_option(
        '--bidi-workaround',
        dest='bidi_workaround', action='store_true',
        help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
    workarounds.add_option(
        '--sleep-interval', metavar='SECONDS',
        dest='sleep_interval', type=float,
        help='Number of seconds to sleep before each download.')

    # ---- Verbosity / simulation options ----
    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
    verbosity.add_option(
        '-q', '--quiet',
        action='store_true', dest='quiet', default=False,
        help='activates quiet mode')
    verbosity.add_option(
        '--no-warnings',
        dest='no_warnings', action='store_true', default=False,
        help='Ignore warnings')
    verbosity.add_option(
        '-s', '--simulate',
        action='store_true', dest='simulate', default=False,
        help='do not download the video and do not write anything to disk',)
    verbosity.add_option(
        '--skip-download',
        action='store_true', dest='skip_download', default=False,
        help='do not download the video',)
    verbosity.add_option(
        '-g', '--get-url',
        action='store_true', dest='geturl', default=False,
        help='simulate, quiet but print URL')
    verbosity.add_option(
        '-e', '--get-title',
        action='store_true', dest='gettitle', default=False,
        help='simulate, quiet but print title')
    verbosity.add_option(
        '--get-id',
        action='store_true', dest='getid', default=False,
        help='simulate, quiet but print id')
    verbosity.add_option(
        '--get-thumbnail',
        action='store_true', dest='getthumbnail', default=False,
        help='simulate, quiet but print thumbnail URL')
    verbosity.add_option(
        '--get-description',
        action='store_true', dest='getdescription', default=False,
        help='simulate, quiet but print video description')
    verbosity.add_option(
        '--get-duration',
        action='store_true', dest='getduration', default=False,
        help='simulate, quiet but print video length')
    verbosity.add_option(
        '--get-filename',
        action='store_true', dest='getfilename', default=False,
        help='simulate, quiet but print output filename')
    verbosity.add_option(
        '--get-format',
        action='store_true', dest='getformat', default=False,
        help='simulate, quiet but print output format')
    verbosity.add_option(
        '-j', '--dump-json',
        action='store_true', dest='dumpjson', default=False,
        help='simulate, quiet but print JSON information. See --output for a description of available keys.')
    verbosity.add_option(
        '-J', '--dump-single-json',
        action='store_true', dest='dump_single_json', default=False,
        help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
    verbosity.add_option(
        '--print-json',
        action='store_true', dest='print_json', default=False,
        help='Be quiet and print the video information as JSON (video is still being downloaded).',
    )
    verbosity.add_option(
        '--newline',
        action='store_true', dest='progress_with_newline', default=False,
        help='output progress bar as new lines')
    verbosity.add_option(
        '--no-progress',
        action='store_true', dest='noprogress', default=False,
        help='do not print progress bar')
    verbosity.add_option(
        '--console-title',
        action='store_true', dest='consoletitle', default=False,
        help='display progress in console titlebar')
    verbosity.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='print various debugging information')
    verbosity.add_option(
        '--dump-pages', '--dump-intermediate-pages',
        action='store_true', dest='dump_intermediate_pages', default=False,
        help='print downloaded pages to debug problems (very verbose)')
    verbosity.add_option(
        '--write-pages',
        action='store_true', dest='write_pages', default=False,
        help='Write downloaded intermediary pages to files in the current directory to debug problems')
    verbosity.add_option(
        '--youtube-print-sig-code',
        action='store_true', dest='youtube_print_sig_code', default=False,
        help=optparse.SUPPRESS_HELP)
    verbosity.add_option(
        '--print-traffic', '--dump-headers',
        dest='debug_printtraffic', action='store_true', default=False,
        help='Display sent and read HTTP traffic')
    verbosity.add_option(
        '-C', '--call-home',
        dest='call_home', action='store_true', default=False,
        help='Contact the youtube-dl server for debugging.')
    verbosity.add_option(
        '--no-call-home',
        dest='call_home', action='store_false', default=False,
        help='Do NOT contact the youtube-dl server for debugging.')

    # ---- Filesystem options ----
    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
    filesystem.add_option(
        '-a', '--batch-file',
        dest='batchfile', metavar='FILE',
        help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option(
        '--id', default=False,
        action='store_true', dest='useid', help='use only video ID in file name')
    filesystem.add_option(
        '-o', '--output',
        dest='outtmpl', metavar='TEMPLATE',
        help=('output filename template. Use %(title)s to get the title, '
              '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
              '%(autonumber)s to get an automatically incremented number, '
              '%(ext)s for the filename extension, '
              '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
              '%(upload_date)s for the upload date (YYYYMMDD), '
              '%(extractor)s for the provider (youtube, metacafe, etc), '
              '%(id)s for the video id, '
              '%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
              '%(playlist_index)s for the position in the playlist. '
              '%(height)s and %(width)s for the width and height of the video format. '
              '%(resolution)s for a textual description of the resolution of the video format. '
              '%% for a literal percent. '
              'Use - to output to stdout. Can also be used to download to a different directory, '
              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option(
        '--autonumber-size',
        dest='autonumber_size', metavar='NUMBER',
        help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option(
        '--restrict-filenames',
        action='store_true', dest='restrictfilenames', default=False,
        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
    filesystem.add_option(
        '-A', '--auto-number',
        action='store_true', dest='autonumber', default=False,
        help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000')
    filesystem.add_option(
        '-t', '--title',
        action='store_true', dest='usetitle', default=False,
        help='[deprecated] use title in file name (default)')
    filesystem.add_option(
        '-l', '--literal', default=False,
        action='store_true', dest='usetitle',
        help='[deprecated] alias of --title')
    filesystem.add_option(
        '-w', '--no-overwrites',
        action='store_true', dest='nooverwrites', default=False,
        help='do not overwrite files')
    filesystem.add_option(
        '-c', '--continue',
        action='store_true', dest='continue_dl', default=True,
        help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
    filesystem.add_option(
        '--no-continue',
        action='store_false', dest='continue_dl',
        help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option(
        '--no-part',
        action='store_true', dest='nopart', default=False,
        help='do not use .part files - write directly into output file')
    filesystem.add_option(
        '--no-mtime',
        action='store_false', dest='updatetime', default=True,
        help='do not use the Last-modified header to set the file modification time')
    filesystem.add_option(
        '--write-description',
        action='store_true', dest='writedescription', default=False,
        help='write video description to a .description file')
    filesystem.add_option(
        '--write-info-json',
        action='store_true', dest='writeinfojson', default=False,
        help='write video metadata to a .info.json file')
    filesystem.add_option(
        '--write-annotations',
        action='store_true', dest='writeannotations', default=False,
        help='write video annotations to a .annotation file')
    filesystem.add_option(
        '--load-info',
        dest='load_info_filename', metavar='FILE',
        help='json file containing the video information (created with the "--write-json" option)')
    filesystem.add_option(
        '--cookies',
        dest='cookiefile', metavar='FILE',
        help='file to read cookies from and dump cookie jar in')
    filesystem.add_option(
        '--cache-dir', dest='cachedir', default=None, metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
    filesystem.add_option(
        '--no-cache-dir', action='store_const', const=False, dest='cachedir',
        help='Disable filesystem caching')
    filesystem.add_option(
        '--rm-cache-dir',
        action='store_true', dest='rm_cachedir',
        help='Delete all filesystem cache files')

    # ---- Thumbnail options ----
    thumbnail = optparse.OptionGroup(parser, 'Thumbnail images')
    thumbnail.add_option(
        '--write-thumbnail',
        action='store_true', dest='writethumbnail', default=False,
        help='write thumbnail image to disk')
    thumbnail.add_option(
        '--write-all-thumbnails',
        action='store_true', dest='write_all_thumbnails', default=False,
        help='write all thumbnail image formats to disk')
    thumbnail.add_option(
        '--list-thumbnails',
        action='store_true', dest='list_thumbnails', default=False,
        help='Simulate and list all available thumbnail formats')

    # ---- Post-processing options ----
    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
    postproc.add_option(
        '-x', '--extract-audio',
        action='store_true', dest='extractaudio', default=False,
        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option(
        '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
        help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
    postproc.add_option(
        '--audio-quality', metavar='QUALITY',
        dest='audioquality', default='5',
        help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
    postproc.add_option(
        '--recode-video',
        metavar='FORMAT', dest='recodevideo', default=None,
        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
    postproc.add_option(
        '-k', '--keep-video',
        action='store_true', dest='keepvideo', default=False,
        help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option(
        '--no-post-overwrites',
        action='store_true', dest='nopostoverwrites', default=False,
        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option(
        '--embed-subs',
        action='store_true', dest='embedsubtitles', default=False,
        help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option(
        '--embed-thumbnail',
        action='store_true', dest='embedthumbnail', default=False,
        help='embed thumbnail in the audio as cover art')
    postproc.add_option(
        '--add-metadata',
        action='store_true', dest='addmetadata', default=False,
        help='write metadata to the video file')
    postproc.add_option(
        '--metadata-from-title',
        metavar='FORMAT', dest='metafromtitle',
        help='parse additional metadata like song title / artist from the video title. '
             'The format syntax is the same as --output, '
             'the parsed parameters replace existing values. '
             'Additional templates: %(album), %(artist). '
             'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
             '"Coldplay - Paradise"')
    postproc.add_option(
        '--xattrs',
        action='store_true', dest='xattrs', default=False,
        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option(
        '--fixup',
        metavar='POLICY', dest='fixup', default='detect_or_warn',
        help='Automatically correct known faults of the file. '
             'One of never (do nothing), warn (only emit a warning), '
             'detect_or_warn(the default; fix file if we can, warn otherwise)')
    postproc.add_option(
        '--prefer-avconv',
        action='store_false', dest='prefer_ffmpeg',
        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
    postproc.add_option(
        '--prefer-ffmpeg',
        action='store_true', dest='prefer_ffmpeg',
        help='Prefer ffmpeg over avconv for running the postprocessors')
    postproc.add_option(
        '--ffmpeg-location', '--avconv-location', metavar='PATH',
        dest='ffmpeg_location',
        help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
    postproc.add_option(
        '--exec',
        metavar='CMD', dest='exec_cmd',
        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
    postproc.add_option(
        '--convert-subtitles', '--convert-subs',
        metavar='FORMAT', dest='convertsubtitles', default=None,
        help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')

    # Attach the groups in the order they should appear in --help output
    # (which intentionally differs from the definition order above).
    parser.add_option_group(general)
    parser.add_option_group(network)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(thumbnail)
    parser.add_option_group(verbosity)
    parser.add_option_group(workarounds)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        # Caller-supplied argv: skip all configuration files.
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        command_line_conf = sys.argv[1:]
        # Workaround for Python 2.x, where argv is a byte list
        if sys.version_info < (3,):
            command_line_conf = [
                a.decode('utf-8', 'replace') for a in command_line_conf]

        # --ignore-config on the command line disables both config files;
        # when it appears only in the system config, just the user config
        # is skipped (matching the --ignore-config help text above).
        if '--ignore-config' in command_line_conf:
            system_conf = []
            user_conf = []
        else:
            system_conf = _readOptions('/etc/youtube-dl.conf')
            if '--ignore-config' in system_conf:
                user_conf = []
            else:
                user_conf = _readUserConf()
        # Later sources take precedence: system < user < command line.
        argv = system_conf + user_conf + command_line_conf
        opts, args = parser.parse_args(argv)

        if opts.verbose:
            write_string('[debug] System config: ' + repr(_hide_login_info(system_conf)) + '\n')
            write_string('[debug] User config: ' + repr(_hide_login_info(user_conf)) + '\n')
            write_string('[debug] Command-line args: ' + repr(_hide_login_info(command_line_conf)) + '\n')

    return parser, opts, args
import base64 import asyncio from decimal import Decimal from asynctest import patch, CoroutineMock import pytest import gdax from tests.helpers import AsyncContextManagerMock, \ AsyncContextManagerMockPagination, generate_id @pytest.yield_fixture def event_loop(): """Create an instance of the default event loop for each test case.""" policy = asyncio.get_event_loop_policy() res = policy.new_event_loop() asyncio.set_event_loop(res) res._close = res.close res.close = lambda: None yield res res._close() @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) @pytest.mark.usefixtures('event_loop') @pytest.mark.asyncio class TestPublicClient: def init(self): self.client = gdax.trader.Trader() async def test_get_products(self, mock_get): products = [ { "id": "LTC-EUR", "base_currency": "LTC", "quote_currency": "EUR", "base_min_size": "0.01", "base_max_size": "1000000", "quote_increment": "0.01", "display_name": "LTC/EUR" }, { "id": "LTC-BTC", "base_currency": "LTC", "quote_currency": "BTC", "base_min_size": "0.01", "base_max_size": "1000000", "quote_increment": "0.00001", "display_name": "LTC/BTC" }, { "id": "BTC-GBP", "base_currency": "BTC", "quote_currency": "GBP", "base_min_size": "0.01", "base_max_size": "250", "quote_increment": "0.01", "display_name": "BTC/GBP" }, { "id": "BTC-EUR", "base_currency": "BTC", "quote_currency": "EUR", "base_min_size": "0.01", "base_max_size": "250", "quote_increment": "0.01", "display_name": "BTC/EUR" }, { "id": "ETH-EUR", "base_currency": "ETH", "quote_currency": "EUR", "base_min_size": "0.01", "base_max_size": "5000", "quote_increment": "0.01", "display_name": "ETH/EUR" }, { "id": "ETH-BTC", "base_currency": "ETH", "quote_currency": "BTC", "base_min_size": "0.01", "base_max_size": "5000", "quote_increment": "0.00001", "display_name": "ETH/BTC" }, { "id": "LTC-USD", "base_currency": "LTC", "quote_currency": "USD", "base_min_size": "0.01", "base_max_size": "1000000", "quote_increment": "0.01", "display_name": 
"LTC/USD" }, { "id": "BTC-USD", "base_currency": "BTC", "quote_currency": "USD", "base_min_size": "0.01", "base_max_size": "250", "quote_increment": "0.01", "display_name": "BTC/USD" }, { "id": "ETH-USD", "base_currency": "ETH", "quote_currency": "USD", "base_min_size": "0.01", "base_max_size": "5000", "quote_increment": "0.01", "display_name": "ETH/USD" } ] expected_products = [ { "id": "LTC-EUR", "base_currency": "LTC", "quote_currency": "EUR", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("1000000"), "quote_increment": Decimal("0.01"), "display_name": "LTC/EUR" }, { "id": "LTC-BTC", "base_currency": "LTC", "quote_currency": "BTC", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("1000000"), "quote_increment": Decimal("0.00001"), "display_name": "LTC/BTC" }, { "id": "BTC-GBP", "base_currency": "BTC", "quote_currency": "GBP", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("250"), "quote_increment": Decimal("0.01"), "display_name": "BTC/GBP" }, { "id": "BTC-EUR", "base_currency": "BTC", "quote_currency": "EUR", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("250"), "quote_increment": Decimal("0.01"), "display_name": "BTC/EUR" }, { "id": "ETH-EUR", "base_currency": "ETH", "quote_currency": "EUR", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("5000"), "quote_increment": Decimal("0.01"), "display_name": "ETH/EUR" }, { "id": "ETH-BTC", "base_currency": "ETH", "quote_currency": "BTC", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("5000"), "quote_increment": Decimal("0.00001"), "display_name": "ETH/BTC" }, { "id": "LTC-USD", "base_currency": "LTC", "quote_currency": "USD", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("1000000"), "quote_increment": Decimal("0.01"), "display_name": "LTC/USD" }, { "id": "BTC-USD", "base_currency": "BTC", "quote_currency": "USD", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("250"), "quote_increment": Decimal("0.01"), "display_name": "BTC/USD" 
}, { "id": "ETH-USD", "base_currency": "ETH", "quote_currency": "USD", "base_min_size": Decimal("0.01"), "base_max_size": Decimal("5000"), "quote_increment": Decimal("0.01"), "display_name": "ETH/USD" } ] mock_get.return_value.aenter.json = CoroutineMock( return_value=products) self.init() r = await self.client.get_products() assert r == expected_products async def test_get_product_ticker(self, mock_get): ticker = { "trade_id": 17429442, "price": "2483.64000000", "size": "0.80809483", "bid": "2481.18", "ask": "2483.61", "volume": "13990.13083225", "time": "2017-06-26T06:29:06.993000Z" } expected_ticker = { "trade_id": 17429442, "price": Decimal("2483.64000000"), "size": Decimal("0.80809483"), "bid": Decimal("2481.18"), "ask": Decimal("2483.61"), "volume": Decimal("13990.13083225"), "time": "2017-06-26T06:29:06.993000Z" } mock_get.return_value.aenter.json = CoroutineMock(return_value=ticker) self.init() r = await self.client.get_product_ticker('BTC-USD') assert r == expected_ticker async def test_get_product_trades(self, mock_get): trades = [ { "time": "2017-06-26T06:32:53.79Z", "trade_id": 17429512, "price": "2479.98000000", "size": "0.01997424", "side": "sell" }, { "time": "2017-06-26T06:32:24.113Z", "trade_id": 17429508, "price": "2479.97000000", "size": "0.54415961", "side": "buy" } ] expected_trades = [ { "time": "2017-06-26T06:32:53.79Z", "trade_id": 17429512, "price": Decimal("2479.98000000"), "size": Decimal("0.01997424"), "side": "sell" }, { "time": "2017-06-26T06:32:24.113Z", "trade_id": 17429508, "price": Decimal("2479.97000000"), "size": Decimal("0.54415961"), "side": "buy" } ] mock_get.return_value.aenter.json = CoroutineMock(return_value=trades) self.init() r = await self.client.get_product_trades('BTC-USD') assert r == expected_trades async def test_get_product_order_book(self, mock_get): orderbook = { "sequence": 3424558479, "bids": [ [ "2483.8", "0.01", 1 ] ], "asks": [ [ "2486.28", "0.01455", 1 ] ] } expected_orderbook = { "sequence": 3424558479, 
"bids": [ [ Decimal("2483.8"), Decimal("0.01"), 1 ] ], "asks": [ [ Decimal("2486.28"), Decimal("0.01455"), 1 ] ] } mock_get.return_value.aenter.json = CoroutineMock( return_value=orderbook) self.init() r = await self.client.get_product_order_book('BTC-USD') assert r == expected_orderbook orderbook = { "sequence": 3424562473, "bids": [ [ "2483.99", "0.01", 1 ], [ "2483.98", "0.9798", 5 ] ], "asks": [ [ "2486.48", "1.65567931", 1 ], [ "2487.72", "0.03", 3 ] ] } expected_orderbook = { "sequence": 3424562473, "bids": [ [ Decimal("2483.99"), Decimal("0.01"), 1 ], [ Decimal("2483.98"), Decimal("0.9798"), 5 ] ], "asks": [ [ Decimal("2486.48"), Decimal("1.65567931"), 1 ], [ Decimal("2487.72"), Decimal("0.03"), 3 ] ] } mock_get.return_value.aenter.json = CoroutineMock( return_value=orderbook) r = await self.client.get_product_order_book('BTC-USD', level=2) assert r == expected_orderbook id1, id2, id3, id4 = (generate_id() for _ in range(4)) orderbook = { "sequence": 3424562473, "bids": [ [ "2483.99", "0.01", id1 ], [ "2483.98", "0.9798", id2 ] ], "asks": [ [ "2486.48", "1.65567931", id3 ], [ "2487.72", "0.03", id4 ] ] } expected_orderbook = { "sequence": 3424562473, "bids": [ [ Decimal("2483.99"), Decimal("0.01"), id1 ], [ Decimal("2483.98"), Decimal("0.9798"), id2 ] ], "asks": [ [ Decimal("2486.48"), Decimal("1.65567931"), id3 ], [ Decimal("2487.72"), Decimal("0.03"), id4 ] ] } mock_get.return_value.aenter.json = CoroutineMock( return_value=orderbook) r = await self.client.get_product_order_book('BTC-USD', level=3) assert r == expected_orderbook async def test_get_product_historic_rates(self, mock_get): rates = [ [ 1498459140, 2488.79, 2489.96, 2489.47, 2489.96, 9.332934549999997 ], [ 1498459080, 2486.24, 2489.97, 2486.24, 2489.96, 6.937264829999997 ], ] expected_rates = [ [ 1498459140, Decimal('2488.79'), Decimal('2489.96'), Decimal('2489.47'), Decimal('2489.96'), Decimal('9.332934549999997') ], [ 1498459080, Decimal('2486.24'), Decimal('2489.97'), Decimal('2486.24'), 
Decimal('2489.96'), Decimal('6.937264829999997') ], ] mock_get.return_value.aenter.json = CoroutineMock(return_value=rates) self.init() r = await self.client.get_product_historic_rates('BTC-USD') assert r == expected_rates async def test_get_product_24hr_stats(self, mock_get): stats = { "open": "2586.26000000", "high": "2625.00000000", "low": "2430.05000000", "volume": "14063.90737841", "last": "2489.89000000", "volume_30day": "568418.24079392" } expected_stats = { "open": Decimal("2586.26000000"), "high": Decimal("2625.00000000"), "low": Decimal("2430.05000000"), "volume": Decimal("14063.90737841"), "last": Decimal("2489.89000000"), "volume_30day": Decimal("568418.24079392") } mock_get.return_value.aenter.json = CoroutineMock(return_value=stats) self.init() r = await self.client.get_product_24hr_stats('BTC-USD') assert r == expected_stats async def test_get_currencies(self, mock_get): currencies = [ { "id": "BTC", "name": "Bitcoin", "min_size": "0.00000001" }, { "id": "EUR", "name": "Euro", "min_size": "0.01000000" }, { "id": "LTC", "name": "Litecoin", "min_size": "0.00000001" }, { "id": "GBP", "name": "British Pound", "min_size": "0.01000000" }, { "id": "USD", "name": "United States Dollar", "min_size": "0.01000000" }, { "id": "ETH", "name": "Ether", "min_size": "0.00000001" } ] expected_currencies = [ { "id": "BTC", "name": "Bitcoin", "min_size": Decimal("0.00000001") }, { "id": "EUR", "name": "Euro", "min_size": Decimal("0.01000000") }, { "id": "LTC", "name": "Litecoin", "min_size": Decimal("0.00000001") }, { "id": "GBP", "name": "British Pound", "min_size": Decimal("0.01000000") }, { "id": "USD", "name": "United States Dollar", "min_size": Decimal("0.01000000") }, { "id": "ETH", "name": "Ether", "min_size": Decimal("0.00000001") } ] mock_get.return_value.aenter.json = CoroutineMock( return_value=currencies) self.init() r = await self.client.get_currencies() assert r == expected_currencies async def test_get_time(self, mock_get): r_time = {'iso': 
'2017-06-26T06:47:55.168Z', 'epoch': 1498459675.168} mock_get.return_value.aenter.json = CoroutineMock(return_value=r_time) self.init() r = await self.client.get_time() assert r == r_time @pytest.mark.usefixtures('event_loop') @pytest.mark.asyncio class TestPublicClientNotAuthenticated: def init(self): self.client = gdax.trader.Trader() async def test_get_account(self): self.init() with pytest.raises(AssertionError): await self.client.get_account() async def test_get_account_history(self): self.init() with pytest.raises(AssertionError): await self.client.get_account_history('account_id') async def test_get_account_holds(self): self.init() with pytest.raises(AssertionError): await self.client.get_account_holds('account_id') async def test_buy(self): self.init() with pytest.raises(AssertionError): await self.client.buy(product_id='product_id') async def test_sell(self): self.init() with pytest.raises(AssertionError): await self.client.sell(product_id='product_id') async def test_cancel_order(self): self.init() with pytest.raises(AssertionError): await self.client.cancel_order('order_id') async def test_cancel_all(self): self.init() with pytest.raises(AssertionError): await self.client.cancel_all('product_id') async def test_get_order(self): self.init() with pytest.raises(AssertionError): await self.client.get_order('order_id') async def test_get_orders(self): self.init() with pytest.raises(AssertionError): await self.client.get_orders() async def test_get_fills(self): self.init() with pytest.raises(AssertionError): await self.client.get_fills() async def test_get_fundings(self): self.init() with pytest.raises(AssertionError): await self.client.get_fundings('status') async def test_repay_funding(self): self.init() with pytest.raises(AssertionError): await self.client.repay_funding(Decimal('10'), 'USD') async def test_margin_transfer(self): self.init() with pytest.raises(AssertionError): await self.client.margin_transfer('id', 'deposit', 'USD', Decimal('10')) async def 
test_get_position(self): self.init() with pytest.raises(AssertionError): await self.client.get_position() async def test_close_position(self): self.init() with pytest.raises(AssertionError): await self.client.close_position() async def test_deposit(self): self.init() with pytest.raises(AssertionError): await self.client.deposit(Decimal('10'), 'USD', 'id') async def test_coinbase_deposit(self): self.init() with pytest.raises(AssertionError): await self.client.coinbase_deposit(Decimal('10'), 'USD', 'id') async def test_withdraw(self): self.init() with pytest.raises(AssertionError): await self.client.withdraw(Decimal('10'), 'USD', 'id') async def test_coinbase_withdraw(self): self.init() with pytest.raises(AssertionError): await self.client.coinbase_withdraw(Decimal('10'), 'USD', 'id') async def test_crypto_withdraw(self): self.init() with pytest.raises(AssertionError): await self.client.crypto_withdraw(Decimal('10'), 'USD', 'addr') async def test_get_payment_methods(self): self.init() with pytest.raises(AssertionError): await self.client.get_payment_methods() async def test_get_coinbase_accounts(self): self.init() with pytest.raises(AssertionError): await self.client.get_coinbase_accounts() async def test_create_report(self): self.init() with pytest.raises(AssertionError): await self.client.create_report('fills', 'start', 'end') async def test_get_report(self): self.init() with pytest.raises(AssertionError): await self.client.get_report('report_id') async def test_get_trailing_volume(self): self.init() with pytest.raises(AssertionError): await self.client.get_trailing_volume() def test_auth_headers(mocker): client = gdax.trader.Trader( api_key='a', api_secret=base64.b64encode(b'a' * 64), passphrase='b', ) path = '/test' method = 'DELETE' body = 'hello' timestamp = '1493343391.076892' mocker.patch('time.time', return_value=timestamp) auth_headers = client._auth_headers(path, method, body) expected_auth_headers = { 'Content-Type': 'application/json', 'CB-ACCESS-SIGN': 
'a7ailLNCPtunAmPW4JlpJT02rSLtXP9O6JnEU+wSVMs=', 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': 'a', 'CB-ACCESS-PASSPHRASE': 'b', } assert auth_headers == expected_auth_headers @pytest.mark.asyncio class TestAuthClient(object): def init(self): self.client = gdax.trader.Trader( api_key='a', api_secret=base64.b64encode(b'a' * 64), passphrase='b', ) @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_account(self, mock_get): mock_get.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.get_account() assert type(r) is dict @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_account_history(self, mock_get): mock_get.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.get_account_history('id') assert type(r) is list @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_account_holds(self, mock_get): mock_get.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.get_account_holds('id') assert type(r) is list @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_buy(self, mock_post): message = { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": "0.10000000", "size": "0.01000000", "product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": "0.0000000000000000", "filled_size": "0.00000000", "executed_value": "0.0000000000000000", "status": "pending", "settled": False } expected_message = { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": Decimal("0.10000000"), "size": Decimal("0.01000000"), "product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": 
Decimal("0.0000000000000000"), "filled_size": Decimal("0.00000000"), "executed_value": Decimal("0.0000000000000000"), "status": "pending", "settled": False } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.buy(product_id='product_id', price=Decimal('250.52'), size=Decimal('5.0'), funds=Decimal('500')) assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_sell(self, mock_post): message = { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": "0.10000000", "size": "0.01000000", "product_id": "BTC-USD", "side": "sell", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": "0.0000000000000000", "filled_size": "0.00000000", "executed_value": "0.0000000000000000", "status": "pending", "settled": False } expected_message = { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": Decimal("0.10000000"), "size": Decimal("0.01000000"), "product_id": "BTC-USD", "side": "sell", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": Decimal("0.0000000000000000"), "filled_size": Decimal("0.00000000"), "executed_value": Decimal("0.0000000000000000"), "status": "pending", "settled": False } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.sell(product_id='product_id', price=Decimal('250.52'), size=Decimal('5.0'), funds=Decimal('500')) assert r == expected_message @patch('aiohttp.ClientSession.delete', new_callable=AsyncContextManagerMock) async def test_cancel_order(self, mock_delete): mock_delete.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.cancel_order(order_id='order_id') assert type(r) is dict @patch('aiohttp.ClientSession.delete', new_callable=AsyncContextManagerMock) async def 
test_cancel_all(self, mock_delete): message = [ "144c6f8e-713f-4682-8435-5280fbe8b2b4", "debe4907-95dc-442f-af3b-cec12f42ebda", "cf7aceee-7b08-4227-a76c-3858144323ab", "dfc5ae27-cadb-4c0c-beef-8994936fde8a", "34fecfbf-de33-4273-b2c6-baf8e8948be4" ] mock_delete.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.cancel_all() assert r == message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_order(self, mock_get): message = { "id": "68e6a28f-ae28-4788-8d4f-5ab4e5e5ae08", "price": "250.0000000", "size": "1.00000000", "product_id": "BTC-USD", "side": "buy", "stp": "dc", "funds": "9.9750623400000000", "specified_funds": "10.0000000000000000", "type": "limit", "post_only": False, "created_at": "2016-12-08T20:09:05.508883Z", "done_at": "2016-12-08T20:09:05.527Z", "done_reason": "filled", "fill_fees": "0.0249376391550000", "filled_size": "0.01291771", "executed_value": "9.9750556620000000", "status": "done", "settled": False } expected_message = { "id": "68e6a28f-ae28-4788-8d4f-5ab4e5e5ae08", "price": Decimal("250.0000000"), "size": Decimal("1.00000000"), "product_id": "BTC-USD", "side": "buy", "stp": "dc", "funds": Decimal("9.9750623400000000"), "specified_funds": Decimal("10.0000000000000000"), "type": "limit", "post_only": False, "created_at": "2016-12-08T20:09:05.508883Z", "done_at": "2016-12-08T20:09:05.527Z", "done_reason": "filled", "fill_fees": Decimal("0.0249376391550000"), "filled_size": Decimal("0.01291771"), "executed_value": Decimal("9.9750556620000000"), "status": "done", "settled": False } mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_order('order_id') assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_orders(self, mock_get): message = [ { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": "0.10000000", "size": "0.01000000", 
"product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": "0.0000000000000000", "filled_size": "0.00000000", "executed_value": "0.0000000000000000", "status": "open", "settled": False }, { "id": "8b99b139-58f2-4ab2-8e7a-c11c846e3022", "price": "1.00000000", "size": "1.00000000", "product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:01:19.038644Z", "fill_fees": "0.0000000000000000", "filled_size": "0.00000000", "executed_value": "0.0000000000000000", "status": "open", "settled": False } ] expected_message = [ { "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2", "price": Decimal("0.10000000"), "size": Decimal("0.01000000"), "product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:02:28.53864Z", "fill_fees": Decimal("0.0000000000000000"), "filled_size": Decimal("0.00000000"), "executed_value": Decimal("0.0000000000000000"), "status": "open", "settled": False }, { "id": "8b99b139-58f2-4ab2-8e7a-c11c846e3022", "price": Decimal("1.00000000"), "size": Decimal("1.00000000"), "product_id": "BTC-USD", "side": "buy", "stp": "dc", "type": "limit", "time_in_force": "GTC", "post_only": False, "created_at": "2016-12-08T20:01:19.038644Z", "fill_fees": Decimal("0.0000000000000000"), "filled_size": Decimal("0.00000000"), "executed_value": Decimal("0.0000000000000000"), "status": "open", "settled": False } ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_orders() assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_fills(self, mock_get): message = [ { "trade_id": 74, "product_id": "BTC-USD", "price": "10.00", "size": "0.01", "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b", 
"created_at": "2014-11-07T22:19:28.578544Z", "liquidity": "T", "fee": "0.00025", "settled": True, "side": "buy" } ] expected_message = [ { "trade_id": 74, "product_id": "BTC-USD", "price": Decimal("10.00"), "size": Decimal("0.01"), "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b", "created_at": "2014-11-07T22:19:28.578544Z", "liquidity": "T", "fee": Decimal("0.00025"), "settled": True, "side": "buy" } ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_fills( order_id='d50ec984-77a8-460a-b958-66f114b0de9b', product_id='BTC-USD') assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_fundings(self, mock_get): message = [ { "id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71", "order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": "1057.6519956381537500", "status": "settled", "created_at": "2017-03-17T23:46:16.663397Z", "currency": "USD", "repaid_amount": "1057.6519956381537500", "default_amount": "0", "repaid_default": False }, { "id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "order_id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": "545.2400000000000000", "status": "outstanding", "created_at": "2017-03-18T00:34:34.270484Z", "currency": "USD", "repaid_amount": "532.7580047716682500" }, { "id": "d6ec039a-00eb-4bec-a3e1-f5c6a97c4afc", "order_id": "d6ec039a-00eb-4bec-a3e1-f5c6a97c4afc", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": "9.9999999958500000", "status": "outstanding", "created_at": "2017-03-19T23:16:11.615181Z", "currency": "USD", "repaid_amount": "0" } ] expected_message = [ { "id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71", "order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": Decimal("1057.6519956381537500"), "status": "settled", "created_at": 
"2017-03-17T23:46:16.663397Z", "currency": "USD", "repaid_amount": Decimal("1057.6519956381537500"), "default_amount": Decimal("0"), "repaid_default": False }, { "id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "order_id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": Decimal("545.2400000000000000"), "status": "outstanding", "created_at": "2017-03-18T00:34:34.270484Z", "currency": "USD", "repaid_amount": Decimal("532.7580047716682500") }, { "id": "d6ec039a-00eb-4bec-a3e1-f5c6a97c4afc", "order_id": "d6ec039a-00eb-4bec-a3e1-f5c6a97c4afc", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "amount": Decimal("9.9999999958500000"), "status": "outstanding", "created_at": "2017-03-19T23:16:11.615181Z", "currency": "USD", "repaid_amount": Decimal("0") } ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_fundings('status') assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_repay_funding(self, mock_post): mock_post.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.repay_funding(Decimal('10'), 'USD') assert type(r) is dict @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_margin_transfer(self, mock_post): message = { "created_at": "2017-01-25T19:06:23.415126Z", "id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab", "user_id": "521c20b3d4ab09621f000011", "profile_id": "cda95996-ac59-45a3-a42e-30daeb061867", "margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be", "type": "deposit", "amount": "2", "currency": "USD", "account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5", "margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d", "margin_product_id": "BTC-USD", "status": "completed", "nonce": 25 } expected_message = { "created_at": "2017-01-25T19:06:23.415126Z", "id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab", "user_id": 
"521c20b3d4ab09621f000011", "profile_id": "cda95996-ac59-45a3-a42e-30daeb061867", "margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be", "type": "deposit", "amount": Decimal("2"), "currency": "USD", "account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5", "margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d", "margin_product_id": "BTC-USD", "status": "completed", "nonce": 25 } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.margin_transfer('id', 'deposit', 'USD', Decimal('2')) assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_position(self, mock_get): message = { "status": "active", "funding": { "max_funding_value": "10000", "funding_value": "622.48199522418175", "oldest_outstanding": { "id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "order_id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "created_at": "2017-03-18T00:34:34.270484Z", "currency": "USD", "account_id": "202af5e9-1ac0-4888-bdf5-15599ae207e2", "amount": "545.2400000000000000" } }, "accounts": { "USD": { "id": "202af5e9-1ac0-4888-bdf5-15599ae207e2", "balance": "0.0000000000000000", "hold": "0.0000000000000000", "funded_amount": "622.4819952241817500", "default_amount": "0" }, "BTC": { "id": "1f690a52-d557-41b5-b834-e39eb10d7df0", "balance": "4.7051564815292853", "hold": "0.6000000000000000", "funded_amount": "0.0000000000000000", "default_amount": "0" } }, "margin_call": { "active": True, "price": "175.96000000", "side": "sell", "size": "4.70515648", "funds": "624.04210048" }, "user_id": "521c20b3d4ab09621f000011", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "position": { "type": "long", "size": "0.59968368", "complement": "-641.91999958602800000000000000", "max_size": "1.49000000" }, "product_id": "BTC-USD" } expected_message = { "status": "active", "funding": { "max_funding_value": Decimal("10000"), "funding_value": Decimal("622.48199522418175"), 
"oldest_outstanding": { "id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "order_id": "280c0a56-f2fa-4d3b-a199-92df76fff5cd", "created_at": "2017-03-18T00:34:34.270484Z", "currency": "USD", "account_id": "202af5e9-1ac0-4888-bdf5-15599ae207e2", "amount": Decimal("545.2400000000000000") } }, "accounts": { "USD": { "id": "202af5e9-1ac0-4888-bdf5-15599ae207e2", "balance": Decimal("0.0000000000000000"), "hold": Decimal("0.0000000000000000"), "funded_amount": Decimal("622.4819952241817500"), "default_amount": Decimal("0") }, "BTC": { "id": "1f690a52-d557-41b5-b834-e39eb10d7df0", "balance": Decimal("4.7051564815292853"), "hold": Decimal("0.6000000000000000"), "funded_amount": Decimal("0.0000000000000000"), "default_amount": Decimal("0") } }, "margin_call": { "active": True, "price": Decimal("175.96000000"), "side": "sell", "size": Decimal("4.70515648"), "funds": Decimal("624.04210048") }, "user_id": "521c20b3d4ab09621f000011", "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6", "position": { "type": "long", "size": Decimal("0.59968368"), "complement": Decimal("-641.91999958602800000000000000"), "max_size": Decimal("1.49000000") }, "product_id": "BTC-USD" } mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_position() assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_close_position(self, mock_post): mock_post.return_value.aenter.json = CoroutineMock(return_value={}) self.init() r = await self.client.close_position() assert type(r) is dict @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_deposit(self, mock_post): message = { "amount": "10.00", "currency": "USD", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } expected_message = { "amount": Decimal('10.00'), "currency": "USD", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } mock_post.return_value.aenter.json = CoroutineMock( 
return_value=message) self.init() r = await self.client.deposit(Decimal('10.0'), 'id', 'USD') assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_coinbase_deposit(self, mock_post): message = { "amount": "10.00", "currency": "BTC", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } expected_message = { "amount": Decimal('10.00'), "currency": "BTC", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.coinbase_deposit(Decimal('10'), 'BTC', 'id') assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_withdraw(self, mock_post): message = { "id": "593533d2-ff31-46e0-b22e-ca754147a96a", "amount": "10.00", "currency": "USD", "payout_at": "2016-08-20T00:31:09Z" } expected_message = { "id": "593533d2-ff31-46e0-b22e-ca754147a96a", "amount": Decimal("10.00"), "currency": "USD", "payout_at": "2016-08-20T00:31:09Z" } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.withdraw(Decimal('10'), 'USD', 'id') assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_coinbase_withdraw(self, mock_post): message = { "amount": "10.00", "currency": "BTC", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } expected_message = { "amount": Decimal('10.00'), "currency": "BTC", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.coinbase_withdraw(Decimal('10'), 'USD', 'id') assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_crypto_withdraw(self, mock_post): message = { "amount": "10.00", "currency": "BTC", "payment_method_id": 
"bc677162-d934-5f1a-968c-a496b1c1270b" } expected_message = { "amount": Decimal('10.00'), "currency": "BTC", "payment_method_id": "bc677162-d934-5f1a-968c-a496b1c1270b" } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.crypto_withdraw(Decimal('10'), 'USD', 'addr') assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_payment_methods(self, mock_get): # NOTE: return values not converted message = [ { "id": "bc6d7162-d984-5ffa-963c-a493b1c1370b", "type": "ach_bank_account", "name": "Bank of America - eBan... ********7134", "currency": "USD", "primary_buy": True, "primary_sell": True, "allow_buy": True, "allow_sell": True, "allow_deposit": True, "allow_withdraw": True, "limits": { "buy": [ { "period_in_days": 1, "total": { "amount": "10000.00", "currency": "USD" }, "remaining": { "amount": "10000.00", "currency": "USD" } } ], "instant_buy": [ { "period_in_days": 7, "total": { "amount": "0.00", "currency": "USD" }, "remaining": { "amount": "0.00", "currency": "USD" } } ], "sell": [ { "period_in_days": 1, "total": { "amount": "10000.00", "currency": "USD" }, "remaining": { "amount": "10000.00", "currency": "USD" } } ], "deposit": [ { "period_in_days": 1, "total": { "amount": "10000.00", "currency": "USD" }, "remaining": { "amount": "10000.00", "currency": "USD" } } ] } }, ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_payment_methods() assert r == message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_coinbase_accounts(self, mock_get): message = [ { "id": "fc3a8a57-7142-542d-8436-95a3d82e1622", "name": "ETH Wallet", "balance": "0.00000000", "currency": "ETH", "type": "wallet", "primary": False, "active": True }, { "id": "2ae3354e-f1c3-5771-8a37-6228e9d239db", "name": "USD Wallet", "balance": "0.00", "currency": "USD", "type": "fiat", 
"primary": False, "active": True, "wire_deposit_information": { "account_number": "0199003122", "routing_number": "026013356", "bank_name": "Metropolitan Commercial Bank", "bank_address": "99 Park Ave 4th Fl New York, NY 10016", "bank_country": { "code": "US", "name": "United States" }, "account_name": "Coinbase, Inc", "account_address": "548 Market Street, #23008, SF", "reference": "BAOCAEUX" } }, { "id": "1bfad868-5223-5d3c-8a22-b5ed371e55cb", "name": "BTC Wallet", "balance": "0.00000000", "currency": "BTC", "type": "wallet", "primary": True, "active": True }, { "id": "2a11354e-f133-5771-8a37-622be9b239db", "name": "EUR Wallet", "balance": "0.00", "currency": "EUR", "type": "fiat", "primary": False, "active": True, "sepa_deposit_information": { "iban": "EE957700771001355096", "swift": "LHVBEE22", "bank_name": "AS LHV Pank", "bank_address": "Tartu mnt 2, 10145 Tallinn, Estonia", "bank_country_name": "Estonia", "account_name": "Coinbase UK, Ltd.", "account_address": "9th Floor, 107 Cheapside, London", "reference": "CBAEUXOVFXOXYX" } }, ] expected_message = [ { "id": "fc3a8a57-7142-542d-8436-95a3d82e1622", "name": "ETH Wallet", "balance": Decimal("0.00000000"), "currency": "ETH", "type": "wallet", "primary": False, "active": True }, { "id": "2ae3354e-f1c3-5771-8a37-6228e9d239db", "name": "USD Wallet", "balance": Decimal("0.00"), "currency": "USD", "type": "fiat", "primary": False, "active": True, "wire_deposit_information": { "account_number": "0199003122", "routing_number": "026013356", "bank_name": "Metropolitan Commercial Bank", "bank_address": "99 Park Ave 4th Fl New York, NY 10016", "bank_country": { "code": "US", "name": "United States" }, "account_name": "Coinbase, Inc", "account_address": "548 Market Street, #23008, SF", "reference": "BAOCAEUX" } }, { "id": "1bfad868-5223-5d3c-8a22-b5ed371e55cb", "name": "BTC Wallet", "balance": Decimal("0.00000000"), "currency": "BTC", "type": "wallet", "primary": True, "active": True }, { "id": 
"2a11354e-f133-5771-8a37-622be9b239db", "name": "EUR Wallet", "balance": Decimal("0.00"), "currency": "EUR", "type": "fiat", "primary": False, "active": True, "sepa_deposit_information": { "iban": "EE957700771001355096", "swift": "LHVBEE22", "bank_name": "AS LHV Pank", "bank_address": "Tartu mnt 2, 10145 Tallinn, Estonia", "bank_country_name": "Estonia", "account_name": "Coinbase UK, Ltd.", "account_address": "9th Floor, 107 Cheapside, London", "reference": "CBAEUXOVFXOXYX" } }, ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_coinbase_accounts() assert r == expected_message @patch('aiohttp.ClientSession.post', new_callable=AsyncContextManagerMock) async def test_create_report(self, mock_post): message = { "id": "0428b97b-bec1-429e-a94c-59232926778d", "type": "fills", "status": "pending", "created_at": "2015-01-06T10:34:47.000Z", "completed_at": None, "expires_at": "2015-01-13T10:35:47.000Z", "file_url": None, "params": { "start_date": "2014-11-01T00:00:00.000Z", "end_date": "2014-11-30T23:59:59.000Z" } } mock_post.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.create_report('fills', 'start', 'end', product_id='product_id', report_format='csv', email='email') assert r == message r = await self.client.create_report('account', 'start', 'end', account_id='account_id') assert r == message with pytest.raises(AssertionError): await self.client.create_report('fills', 'start', 'end') with pytest.raises(AssertionError): await self.client.create_report('account', 'start', 'end') with pytest.raises(AssertionError): await self.client.create_report('test', 'start', 'end') @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_report(self, mock_get): message = { "id": "0428b97b-bec1-429e-a94c-59232926778d", "type": "fills", "status": "ready", "created_at": "2015-01-06T10:34:47.000Z", "completed_at": "2015-01-06T10:35:47.000Z", 
"expires_at": "2015-01-13T10:35:47.000Z", "file_url": "https://example.com/0428b97b.../fills.pdf", "params": { "start_date": "2014-11-01T00:00:00.000Z", "end_date": "2014-11-30T23:59:59.000Z" } } mock_get.return_value.aenter.json = CoroutineMock( return_value=message) self.init() r = await self.client.get_report('report_id') assert r == message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMock) async def test_get_trailing_volume(self, mock_get): message = [ { "product_id": "BTC-USD", "exchange_volume": "11800.00000000", "volume": "100.00000000", "recorded_at": "1973-11-29T00:05:01.123456Z" }, { "product_id": "LTC-USD", "exchange_volume": "51010.04100000", "volume": "2010.04100000", "recorded_at": "1973-11-29T00:05:02.123456Z" } ] expected_message = [ { "product_id": "BTC-USD", "exchange_volume": Decimal("11800.00000000"), "volume": Decimal("100.00000000"), "recorded_at": "1973-11-29T00:05:01.123456Z" }, { "product_id": "LTC-USD", "exchange_volume": Decimal("51010.04100000"), "volume": Decimal("2010.04100000"), "recorded_at": "1973-11-29T00:05:02.123456Z" } ] mock_get.return_value.aenter.json = CoroutineMock(return_value=message) self.init() r = await self.client.get_trailing_volume() assert r == expected_message @patch('aiohttp.ClientSession.get', new_callable=AsyncContextManagerMockPagination) async def test_pagination(self, mock_get): pages = [[{'id': 1}], [{'id': 2}]] mock_get.return_value.aenter.json = CoroutineMock(side_effect=pages) self.init() r = await self.client.get_account_history('id') assert r == [{'id': 1}, {'id': 2}]
#!/usr/bin/env python3
'''
acron.py -- a cron replacement tool

This is designed to be a replacement for the system cron. You can run
acron and it will behave almost exactly like you expect the system cron
to work, except there are some more advanced behaviors that can be
expressed.

Rationale: many cron jobs need to do similar kinds of tasks, and instead
of implementing those functionalities in each and every cron, they can be
implemented in the cron runner level.
'''

import argparse
import hashlib
import logging
import os
import re
import socket
import subprocess
import sys
import time

# Seconds represented by each time-value unit suffix ("1d", "2h", ...).
_UNIT_SECONDS = {'d': 86400, 'h': 3600, 'm': 60, 's': 1}


class Job(object):
    '''
    A Job represents a single job that we might be running. It maintains
    information about the job parameters and whether or not the job is
    running, plus handles locking, running, collating output, etc.
    '''

    def __init__(self, filename):
        '''
        Initialize a job from a given definition file.

        The file is a simple directive-per-line format ("run", "if",
        "every", "timeout", "send-stdout", "stdout", "stderr", "splay");
        indented lines following "run" are continuations of the command.
        Malformed directives are logged and flag the job with errors=True,
        which prevents it from ever being scheduled.
        '''
        self.filename = filename
        self.mtime = os.stat(filename).st_mtime
        self.command = ''
        self.errors = False
        self.next_run_ts = None       # Cached absolute next-run timestamp.
        self.running = False
        self.if_command = None

        # Defaults for the optional directives. The previous version left
        # these attributes unset unless the directive appeared in the file,
        # which made next_run() raise AttributeError for minimal job files.
        self.every = 3600
        self.timeout = 3600
        self.splay = False
        self.send_stdout = 'if-stderr'
        self.stdout = None
        self.stderr = None

        in_command = False
        with open(filename) as job_file:
            lines = job_file.readlines()
        for line in lines:
            # Strip comments and collapse runs of whitespace.
            c_line = ' '.join(line.split('#')[0].strip().split())

            # Indented lines directly after "run" extend the command.
            if in_command and line.startswith(' '):
                self.command += ' ' + c_line
                continue
            else:
                in_command = False

            if not c_line:
                continue
            if ' ' in c_line:
                cmd, args = c_line.split(' ', 1)
            else:
                cmd, args = c_line, ''

            if cmd == 'run':
                self.command = args
                in_command = True
            elif cmd == 'if':
                self.if_command = args
            elif cmd == 'every':
                self.every = self._parse_time(cmd, args)
            elif cmd == 'timeout':
                self.timeout = self._parse_time(cmd, args)
            elif cmd == 'send-stdout':
                if args not in ('if-stderr', 'always', 'never'):
                    logging.error('%s: send-stdout not valid', filename)
                    self.errors = True
                self.send_stdout = args
            elif cmd == 'stderr':
                if not (args.startswith('/') or '@' in args or args == 'stdout'):
                    logging.error('%s: stderr not filename, email, or "stdout"',
                                  filename)
                    self.errors = True
                self.stderr = args
            elif cmd == 'stdout':
                if not (args.startswith('/') or '@' in args or args == 'stderr'):
                    logging.error('%s: stdout not filename, email, or "stderr"',
                                  filename)
                    self.errors = True
                self.stdout = args
            elif cmd == 'splay':
                if args not in ('on', 'off'):
                    logging.error('%s: splay must be "on" or "off"', filename)
                    self.errors = True
                self.splay = args == 'on'

    def _parse_time(self, cmd, arg):
        '''
        Parse a user supplied time value into seconds.

        Accepts the shortcuts "hour"/"minute"/"day" and the general form
        "<int><unit>" with unit one of d/h/m/s. A malformed value logs an
        error, marks the job as having errors, and yields a small value;
        a parsed value of zero falls back to one hour.
        '''
        converts = {'hour': '1h', 'minute': '1m', 'day': '1d'}
        arg = converts.get(arg, arg)
        if not re.match(r'^\d+[dhms]$', arg):
            logging.error('%s: %s value does not match formatting rules',
                          self.filename, cmd)
            self.errors = True
            val = 1
        else:
            val = int(arg[:-1]) * _UNIT_SECONDS[arg[-1]]
        if not val or val < 1:
            val = 3600  # Prevent failures if we can't parse the time.
        return val

    def next_run(self):
        '''
        Return the absolute Unix timestamp at which this job next becomes
        eligible to run, or None if the job definition has errors.

        The timestamp is cached until run() clears it, so repeated calls
        within one scheduling cycle are stable (helping prevent skips).

        Next run times are always calculated from the last non-splayed
        offset. I.e., if we run every minute, we go back to the last
        minute boundary and project forward from there. If we've missed
        the splay point, we add the interval again to get the next point.
        '''
        if self.errors:
            return None
        if self.next_run_ts is not None:
            return self.next_run_ts

        now = int(time.time())
        start_ts = now - (now % self.every)
        if not self.splay:
            self.next_run_ts = start_ts + self.every
        else:
            # Stable per-host splay: hash the hostname into [0, every).
            # hashlib requires bytes under Python 3.
            host_digest = hashlib.md5(
                socket.gethostname().encode('utf-8')).hexdigest()
            self.next_run_ts = start_ts + int(host_digest, 16) % self.every
        if self.next_run_ts < now:
            self.next_run_ts += self.every
        return self.next_run_ts

    def run(self):
        '''
        Execute the cron job (intended to run in a subprocess).

        NOTE(review): actually spawning self.command is still
        unimplemented here; currently this only logs and clears the
        cached next-run time so a new one is computed next cycle.
        '''
        logging.info('Running %s', self.filename)
        self.next_run_ts = None
        # self.running = True  # TODO: set once real execution is wired up.

    def try_reaping(self):
        '''
        Check the status of a running job and, if necessary, reap it,
        kill it, or do some other maintenance. Currently a no-op.
        '''
        pass


def main(crondir):
    '''
    Main function of acron: handles the global state management, keeps the
    watched job definitions in sync with the files under crondir, and
    kicks off jobs whose next run time has arrived.
    '''
    jobs = {}
    while True:
        now = int(time.time())
        if now % 10 == 0:
            logging.info('Acron loop: %d jobs watched', len(jobs))

        # Drop jobs whose definition file has gone away, as promised by
        # the Job docstring.
        for filename in list(jobs):
            if not os.path.exists(filename):
                logging.info('%s removed, dropping job', filename)
                del jobs[filename]

        # Job definition update loop.
        for root, dirs, files in os.walk(crondir):
            for fn in files:
                filename = os.path.join(root, fn)
                if filename in jobs:
                    job = jobs[filename]
                    if job.mtime < os.stat(filename).st_mtime:
                        if job.running:
                            logging.info('%s needs update, but still running',
                                         filename)
                            continue  # next file.
                        logging.info('%s changed, reloading...', filename)
                        del jobs[filename]
                        job = Job(filename)
                    else:
                        continue
                else:
                    job = Job(filename)

                # A failing "if" command means the job is skipped on this
                # host entirely.
                if job.if_command:
                    try:
                        subprocess.check_output(
                            ['/bin/bash', '-c', job.if_command],
                            stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError:
                        logging.debug('%s: "if" returned error, skipping job',
                                      filename)
                        continue

                jobs[filename] = job
                logging.debug('%s: next_run=%s command=%s', filename,
                              job.next_run(), job.command)

        # Job execution loop.
        for job in jobs.values():
            if job.errors:
                logging.debug('%s: has errors, skipping', job.filename)
                continue
            if job.running:
                logging.debug('%s: is running', job.filename)
                job.try_reaping()
                continue
            timeleft = job.next_run() - now
            logging.debug('%s: timeleft = %d', job.filename, timeleft)
            if timeleft > 0:
                continue
            # Actually run the job now.
            job.run()

        time.sleep(1)


def usage():
    '''Print usage information and exit with a failure status.'''
    print('''acron -- a cron replacement

Blah blah, a usage file should be here.
''')
    sys.exit(1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='acron manager')
    parser.add_argument('--cron-dir',
                        help="Directory of files to watch for crons")
    parser.add_argument('-v', dest='verbose', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s')
    log = logging.getLogger()
    if args.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    if not args.cron_dir:
        usage()
    if not os.path.isdir(args.cron_dir):
        logging.error('%s is not a directory', args.cron_dir)
        sys.exit(1)

    # TODO: add some locking to prevent multiple acrons from running.
    logging.info('Acron beginning run')
    sys.exit(main(args.cron_dir))
"""Legacy device tracker classes.""" from __future__ import annotations import asyncio from datetime import timedelta import hashlib from types import ModuleType from typing import Any, Callable, Sequence, final import attr import voluptuous as vol from homeassistant import util from homeassistant.components import zone from homeassistant.config import async_log_exception, load_yaml_config_file from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_GPS_ACCURACY, ATTR_ICON, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_NAME, CONF_ICON, CONF_MAC, CONF_NAME, DEVICE_DEFAULT_NAME, STATE_HOME, STATE_NOT_HOME, ) from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_per_platform, discovery import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_registry import async_get_registry from homeassistant.helpers.event import ( async_track_time_interval, async_track_utc_time_change, ) from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.typing import ConfigType, GPSType from homeassistant.setup import async_prepare_setup_platform from homeassistant.util import dt as dt_util from homeassistant.util.yaml import dump from .const import ( ATTR_ATTRIBUTES, ATTR_BATTERY, ATTR_CONSIDER_HOME, ATTR_DEV_ID, ATTR_GPS, ATTR_HOST_NAME, ATTR_LOCATION_NAME, ATTR_MAC, ATTR_SOURCE_TYPE, CONF_CONSIDER_HOME, CONF_NEW_DEVICE_DEFAULTS, CONF_SCAN_INTERVAL, CONF_TRACK_NEW, DEFAULT_CONSIDER_HOME, DEFAULT_TRACK_NEW, DOMAIN, LOGGER, PLATFORM_TYPE_LEGACY, SCAN_INTERVAL, SOURCE_TYPE_BLUETOOTH, SOURCE_TYPE_BLUETOOTH_LE, SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER, ) SERVICE_SEE = "see" SOURCE_TYPES = ( SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER, SOURCE_TYPE_BLUETOOTH, SOURCE_TYPE_BLUETOOTH_LE, ) NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any( None, vol.Schema({vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean}), ) PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend( { 
vol.Optional(CONF_SCAN_INTERVAL): cv.time_period, vol.Optional(CONF_TRACK_NEW): cv.boolean, vol.Optional(CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME): vol.All( cv.time_period, cv.positive_timedelta ), vol.Optional(CONF_NEW_DEVICE_DEFAULTS, default={}): NEW_DEVICE_DEFAULTS_SCHEMA, } ) PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema) SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema( vol.All( cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID), { ATTR_MAC: cv.string, ATTR_DEV_ID: cv.string, ATTR_HOST_NAME: cv.string, ATTR_LOCATION_NAME: cv.string, ATTR_GPS: cv.gps, ATTR_GPS_ACCURACY: cv.positive_int, ATTR_BATTERY: cv.positive_int, ATTR_ATTRIBUTES: dict, ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES), ATTR_CONSIDER_HOME: cv.time_period, # Temp workaround for iOS app introduced in 0.65 vol.Optional("battery_status"): str, vol.Optional("hostname"): str, }, ) ) YAML_DEVICES = "known_devices.yaml" EVENT_NEW_DEVICE = "device_tracker_new_device" def see( hass: HomeAssistant, mac: str = None, dev_id: str = None, host_name: str = None, location_name: str = None, gps: GPSType = None, gps_accuracy=None, battery: int = None, attributes: dict = None, ): """Call service to notify you see device.""" data = { key: value for key, value in ( (ATTR_MAC, mac), (ATTR_DEV_ID, dev_id), (ATTR_HOST_NAME, host_name), (ATTR_LOCATION_NAME, location_name), (ATTR_GPS, gps), (ATTR_GPS_ACCURACY, gps_accuracy), (ATTR_BATTERY, battery), ) if value is not None } if attributes: data[ATTR_ATTRIBUTES] = attributes hass.services.call(DOMAIN, SERVICE_SEE, data) async def async_setup_integration(hass: HomeAssistant, config: ConfigType) -> None: """Set up the legacy integration.""" tracker = await get_tracker(hass, config) legacy_platforms = await async_extract_config(hass, config) setup_tasks = [ asyncio.create_task(legacy_platform.async_setup_legacy(hass, tracker)) for legacy_platform in legacy_platforms ] if setup_tasks: await asyncio.wait(setup_tasks) async def 
async_platform_discovered(p_type, info): """Load a platform.""" platform = await async_create_platform_type(hass, config, p_type, {}) if platform is None or platform.type != PLATFORM_TYPE_LEGACY: return await platform.async_setup_legacy(hass, tracker, info) discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered) # Clean up stale devices async_track_utc_time_change( hass, tracker.async_update_stale, second=range(0, 60, 5) ) async def async_see_service(call): """Service to see a device.""" # Temp workaround for iOS, introduced in 0.65 data = dict(call.data) data.pop("hostname", None) data.pop("battery_status", None) await tracker.async_see(**data) hass.services.async_register( DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA ) # restore await tracker.async_setup_tracked_device() @attr.s class DeviceTrackerPlatform: """Class to hold platform information.""" LEGACY_SETUP = ( "async_get_scanner", "get_scanner", "async_setup_scanner", "setup_scanner", ) name: str = attr.ib() platform: ModuleType = attr.ib() config: dict = attr.ib() @property def type(self): """Return platform type.""" for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),): for meth in methods: if hasattr(self.platform, meth): return platform_type return None async def async_setup_legacy(self, hass, tracker, discovery_info=None): """Set up a legacy platform.""" LOGGER.info("Setting up %s.%s", DOMAIN, self.name) try: scanner = None setup = None if hasattr(self.platform, "async_get_scanner"): scanner = await self.platform.async_get_scanner( hass, {DOMAIN: self.config} ) elif hasattr(self.platform, "get_scanner"): scanner = await hass.async_add_executor_job( self.platform.get_scanner, hass, {DOMAIN: self.config} ) elif hasattr(self.platform, "async_setup_scanner"): setup = await self.platform.async_setup_scanner( hass, self.config, tracker.async_see, discovery_info ) elif hasattr(self.platform, "setup_scanner"): setup = await hass.async_add_executor_job( 
self.platform.setup_scanner, hass, self.config, tracker.see, discovery_info, ) else: raise HomeAssistantError("Invalid legacy device_tracker platform.") if setup: hass.config.components.add(f"{DOMAIN}.{self.name}") if scanner: async_setup_scanner_platform( hass, self.config, scanner, tracker.async_see, self.type ) return if not setup: LOGGER.error("Error setting up platform %s %s", self.type, self.name) return except Exception: # pylint: disable=broad-except LOGGER.exception("Error setting up platform %s %s", self.type, self.name) async def async_extract_config(hass, config): """Extract device tracker config and split between legacy and modern.""" legacy = [] for platform in await asyncio.gather( *( async_create_platform_type(hass, config, p_type, p_config) for p_type, p_config in config_per_platform(config, DOMAIN) ) ): if platform is None: continue if platform.type == PLATFORM_TYPE_LEGACY: legacy.append(platform) else: raise ValueError( f"Unable to determine type for {platform.name}: {platform.type}" ) return legacy async def async_create_platform_type( hass, config, p_type, p_config ) -> DeviceTrackerPlatform | None: """Determine type of platform.""" platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type) if platform is None: return None return DeviceTrackerPlatform(p_type, platform, p_config) @callback def async_setup_scanner_platform( hass: HomeAssistant, config: ConfigType, scanner: Any, async_see_device: Callable, platform: str, ): """Set up the connect scanner-based platform to device tracker. This method must be run in the event loop. 
""" interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL) update_lock = asyncio.Lock() scanner.hass = hass # Initial scan of each mac we also tell about host name for config seen: Any = set() async def async_device_tracker_scan(now: dt_util.dt.datetime): """Handle interval matches.""" if update_lock.locked(): LOGGER.warning( "Updating device list from %s took longer than the scheduled " "scan interval %s", platform, interval, ) return async with update_lock: found_devices = await scanner.async_scan_devices() for mac in found_devices: if mac in seen: host_name = None else: host_name = await scanner.async_get_device_name(mac) seen.add(mac) try: extra_attributes = await scanner.async_get_extra_attributes(mac) except NotImplementedError: extra_attributes = {} kwargs = { "mac": mac, "host_name": host_name, "source_type": SOURCE_TYPE_ROUTER, "attributes": { "scanner": scanner.__class__.__name__, **extra_attributes, }, } zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME) if zone_home: kwargs["gps"] = [ zone_home.attributes[ATTR_LATITUDE], zone_home.attributes[ATTR_LONGITUDE], ] kwargs["gps_accuracy"] = 0 hass.async_create_task(async_see_device(**kwargs)) async_track_time_interval(hass, async_device_tracker_scan, interval) hass.async_create_task(async_device_tracker_scan(None)) async def get_tracker(hass, config): """Create a tracker.""" yaml_path = hass.config.path(YAML_DEVICES) conf = config.get(DOMAIN, []) conf = conf[0] if conf else {} consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME) defaults = conf.get(CONF_NEW_DEVICE_DEFAULTS, {}) track_new = conf.get(CONF_TRACK_NEW) if track_new is None: track_new = defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW) devices = await async_load_config(yaml_path, hass, consider_home) tracker = DeviceTracker(hass, consider_home, track_new, defaults, devices) return tracker class DeviceTracker: """Representation of a device tracker.""" def __init__( self, hass: HomeAssistant, consider_home: timedelta, 
track_new: bool, defaults: dict, devices: Sequence, ) -> None: """Initialize a device tracker.""" self.hass = hass self.devices = {dev.dev_id: dev for dev in devices} self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac} self.consider_home = consider_home self.track_new = ( track_new if track_new is not None else defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW) ) self.defaults = defaults self._is_updating = asyncio.Lock() for dev in devices: if self.devices[dev.dev_id] is not dev: LOGGER.warning("Duplicate device IDs detected %s", dev.dev_id) if dev.mac and self.mac_to_dev[dev.mac] is not dev: LOGGER.warning("Duplicate device MAC addresses detected %s", dev.mac) def see( self, mac: str = None, dev_id: str = None, host_name: str = None, location_name: str = None, gps: GPSType = None, gps_accuracy: int = None, battery: int = None, attributes: dict = None, source_type: str = SOURCE_TYPE_GPS, picture: str = None, icon: str = None, consider_home: timedelta = None, ): """Notify the device tracker that you see a device.""" self.hass.add_job( self.async_see( mac, dev_id, host_name, location_name, gps, gps_accuracy, battery, attributes, source_type, picture, icon, consider_home, ) ) async def async_see( self, mac: str = None, dev_id: str = None, host_name: str = None, location_name: str = None, gps: GPSType = None, gps_accuracy: int = None, battery: int = None, attributes: dict = None, source_type: str = SOURCE_TYPE_GPS, picture: str = None, icon: str = None, consider_home: timedelta = None, ): """Notify the device tracker that you see a device. This method is a coroutine. 
""" registry = await async_get_registry(self.hass) if mac is None and dev_id is None: raise HomeAssistantError("Neither mac or device id passed in") if mac is not None: mac = str(mac).upper() device = self.mac_to_dev.get(mac) if not device: dev_id = util.slugify(host_name or "") or util.slugify(mac) else: dev_id = cv.slug(str(dev_id).lower()) device = self.devices.get(dev_id) if device: await device.async_seen( host_name, location_name, gps, gps_accuracy, battery, attributes, source_type, consider_home, ) if device.track: device.async_write_ha_state() return # Guard from calling see on entity registry entities. entity_id = f"{DOMAIN}.{dev_id}" if registry.async_is_registered(entity_id): LOGGER.error( "The see service is not supported for this entity %s", entity_id ) return # If no device can be found, create it dev_id = util.ensure_unique_string(dev_id, self.devices.keys()) device = Device( self.hass, consider_home or self.consider_home, self.track_new, dev_id, mac, picture=picture, icon=icon, ) self.devices[dev_id] = device if mac is not None: self.mac_to_dev[mac] = device await device.async_seen( host_name, location_name, gps, gps_accuracy, battery, attributes, source_type, ) if device.track: device.async_write_ha_state() self.hass.bus.async_fire( EVENT_NEW_DEVICE, { ATTR_ENTITY_ID: device.entity_id, ATTR_HOST_NAME: device.host_name, ATTR_MAC: device.mac, }, ) # update known_devices.yaml self.hass.async_create_task( self.async_update_config( self.hass.config.path(YAML_DEVICES), dev_id, device ) ) async def async_update_config(self, path, dev_id, device): """Add device to YAML configuration file. This method is a coroutine. """ async with self._is_updating: await self.hass.async_add_executor_job( update_config, self.hass.config.path(YAML_DEVICES), dev_id, device ) @callback def async_update_stale(self, now: dt_util.dt.datetime): """Update stale devices. This method must be run in the event loop. 
""" for device in self.devices.values(): if (device.track and device.last_update_home) and device.stale(now): self.hass.async_create_task(device.async_update_ha_state(True)) async def async_setup_tracked_device(self): """Set up all not exists tracked devices. This method is a coroutine. """ async def async_init_single_device(dev): """Init a single device_tracker entity.""" await dev.async_added_to_hass() dev.async_write_ha_state() tasks = [] for device in self.devices.values(): if device.track and not device.last_seen: tasks.append( self.hass.async_create_task(async_init_single_device(device)) ) if tasks: await asyncio.wait(tasks) class Device(RestoreEntity): """Base class for a tracked device.""" host_name: str = None location_name: str = None gps: GPSType = None gps_accuracy: int = 0 last_seen: dt_util.dt.datetime = None consider_home: dt_util.dt.timedelta = None battery: int = None attributes: dict = None icon: str = None # Track if the last update of this device was HOME. last_update_home = False _state = STATE_NOT_HOME def __init__( self, hass: HomeAssistant, consider_home: timedelta, track: bool, dev_id: str, mac: str, name: str = None, picture: str = None, gravatar: str = None, icon: str = None, ) -> None: """Initialize a device.""" self.hass = hass self.entity_id = f"{DOMAIN}.{dev_id}" # Timedelta object how long we consider a device home if it is not # detected anymore. 
self.consider_home = consider_home # Device ID self.dev_id = dev_id self.mac = mac # If we should track this device self.track = track # Configured name self.config_name = name # Configured picture if gravatar is not None: self.config_picture = get_gravatar_for_email(gravatar) else: self.config_picture = picture self.icon = icon self.source_type = None self._attributes = {} @property def name(self): """Return the name of the entity.""" return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME @property def state(self): """Return the state of the device.""" return self._state @property def entity_picture(self): """Return the picture of the device.""" return self.config_picture @final @property def state_attributes(self): """Return the device state attributes.""" attributes = {ATTR_SOURCE_TYPE: self.source_type} if self.gps: attributes[ATTR_LATITUDE] = self.gps[0] attributes[ATTR_LONGITUDE] = self.gps[1] attributes[ATTR_GPS_ACCURACY] = self.gps_accuracy if self.battery: attributes[ATTR_BATTERY] = self.battery return attributes @property def extra_state_attributes(self): """Return device state attributes.""" return self._attributes async def async_seen( self, host_name: str = None, location_name: str = None, gps: GPSType = None, gps_accuracy=0, battery: int = None, attributes: dict = None, source_type: str = SOURCE_TYPE_GPS, consider_home: timedelta = None, ): """Mark the device as seen.""" self.source_type = source_type self.last_seen = dt_util.utcnow() self.host_name = host_name or self.host_name self.location_name = location_name self.consider_home = consider_home or self.consider_home if battery: self.battery = battery if attributes: self._attributes.update(attributes) self.gps = None if gps is not None: try: self.gps = float(gps[0]), float(gps[1]) self.gps_accuracy = gps_accuracy or 0 except (ValueError, TypeError, IndexError): self.gps = None self.gps_accuracy = 0 LOGGER.warning("Could not parse gps value for %s: %s", self.dev_id, gps) 
await self.async_update() def stale(self, now: dt_util.dt.datetime = None): """Return if device state is stale. Async friendly. """ return ( self.last_seen is None or (now or dt_util.utcnow()) - self.last_seen > self.consider_home ) def mark_stale(self): """Mark the device state as stale.""" self._state = STATE_NOT_HOME self.gps = None self.last_update_home = False async def async_update(self): """Update state of entity. This method is a coroutine. """ if not self.last_seen: return if self.location_name: self._state = self.location_name elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS: zone_state = zone.async_active_zone( self.hass, self.gps[0], self.gps[1], self.gps_accuracy ) if zone_state is None: self._state = STATE_NOT_HOME elif zone_state.entity_id == zone.ENTITY_ID_HOME: self._state = STATE_HOME else: self._state = zone_state.name elif self.stale(): self.mark_stale() else: self._state = STATE_HOME self.last_update_home = True async def async_added_to_hass(self): """Add an entity.""" await super().async_added_to_hass() state = await self.async_get_last_state() if not state: return self._state = state.state self.last_update_home = state.state == STATE_HOME self.last_seen = dt_util.utcnow() for attribute, var in ( (ATTR_SOURCE_TYPE, "source_type"), (ATTR_GPS_ACCURACY, "gps_accuracy"), (ATTR_BATTERY, "battery"), ): if attribute in state.attributes: setattr(self, var, state.attributes[attribute]) if ATTR_LONGITUDE in state.attributes: self.gps = ( state.attributes[ATTR_LATITUDE], state.attributes[ATTR_LONGITUDE], ) class DeviceScanner: """Device scanner object.""" hass: HomeAssistant = None def scan_devices(self) -> list[str]: """Scan for devices.""" raise NotImplementedError() async def async_scan_devices(self) -> Any: """Scan for devices.""" return await self.hass.async_add_executor_job(self.scan_devices) def get_device_name(self, device: str) -> str: """Get the name of a device.""" raise NotImplementedError() async def 
async_get_device_name(self, device: str) -> Any: """Get the name of a device.""" return await self.hass.async_add_executor_job(self.get_device_name, device) def get_extra_attributes(self, device: str) -> dict: """Get the extra attributes of a device.""" raise NotImplementedError() async def async_get_extra_attributes(self, device: str) -> Any: """Get the extra attributes of a device.""" return await self.hass.async_add_executor_job(self.get_extra_attributes, device) async def async_load_config(path: str, hass: HomeAssistant, consider_home: timedelta): """Load devices from YAML configuration file. This method is a coroutine. """ dev_schema = vol.Schema( { vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon), vol.Optional("track", default=False): cv.boolean, vol.Optional(CONF_MAC, default=None): vol.Any( None, vol.All(cv.string, vol.Upper) ), vol.Optional("gravatar", default=None): vol.Any(None, cv.string), vol.Optional("picture", default=None): vol.Any(None, cv.string), vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All( cv.time_period, cv.positive_timedelta ), } ) result = [] try: devices = await hass.async_add_executor_job(load_yaml_config_file, path) except HomeAssistantError as err: LOGGER.error("Unable to load %s: %s", path, str(err)) return [] except FileNotFoundError: return [] for dev_id, device in devices.items(): # Deprecated option. 
We just ignore it to avoid breaking change device.pop("vendor", None) device.pop("hide_if_away", None) try: device = dev_schema(device) device["dev_id"] = cv.slugify(dev_id) except vol.Invalid as exp: async_log_exception(exp, dev_id, devices, hass) else: result.append(Device(hass, **device)) return result def update_config(path: str, dev_id: str, device: Device): """Add device to YAML configuration file.""" with open(path, "a") as out: device = { device.dev_id: { ATTR_NAME: device.name, ATTR_MAC: device.mac, ATTR_ICON: device.icon, "picture": device.config_picture, "track": device.track, } } out.write("\n") out.write(dump(device)) def get_gravatar_for_email(email: str): """Return an 80px Gravatar for the given email address. Async friendly. """ return ( f"https://www.gravatar.com/avatar/" f"{hashlib.md5(email.encode('utf-8').lower()).hexdigest()}.jpg?s=80&d=wavatar" )
"""Tests for py_stringsimjoin's SuffixFilter (filter_pair / filter_tables /
filter_candset) plus invalid-argument checks.

Review fixes: removed a duplicated test method that silently shadowed its
first definition, dropped an unused local, replaced the NumPy-2.0-removed
``np.NaN`` alias with ``np.nan`` (the identical object), and made the
deprecated positional ``axis`` argument to ``DataFrame.drop`` explicit.
"""

import unittest

from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
import numpy as np
import pandas as pd

from py_stringsimjoin.filter.suffix_filter import SuffixFilter
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import COMP_OP_MAP, \
    remove_redundant_attrs
from py_stringsimjoin.utils.simfunctions import get_sim_function


# test SuffixFilter.filter_pair method
class FilterPairTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.qg2 = QgramTokenizer(2)

    # tests for JACCARD measure
    def test_jac_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww', self.dlm,
                              'JACCARD', 0.8, False, False, True)

    def test_jac_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee', self.dlm,
                              'JACCARD', 0.8, False, False, False)

    # tests for COSINE measure
    def test_cos_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww', self.dlm,
                              'COSINE', 0.8, False, False, True)

    def test_cos_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee', self.dlm,
                              'COSINE', 0.8, False, False, False)

    # tests for DICE measure
    def test_dice_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww', self.dlm,
                              'DICE', 0.8, False, False, True)

    def test_dice_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee', self.dlm,
                              'DICE', 0.8, False, False, False)

    # tests for OVERLAP measure
    def test_overlap_dlm_2_prune(self):
        self.test_filter_pair('dd ee', 'yy zz', self.dlm,
                              'OVERLAP', 2, False, False, True)

    def test_overlap_dlm_2_pass(self):
        self.test_filter_pair('dd zz', 'yy zz', self.dlm,
                              'OVERLAP', 2, False, False, False)

    def test_overlap_dlm_empty(self):
        self.test_filter_pair('', '', self.dlm,
                              'OVERLAP', 1, False, False, True)

    def test_overlap_dlm_empty_with_allow_empty(self):
        # allow_empty does not apply to OVERLAP, so the pair is still pruned.
        self.test_filter_pair('', '', self.dlm,
                              'OVERLAP', 1, True, False, True)

    # tests for EDIT_DISTANCE measure
    def test_edit_dist_qg2_prune(self):
        self.test_filter_pair('67126790', '26123485', self.qg2,
                              'EDIT_DISTANCE', 1, False, False, True)

    def test_edit_dist_qg2_pass(self):
        self.test_filter_pair('128690', '129695', self.qg2,
                              'EDIT_DISTANCE', 2, False, False, False)

    def test_edit_dist_qg2_empty(self):
        self.test_filter_pair('', '', self.qg2,
                              'EDIT_DISTANCE', 1, False, False, False)

    def test_edit_dist_qg2_empty_with_allow_empty(self):
        self.test_filter_pair('', '', self.qg2,
                              'EDIT_DISTANCE', 1, True, False, False)

    def test_edit_dist_qg2_no_padding_empty(self):
        self.test_filter_pair('', '', QgramTokenizer(2, padding=False),
                              'EDIT_DISTANCE', 1, False, False, False)

    # tests for empty string input
    def test_empty_lstring(self):
        self.test_filter_pair('ab', '', self.dlm,
                              'JACCARD', 0.8, False, False, True)

    def test_empty_rstring(self):
        self.test_filter_pair('', 'ab', self.dlm,
                              'JACCARD', 0.8, False, False, True)

    def test_empty_strings(self):
        self.test_filter_pair('', '', self.dlm,
                              'JACCARD', 0.8, False, False, True)

    def test_empty_strings_with_allow_empty(self):
        self.test_filter_pair('', '', self.dlm,
                              'JACCARD', 0.8, True, False, False)

    @nottest
    def test_filter_pair(self, lstring, rstring, tokenizer, sim_measure_type,
                         threshold, allow_empty, allow_missing,
                         expected_output):
        """Driver: build a SuffixFilter and check filter_pair's verdict
        (True means the pair is pruned)."""
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        actual_output = suffix_filter.filter_pair(lstring, rstring)
        assert_equal(actual_output, expected_output)


# test SuffixFilter.filter_tables method
class FilterTablesTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.A = pd.DataFrame([{'id': 1, 'attr': 'ab cd ef aa bb'},
                               {'id': 2, 'attr': ''},
                               {'id': 3, 'attr': 'ab'},
                               {'id': 4, 'attr': 'll oo he'},
                               {'id': 5, 'attr': 'xy xx zz fg'},
                               {'id': 6, 'attr': np.nan},
                               {'id': 7, 'attr': ''}])
        self.B = pd.DataFrame([{'id': 1, 'attr': 'zz fg xx'},
                               {'id': 2, 'attr': 'he ll'},
                               {'id': 3, 'attr': 'xy pl ou'},
                               {'id': 4, 'attr': 'aa'},
                               {'id': 5, 'attr': 'fg cd aa ef ab'},
                               {'id': 6, 'attr': None},
                               {'id': 7, 'attr': ' '}])
        self.empty_table = pd.DataFrame(columns=['id', 'attr'])
        self.default_l_out_prefix = 'l_'
        self.default_r_out_prefix = 'r_'

    # tests for JACCARD measure
    def test_jac_dlm_075(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    def test_jac_dlm_075_with_out_attrs(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['id', 'attr'], ['id', 'attr']))

    def test_jac_dlm_075_with_out_prefix(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.'))

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        self.test_filter_tables(self.dlm, 'COSINE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for DICE measure
    def test_dice_dlm_08(self):
        self.test_filter_tables(self.dlm, 'DICE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for OVERLAP measure
    def test_overlap_dlm_3(self):
        self.test_filter_tables(self.dlm, 'OVERLAP', 3, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for EDIT_DISTANCE measure
    def test_edit_distance_qg2_2(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr': '19990'},
                          {'l_id': 2, 'l_attr': '200'},
                          {'l_id': 3, 'l_attr': '0'},
                          {'l_id': 4, 'l_attr': ''},
                          {'l_id': 5, 'l_attr': np.nan}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr': '200155'},
                          {'r_id': 2, 'r_attr': '190'},
                          {'r_id': 3, 'r_attr': '2010'},
                          {'r_id': 4, 'r_attr': ''},
                          {'r_id': 5, 'r_attr': np.nan},
                          {'r_id': 6, 'r_attr': '18950'}])
        qg2_tok = QgramTokenizer(2)
        # NOTE(review): the original also built an `expected_pairs` set here,
        # but test_filter_tables computes its own expectation, so the unused
        # local has been dropped.
        self.test_filter_tables(qg2_tok, 'EDIT_DISTANCE', 2, False, False,
                                (A, B, 'l_id', 'r_id', 'l_attr', 'r_attr'))

    # test with n_jobs above 1
    # NOTE(review): this method was defined twice with identical bodies; the
    # second definition silently shadowed the first.  Only one copy is kept.
    def test_jac_dlm_075_with_njobs_above_1(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.', 2))

    # test filter attribute of type int
    def test_jac_qg2_with_filter_attr_of_type_int(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr': 1990},
                          {'l_id': 2, 'l_attr': 2000},
                          {'l_id': 3, 'l_attr': 0},
                          {'l_id': 4, 'l_attr': -1},
                          {'l_id': 5, 'l_attr': 1986}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr': 2001},
                          {'r_id': 2, 'r_attr': 1992},
                          {'r_id': 3, 'r_attr': 1886},
                          {'r_id': 4, 'r_attr': 2007},
                          {'r_id': 5, 'r_attr': 2012}])
        dataframe_column_to_str(A, 'l_attr', inplace=True)
        dataframe_column_to_str(B, 'r_attr', inplace=True)
        qg2_tok = QgramTokenizer(2, return_set=True)
        self.test_filter_tables(qg2_tok, 'JACCARD', 0.3, False, False,
                                (A, B, 'l_id', 'r_id', 'l_attr', 'r_attr'))

    # test allow_missing flag
    def test_jac_dlm_075_allow_missing(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, True,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # test allow_empty flag
    def test_jac_dlm_075_allow_empty(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # test allow_empty flag with output attributes
    def test_jac_dlm_075_allow_empty_with_out_attrs(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr']))

    # tests for empty table input
    def test_empty_ltable(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.B,
                                 'id', 'id', 'attr', 'attr'))

    def test_empty_rtable(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.empty_table,
                                 'id', 'id', 'attr', 'attr'))

    def test_empty_tables(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.empty_table,
                                 'id', 'id', 'attr', 'attr'))

    @nottest
    def test_filter_tables(self, tokenizer, sim_measure_type, threshold,
                           allow_empty, allow_missing, args):
        """Driver: compute the true join output pairs with a nested-loop
        join, run filter_tables, and verify the filter output is a superset
        of the true join output (a filter may keep false positives but must
        never drop a true pair)."""
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        sim_fn = get_sim_function(sim_measure_type)

        # compute the join output pairs
        join_output_pairs = set()
        for l_idx, l_row in args[0].iterrows():
            for r_idx, r_row in args[1].iterrows():
                # if allow_missing is set to True, then add pairs containing
                # missing value to the join output.
                if pd.isnull(l_row[args[4]]) or pd.isnull(r_row[args[5]]):
                    if allow_missing:
                        join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                        str(r_row[args[3]]))))
                    continue

                if sim_measure_type == 'EDIT_DISTANCE':
                    l_join_val = str(l_row[args[4]])
                    r_join_val = str(r_row[args[5]])
                    comp_fn = COMP_OP_MAP['<=']
                else:
                    l_join_val = tokenizer.tokenize(str(l_row[args[4]]))
                    r_join_val = tokenizer.tokenize(str(r_row[args[5]]))
                    comp_fn = COMP_OP_MAP['>=']

                if (len(l_join_val) == 0 and len(r_join_val) == 0 and
                        sim_measure_type not in ['OVERLAP', 'EDIT_DISTANCE']):
                    if allow_empty:
                        join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                        str(r_row[args[3]]))))
                    continue

                # if both attributes are not missing and not empty, then check
                # if the pair satisfies the join condition. If yes, then add it
                # to the join output.
                if comp_fn(sim_fn(l_join_val, r_join_val), threshold):
                    join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                    str(r_row[args[3]]))))

        actual_candset = suffix_filter.filter_tables(*args)

        expected_output_attrs = ['_id']
        l_out_prefix = self.default_l_out_prefix
        r_out_prefix = self.default_r_out_prefix

        # Check for l_out_prefix in args.
        if len(args) > 8:
            l_out_prefix = args[8]
        expected_output_attrs.append(l_out_prefix + args[2])

        # Check for r_out_prefix in args.
        if len(args) > 9:
            r_out_prefix = args[9]
        expected_output_attrs.append(r_out_prefix + args[3])

        # Check for l_out_attrs in args.
        if len(args) > 6:
            if args[6]:
                l_out_attrs = remove_redundant_attrs(args[6], args[2])
                for attr in l_out_attrs:
                    expected_output_attrs.append(l_out_prefix + attr)

        # Check for r_out_attrs in args.
        if len(args) > 7:
            if args[7]:
                r_out_attrs = remove_redundant_attrs(args[7], args[3])
                for attr in r_out_attrs:
                    expected_output_attrs.append(r_out_prefix + attr)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_candset.columns.values),
                          expected_output_attrs)

        actual_pairs = set()
        for idx, row in actual_candset.iterrows():
            actual_pairs.add(','.join((str(int(row[l_out_prefix + args[2]])),
                                       str(int(row[r_out_prefix + args[3]])))))

        # verify whether all the join output pairs are
        # present in the actual output pairs
        common_pairs = actual_pairs.intersection(join_output_pairs)
        assert_equal(len(common_pairs), len(join_output_pairs))


# test SuffixFilter.filter_candset method
class FilterCandsetTestCases(unittest.TestCase):
    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.A = pd.DataFrame([{'l_id': 1, 'l_attr': 'ab cd ef aa bb'},
                               {'l_id': 2, 'l_attr': ''},
                               {'l_id': 3, 'l_attr': 'ab'},
                               {'l_id': 4, 'l_attr': 'll oo he'},
                               {'l_id': 5, 'l_attr': 'xy xx zz fg'},
                               {'l_id': 6, 'l_attr': np.nan}])
        self.B = pd.DataFrame([{'r_id': 1, 'r_attr': 'zz fg xx'},
                               {'r_id': 2, 'r_attr': 'he ll'},
                               {'r_id': 3, 'r_attr': 'xy pl ou'},
                               {'r_id': 4, 'r_attr': 'aa'},
                               {'r_id': 5, 'r_attr': 'fg cd aa ef ab'},
                               {'r_id': 6, 'r_attr': None}])

        # generate cartesian product A x B to be used as candset
        self.A['tmp_join_key'] = 1
        self.B['tmp_join_key'] = 1
        self.C = pd.merge(self.A[['l_id', 'tmp_join_key']],
                          self.B[['r_id', 'tmp_join_key']],
                          on='tmp_join_key').drop('tmp_join_key', axis=1)

        self.empty_A = pd.DataFrame(columns=['l_id', 'l_attr'])
        self.empty_B = pd.DataFrame(columns=['r_id', 'r_attr'])
        self.empty_candset = pd.DataFrame(columns=['l_id', 'r_id'])

    # tests for JACCARD measure
    def test_jac_dlm_075(self):
        expected_pairs = set(['1,5', '3,4', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.75, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        expected_pairs = set(['1,5', '3,4', '4,2', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'COSINE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for DICE measure
    def test_dice_dlm_08(self):
        expected_pairs = set(['1,5', '3,4', '4,2', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'DICE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # test allow_missing flag
    def test_jac_dlm_075_allow_missing(self):
        expected_pairs = set(['1,5', '3,4', '5,1', '5,3', '6,1', '6,2',
                              '6,3', '6,4', '6,5', '6,6', '1,6', '2,6',
                              '3,6', '4,6', '5,6'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.75, False, True,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for empty candset input
    def test_empty_candset(self):
        expected_pairs = set()
        self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
                                 (self.empty_candset, 'l_id', 'r_id',
                                  self.empty_A, self.empty_B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    @nottest
    def test_filter_candset(self, tokenizer, sim_measure_type, threshold,
                            allow_empty, allow_missing, args, expected_pairs):
        """Driver: run filter_candset and compare the surviving id pairs
        against the expected set."""
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        actual_output_candset = suffix_filter.filter_candset(*args)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_output_candset.columns.values),
                          list(args[0].columns.values))

        actual_pairs = set()
        for idx, row in actual_output_candset.iterrows():
            actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))

        # verify whether the actual pairs and the expected pairs match.
        assert_equal(len(expected_pairs), len(actual_pairs))
        common_pairs = actual_pairs.intersection(expected_pairs)
        assert_equal(len(common_pairs), len(expected_pairs))


class SuffixFilterInvalidTestCases(unittest.TestCase):
    """Invalid-argument handling: each test expects a specific exception."""

    def setUp(self):
        self.A = pd.DataFrame([{'A.id': 1, 'A.attr': 'hello',
                                'A.int_attr': 5}])
        self.B = pd.DataFrame([{'B.id': 1, 'B.attr': 'world',
                                'B.int_attr': 6}])
        self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.sim_measure_type = 'JACCARD'
        self.threshold = 0.8

    @raises(TypeError)
    def test_invalid_ltable(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables([], self.B, 'A.id', 'B.id',
                                    'A.attr', 'B.attr')

    @raises(TypeError)
    def test_invalid_rtable(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, [], 'A.id', 'B.id',
                                    'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_key_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.invalid_id', 'B.id',
                                    'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_key_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.invalid_id',
                                    'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_filter_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.invalid_attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_filter_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.attr', 'B.invalid_attr')

    @raises(AssertionError)
    def test_numeric_l_filter_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.int_attr', 'B.attr')

    @raises(AssertionError)
    def test_numeric_r_filter_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.attr', 'B.int_attr')

    @raises(AssertionError)
    def test_invalid_l_out_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.attr', 'B.attr',
                                    ['A.invalid_attr'], ['B.attr'])

    @raises(AssertionError)
    def test_invalid_r_out_attr(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     self.threshold)
        suffix_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
                                    'A.attr', 'B.attr',
                                    ['A.attr'], ['B.invalid_attr'])

    @raises(TypeError)
    def test_invalid_tokenizer(self):
        suffix_filter = SuffixFilter([], self.sim_measure_type, self.threshold)

    @raises(AssertionError)
    def test_invalid_tokenizer_for_edit_distance(self):
        suffix_filter = SuffixFilter(self.tokenizer, 'EDIT_DISTANCE', 2)

    @raises(TypeError)
    def test_invalid_sim_measure_type(self):
        suffix_filter = SuffixFilter(self.tokenizer, 'INVALID_TYPE',
                                     self.threshold)

    @raises(AssertionError)
    def test_invalid_threshold(self):
        suffix_filter = SuffixFilter(self.tokenizer, self.sim_measure_type,
                                     1.2)
# -*- coding: utf-8 -*-
"""Client for the Toshiba FlashAir SD card command.cgi HTTP API.

Python 2 module (urlparse / urllib / StringIO imports); kept on Python 2
semantics to preserve behavior.  Review fixes: the WLANSD_FILELIST header
check compared a csv row (a list) against a string and so could never match
(now compares the first cell); ``e.message`` (deprecated, absent on Python 3
and on many exception types) replaced with ``str(e)``; removed a shadowing
re-import of ``sys`` in the ``__main__`` block.
"""

__author__ = 'mertsalik'

import os
from urlparse import urlparse
from urllib import urlencode
from requests import get
import requests.exceptions
from StringIO import StringIO
from csv import reader as csv_reader
import logging
from PIL import Image
from falib import get_image_name
import time
import uuid
from shutil import copy
import sys

FLASH_AIR_ROOT = "/DCIM"
DEFAULT_FLASHAIR_IP = "flashairdev.local"
API_ADDRESS = "http://{ip}/command.cgi"
HOST_ADDRESS = "http://{ip}"
# command.cgi operation codes
OP_FILE_LIST = 100
OP_LAST_CHANGE = 121
OP_CONTROL_IMAGE = 109
# values of the "attribute" column in a file-list row
FA_FILE_IDENTIFIER = 32
FA_DIR_IDENTIFIER = 16


class FAFileFilterEnum:
    """Filter modes for directory-listing results."""
    FILES_AND_DIRS = 4
    DIRS_ONLY = 3
    FILES_ONLY = 2


class FACommandException(Exception):
    """Raised for malformed commands or failed HTTP calls."""
    def __str__(self):
        return ': '.join([str(i) for i in self.args])


class FACommandAPIException(Exception):
    """Raised for unexpected content in an API response."""
    def __str__(self):
        return ': '.join([str(i) for i in self.args])


class FACommand:
    """A single command.cgi request: either built from a host + ops dict,
    or wrapping a pre-built request URL."""

    def __init__(self, host=DEFAULT_FLASHAIR_IP, ops=None, request_url=None):
        if request_url is None:
            self.request_url = API_ADDRESS.format(ip=host)
            self.host = host
            self.__build__(ops)
        else:
            self.request_url = request_url
            self.__validate__()

    def __str__(self):
        return "{} : \"{}\"".format(self.__class__.__name__, self.request_url)

    def __build__(self, ops):
        """Append the ops dict to the request URL as a query string."""
        if not isinstance(ops, dict):
            raise FACommandException("FACommand takes ops argument as dict!")
        # Ensure a separator before the encoded params: '?' if none yet,
        # '&' if a query string is already present.
        if self.request_url[-1] not in ('?', '&'):
            self.request_url += '&' if ('?' in self.request_url) else '?'
        self.request_url = self.request_url + urlencode(ops)
        logging.getLogger().debug("__build__ request_url: " + self.request_url)

    def __validate__(self):
        """Sanity-check a caller-supplied request URL."""
        o = urlparse(self.request_url)
        if o.scheme != 'http':
            raise FACommandException("Request Url is not valid")
        if o.path == '':
            raise FACommandException(
                "Request Url does not contain any command")

    def __doget__(self):
        """Issue the GET request; return the raw response body."""
        try:
            response = get(self.request_url)
            if response.status_code != 200:
                raise FACommandException("Http request failed",
                                         response.status_code)
            return response.content
        except requests.exceptions.HTTPError as httpex:
            raise httpex
        except Exception as e:
            # str(e) instead of the deprecated e.message attribute.
            raise FACommandException("Exception in API call", str(e))

    def run(self):
        """Execute the command.

        :return: API call response body (bytes/str as returned by requests)
        """
        return self.__doget__()


class FACommandAPI:
    """High-level operations (list, download) on a FlashAir card."""

    def __init__(self, host=DEFAULT_FLASHAIR_IP):
        self.host = host

    @classmethod
    def __parse_file_list(cls, dir_info,
                          file_filter=FAFileFilterEnum.FILES_AND_DIRS):
        """Parse an op=100 response into a list of entry dicts.

        The response is CSV: a "WLANSD_FILELIST" header line, then one
        6-column row per entry (dir, name, size, attribute, date, time).
        """
        file_list = []
        f = StringIO(dir_info)
        reader = csv_reader(f, delimiter=',')
        rows = []
        for row in reader:
            rows.append(row)
        if len(rows) > 0:
            # FIX: the original compared the whole row (a list) to the header
            # string, which is always unequal, so the check degenerated to a
            # field-count test.  Compare the first (and only) cell instead.
            if len(rows[0]) != 1 or rows[0][0] != "WLANSD_FILELIST":
                raise FACommandAPIException(
                    "Unexpected file entry result at first line", rows[0])
            # TODO: implement a mapping function for filtering by type
            logging.getLogger().debug(
                "Row count of file information : " + str(len(rows)))
            for row in rows[1:]:
                if len(row) != 6:
                    raise FACommandAPIException("Unknown file entry ", row)
                if file_filter is None or \
                        file_filter is FAFileFilterEnum.FILES_AND_DIRS:
                    logging.getLogger().debug("Filtering disabled.")
                    file_list.append({
                        "Path": row[0] + "/" + row[1],
                        "DecimalDate": row[4],
                        "DecimalTime": row[5]
                    })
                elif file_filter is FAFileFilterEnum.FILES_ONLY:
                    # only files
                    if int(row[3]) == FA_FILE_IDENTIFIER:
                        logging.getLogger().debug("Filtering only files.")
                        file_list.append({
                            "Path": row[0] + "/" + row[1],
                            "DecimalDate": row[4],
                            "DecimalTime": row[5]
                        })
                elif file_filter is FAFileFilterEnum.DIRS_ONLY:
                    # only directories
                    if int(row[3]) == FA_DIR_IDENTIFIER:
                        logging.getLogger().debug(
                            "Filtering only directories.")
                        file_list.append({
                            "Path": row[0] + "/" + row[1],
                            "DecimalDate": row[4],
                            "DecimalTime": row[5]
                        })
                else:
                    raise FACommandAPIException("Unknown file filtering!")
        return file_list

    def get_last_operation_time(self):
        """Query op=121 (last write timestamp counter).

        :return: {"Status": 1, "Result": <digit string>}
        :raises FACommandAPIException: if the response is not all digits.
        """
        command = FACommand(self.host, ops={"op": OP_LAST_CHANGE})
        last_op_time = command.run()
        if not last_op_time.isdigit():
            raise FACommandAPIException(
                "Unexpected last write time content from FlashAir")
        return {
            "Status": 1,
            "Result": last_op_time
        }

    def get_directories(self, path=FLASH_AIR_ROOT):
        """List all entries (files and directories) under `path`.

        :return: {"Status": 1, "Result": [entry dicts]} on success,
                 {"Status": 0, "Result": exception} on failure.
        """
        dir_info_response = None
        try:
            command = FACommand(self.host,
                                ops={"op": OP_FILE_LIST, "DIR": path})
            dir_info_response = command.run()
        except FACommandAPIException as facaex:
            logging.getLogger().error(facaex)
            return {"Status": 0, "Result": facaex}
        except FACommandException as facex:
            logging.getLogger().error(facex)
            return {"Status": 0, "Result": facex}
        except Exception as ex:
            logging.getLogger().error(ex)
            return {"Status": 0, "Result": ex}
        dir_list = self.__parse_file_list(dir_info_response)
        return {
            "Status": 1,
            "Result": dir_list
        }

    def get_files(self, path=FLASH_AIR_ROOT):
        """List file entries (directories excluded) under `path`.

        :return: same Status/Result dict shape as get_directories().
        """
        try:
            command = FACommand(self.host,
                                ops={"op": OP_FILE_LIST, "DIR": path})
            dir_info_response = command.run()
        except FACommandAPIException as facaex:
            logging.getLogger().error(facaex)
            return {"Status": 0, "Result": facaex}
        except FACommandException as facex:
            logging.getLogger().error(facex)
            return {"Status": 0, "Result": facex}
        except Exception as ex:
            logging.getLogger().error(ex)
            return {"Status": 0, "Result": ex}
        dir_list = self.__parse_file_list(
            dir_info_response, file_filter=FAFileFilterEnum.FILES_ONLY)
        return {
            "Status": 1,
            "Result": dir_list
        }

    def get_control_image(self):
        """Query op=109; returns the control-image path on success, or a
        {"Status": 0, ...} dict on failure (mixed return types preserved
        from the original interface)."""
        control_image_path = None
        try:
            command = FACommand(self.host, {"op": OP_CONTROL_IMAGE})
            control_image_path = command.run()
        except FACommandAPIException as facaex:
            logging.getLogger().error(facaex)
            return {"Status": 0, "Result": facaex}
        except FACommandException as facex:
            logging.getLogger().error(facex)
            return {"Status": 0, "Result": facex}
        except Exception as ex:
            logging.getLogger().error(ex)
            return {"Status": 0, "Result": ex}
        return control_image_path

    def download_file(self, remote_path, destination, skip_existing=False):
        """Download `remote_path` via PIL and save it to `destination`.

        NOTE(review): returns None (not a Status dict) when skipping an
        existing file -- preserved for interface compatibility.
        """
        if skip_existing:
            if os.path.exists(destination):
                return
        try:
            image_url = HOST_ADDRESS.format(ip=self.host) + remote_path
            r = requests.get(image_url)
            i = Image.open(StringIO(r.content))
            i.save(destination)
        except Exception as e:
            logging.getLogger().error(e)
            return {"Status": 0, "Result": e}
        return {"Status": 1, "Result": destination}

    def download_file_v2(self, remote_path, destination, skip_existing=False):
        """Streaming download with a console progress bar.

        Writes into a temp file (pre-sized when Content-Length is known),
        then copies it to `destination`.
        """
        if skip_existing:
            if os.path.exists(destination):
                return
        try:
            url = HOST_ADDRESS.format(ip=self.host) + remote_path
            temp_name = str(uuid.uuid4())
            with open(temp_name, 'wb') as f:
                start = time.clock()
                r = requests.get(url, stream=True)
                if 'content-length' in r.headers:
                    total_length = int(r.headers.get('content-length'))
                    # Pre-allocate the file to its final size.
                    f.seek(total_length - 1)
                    f.write("\0")
                    f.flush()
                    f.seek(0)
                    dl = 0
                    if total_length is None:
                        # Defensive branch kept from the original; total_length
                        # is always an int here, so this is effectively dead.
                        f.write(r.content)
                    else:
                        for chunk in r.iter_content(1024 * 512):
                            dl += len(chunk)
                            f.write(chunk)
                            done = int(100 * dl / total_length)
                            sys.stdout.write("\r[%s%s] %s bps" % (
                                '=' * done, ' ' * (100 - done),
                                dl / (time.clock() - start)))
                            sys.stdout.flush()
                else:
                    for chunk in r.iter_content(1024 * 512):
                        f.write(chunk)
                sys.stdout.write("\nComplete\n")
            logging.info("Download completed.")
            copy(temp_name, destination)
            os.remove(temp_name)
        except Exception as e:
            logging.getLogger().error(e)
            return {"Status": 0, "Result": e}
        return {"Status": 1, "Result": destination}

    @classmethod
    def get_root_directory(cls):
        return FLASH_AIR_ROOT


if __name__ == "__main__":
    # sys is already imported at module level; the original re-imported it here.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    api = FACommandAPI()
    logging.getLogger().debug("api.host " + api.host)
    root_directories = api.get_directories()
    logging.getLogger().debug(root_directories)
    if root_directories['Status']:
        for dir_obj in root_directories['Result']:
            files = api.get_files(dir_obj['Path'])
            if files['Status'] == 0:
                raise FACommandAPIException(files['Result'])
            for file in files["Result"]:
                api.download_file(file["Path"],
                                  "/Users/mertsalik/Desktop/" +
                                  get_image_name(file))
                break
"""Unit tests for fuzzywuzzy's ratio scorers (fuzz) and the process
extraction helpers."""

from fuzz import *
import process
import utils
import itertools
import unittest


class RatioTest(unittest.TestCase):
    # Shared fixture strings: s1/s1a identical, s2 differs only in case,
    # s3 contains s1, s4/s5 are the same tokens in different order.
    def setUp(self):
        self.s1 = "new york mets"
        self.s1a = "new york mets"
        self.s2 = "new YORK mets"
        self.s3 = "the wonderful new york mets"
        self.s4 = "new york mets vs atlanta braves"
        self.s5 = "atlanta braves vs new york mets"
        self.s6 = "new york mets - atlanta braves"

        self.cirque_strings = [
            "cirque du soleil - zarkana - las vegas",
            "cirque du soleil ",
            "cirque du soleil las vegas",
            "zarkana las vegas",
            "las vegas cirque du soleil at the bellagio",
            "zarakana - cirque du soleil - bellagio"
        ]

        self.baseball_strings = [
            "new york mets vs chicago cubs",
            "chicago cubs vs chicago white sox",
            "philladelphia phillies vs atlanta braves",
            "braves vs mets",
        ]

    def tearDown(self):
        pass

    def testEqual(self):
        self.assertEqual(ratio(self.s1, self.s1a), 100)

    def testCaseInsensitive(self):
        # plain ratio is case sensitive; full_process normalizes case.
        self.assertNotEqual(ratio(self.s1, self.s2), 100)
        self.assertEqual(ratio(utils.full_process(self.s1),
                               utils.full_process(self.s2)), 100)

    def testPartialRatio(self):
        self.assertEqual(partial_ratio(self.s1, self.s3), 100)

    def testTokenSortRatio(self):
        self.assertEqual(token_sort_ratio(self.s1, self.s1a), 100)

    def testPartialTokenSortRatio(self):
        self.assertEqual(partial_token_sort_ratio(self.s1, self.s1a), 100)
        self.assertEqual(partial_token_sort_ratio(self.s4, self.s5), 100)

    def testTokenSetRatio(self):
        self.assertEqual(token_set_ratio(self.s4, self.s5), 100)

    def testPartialTokenSetRatio(self):
        self.assertEqual(partial_token_set_ratio(self.s4, self.s5), 100)

    def testQuickRatioEqual(self):
        self.assertEqual(QRatio(self.s1, self.s1a), 100)

    def testQuickRatioCaseInsensitive(self):
        self.assertEqual(QRatio(self.s1, self.s2), 100)

    def testQuickRatioNotEqual(self):
        self.assertNotEqual(QRatio(self.s1, self.s3), 100)

    def testWRatioEqual(self):
        self.assertEqual(WRatio(self.s1, self.s1a), 100)

    def testWRatioCaseInsensitive(self):
        self.assertEqual(WRatio(self.s1, self.s2), 100)

    def testWRatioPartialMatch(self):
        # a partial match is scaled by .9
        self.assertEqual(WRatio(self.s1, self.s3), 90)

    def testWRatioMisorderedMatch(self):
        # misordered full matches are scaled by .95
        self.assertEqual(WRatio(self.s4, self.s5), 95)

    # test processing methods
    # NOTE(review): these extractOne tests live in RatioTest (not
    # ProcessTest) in the original; kept here to preserve the layout.
    def testGetBestChoice1(self):
        query = "new york mets at atlanta braves"
        best = process.extractOne(query, self.baseball_strings)
        self.assertEqual(best[0], "braves vs mets")

    def testGetBestChoice2(self):
        query = "philadelphia phillies at atlanta braves"
        best = process.extractOne(query, self.baseball_strings)
        self.assertEqual(best[0], self.baseball_strings[2])

    def testGetBestChoice3(self):
        query = "atlanta braves at philadelphia phillies"
        best = process.extractOne(query, self.baseball_strings)
        self.assertEqual(best[0], self.baseball_strings[2])

    def testGetBestChoice4(self):
        query = "chicago cubs vs new york mets"
        best = process.extractOne(query, self.baseball_strings)
        self.assertEqual(best[0], self.baseball_strings[0])


class ProcessTest(unittest.TestCase):
    def setUp(self):
        self.s1 = "new york mets"
        self.s1a = "new york mets"
        self.s2 = "new YORK mets"
        self.s3 = "the wonderful new york mets"
        self.s4 = "new york mets vs atlanta braves"
        self.s5 = "atlanta braves vs new york mets"
        self.s6 = "new york mets - atlanta braves"

        self.cirque_strings = [
            "cirque du soleil - zarkana - las vegas",
            "cirque du soleil ",
            "cirque du soleil las vegas",
            "zarkana las vegas",
            "las vegas cirque du soleil at the bellagio",
            "zarakana - cirque du soleil - bellagio"
        ]

        self.baseball_strings = [
            "new york mets vs chicago cubs",
            "chicago cubs vs chicago white sox",
            "philladelphia phillies vs atlanta braves",
            "braves vs mets",
        ]

    def testWithProcessor(self):
        # choices are records; the processor extracts the field to match on.
        events = [
            ["chicago cubs vs new york mets", "CitiField",
             "2011-05-11", "8pm"],
            ["new york yankees vs boston red sox", "Fenway Park",
             "2011-05-11", "8pm"],
            ["atlanta braves vs pittsburgh pirates", "PNC Park",
             "2011-05-11", "8pm"],
        ]
        query = "new york mets vs chicago cubs"
        processor = lambda event: event[0]

        best = process.extractOne(query, events, processor=processor)
        self.assertEqual(best[0], events[0])

    def testWithScorer(self):
        choices = [
            "new york mets vs chicago cubs",
            "chicago cubs at new york mets",
            "atlanta braves vs pittsbugh pirates",
            "new york yankees vs boston red sox"
        ]

        # in this hypothetical example we care about ordering, so we use quick ratio
        query = "new york mets at chicago cubs"
        scorer = QRatio

        # first, as an example, the normal way would select the "more 'complete' match of choices[1]"
        best = process.extractOne(query, choices)
        self.assertEqual(best[0], choices[1])

        # now, use the custom scorer
        best = process.extractOne(query, choices, scorer=scorer)
        self.assertEqual(best[0], choices[0])

    def testWithCutoff(self):
        choices = [
            "new york mets vs chicago cubs",
            "chicago cubs at new york mets",
            "atlanta braves vs pittsbugh pirates",
            "new york yankees vs boston red sox"
        ]

        query = "los angeles dodgers vs san francisco giants"

        # in this situation, this is an event that does not exist in the list
        # we don't want to randomly match to something, so we use a reasonable cutoff
        best = process.extractOne(query, choices, score_cutoff=50)
        self.assertIsNone(best)

        # however if we had no cutoff, something would get returned
        best = process.extractOne(query, choices)
        self.assertIsNotNone(best)

    def testEmptyStrings(self):
        choices = [
            "",
            "new york mets vs chicago cubs",
            "new york yankees vs boston red sox",
            "",
            ""
        ]

        query = "new york mets at chicago cubs"

        best = process.extractOne(query, choices)
        self.assertEqual(best[0], choices[1])

    def testNullStrings(self):
        choices = [
            None,
            "new york mets vs chicago cubs",
            "new york yankees vs boston red sox",
            None,
            None
        ]

        query = "new york mets at chicago cubs"

        best = process.extractOne(query, choices)
        self.assertEqual(best[0], choices[1])


if __name__ == '__main__':
    unittest.main()  # run all tests
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Python source expertise for coverage.py"""

import os.path
import types
import zipimport

from coverage import env, files
from coverage.misc import contract, expensive, isolate_module, join_regex
from coverage.misc import CoverageException, NoSource
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter

os = isolate_module(os)


@contract(returns='bytes')
def read_python_source(filename):
    """Read the Python source text from `filename`.

    Returns bytes, with all line endings (\\r\\n and \\r) normalized to \\n.
    """
    with open(filename, "rb") as f:
        source = f.read()

    if env.IRONPYTHON:
        # IronPython reads Unicode strings even for "rb" files.
        source = bytes(source)

    return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")


@contract(returns='unicode')
def get_python_source(filename):
    """Return the source code for `filename`, as unicode.

    On Windows a missing ``.py`` file may be found as ``.pyw``.  If the
    file is not on disk, it is also looked up inside zip/egg/pex archives
    via `get_zip_bytes`.  Raises `NoSource` if nothing can be found.
    """
    base, ext = os.path.splitext(filename)
    if ext == ".py" and env.WINDOWS:
        exts = [".py", ".pyw"]
    else:
        exts = [ext]

    for ext in exts:
        try_filename = base + ext
        if os.path.exists(try_filename):
            # A regular text file: open it.
            source = read_python_source(try_filename)
            break

        # Maybe it's in a zip file?
        source = get_zip_bytes(try_filename)
        if source is not None:
            break
    else:
        # Couldn't find source.
        exc_msg = "No source for code: '%s'.\n" % (filename,)
        exc_msg += "Aborting report output, consider using -i."
        raise NoSource(exc_msg)

    # Replace \f because of http://bugs.python.org/issue19035
    source = source.replace(b'\f', b' ')
    source = source.decode(source_encoding(source), "replace")

    # Python code should always end with a line with a newline.
    if source and source[-1] != '\n':
        source += '\n'

    return source


@contract(returns='bytes|None')
def get_zip_bytes(filename):
    """Get data from `filename` if it is a zip file path.

    Returns the bytestring data read from the zip file, or None if no zip
    file could be found or `filename` isn't in it.  The data returned will
    be an empty string if the file is empty.
    """
    markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
    for marker in markers:
        if marker in filename:
            parts = filename.split(marker)
            try:
                # marker[:-1] strips the trailing path separator to get the
                # archive path itself.
                zi = zipimport.zipimporter(parts[0]+marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = zi.get_data(parts[1])
            except IOError:
                continue
            return data
    return None


def source_for_file(filename):
    """Return the source filename for `filename`.

    Given a file name being traced, return the best guess as to the source
    file to attribute it to.
    """
    if filename.endswith(".py"):
        # .py files are themselves source files.
        return filename

    elif filename.endswith((".pyc", ".pyo")):
        # Bytecode files probably have source files near them.
        py_filename = filename[:-1]
        if os.path.exists(py_filename):
            # Found a .py file, use that.
            return py_filename
        if env.WINDOWS:
            # On Windows, it could be a .pyw file.
            pyw_filename = py_filename + "w"
            if os.path.exists(pyw_filename):
                return pyw_filename
        # Didn't find source, but it's probably the .py file we want.
        return py_filename

    elif filename.endswith("$py.class"):
        # Jython is easy to guess.
        return filename[:-9] + ".py"

    # No idea, just use the file name as-is.
    return filename


def source_for_morf(morf):
    """Get the source filename for the module-or-file `morf`.

    Raises `CoverageException` for a module object with no ``__file__``
    (e.g. a PEP-420 namespace package).
    """
    if hasattr(morf, '__file__') and morf.__file__:
        filename = morf.__file__
    elif isinstance(morf, types.ModuleType):
        # A module should have had .__file__, otherwise we can't use it.
        # This could be a PEP-420 namespace package.
        raise CoverageException("Module {0} has no file".format(morf))
    else:
        filename = morf

    filename = source_for_file(files.unicode_filename(filename))
    return filename


class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        self.coverage = coverage

        filename = source_for_morf(morf)

        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))

        if hasattr(morf, '__name__'):
            # Derive a dotted-module relative name like "pkg/mod.py",
            # with packages reported as "pkg/__init__.py".
            name = morf.__name__.replace(".", os.sep)
            if os.path.basename(filename).startswith('__init__.'):
                name += os.sep + "__init__"
            name += ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name

        # Lazily-populated caches; see `parser` and `source`.
        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None

    def __repr__(self):
        return "<PythonFileReporter {0!r}>".format(self.filename)

    @contract(returns='unicode')
    def relative_filename(self):
        """Return the relative display name computed in __init__."""
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        # Delegate line translation to the parser.
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        # Delegate arc translation to the parser.
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        """Return lines excluded from branch coverage by the partial-branch regexes."""
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list)
        )
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        # Cache the decoded source text on first access.
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.
        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        # Tokenize the (cached) source for syntax-colored reporting.
        return source_token_lines(self.source())
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from cms.tests.base import CMSTestCase
from cms.models import Page
from cms.menu import CMSMenu
from menus.templatetags.menu_tags import show_menu, show_sub_menu,\
show_breadcrumb, language_chooser, page_language_url, show_menu_below_id
from menus.menu_pool import menu_pool
from menus.base import NavigationNode

class MenusTestCase(CMSTestCase):
    """Tests for the CMS menu template tags against a fixed page tree.

    Each test runs against the eight-page structure built in
    `create_some_nodes`; `setUp` restricts the menu pool to just the
    CMSMenu so no other registered menus interfere.
    """

    def setUp(self):
        settings.CMS_MODERATOR = False
        u = User(username="test", is_staff = True, is_active = True, is_superuser = True)
        u.set_password("test")
        u.save()
        self.login_user(u)
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Save and restrict the registered menus; restored in tearDown.
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'CMSMenu':self.old_menu['CMSMenu']}
        menu_pool.clear(settings.SITE_ID)
        self.create_some_nodes()

    def tearDown(self):
        menu_pool.menus = self.old_menu

    def create_some_nodes(self):
        """
        Creates the following structure:

        + P1
        | + P2
        |   + P3
        + P4
        | + P5
        + P6 (not in menu)
          + P7
          + P8
        """
        self.page1 = self.create_page(parent_page=None, published=True, in_navigation=True)
        self.page2 = self.create_page(parent_page=self.page1, published=True, in_navigation=True)
        self.page3 = self.create_page(parent_page=self.page2, published=True, in_navigation=True)
        self.page4 = self.create_page(parent_page=None, published=True, in_navigation=True)
        self.page5 = self.create_page(parent_page=self.page4, published=True, in_navigation=True)
        self.page6 = self.create_page(parent_page=None, published=True, in_navigation=False)
        self.page7 = self.create_page(parent_page=self.page6, published=True, in_navigation=True)
        self.page8 = self.create_page(parent_page=self.page6, published=True, in_navigation=True)
        self.all_pages = [self.page1, self.page2, self.page3, self.page4, self.page5, self.page6, self.page7, self.page8]
        self.top_level_pages = [self.page1, self.page4]
        self.level1_pages = [self.page2, self.page5,self.page7,self.page8]
        self.level2_pages = [self.page3]

    def test_01_basic_cms_menu(self):
        self.assertEqual(len(menu_pool.menus), 1)
        response = self.client.get("/")
        self.assertEquals(response.status_code, 200)
        request = self.get_request()

        # test the cms menu class
        menu = CMSMenu()
        nodes = menu.get_nodes(request)
        self.assertEqual(len(nodes), len(self.all_pages))

    def test_02_show_menu(self):
        context = self.get_context()
        # test standard show_menu
        nodes = show_menu(context)['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].selected, True)
        self.assertEqual(nodes[0].sibling, False)
        self.assertEqual(nodes[0].descendant, False)
        self.assertEqual(nodes[0].children[0].descendant, True)
        self.assertEqual(nodes[0].children[0].children[0].descendant, True)
        self.assertEqual(nodes[0].get_absolute_url(), "/")
        self.assertEqual(nodes[1].get_absolute_url(), self.page4.get_absolute_url())
        self.assertEqual(nodes[1].sibling, True)
        self.assertEqual(nodes[1].selected, False)

    def test_03_only_active_tree(self):
        context = self.get_context()
        # test standard show_menu: only the active branch is expanded
        nodes = show_menu(context, 0, 100, 0, 100)['children']
        self.assertEqual(len(nodes[1].children), 0)
        self.assertEqual(len(nodes[0].children), 1)
        self.assertEqual(len(nodes[0].children[0].children), 1)
        context = self.get_context(path=self.page4.get_absolute_url())
        nodes = show_menu(context, 0, 100, 0, 100)['children']
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[0].children), 0)

    def test_04_only_one_active_level(self):
        context = self.get_context()
        # test standard show_menu with a single active level
        nodes = show_menu(context, 0, 100, 0, 1)['children']
        self.assertEqual(len(nodes[1].children), 0)
        self.assertEqual(len(nodes[0].children), 1)
        self.assertEqual(len(nodes[0].children[0].children), 0)

    def test_05_only_level_zero(self):
        context = self.get_context()
        # test standard show_menu limited to the root level
        nodes = show_menu(context, 0, 0, 0, 0)['children']
        for node in nodes:
            self.assertEqual(len(node.children), 0)

    def test_06_only_level_one(self):
        context = self.get_context()
        # test standard show_menu starting at level one
        nodes = show_menu(context, 1, 1, 100, 100)['children']
        self.assertEqual(len(nodes), len(self.level1_pages))
        for node in nodes:
            self.assertEqual(len(node.children), 0)

    def test_07_only_level_one_active(self):
        context = self.get_context()
        # test standard show_menu showing only the active level-one node
        nodes = show_menu(context, 1, 1, 0, 100)['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].descendant, True)
        self.assertEqual(len(nodes[0].children), 0)

    def test_08_level_zero_and_one(self):
        context = self.get_context()
        # test standard show_menu over the first two levels
        nodes = show_menu(context, 0, 1, 100, 100)['children']
        self.assertEqual(len(nodes), 2)
        for node in nodes:
            self.assertEqual(len(node.children), 1)

    def test_09_show_submenu(self):
        context = self.get_context()
        # test standard show_sub_menu below the selected node
        nodes = show_sub_menu(context)['children']
        self.assertEqual(nodes[0].descendant, True)
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 1)

        nodes = show_sub_menu(context, 1)['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 0)

    def test_10_show_breadcrumb(self):
        context = self.get_context(path=self.page3.get_absolute_url())
        nodes = show_breadcrumb(context)['ancestors']
        self.assertEqual(len(nodes), 3)
        nodes = show_breadcrumb(context, 1)['ancestors']
        self.assertEqual(len(nodes), 2)
        context = self.get_context()
        nodes = show_breadcrumb(context)['ancestors']
        self.assertEqual(len(nodes), 1)
        nodes = show_breadcrumb(context, 1)['ancestors']
        self.assertEqual(len(nodes), 0)

        # A page taken out of navigation must still appear in breadcrumbs.
        page1 = Page.objects.get(pk=self.page1.pk)
        page1.in_navigation = False
        page1.save()
        page2 = Page.objects.get(pk=self.page2.pk)
        context = self.get_context(path=self.page2.get_absolute_url())
        nodes = show_breadcrumb(context)['ancestors']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), "/")
        self.assertEqual(isinstance(nodes[0], NavigationNode), True)
        self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())

    def test_11_language_chooser(self):
        # test simple language chooser with default args
        context = self.get_context(path=self.page3.get_absolute_url())
        new_context = language_chooser(context)
        self.assertEqual(len(new_context['languages']), len(settings.CMS_SITE_LANGUAGES[settings.SITE_ID]))
        self.assertEqual(new_context['current_language'], settings.LANGUAGE_CODE)
        # try a different template and some different args
        new_context = language_chooser(context, 'menu/test_language_chooser.html')
        self.assertEqual(new_context['template'], 'menu/test_language_chooser.html')
        new_context = language_chooser(context, 'short', 'menu/test_language_chooser.html')
        self.assertEqual(new_context['template'], 'menu/test_language_chooser.html')
        for lang in new_context['languages']:
            self.assertEqual(*lang)

    def test_12_page_language_url(self):
        context = self.get_context(path=self.page3.get_absolute_url())
        url = page_language_url(context, settings.LANGUAGES[0][0])['content']
        self.assertEqual( url, "/%s%s" % (settings.LANGUAGES[0][0], self.page3.get_absolute_url()))

    def test_13_show_menu_below_id(self):
        page2 = Page.objects.get(pk=self.page2.pk)
        page2.reverse_id = "hello"
        page2.save()
        page2 = Page.objects.get(pk=self.page2.pk)
        self.assertEqual(page2.reverse_id, "hello")
        context = self.get_context(path=self.page5.get_absolute_url())
        nodes = show_menu_below_id(context, "hello")['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), self.page3.get_absolute_url())
        # The root-by-id lookup works even when that page is out of navigation.
        page2.in_navigation = False
        page2.save()
        context = self.get_context(path=self.page5.get_absolute_url())
        nodes = show_menu_below_id(context, "hello")['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), self.page3.get_absolute_url())

    def test_14_unpublished(self):
        page2 = Page.objects.get(pk=self.page2.pk)
        page2.published = False
        page2.save()
        context = self.get_context()
        nodes = show_menu(context)['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 0)

    def test_15_home_not_in_menu(self):
        page1 = Page.objects.get(pk=self.page1.pk)
        page1.in_navigation = False
        page1.save()
        page4 = Page.objects.get(pk=self.page4.pk)
        page4.in_navigation = False
        page4.save()
        context = self.get_context()
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), "/%s/" % self.page2.get_slug())
        self.assertEqual(nodes[0].children[0].get_absolute_url(), "/%s/%s/" % (self.page2.get_slug(), self.page3.get_slug()))
        page4 = Page.objects.get(pk=self.page4.pk)
        page4.in_navigation = True
        page4.save()
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 2)

    # NOTE(review): duplicate "test_15_" prefix with the method above;
    # both still run (distinct names), but the numbering is off.
    def test_15_empty_menu(self):
        Page.objects.all().delete()
        request = self.get_request()
        nodes = menu_pool.get_nodes(request)
        # Only checks that rendering an empty tree does not raise.
        context = self.get_context()
        nodes = show_menu(context, 0, 100, 100, 100)['children']

    def test_16_softroot(self):
        page2 = Page.objects.get(pk=self.page2.pk)
        page2.soft_root = True
        page2.save()
        context = self.get_context(path=page2.get_absolute_url())
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        # Inside a soft root, the soft-root page becomes the menu root.
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), page2.get_absolute_url())

        page3 = Page.objects.get(pk=self.page3.pk)
        context = self.get_context(path=page3.get_absolute_url())
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), page2.get_absolute_url())

        # Outside the soft root the normal top level is shown again.
        page1 = Page.objects.get(pk=self.page1.pk)
        context = self.get_context(path=page1.get_absolute_url())
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
        self.assertEqual(len(nodes[0].children[0].children), 0)

        context = self.get_context(path="/no/real/path/")
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
        self.assertEqual(len(nodes[0].children[0].children), 0)

        page5 = Page.objects.get(pk=self.page5.pk)
        context = self.get_context(path=page5.get_absolute_url())
        nodes = show_menu(context, 0, 100, 100, 100)['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), page1.get_absolute_url())
        self.assertEqual(len(nodes[0].children[0].children), 0)

    def test_17_show_submenu_from_non_menu_page(self):
        page6 = Page.objects.get(pk=self.page6.pk)
        context = self.get_context(page6.get_absolute_url())
        nodes = show_menu(context, 1, 100, 0, 1)['children']
        self.assertEqual(len(nodes), len(page6.children.all()))

        page7 = Page.objects.get(pk=self.page7.pk)
        context = self.get_context(page7.get_absolute_url())
        nodes = show_menu(context, 1, 100, 0, 1)['children']
        self.assertEqual(len(nodes), len(page6.children.all()))

        nodes = show_menu(context, 2, 100, 0, 1)['children']
        self.assertEqual(len(nodes), len(page7.children.all()))
"""Tests for the Roku Media Player platform.""" from datetime import timedelta from unittest.mock import patch from rokuecp import RokuError from homeassistant.components.media_player import DEVICE_CLASS_RECEIVER, DEVICE_CLASS_TV from homeassistant.components.media_player.const import ( ATTR_APP_ID, ATTR_APP_NAME, ATTR_INPUT_SOURCE, ATTR_MEDIA_CHANNEL, ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_DURATION, ATTR_MEDIA_POSITION, ATTR_MEDIA_TITLE, ATTR_MEDIA_VOLUME_MUTED, DOMAIN as MP_DOMAIN, MEDIA_CLASS_APP, MEDIA_CLASS_CHANNEL, MEDIA_CLASS_DIRECTORY, MEDIA_TYPE_APP, MEDIA_TYPE_APPS, MEDIA_TYPE_CHANNEL, MEDIA_TYPE_CHANNELS, SERVICE_PLAY_MEDIA, SERVICE_SELECT_SOURCE, SUPPORT_BROWSE_MEDIA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP, ) from homeassistant.components.roku.const import ATTR_KEYWORD, DOMAIN, SERVICE_SEARCH from homeassistant.components.websocket_api.const import TYPE_RESULT from homeassistant.config import async_process_ha_core_config from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE, SERVICE_VOLUME_UP, STATE_HOME, STATE_IDLE, STATE_ON, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY, STATE_UNAVAILABLE, ) from homeassistant.core import HomeAssistant from homeassistant.helpers import device_registry as dr, entity_registry as er from homeassistant.util import dt as dt_util from tests.common import async_fire_time_changed from tests.components.roku import NAME_ROKUTV, UPNP_SERIAL, setup_integration from tests.test_util.aiohttp import AiohttpClientMocker MAIN_ENTITY_ID = f"{MP_DOMAIN}.my_roku_3" TV_ENTITY_ID = f"{MP_DOMAIN}.58_onn_roku_tv" TV_HOST = "192.168.1.161" TV_LOCATION = "Living room" TV_MANUFACTURER = 
"Onn" TV_MODEL = "100005844" TV_SERIAL = "YN00H5555555" TV_SW_VERSION = "9.2.0" async def test_setup(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker) -> None: """Test setup with basic config.""" await setup_integration(hass, aioclient_mock) entity_registry = er.async_get(hass) main = entity_registry.async_get(MAIN_ENTITY_ID) assert hass.states.get(MAIN_ENTITY_ID) assert main assert main.device_class == DEVICE_CLASS_RECEIVER assert main.unique_id == UPNP_SERIAL async def test_idle_setup( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test setup with idle device.""" await setup_integration(hass, aioclient_mock, power=False) state = hass.states.get(MAIN_ENTITY_ID) assert state.state == STATE_STANDBY async def test_tv_setup( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test Roku TV setup.""" await setup_integration( hass, aioclient_mock, device="rokutv", app="tvinput-dtv", host=TV_HOST, unique_id=TV_SERIAL, ) entity_registry = er.async_get(hass) tv = entity_registry.async_get(TV_ENTITY_ID) assert hass.states.get(TV_ENTITY_ID) assert tv assert tv.device_class == DEVICE_CLASS_TV assert tv.unique_id == TV_SERIAL async def test_availability( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ) -> None: """Test entity availability.""" now = dt_util.utcnow() future = now + timedelta(minutes=1) with patch("homeassistant.util.dt.utcnow", return_value=now): await setup_integration(hass, aioclient_mock) with patch( "homeassistant.components.roku.coordinator.Roku.update", side_effect=RokuError ), patch("homeassistant.util.dt.utcnow", return_value=future): async_fire_time_changed(hass, future) await hass.async_block_till_done() assert hass.states.get(MAIN_ENTITY_ID).state == STATE_UNAVAILABLE future += timedelta(minutes=1) with patch("homeassistant.util.dt.utcnow", return_value=future): async_fire_time_changed(hass, future) await hass.async_block_till_done() assert hass.states.get(MAIN_ENTITY_ID).state == STATE_HOME 
async def test_supported_features(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test supported features."""
    await setup_integration(hass, aioclient_mock)

    # Features supported for Rokus
    state = hass.states.get(MAIN_ENTITY_ID)
    assert (
        SUPPORT_PREVIOUS_TRACK
        | SUPPORT_NEXT_TRACK
        | SUPPORT_VOLUME_STEP
        | SUPPORT_VOLUME_MUTE
        | SUPPORT_SELECT_SOURCE
        | SUPPORT_PAUSE
        | SUPPORT_PLAY
        | SUPPORT_PLAY_MEDIA
        | SUPPORT_TURN_ON
        | SUPPORT_TURN_OFF
        | SUPPORT_BROWSE_MEDIA
        == state.attributes.get("supported_features")
    )


async def test_tv_supported_features(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test supported features for Roku TV."""
    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    state = hass.states.get(TV_ENTITY_ID)
    assert (
        SUPPORT_PREVIOUS_TRACK
        | SUPPORT_NEXT_TRACK
        | SUPPORT_VOLUME_STEP
        | SUPPORT_VOLUME_MUTE
        | SUPPORT_SELECT_SOURCE
        | SUPPORT_PAUSE
        | SUPPORT_PLAY
        | SUPPORT_PLAY_MEDIA
        | SUPPORT_TURN_ON
        | SUPPORT_TURN_OFF
        | SUPPORT_BROWSE_MEDIA
        == state.attributes.get("supported_features")
    )


async def test_attributes(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes."""
    await setup_integration(hass, aioclient_mock)

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_HOME

    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
    assert state.attributes.get(ATTR_APP_ID) is None
    assert state.attributes.get(ATTR_APP_NAME) == "Roku"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Roku"


async def test_attributes_app(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes for app."""
    await setup_integration(hass, aioclient_mock, app="netflix")

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_ON

    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
    assert state.attributes.get(ATTR_APP_ID) == "12"
    assert state.attributes.get(ATTR_APP_NAME) == "Netflix"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Netflix"


async def test_attributes_app_media_playing(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes for app with playing media."""
    await setup_integration(hass, aioclient_mock, app="pluto", media_state="play")

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_PLAYING

    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
    assert state.attributes.get(ATTR_MEDIA_DURATION) == 6496
    assert state.attributes.get(ATTR_MEDIA_POSITION) == 38
    assert state.attributes.get(ATTR_APP_ID) == "74519"
    assert state.attributes.get(ATTR_APP_NAME) == "Pluto TV - It's Free TV"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Pluto TV - It's Free TV"


async def test_attributes_app_media_paused(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes for app with paused media."""
    await setup_integration(hass, aioclient_mock, app="pluto", media_state="pause")

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_PAUSED

    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
    assert state.attributes.get(ATTR_MEDIA_DURATION) == 6496
    assert state.attributes.get(ATTR_MEDIA_POSITION) == 313
    assert state.attributes.get(ATTR_APP_ID) == "74519"
    assert state.attributes.get(ATTR_APP_NAME) == "Pluto TV - It's Free TV"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Pluto TV - It's Free TV"


async def test_attributes_screensaver(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes for app with screensaver."""
    # The screensaver is reported as idle, not as a running app.
    await setup_integration(hass, aioclient_mock, app="screensaver")

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_IDLE

    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
    assert state.attributes.get(ATTR_APP_ID) is None
    assert state.attributes.get(ATTR_APP_NAME) == "Roku"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Roku"


async def test_tv_attributes(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test attributes for Roku TV."""
    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    state = hass.states.get(TV_ENTITY_ID)
    assert state.state == STATE_ON

    assert state.attributes.get(ATTR_APP_ID) == "tvinput.dtv"
    assert state.attributes.get(ATTR_APP_NAME) == "Antenna TV"
    assert state.attributes.get(ATTR_INPUT_SOURCE) == "Antenna TV"
    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_CHANNEL
    assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "getTV (14.3)"
    assert state.attributes.get(ATTR_MEDIA_TITLE) == "Airwolf"


async def test_tv_device_registry(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test device registered for Roku TV in the device registry."""
    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    device_registry = dr.async_get(hass)
    reg_device = device_registry.async_get_device(identifiers={(DOMAIN, TV_SERIAL)})

    assert reg_device.model == TV_MODEL
    assert reg_device.sw_version == TV_SW_VERSION
    assert reg_device.manufacturer == TV_MANUFACTURER
    assert reg_device.suggested_area == TV_LOCATION
    assert reg_device.name == NAME_ROKUTV


async def test_services(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the different media player services."""
    await setup_integration(hass, aioclient_mock)

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: MAIN_ENTITY_ID}, blocking=True
        )
        remote_mock.assert_called_once_with("poweroff")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: MAIN_ENTITY_ID}, blocking=True
        )
        remote_mock.assert_called_once_with("poweron")

    # pause, play, and play_pause all map to the single "play" keypress.
    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_MEDIA_PAUSE,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("play")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_MEDIA_PLAY,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("play")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_MEDIA_PLAY_PAUSE,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("play")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_MEDIA_NEXT_TRACK,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("forward")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_MEDIA_PREVIOUS_TRACK,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("reverse")

    with patch("homeassistant.components.roku.coordinator.Roku.launch") as launch_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: MAIN_ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_APP,
                ATTR_MEDIA_CONTENT_ID: "11",
            },
            blocking=True,
        )
        launch_mock.assert_called_once_with("11")

    # Selecting "Home" uses the remote; selecting an app launches it.
    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_SELECT_SOURCE,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: "Home"},
            blocking=True,
        )
        remote_mock.assert_called_once_with("home")

    with patch("homeassistant.components.roku.coordinator.Roku.launch") as launch_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_SELECT_SOURCE,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: "Netflix"},
            blocking=True,
        )
        launch_mock.assert_called_once_with("12")

    # Selecting a source by numeric app id also launches the app.
    with patch("homeassistant.components.roku.coordinator.Roku.launch") as launch_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_SELECT_SOURCE,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: 12},
            blocking=True,
        )
        launch_mock.assert_called_once_with("12")


async def test_tv_services(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test the media player services related to Roku TV."""
    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: TV_ENTITY_ID}, blocking=True
        )
        remote_mock.assert_called_once_with("volume_up")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_VOLUME_DOWN,
            {ATTR_ENTITY_ID: TV_ENTITY_ID},
            blocking=True,
        )
        remote_mock.assert_called_once_with("volume_down")

    with patch("homeassistant.components.roku.coordinator.Roku.remote") as remote_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_VOLUME_MUTE,
            {ATTR_ENTITY_ID: TV_ENTITY_ID, ATTR_MEDIA_VOLUME_MUTED: True},
            blocking=True,
        )
        remote_mock.assert_called_once_with("volume_mute")

    # Channel media on a TV tunes the tuner rather than launching an app.
    with patch("homeassistant.components.roku.coordinator.Roku.tune") as tune_mock:
        await hass.services.async_call(
            MP_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: TV_ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
                ATTR_MEDIA_CONTENT_ID: "55",
            },
            blocking=True,
        )
        tune_mock.assert_called_once_with("55")


async def test_media_browse(hass, aioclient_mock, hass_ws_client):
    """Test browsing media."""
    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 1,
            "type": "media_player/browse_media",
            "entity_id": TV_ENTITY_ID,
        }
    )

    msg = await client.receive_json()

    assert msg["id"] == 1
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]

    assert msg["result"]
    assert msg["result"]["title"] == "Media Library"
    assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
    assert msg["result"]["media_content_type"] == "library"
    assert msg["result"]["can_expand"]
    assert not msg["result"]["can_play"]
    assert len(msg["result"]["children"]) == 2

    # test apps
    await client.send_json(
        {
            "id": 2,
            "type": "media_player/browse_media",
            "entity_id": TV_ENTITY_ID,
            "media_content_type": MEDIA_TYPE_APPS,
            "media_content_id": "apps",
        }
    )

    msg = await client.receive_json()

    assert msg["id"] == 2
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]

    assert msg["result"]
    assert msg["result"]["title"] == "Apps"
    assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
    assert msg["result"]["media_content_type"] == MEDIA_TYPE_APPS
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP
    assert msg["result"]["can_expand"]
    assert not msg["result"]["can_play"]
    assert len(msg["result"]["children"]) == 11
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP

    assert msg["result"]["children"][0]["title"] == "Satellite TV"
    assert msg["result"]["children"][0]["media_content_type"] == MEDIA_TYPE_APP
    assert msg["result"]["children"][0]["media_content_id"] == "tvinput.hdmi2"
    assert (
        "/browse_media/app/tvinput.hdmi2" in msg["result"]["children"][0]["thumbnail"]
    )
    assert msg["result"]["children"][0]["can_play"]

    assert msg["result"]["children"][3]["title"] == "Roku Channel Store"
    assert msg["result"]["children"][3]["media_content_type"] == MEDIA_TYPE_APP
    assert msg["result"]["children"][3]["media_content_id"] == "11"
    assert "/browse_media/app/11" in msg["result"]["children"][3]["thumbnail"]
    assert msg["result"]["children"][3]["can_play"]

    # test channels
    await client.send_json(
        {
            "id": 3,
            "type": "media_player/browse_media",
            "entity_id": TV_ENTITY_ID,
            "media_content_type": MEDIA_TYPE_CHANNELS,
            "media_content_id": "channels",
        }
    )

    msg = await client.receive_json()

    assert msg["id"] == 3
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]

    assert msg["result"]
    assert msg["result"]["title"] == "Channels"
    assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
    assert msg["result"]["media_content_type"] == MEDIA_TYPE_CHANNELS
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_CHANNEL
    assert msg["result"]["can_expand"]
    assert not msg["result"]["can_play"]
    assert len(msg["result"]["children"]) == 2
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_CHANNEL

    assert msg["result"]["children"][0]["title"] == "WhatsOn"
    assert msg["result"]["children"][0]["media_content_type"] == MEDIA_TYPE_CHANNEL
    assert msg["result"]["children"][0]["media_content_id"] == "1.1"
    assert msg["result"]["children"][0]["can_play"]

    # test invalid media type
    await client.send_json(
        {
            "id": 4,
            "type": "media_player/browse_media",
            "entity_id": TV_ENTITY_ID,
            "media_content_type": "invalid",
            "media_content_id": "invalid",
        }
    )

    msg = await client.receive_json()

    assert msg["id"] == 4
    assert msg["type"] == TYPE_RESULT
    assert not msg["success"]


async def test_media_browse_internal(hass, aioclient_mock, hass_ws_client):
    """Test browsing media with internal url."""
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )

    assert hass.config.internal_url == "http://example.local:8123"

    await setup_integration(
        hass,
        aioclient_mock,
        device="rokutv",
        app="tvinput-dtv",
        host=TV_HOST,
        unique_id=TV_SERIAL,
    )

    client = await hass_ws_client(hass)

    # Requests arriving via the internal URL should get direct device
    # icon URLs instead of proxied browse_media thumbnails.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ):
        await client.send_json(
            {
                "id": 2,
                "type": "media_player/browse_media",
                "entity_id": TV_ENTITY_ID,
                "media_content_type": MEDIA_TYPE_APPS,
                "media_content_id": "apps",
            }
        )

        msg = await client.receive_json()

    assert msg["id"] == 2
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]

    assert msg["result"]
    assert msg["result"]["title"] == "Apps"
    assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
    assert msg["result"]["media_content_type"] == MEDIA_TYPE_APPS
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP
    assert msg["result"]["can_expand"]
    assert not msg["result"]["can_play"]
    assert len(msg["result"]["children"]) == 11
    assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP

    assert msg["result"]["children"][0]["title"] == "Satellite TV"
    assert msg["result"]["children"][0]["media_content_type"] == MEDIA_TYPE_APP
    assert msg["result"]["children"][0]["media_content_id"] == "tvinput.hdmi2"
    assert "/query/icon/tvinput.hdmi2" in msg["result"]["children"][0]["thumbnail"]
    assert msg["result"]["children"][0]["can_play"]

    assert msg["result"]["children"][3]["title"] == "Roku Channel Store"
    assert msg["result"]["children"][3]["media_content_type"] == MEDIA_TYPE_APP
    assert msg["result"]["children"][3]["media_content_id"] == "11"
    assert "/query/icon/11" in msg["result"]["children"][3]["thumbnail"]
    assert msg["result"]["children"][3]["can_play"]


async def test_integration_services(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
    """Test integration services."""
    await setup_integration(hass, aioclient_mock)

    with patch("homeassistant.components.roku.coordinator.Roku.search") as search_mock:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SEARCH,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_KEYWORD: "Space Jam"},
            blocking=True,
        )
        search_mock.assert_called_once_with("Space Jam")
"""The ALEExperiment class handles the logic for training a deep Q-learning agent in the Arcade Learning Environment. Author: Nathan Sprague """ import logging import numpy as np import random import cv2 # Number of rows to crop off the bottom of the (downsampled) screen. # This is appropriate for breakout, but it may need to be modified # for other games. CROP_OFFSET = 8 class ALEExperiment(object): def __init__(self, ale, agent, resized_width, resized_height, resize_method, num_epochs, epoch_length, test_length, frame_skip, death_ends_episode, max_start_nullops): self.ale = ale self.agent = agent self.num_epochs = num_epochs self.epoch_length = epoch_length self.test_length = test_length self.frame_skip = frame_skip self.death_ends_episode = death_ends_episode self.min_action_set = ale.getMinimalActionSet() self.resized_width = resized_width self.resized_height = resized_height self.resize_method = resize_method self.width, self.height = ale.getScreenDims() self.buffer_length = 2 self.buffer_count = 0 self.screen_rgb = np.empty((self.height, self.width, 3), dtype=np.uint8) self.screen_buffer = np.empty((self.buffer_length, self.height, self.width), dtype=np.uint8) self.terminal_lol = False # Most recent episode ended on a loss of life self.max_start_nullops = max_start_nullops def run(self): """ Run the desired number of training epochs, a testing epoch is conducted after each training epoch. """ for epoch in range(1, self.num_epochs + 1): self.run_epoch(epoch, self.epoch_length) self.agent.finish_epoch(epoch) if self.test_length > 0: self.agent.start_testing() self.run_epoch(epoch, self.test_length, True) self.agent.finish_testing(epoch) def run_epoch(self, epoch, num_steps, testing=False): """ Run one 'epoch' of training or testing, where an epoch is defined by the number of steps executed. 
Prints a progress report after every trial Arguments: epoch - the current epoch number num_steps - steps per epoch testing - True if this Epoch is used for testing and not training """ self.terminal_lol = False # Make sure each epoch starts with a reset. steps_left = num_steps while steps_left > 0: _, num_steps = self.run_episode(steps_left, testing) rewards_sum = np.sum(self.agent.data_set.rewards) rewards_per_sample = rewards_sum/(self.agent.data_set.count + 1e-6) prefix = "testing" if testing else "training" logging.info(prefix + " epoch:%d"%epoch + " steps_left:%d"%steps_left + " num_steps:%d"%num_steps + " rewards_sum:%.0f"%rewards_sum + " rewards_per_sample:%.3g"%rewards_per_sample) steps_left -= num_steps def _init_episode(self): """ This method resets the game if needed, performs enough null actions to ensure that the screen buffer is ready and optionally performs a randomly determined number of null action to randomize the initial game state.""" if not self.terminal_lol or self.ale.game_over(): self.ale.reset_game() if self.max_start_nullops > 0: random_actions = random.randint(0, self.max_start_nullops) for _ in range(random_actions): self._act(0) # Null action # Make sure the screen buffer is filled at the beginning of # each episode... self._act(0) self._act(0) def _act(self, action): """Perform the indicated action for a single frame, return the resulting reward and store the resulting screen image in the buffer """ reward = self.ale.act(action) if reward: print "REWARD 1" index = self.buffer_count % self.buffer_length self.ale.getScreenRGB(self.screen_rgb) cv2.cvtColor(self.screen_rgb, cv2.COLOR_RGB2GRAY, dst=self.screen_buffer[index, ...]) self.buffer_count += 1 return reward def _step(self, action): """ Repeat one action the appopriate number of times and return the summed reward. """ reward = 0 for _ in range(self.frame_skip): reward += self._act(action) return reward def run_episode(self, max_steps, testing): """Run a single training episode. 
The boolean terminal value returned indicates whether the episode ended because the game ended or the agent died (True) or because the maximum number of steps was reached (False). Currently this value will be ignored. Return: (terminal, num_steps) """ self._init_episode() start_lives = self.ale.lives() action = self.agent.start_episode(self.get_observation()) num_steps = 0 while True: reward = self._step(self.min_action_set[action]) self.terminal_lol = (self.death_ends_episode and not testing and self.ale.lives() < start_lives) terminal = self.ale.game_over() or self.terminal_lol num_steps += 1 if terminal or num_steps >= max_steps: self.agent.end_episode(reward, terminal) break action = self.agent.step(reward, self.get_observation()) return terminal, num_steps def get_observation(self): """ Resize and merge the previous two screen images """ assert self.buffer_count >= 2 index = self.buffer_count % self.buffer_length - 1 max_image = np.maximum(self.screen_buffer[index, ...], self.screen_buffer[index - 1, ...]) return self.resize_image(max_image) def resize_image(self, image): """ Appropriately resize a single image """ if self.resize_method == 'crop': # resize keeping aspect ratio resize_height = int(round( float(self.height) * self.resized_width / self.width)) resized = cv2.resize(image, (self.resized_width, resize_height), interpolation=cv2.INTER_LINEAR) # Crop the part we want crop_y_cutoff = resize_height - CROP_OFFSET - self.resized_height cropped = resized[crop_y_cutoff: crop_y_cutoff + self.resized_height, :] return cropped elif self.resize_method == 'scale': return cv2.resize(image, (self.resized_width, self.resized_height), interpolation=cv2.INTER_LINEAR) elif self.resize_method == 'tetris': # 27:204,22:64 return image[27:27+self.resized_height,22:22+self.resized_width] else: raise ValueError('Unrecognized image resize method.')
""" MDWeb Navigation structure and parsing. TODO: Describe navigation parsing Navigation structure Navigation( _content_path: /my/content _root_content_path: /my/content child_navs: [ Navigation( _content_path: /my/content/about _root_content_path: /my/content child_navs: [] child_pages: [ Page(), Page(), ... ] id: 'd5324c9d8797e07c58b139b50efc5cf0' slug: 'about' has_page: True has_children: False is_top: False level: 1 name: 'about' page: Page() ), ... ] child_pages: [] has_page: True id: 'd5324c9d8797e07c58b139b50efc5cf0' slug: '_' has_children: True is_top: True level: 0 name: None page: Page() ) Future Features: * Ordering navigation levels """ from collections import OrderedDict import hashlib import os import re from six import string_types from mdweb.Exceptions import ContentException, ContentStructureException from mdweb.Page import Page, load_page from mdweb.BaseObjects import NavigationBaseItem, MetaInfParser class NavigationMetaInf(MetaInfParser): # pylint: disable=R0903 """MDWeb Navigation Meta Information.""" FIELD_TYPES = { 'nav_name': ('unicode', None), 'order': ('int', 0), } class Navigation(NavigationBaseItem): """Navigation level representation. Navigation is built recursively by walking the content directory. Each directory represents a navigation level, each file represents a page. Each nav level's name is determined by the directory name. 
""" #: MetaInf file name nav_metainf_file_name = '_navlevel.txt' #: Allowed extensions for content files extensions = ['.md'] #: Special files to skip skip_files = [ '400.md', '403.md', '404.md', '405.md', '500.md', ] skip_directories = [ 'assets', ] #: Root path to content _root_content_path = None def __init__(self, content_path, nav_level=0): """Initialize navigation level.""" #: path to content for current navigation level self._content_path = os.path.abspath(content_path) #: Navigation level self.level = nav_level #: Navigation level name (populated during scan) if self.level == 0: Navigation._root_content_path = self._content_path self.name = None else: # Extract directory name and use as nav name relative_nav_path = re.sub(r"^%s" % self._root_content_path, '', self._content_path) self.name = os.path.split(relative_nav_path)[-1].lower() #: Relative path to navigation self.path = content_path.replace(self._root_content_path, '') #: Navigation level meta information self.meta_inf = None #: Ordered list of child Navigatyion objects self.child_navs = [] #: Ordered list of child Page object self.child_pages = [] #: Is this the top level of navigation self.is_top = nav_level == 0 #: Navigation page if one is provided (populated during scan) self.page = None #: Does the nav level have an associated page? 
(populated during scan) self.has_page = False #: Order in the navigation self.order = 0 #: Navigation slug self.slug = self.path.replace('.md', '').strip('/') \ .replace('/', '_').replace('.', '_') if self.path != '' else '_' #: Navigation ID self.id = hashlib.md5(self.slug.encode('utf-8')).hexdigest().lower() #: Navigation level published status self.published = True # Build the nav level self._scan() # Ensure a root index if 0 == self.level and (self.page is None or '' != self.page.url_path): raise ContentException("Missing root index.md") @property def has_children(self): """Check if the navigation level has any pages or nav children.""" return len(self.child_navs) > 0 or \ len(self.child_pages) > 0 @property def children(self): """Return a list of the child_navs and child_pages.""" return self.child_navs + self.child_pages @property def root_content_path(self): """Return the root_content_path.""" return self._root_content_path @property def content_path(self): """Return the content_path.""" return self._content_path @property def is_published(self): return self.published def get_child_by_name(self, name): """Find the child with the given name""" for child in self.child_navs: if child.name == name.lower(): return child return None def get_child_by_id(self, id): """Find the child with the given ID""" for child in self.child_navs: if child.id == id.lower(): return child return None def _scan(self): """Scan the root content path recursively for pages and navigation.""" # Get a list of files in content_directory directory_files = os.listdir(self._content_path) if self.nav_metainf_file_name in directory_files: # We have a nav-level metainf file, parse it absolute_meta_inf_path = os.path.join(self._content_path, self.nav_metainf_file_name) # Read the meta-inf file with open(absolute_meta_inf_path, 'r') as file: file_string = file.read() self.meta_inf = NavigationMetaInf(file_string) if hasattr(self.meta_inf, 'order'): self.order = self.meta_inf.order if 
hasattr(self.meta_inf, 'nav_name'): self.name = self.meta_inf.nav_name.lower() if \ self.meta_inf.nav_name is not None else None if hasattr(self.meta_inf, 'published'): if isinstance(self.meta_inf.published, string_types): self.published = self.meta_inf.published.lower() == 'true' elif isinstance(self.meta_inf.published, bool): self.published = self.meta_inf.published else: self.published = True # Traverse through all files for file_name in directory_files: file_path = os.path.join(self._content_path, file_name) # Check if it's a normal file or directory if os.path.isfile(file_path): if file_name in self.skip_files: continue page_name, ext = os.path.splitext(os.path.basename(file_path)) if ext not in self.extensions: continue # Only allow index at the top level # Allowing pages other than index at the top leads to # a confusing navigation structure. if self.level == 0 and 'index' != page_name: raise ContentStructureException( "Only index allowed in top level navigation, found %s" % page_name) # We have got a nav file! 
page = Page(*load_page(self._root_content_path, file_path)) # If it's an index file use it for the page for this nav object if 'index' == page_name: self.page = page self.has_page = True else: self.child_pages.append(page) elif os.path.isdir(file_path): if file_name in self.skip_directories: continue # We got a directory, create a new nav level self.child_navs.append(Navigation(file_path, self.level + 1)) # Now sort self.child_navs.sort(key=lambda x: x.order) self.child_pages.sort(key=lambda x: x.meta_inf.order) def get_page_dict(self, nav=None): """Return a flattened dictionary of pages.""" pages = OrderedDict() # If no nav is given start at self (top level) if nav is None: nav = self if nav.page is not None: pages[nav.page.url_path] = nav.page for page in nav.child_pages: pages[page.url_path] = page for child_nav in nav.child_navs: page = self.get_page_dict(nav=child_nav) pages.update(page) return pages def __repr__(self): return '{0}'.format(self.path)
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
from django.core.urlresolvers import reverse
from django.conf import settings

from ..models.device import Device


class DeviceViewTestCase(TestCase):

    """Exercise the network device CRUD views: authentication, template
    rendering and per-user object permissions."""

    def setUp(self):
        """Create two users with one device each and log TestUser1 in."""
        self.test1 = User.objects.create_user(
            username='TestUser1',
            email='test1@example.com',
            password='We love HDM !',
        )
        self.test2 = User.objects.create_user(
            username='TestUser2',
            email='test2@example.com',
            password='We love HDM !',
        )
        self.test1_device1 = Device.objects.create(
            user=self.test1,
            device_name='device-1-test-user-1',
            device_ip='127.0.0.1',
            description='Standard description 1',
        )
        self.test2_device1 = Device.objects.create(
            user=self.test2,
            device_name='device-1-test-user-2',
            device_ip='127.0.0.2',
            description='Standard description 2',
        )
        # self.anonymous is never logged in; self.client acts as TestUser1.
        self.anonymous = Client()
        self.client.login(username='TestUser1', password='We love HDM !')

    def tearDown(self):
        self.test1.delete()

    def deny_anonymous_test(self, view_name, args=()):
        """Assert anonymous requests to *view_name* redirect to login.

        ``args`` defaults to an immutable tuple instead of the original
        mutable-list default (shared-default pitfall).
        """
        response = self.anonymous.get(
            reverse(view_name, args=args),
            follow=True,
        )
        expected_url = '{login}?next={url}'.format(
            login=settings.LOGIN_URL,
            url=reverse(view_name, args=args),
        )
        self.assertRedirects(response, expected_url)

    def load_test(self, view_name, template_name, args=()):
        """Assert the view loads for a logged-in user with the template."""
        response = self.client.get(
            reverse(view_name, args=args),
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name)

    def get_object_test(self, view_name):
        '''The view should return a Device owned by the current user or
        throw a 404 error.'''
        # Try with a device owned by the user
        response1 = self.client.get(
            reverse(view_name, args=[self.test1_device1.pk]),
        )
        self.assertEqual(self.test1_device1, response1.context['device'])

        # Try with a device owned by another user
        response2 = self.client.get(
            reverse(view_name, args=[self.test2_device1.pk]),
        )
        self.assertEqual(response2.status_code, 404)

        # Try with a device that does not exists
        response3 = self.client.get(
            reverse(view_name, args=[42133742]),
        )
        self.assertEqual(response3.status_code, 404)

    def test_view_deny_anonymous(self):
        self.deny_anonymous_test('network:index')

    def test_view_loads(self):
        self.load_test('network:index', 'network/devices.html')

    def test_view_get_queryset(self):
        '''get_queryset should return an array of Devices.'''
        response = self.client.get(reverse('network:index'))
        device_list = response.context['user_devices_list']
        self.assertTrue(all([isinstance(x, Device) for x in device_list]))
        # The original compared the *bound method* ``count`` to 0, which is
        # always unequal — call it so the assertion actually checks data.
        self.assertNotEqual(device_list.count(), 0)

    def test_detail_view_deny_anonymous(self):
        self.deny_anonymous_test(
            'network:device_detail',
            args=[self.test1_device1.pk],
        )

    def test_detail_view_loads(self):
        self.load_test(
            'network:device_detail',
            'network/device_detail.html',
            args=[self.test1_device1.pk],
        )

    def test_detail_view_get_object(self):
        self.get_object_test('network:device_detail')

    def test_create_view_deny_anonymous(self):
        self.deny_anonymous_test('network:device_create')

    def test_create_view_loads(self):
        self.load_test(
            'network:device_create',
            'network/device_form.html',
        )

    def test_create_view_form_valid(self):
        '''The registered device should have a unique name and should
        belong to the current user.
        The name should also match the database requirements.'''
        # Try with a correct device
        self.client.post(
            reverse('network:device_create'),
            {'device_name': 'NewDevice1', 'description': 'New device 1'},
        )
        newDevice1 = Device.objects.filter(
            user=self.test1,
            device_name='NewDevice1',
        )
        # The original ``assertNotEqual(newDevice1, [])`` was vacuous (a
        # queryset never equals a list); assert the row really exists.
        self.assertTrue(newDevice1.exists())

        # Try with an incorrect device name
        self.client.post(
            reverse('network:device_create'),
            {'device_name': 'New Device 2', 'description': 'New device 2'},
        )
        newDevice2 = Device.objects.filter(
            user=self.test1,
            device_name='New Device 2',
        )
        self.assertEqual(newDevice2.count(), 0)

        # Try with a device name already taken
        response3 = self.client.post(
            reverse('network:device_create'),
            {
                'device_name': 'device-1-test-user-1',
                'description': 'New device 3',
            },
        )
        # NOTE: the expected message intentionally keeps the application's
        # own (misspelled) wording.
        self.assertContains(
            response3,
            'Device name aleready taken',
            html=False,
        )

    def test_update_view_deny_anonymous(self):
        self.deny_anonymous_test(
            'network:device_update',
            args=[self.test1_device1.pk],
        )

    def test_update_view_loads(self):
        self.load_test(
            'network:device_update',
            'network/device_form.html',
            args=[self.test1_device1.pk],
        )

    def test_update_view_get_object(self):
        self.get_object_test('network:device_update')

    def test_update_view_commit(self):
        '''A device should only be editable by the device owner
        The primary key given in request should match a device in db.'''
        # Try with a device owned by the user
        response1 = self.client.post(
            reverse('network:device_update', args=[self.test1_device1.pk]),
            {'device_name': 'device-1-test-user-1', 'description': '4242'},
        )
        self.assertEqual(response1.status_code, 302)
        self.assertRedirects(response1, reverse('network:index'))

    def test_delete_view_deny_anonymous(self):
        self.deny_anonymous_test(
            'network:device_delete',
            args=[self.test1_device1.pk],
        )

    def test_delete_view_loads(self):
        self.load_test(
            'network:device_delete',
            'network/device_confirm_delete.html',
            args=[self.test1_device1.pk],
        )

    def test_delete_view_get_object(self):
        self.get_object_test('network:device_delete')

    def test_delete_view_commit(self):
        '''A device should only be deleted by the device owner
        The primary key given in request should match a device in db.'''
        # Try with a device owned by the user
        response1 = self.client.post(
            reverse('network:device_delete', args=[self.test1_device1.pk]),
        )
        self.assertEqual(response1.status_code, 302)
        self.assertRedirects(response1, reverse('network:index'))
import re
import hashlib
from struct import pack
from socket import error as socket_error
from socket import timeout as socket_timeout


class ShinyConnection(object):

    """Abstract base for a player connection: holds the accepted socket
    and a logger; subclasses implement send/recv."""

    def __init__(self, conn_info, log):
        self.conn, self.addr = conn_info
        self.log = log

    def send(self):
        pass

    def recv(self):
        pass


class TelnetConnection(ShinyConnection):

    """Line-oriented telnet connection with NAWS window-size tracking."""

    # Matches an IAC SB NAWS ... IAC SE subnegotiation carrying the size.
    win_change_regexp = re.compile(r"\xff\xfa\x1f(?P<size>.*?)\xff\xf0")

    def __init__(self, conn_info, log):
        ShinyConnection.__init__(self, conn_info, log)
        self.win_size = (80, 40)
        self.set_telnet_options()
        # Put our socket into non-blocking mode - we'll periodically poll
        # for data instead of blocking until we get it
        self.conn.setblocking(0)

    def send(self, queue):
        """Send all queued lines; return False if the socket is dead."""
        try:
            for index, line in enumerate(queue):
                # Terminate every line but the last with CRLF.
                if index != (len(queue) - 1):
                    line += '\r\n'
                self.conn.send(line)
            del queue[:]
        except socket_error:
            # If we die here, it's probably because we got a broken pipe...
            # tell the function that's calling us we're not alive anymore
            return False
        else:
            return True

    def recv(self):
        """Poll for input; return cleaned data, or False if none."""
        try:
            new_stuff = self.conn.recv(256)
        except socket_error:
            # In non-blocking mode, recv generates an error if it doesn't
            # find any data to receive.  We want to ignore that error and
            # quietly wait until there is data.
            return False
        else:
            # Get rid of the \r \n line terminators
            new_stuff = new_stuff.replace('\n', '').replace('\r', '')
            # See if the input is a notice of window size change
            self.parse_winchange(new_stuff)
            # Ignore any other telnet negotiations
            new_stuff = re.sub(r"\xff((\xfa.*?\xf0)|(..))", '', new_stuff)
            if new_stuff:
                return new_stuff
            return False

    def close(self):
        self.conn.close()

    def set_telnet_options(self):
        """Petition client to run in linemode and to send window size change
        notifications.

        Some telnet clients, such as putty, start in non-linemode by default
        (they transmit each character as they receive it from the player).
        We want them to switch to linemode in this case, where they transmit
        each line after it's been assembled.

        We also want the client to tell us their screen size so we can
        display things appropriately.
        """
        # IAC + WILL + LINEMODE
        self.conn.send(chr(255) + chr(251) + chr(34) + '\r\n')
        # We should get a response from their client (immediately)
        self.conn.settimeout(1.0)
        try:
            result = list(self.conn.recv(256))
        except socket_timeout:
            # BUGFIX: this handler previously read ``socket.timeout`` even
            # though only ``from socket import error`` was imported, so a
            # slow client raised NameError instead of being handled.
            # This just means that their telnet client didn't send us a
            # timely response to our initiating linemode... move on.
            result = 'Client response FAIL for linemode.'
        finally:
            self.log.debug(result)

        # IAC DO NAWS (Negotiate About Window Size)
        self.conn.send(chr(255) + chr(253) + chr(31) + '\r\n')
        try:
            result = list(self.conn.recv(256))
        except (socket_error, socket_timeout):
            # Narrowed from a bare ``except:`` — only socket problems are
            # expected here.
            result = 'Client response FAIL for NAWS.'
        else:
            # IAC WILL NAWS
            if result[0:3] == ['\xff', '\xfb', '\x1f']:
                # win, they're willing to do NAWS! Parse their window info
                stuff = ''.join(result[3:])
                self.parse_winchange(stuff)
        finally:
            self.conn.settimeout(None)
            self.log.debug(str(result))

    def parse_winchange(self, data):
        """Parse and set the terminal size of the player."""
        match = self.win_change_regexp.match(data)
        if match:
            size = match.group('size')
            # Bytes 1 and 3 of the NAWS payload are width and height.
            self.win_size = (ord(size[1]), ord(size[3]))


class WebsocketConnection(ShinyConnection):

    """Early (hybi-00) WebSocket connection with framed send/recv."""

    handshake_string = "HTTP/1.1 101 Web Socket Protocol Handshake\r\n\
Upgrade: WebSocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Origin: %(origin)s\r\n\
Sec-WebSocket-Location: ws://%(host)s/\r\n\r\n"

    def __init__(self, conn_info, log, host, port):
        ShinyConnection.__init__(self, conn_info, log)
        self.host = host
        self.port = port
        self.data_fragment = ''
        self.handshake()
        self.conn.setblocking(0)

    def send(self, queue):
        """Send each queued line as a 0x00 ... 0xFF hybi-00 frame."""
        try:
            while (len(queue) > 0):
                line = queue.pop(0)
                line = '\x00' + line.encode('utf-8') + '\xFF'
                self.conn.send(line)
        except socket_error:
            # If we die here, it's probably because we got a broken pipe...
            # tell the function that's calling us we're not alive anymore
            return False
        else:
            return True

    def recv(self):
        """Poll for frames; return a list of payloads, None on client
        close, or False if nothing is available."""
        try:
            new_stuff = self.data_fragment + self.conn.recv(256)
        except socket_error:
            # In non-blocking mode, recv generates an error if it doesn't
            # find any data to receive.  We want to ignore that error and
            # quietly wait until there is data.
            return False
        else:
            valid_lines = []
            # Split all lines on the terminating character
            lines = new_stuff.split('\xFF')
            # Pop the last line off of the end - this will either be an
            # empty string if the last line was terminated, or a left over
            # fragment that should wait for the next batch of lines to be
            # processed
            self.data_fragment = lines.pop()
            # Now we should make sure the lines have a valid prefix.
            # Ignore any that don't.
            for line in lines:
                if line[0] == '\x00':
                    valid_lines.append(line[1:])
                elif line == '':
                    # The client wishes to terminate - send the closing
                    # handshake and disconnect
                    self.close()
                    # Return None so that the player object knows to log
                    # the player out
                    return None
                else:
                    self.log.error('Received invalid message from client '
                                   '(frame did not begin with 0x00 byte): %s'
                                   % (line))
            if valid_lines:
                return valid_lines
            return False

    def handshake(self):
        """Answer the client's opening handshake (hybi-00)."""
        data = self.conn.recv(1024)
        host = re.findall(r'Host: (.*?)\r\n', data)[0]
        origin = re.findall(r'Origin: (.*?)\r\n', data)[0]
        response = self.handshake_string % {'origin': origin, 'host': host}
        response = response.encode('utf-8') + self.parse_hybi00(data)
        self.conn.send(response)

    def close(self):
        self.conn.close()

    def parse_hybi00(self, request):
        """
        Parses an HTTP request header and forms a response according to
        The WebSocket protocol, draft-ietf-hybi-thewebsocketprotocol-00
        (http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00).
        """
        # The random tokens will be in the last 8 bytes of the request
        random_tokens = request[-8:]
        # Grab the secret keys hidden away in the request headers
        key1 = re.findall(r'Sec-WebSocket-Key1: (.*?)\r\n', request)[0]
        key2 = re.findall(r'Sec-WebSocket-Key2: (.*?)\r\n', request)[0]

        def parse_key(key):
            # Per the draft: the key's number is the digits divided by the
            # count of spaces.  (A conforming client always includes at
            # least one space.)
            spaces = 0
            digits = ''
            for char in list(key):
                if char.isdigit():
                    # Concatenate digits into our number string
                    digits += char
                elif char == ' ':
                    # Count the spaces
                    spaces += 1
            result = int(int(digits) / spaces)
            return result

        response = pack('>II8s', parse_key(key1), parse_key(key2),
                        str(random_tokens))
        hashed_response = hashlib.md5()
        hashed_response.update(response)
        return hashed_response.digest()
import pytest from TruSTAR_V2 import TrustarClient, Utils import trustar from trustar.models.indicator import Indicator from trustar.models.enclave import EnclavePermissions from trustar.models.report import Report from trustar.models.intelligence_source import IntelligenceSource from trustar.models.phishing_submission import PhishingSubmission, PhishingIndicator from trustar.models.indicator_summary import IndicatorSummary, IndicatorAttribute, IndicatorScore @pytest.fixture def client(): client = client = TrustarClient(config={ 'user_api_key': "test_api_key", 'user_api_secret': "test_api_secret", 'api_endpoint': "test_api_endpoint", 'client_type': "Python_SDK", 'client_metatag': "demisto-xsoar" }) return client @pytest.fixture def enclaves(): return [ EnclavePermissions( id="931a7386-ed4f-4acd-bda0-b13b2b6b823f71", name="TestEnclave", type="CLOSED", read=True, create=False, update=True ) ] @pytest.fixture def related_indicators(mocker): return mocker.Mock( items=[ Indicator( type="SHA256", value="a127d88fb73f8f1a3671557f3084d02d981396d5f5218163ef26d61314ced3c1" ), Indicator( type="URL", value="www.testUrl.com" ) ] ) @pytest.fixture def trending_indicators(): return [ Indicator( correlation_count=724, type="URL", value="badware.info" ), Indicator( correlation_count=694, type="URL", value="botvrij.eu" ) ] @pytest.fixture def indicators_metadata(): return [ Indicator( value="185.220.101.141", first_seen=1588884576620, last_seen=1588923302059, correlation_count=0, type="IP", enclave_ids=[ '011ad71b-fd7d-44c2-834a-0d751299fb1f', '71f337a0-9696-4331-988a-5679271656a0', 'd915e45a-d0c8-4a75-987a-775649020c96' ] ) ] @pytest.fixture def indicator_summaries(mocker): return mocker.Mock( items=[ IndicatorSummary( value="185.220.101.141", indicator_type="IP", source=IntelligenceSource(key="virustotal", name="VirusTotal"), severity_level=3, updated=1589782796000, enclave_id='011ad71b-fd7d-44c2-834a-0d751299fb1f', report_id='67c60023-83ea-4376-960e-2ff8fc9fbd33', attributes=[ 
IndicatorAttribute( description='Number of associated URLs detected as bad', name='Detected URLs', value=1, ), IndicatorAttribute( description='Number of hostnames this IP resolved to', name='Hostname Resolutions', value=2, ), IndicatorAttribute( name='ASN', value='200052', ), ], score=IndicatorScore(name="Positives/Total Scans", value="64/75") ), IndicatorSummary( value="185.220.100.141", indicator_type="IP", source=IntelligenceSource(key="OTRO", name="VirusTotal"), severity_level=3, updated=1589782796000, enclave_id='011ad71b-fd7d-44c2-834a-0d751299fb1f', report_id='67c60023-83ea-4376-960e-2ff8fc9fbd33', attributes=[ IndicatorAttribute( description='Number of associated URLs detected as bad', name='Detected URLs', value=1, ), IndicatorAttribute( description='Number of hostnames this IP resolved to', name='Hostname Resolutions', value=2, ), IndicatorAttribute( name='ASN', value='200052', ), ], score=IndicatorScore(name="Positives/Total Scans", value="64/75") ) ] ) @pytest.fixture def reports(mocker): return mocker.MagicMock( items=[ Report( id="1", title="Test Report", body="Test Body", ), Report( id="2", title="Test Report2", body="{'testField': 'test'}", ), ] ) @pytest.fixture def correlated_reports(mocker): return [ Report( id="1", title="Test Report", body="Test Body", ), Report( id="2", title="Test Report2", body="{'testField': 'test'}", ), ] @pytest.fixture def whitelisted_indicators(mocker): return mocker.Mock( items=[ Indicator( type="MD5", value="1e82dd741e908d02e4eff82461f1297e" ), Indicator( type="EMAIL_ADDRESS", value="truphish1337@gmail.com" ) ] ) @pytest.fixture def phishing_submissions(mocker): return mocker.Mock( items=[ PhishingSubmission( submission_id="TEST-SUBMISSION-ID", title="TEST PHISHING SUBMISSION", priority_event_score=3, status="UNRESOLVED" ) ] ) @pytest.fixture def phishing_indicators(mocker): return mocker.Mock( items=[ PhishingIndicator( indicator_type="URL", value="www.test.com", source_key="test_source", 
normalized_indicator_score=3, original_indicator_score=3 ) ] ) def test_get_enclaves(client, enclaves, monkeypatch): def mock_get_enclaves(*args, **kwargs): return enclaves monkeypatch.setattr(trustar.TruStar, "get_user_enclaves", mock_get_enclaves) response = client.get_enclaves() expected = enclaves[0].to_dict(remove_nones=True) assert response.get('Contents')[0] == expected def test_related_indicators(client, related_indicators, monkeypatch): def mock_get_related_indicators(*args, **kwargs): return related_indicators monkeypatch.setattr(trustar.TruStar, "get_related_indicators_page", mock_get_related_indicators) indicators = ["a127d88fb73f8f1a3671557f3084d02d981396d5f5218163ef26d61314ced3c1", "www.testUrl.com"] response = client.get_related_indicators(indicators) expected = [i.to_dict(remove_nones=True) for i in related_indicators.items] assert response[0].get('Contents') == expected def test_trending_indicators(client, trending_indicators, monkeypatch): def mock_get_trending_indicators(*args, **kwargs): return trending_indicators monkeypatch.setattr(trustar.TruStar, "get_community_trends", mock_get_trending_indicators) response = client.get_trending_indicators() expected = [i.to_dict(remove_nones=True) for i in trending_indicators] assert response[0].get('Contents') == expected def test_get_indicators_metadata(client, indicators_metadata, monkeypatch): def mock_get_metadata(*args, **kwargs): return indicators_metadata monkeypatch.setattr(trustar.TruStar, "get_indicators_metadata", mock_get_metadata) response = client.get_indicators_metadata(indicators=['185.220.101.141']) expected = indicators_metadata[0].to_dict(remove_nones=True) expected["firstSeen"] = Utils.normalize_time(expected.get('firstSeen')) expected["lastSeen"] = Utils.normalize_time(expected.get('lastSeen')) assert response[0].get('Contents')[0] == expected def test_get_indicator_summaries(client, indicator_summaries, monkeypatch): def mock_get_summaries(*args, **kwargs): return indicator_summaries 
    # Tail of a test whose `def` line is above this chunk: patch the SDK's
    # summaries pager, invoke the command, and compare against the fixture
    # (the API's `type` key is renamed to `indicatorType` by the client).
    monkeypatch.setattr(trustar.TruStar, "get_indicator_summaries_page", mock_get_summaries)
    response = client.get_indicator_summaries(values=['185.220.101.141'])
    expected = indicator_summaries.items[0].to_dict(remove_nones=True)
    expected['indicatorType'] = expected.pop('type')
    assert response[0].get('Contents')[0] == expected


def test_get_whitelisted_indicators(client, whitelisted_indicators, monkeypatch):
    """get_whitelist returns every whitelisted indicator as a plain dict."""
    def mock_get_whitelist(*args, **kwargs):
        return whitelisted_indicators

    monkeypatch.setattr(trustar.TruStar, "get_whitelist_page", mock_get_whitelist)
    response = client.get_whitelist()
    expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
    assert response[0].get('Contents') == expected


def test_get_indicators_for_report(client, whitelisted_indicators, monkeypatch):
    """Indicators extracted from a report are serialized unchanged."""
    def mock_get_indicators_for_report(*args, **kwargs):
        return whitelisted_indicators

    monkeypatch.setattr(trustar.TruStar, "get_indicators_for_report_page", mock_get_indicators_for_report)
    response = client.get_indicators_for_report("76cc1321-f630-test-b82b-eb00a9022445")
    expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
    assert response[0].get('Contents') == expected


def test_move_report(client, monkeypatch):
    """move_report echoes a human-readable confirmation string."""
    def mock_move_report(*args, **kwargs):
        # SDK mock returns the id it was asked to move.
        return kwargs["report_id"]

    report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
    enclave_id = "6ef1078c-a74a-4b42-9344-56c6adea0bda"
    monkeypatch.setattr(trustar.TruStar, "move_report", mock_move_report)
    response = client.move_report(report_id, enclave_id)
    assert response == f"{report_id} has been moved to enclave id: {enclave_id}"


def test_copy_report(client, monkeypatch):
    """copy_report reports both the source id and the new copy's id."""
    def mock_copy_report(*args, **kwargs):
        return "NEW-Test-ID"

    report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
    dest_enclave_id = "6ef1078c-a74a-4b42-9344-56c6adea0bda"
    monkeypatch.setattr(trustar.TruStar, "copy_report", mock_copy_report)
    response = client.copy_report(report_id, dest_enclave_id)
    assert response == f"{report_id} has been copied to enclave id: {dest_enclave_id} with id: NEW-Test-ID"


def test_get_reports(client, reports, monkeypatch):
    """Each returned report gains a computed deep link."""
    def mock_get_reports(*args, **kwargs):
        return reports

    monkeypatch.setattr(trustar.TruStar, "get_reports_page", mock_get_reports)
    response = client.get_reports()
    expected = [report.to_dict(remove_nones=True) for report in reports.items]
    for e in expected:
        # Client decorates every report with a UI deep link.
        e["reportDeepLink"] = client.get_report_deep_link(e.get("id"))
    assert response.get('Contents') == expected


def test_get_report_details(client, reports, monkeypatch):
    """Single-report lookup also carries the deep link."""
    def mock_get_report_details(*args, **kwargs):
        return reports.items[0]

    monkeypatch.setattr(trustar.TruStar, "get_report_details", mock_get_report_details)
    response = client.get_report_details(report_id="1")
    expected = reports.items[0].to_dict(remove_nones=True)
    expected['reportDeepLink'] = client.get_report_deep_link("1")
    assert response.get('Contents') == expected


def test_update_report(client, reports, monkeypatch):
    """update_report merges the new title into the fetched report."""
    def mock_update_report(*args, **kwargs):
        return reports.items[0]

    # Both the fetch and the (no-op) update call are stubbed out.
    monkeypatch.setattr(trustar.TruStar, "get_report_details", mock_update_report)
    monkeypatch.setattr(trustar.TruStar, "update_report", lambda x, y: None)
    response = client.update_report(report_id="1", title="NEW TEST TITLE")
    # NOTE: to_dict() here intentionally keeps None fields (no remove_nones).
    expected = reports.items[0].to_dict()
    expected['reportDeepLink'] = client.get_report_deep_link("1")
    expected['title'] = "NEW TEST TITLE"
    assert response.get('Contents') == expected


def test_search_reports(client, reports, monkeypatch):
    """search_reports passes the SDK page items straight through."""
    def mock_search_reports(*args, **kwargs):
        return reports.items

    monkeypatch.setattr(trustar.TruStar, "search_reports_page", mock_search_reports)
    response = client.search_reports()
    expected = [r.to_dict(remove_nones=True) for r in reports.items]
    assert response.get('Contents') == expected


def test_delete_report(client, monkeypatch):
    """Deletion returns a confirmation message, not the SDK result."""
    report_id = "94a476d8-17e3-490a-9020-f6971b692daf"
    monkeypatch.setattr(trustar.TruStar, "delete_report", lambda x, y, z: None)
    response = client.delete_report(report_id)
    assert response == f"Report {report_id} was successfully deleted"


def test_submit_report(client, monkeypatch, mocker):
    """Submission echoes the new id plus the caller-supplied fields."""
    m = mocker.Mock(id=1)
    monkeypatch.setattr(trustar.TruStar, "submit_report", lambda x, y: m)
    response = client.submit_report(
        title="Test enclave",
        report_body="TEST BODY",
        enclave_ids=["testEnclaveId"]
    )
    assert response.get('Contents').get('id') == 1
    assert response.get('Contents').get('title') == "Test enclave"
    assert response.get('Contents').get('reportBody') == "TEST BODY"


def test_add_to_whitelist(client, monkeypatch):
    """Whitelisting several terms yields a single confirmation string."""
    monkeypatch.setattr(trustar.TruStar, "add_terms_to_whitelist", lambda x, y: y)
    indicators = ["test@trustar.co", "www.testUrl.com"]
    response = client.add_to_whitelist(indicators)
    assert response == f"{indicators} added to the whitelist successfully"


def test_remove_from_whitelist(client, monkeypatch):
    """Removal of one indicator yields a confirmation string."""
    monkeypatch.setattr(trustar.TruStar, "delete_indicator_from_whitelist", lambda x, y: None)
    indicator = "htain@trustar.co"
    response = client.remove_from_whitelist(indicator)
    assert response == f'{indicator} removed from the whitelist successfully'


def test_correlated_reports(client, correlated_reports, monkeypatch):
    """Correlated-report lookup serializes the fixture page directly."""
    def mock_get_correlated_reports(*args, **kwargs):
        return correlated_reports

    monkeypatch.setattr(trustar.TruStar, "get_correlated_reports_page", mock_get_correlated_reports)
    response = client.get_correlated_reports(indicators="5f67fc0a85ef8f1b6c17ee54acb55140")
    expected = [r.to_dict(remove_nones=True) for r in correlated_reports]
    assert response.get('Contents') == expected


def test_get_all_phishing_indicators(client, phishing_indicators, monkeypatch):
    """Phishing indicators page is returned element-for-element."""
    def mock_get_phishing_indicators(*args, **kwargs):
        return phishing_indicators

    monkeypatch.setattr(trustar.TruStar, "get_phishing_indicators_page", mock_get_phishing_indicators)
    response = client.get_all_phishing_indicators()
    expected = phishing_indicators.items[0].to_dict(remove_nones=True)
    assert response[0].get('Contents')[0] == expected


def test_get_phishing_submissions(client, phishing_submissions, monkeypatch):
    """Phishing submissions page is returned element-for-element."""
    def mock_get_phishing_submissions(*args, **kwargs):
        return phishing_submissions

    monkeypatch.setattr(trustar.TruStar, "get_phishing_submissions_page", mock_get_phishing_submissions)
    response = client.get_phishing_submissions()
    expected = phishing_submissions.items[0].to_dict(remove_nones=True)
    assert response.get('Contents')[0] == expected


def test_set_triage_status(client, monkeypatch, mocker):
    """Triage-status change returns a human-readable confirmation."""
    m = mocker.Mock()
    # The client calls raise_for_status() on the SDK response; stub it out.
    m.raise_for_status = lambda: None
    monkeypatch.setattr(trustar.TruStar, "mark_triage_status", lambda x, y, z: m)
    response = client.set_triage_status("TEST-ID", "RESOLVED")
    assert response == "Submission ID TEST-ID is RESOLVED"


def test_search_indicators(client, whitelisted_indicators, monkeypatch):
    """Indicator search serializes the SDK page items unchanged."""
    def mock_search_indicators(*args, **kwargs):
        return whitelisted_indicators.items

    monkeypatch.setattr(trustar.TruStar, "search_indicators_page", mock_search_indicators)
    response = client.search_indicators()
    expected = [i.to_dict(remove_nones=True) for i in whitelisted_indicators.items]
    assert response[0].get('Contents') == expected
import numpy as np
import control as con
import optim_tools
import matplotlib.pyplot as plt


# Compute the gains of the parameterized state feedback k(p) = k0 + (1/p)*k1
# directly from characteristic polynomials (fastest variant), given the
# plant's own characteristic-polynomial coefficients `a`.
#   roots_p1:   desired closed-loop poles at parameter p = 1
#   roots_pmin: desired closed-loop poles at parameter p = pmin
#   pmin:       smallest parameter value (0 < pmin < 1)
#   a:          open-loop characteristic coefficients as a column np.matrix
# Returns (k0, k1) as np.matrix column vectors.
def k_explizit_a(roots_p1, roots_pmin, pmin, a):
    n = len(roots_p1)
    # NOTE(review): the next two zero-initializations are dead code —
    # k0 and k1 are rebound below before first use.
    k0 = np.zeros((n,))
    k1 = np.zeros((n,))
    a_tilde_p1 = np.matrix(np.poly(roots_p1)[:0:-1]).T  # characteristic polynomial coefficients, reversed, leading 1 dropped
    a_tilde_pmin = np.matrix(np.poly(roots_pmin)[:0:-1]).T  # same for the pmin pole set
    # Linear interpolation of the coefficient difference over 1/p.
    k1 = (a_tilde_p1 - a_tilde_pmin) / (1.0-1.0/pmin)
    k0 = a_tilde_p1 - a - k1
    return np.matrix(k0), np.matrix(k1)
#%timeit k_explizit_a([1,2,4], [2,4,8], 0.1, a)


# Same result, but first transforms (A, b) into controllable canonical form
# to obtain the characteristic coefficients (costly).
def k_explizit_Ab(roots_p1, roots_pmin, pmin, A, b):
    (A_R, _, _, _), _, _ = optim_tools.get_Steuerungsnormalform(A, b, b, 0)  # second b should be c, but values invoking c are dropped
    a = -A_R[-1][:].T  # last row of the canonical A holds the negated coefficients
    n = len(roots_p1)
    # NOTE(review): dead zero-initializations, as in k_explizit_a.
    k0 = np.zeros((n,))
    k1 = np.zeros((n,))
    a_tilde_p1 = np.matrix(np.poly(roots_p1)[:0:-1]).T  # get the characteristic polynomial backwards
    a_tilde_pmin = np.matrix(np.poly(roots_pmin)[:0:-1]).T  # get the characteristic polynomial backwards
    k1 = (a_tilde_p1 - a_tilde_pmin) / (1.0-1.0/pmin)
    k0 = a_tilde_p1 - a - k1
    return np.matrix(k0), np.matrix(k1)
#%timeit k_explizit_Ab([1,2,4], [2,4,8], 0.1, A, b)


# Use python-control to place the poles at p=1 and p=pmin and interpolate
# between the two resulting gain vectors (faster when starting from A and b).
def k_explizit_Ab2(roots_p1, roots_pmin, pmin, A, b):
    r0 = con.place(A, b, roots_p1)  #k(p=1)
    r1 = con.place(A, b, roots_pmin)  #k(p=pmin)
    # This seems to work as expected
    k1 = 1.0/(1.0-1.0/pmin) * (r0 - r1)
    k0 = r0 - k1
    return k0.T, k1.T
#%timeit k_explizit_Ab2([1,2,4], [2,4,8], 0.1, A, b)

#"""
# Demo of plotting complex functions in python.
#
# Jim M | Feb 2011 | GPL
#"""

# Plotting functions; see the example below
# and http://matplotlib.sourceforge.net/
#from matplotlib.pyplot import plot, legend

# Complex math (cmath) python functions;
# see http://docs.python.org/library/cmath.html
#from cmath import sin, cos, exp, pi, log, polar, rect, phase, sqrt

# Note that python represents imaginary numbers like "3i" as "3j",
# where "j" (meaning "sqrt(-1)") must be preceded by a number,
# so "sqrt(-1)" alone would in python be "1j".
#
# (3,4) complex rectangular form:  z = 3 + 4j
# (x,y) complex rectangular form:  z = x + y * 1j
# polar form:                      z = r * exp(1j * theta)
# abs(z) is the length of a complex number = r
# phase(z) is the angle of a complex number = theta
# z.real is the real part
# z.imag is the imaginary part
#
# abs() is a python built-in; as are complex numbers themselves.
# But the other functions needed to be imported in their complex versions.
# The numeric constant pi can be imported from math or cmath.
# Remember that
#   1. "lambda x: ..." is an anonymous function of x, e.g. lambda x: 2*x+1
#   2. map(f, [a, b, c, ...]) returns f(a), f(b), f(c), ...
#
# == So here are a few utility functions for multiplying scalars and vectors.
# Return the real parts of a vector of (complex) numbers.
# FIX: returned map(...) before, which is a lazy iterator in Python 3 —
# it cannot be indexed (real_part[0]) and is exhausted after one pass,
# so the plotting code below silently broke. Return a list instead.
def real_vector(vector):
    return [x.real for x in vector]


# Return the imaginary parts of a vector of (complex) numbers.
# FIX: same map() -> list fix as real_vector.
def imag_vector(vector):
    return [x.imag for x in vector]


# Plot the closed-loop pole trajectories of the parameterized controller
# k(p) = k_0 + (1/p)*k_1 while p sweeps from 1 down to pmin.
#   A, b, c, d: state-space model of the plant
#   k_0, k_1:   gain column vectors from one of the k_explizit_* functions
#   pmin:       final (smallest) parameter value
def plot_moving_poles(A, b, c, d, k_0, k_1, pmin=0.1):
    poles = []
    for p in np.arange(1, pmin-0.001, -0.001):
        sys_closed = con.ss(A-b*(k_0+1.0/p*k_1).T, b, c, d)
        pole = con.matlab.pole(sys_closed)
        poles.append(pole)
    # another approach to plot
    real_part = real_vector(poles)
    imag_part = imag_vector(poles)
    # Display a window with a plot of real, imag; start marker '*' and
    # end marker 'x' show the direction of the pole movement.
    plt.plot(real_part, imag_part, 'b-')
    plt.plot(real_part[0], imag_part[0], 'b*')
    plt.plot(real_part[-1], imag_part[-1], 'rx')
    # FIX: was `plt.show` (bare attribute access, a no-op); call it.
    plt.show()


# Self-contained demo: plot the real and imaginary parts of exp(i*theta)
# around the unit circle (the DFT twiddle factors).
def narf():
    # a scalar times a vector returns a vector
    def scale_vector(scale, vector):
        result = [0]*len(vector)
        for i in range(len(result)):
            result[i] = scale * vector[i]
        return result

    # dot product of two vectors = sum(x[0]*y[0] + ... + x[n-1]*y[n-1])
    def vector_dot(vector1, vector2):
        result = 0
        for i in range(len(vector1)):
            result += vector1[i] * vector2[i]
        return result

    from cmath import sin, cos, exp, pi, log, polar, rect, phase, sqrt

    # Generate numbers around the complex unit circle.
    # (These are the same numbers that show up in the Fourier Transform.)
    N = 128
    theta = scale_vector(2*pi/N, range(N))
    exp_theta = map(lambda x: exp(1j * x), theta)
    real_part = real_vector(exp_theta)
    imag_part = imag_vector(exp_theta)

    # Display a window with a plot of real, imag
    plt.plot(theta, real_part, '-', label='real')
    plt.plot(theta, imag_part, '--', label='imag')
    plt.legend(loc='lower left', title='exp(i theta)')
    plt.show()


# TEST k_explizit functions
# uboot
A_x = np.matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., -0.005]])
a_x = -A_x[-1][:].T #!!!!
# --- Self-test script: compare the three k_explizit_* implementations on a
# third-order example plant and plot the closed-loop pole movement. ---
b_x = np.matrix([[0], [0], [1.]])
d_x = 0
c_x = np.matrix([[1], [0], [0]])
sys_x = con.ss(A_x, b_x, c_x.T, d_x)
#mag, phase, omega = bode(sys1)
#arr1, arr2 = control.step(sys)
#plt.plot(arr2, arr1)
#poles, zeros = control.matlab.pzmap(sys, True)
plt.axis([-5, .1, -3, 3])
#plt.show()

roots_p1_x = [-1, -1+1j, -1-1j]
roots_pmin_x = [-3, -3+2j, -3-2j]
pmin_x = 0.1

# FIX: these calls previously used the undefined names k_explizit,
# k_explizit_x and k_explizit2 (NameError at runtime); the functions
# defined above are k_explizit_a, k_explizit_Ab and k_explizit_Ab2.
k0_x, k1_x = k_explizit_a(roots_p1_x, roots_pmin_x, pmin_x, a_x)
k0_x2, k1_x2 = k_explizit_Ab(roots_p1_x, roots_pmin_x, pmin_x, A_x, b_x)
k0_x3, k1_x3 = k_explizit_Ab2(roots_p1_x, roots_pmin_x, pmin_x, A_x, b_x)
#print(k0_x, k1_x)
#print(k0_x2, k1_x2)
#print(k0_x3, k1_x3)

# All three variants must agree.
assert np.allclose(k0_x, k0_x2)
assert np.allclose(k1_x, k1_x2)
assert np.allclose(k0_x2, k0_x3)
assert np.allclose(k1_x2, k1_x3)

poles = []
for p in np.arange(1, 0.09, -0.01):
    #sys_cl = control.ss(A-b*(r0), b, c.T, d)
    #poles2, zeros2 = control.matlab.pzmap(sys_cl, True)
    #sys_cl = control.ss(A-b*(r1), b, c.T, d)
    #poles2, zeros2 = control.matlab.pzmap(sys_cl, True)
    #print(p)
    # FIX: previously referenced the undefined names A, b, c, d; the
    # example plant defined above is A_x, b_x, c_x, d_x (cf. sys_x).
    sys_cl = con.ss(A_x-b_x*(k0_x3+1.0/p*k1_x3).T, b_x, c_x.T, d_x)
    pole, zeros = con.matlab.pzmap(sys_cl, True)
    #print(pole[0])
    poles.append(pole)
#print(poles2)
# FIX: was `plt.show` (no-op attribute access).
plt.show()

# another approach to plot; list(...) keeps this working whether the
# vector helpers return a list or a lazy map iterator.
real_part = list(real_vector(poles))
imag_part = list(imag_vector(poles))

# Display a window with a plot of real, imag
plt.plot(real_part, imag_part, '-x')
plt.show()
########################################################################## # # Copyright 2011 Jose Fonseca # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#
##########################################################################/

# apitrace-style description of the Direct3D 10 core API: HRESULTs, enums,
# flag sets and structures, expressed in the tracing DSL (Enum, Flags,
# Struct, Union, Polymorphic, ...) imported via the modules below.
from .dxgi import *
from .d3d10sdklayers import *


# D3D10-specific HRESULT codes added on top of the common set.
HRESULT = MAKE_HRESULT([
    "D3D10_ERROR_FILE_NOT_FOUND",
    "D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS",
    "D3DERR_INVALIDCALL",
    "D3DERR_WASSTILLDRAWING",
])

D3D10_PRIMITIVE_TOPOLOGY = Enum("D3D10_PRIMITIVE_TOPOLOGY", [
    "D3D10_PRIMITIVE_TOPOLOGY_UNDEFINED",
    "D3D10_PRIMITIVE_TOPOLOGY_POINTLIST",
    "D3D10_PRIMITIVE_TOPOLOGY_LINELIST",
    "D3D10_PRIMITIVE_TOPOLOGY_LINESTRIP",
    "D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
    "D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
    "D3D10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
    "D3D10_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
    "D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
    "D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
])

# --- Output-merger blend state ---

D3D10_BLEND = Enum("D3D10_BLEND", [
    "D3D10_BLEND_ZERO",
    "D3D10_BLEND_ONE",
    "D3D10_BLEND_SRC_COLOR",
    "D3D10_BLEND_INV_SRC_COLOR",
    "D3D10_BLEND_SRC_ALPHA",
    "D3D10_BLEND_INV_SRC_ALPHA",
    "D3D10_BLEND_DEST_ALPHA",
    "D3D10_BLEND_INV_DEST_ALPHA",
    "D3D10_BLEND_DEST_COLOR",
    "D3D10_BLEND_INV_DEST_COLOR",
    "D3D10_BLEND_SRC_ALPHA_SAT",
    "D3D10_BLEND_BLEND_FACTOR",
    "D3D10_BLEND_INV_BLEND_FACTOR",
    "D3D10_BLEND_SRC1_COLOR",
    "D3D10_BLEND_INV_SRC1_COLOR",
    "D3D10_BLEND_SRC1_ALPHA",
    "D3D10_BLEND_INV_SRC1_ALPHA",
])

D3D10_BLEND_OP = Enum("D3D10_BLEND_OP", [
    "D3D10_BLEND_OP_ADD",
    "D3D10_BLEND_OP_SUBTRACT",
    "D3D10_BLEND_OP_REV_SUBTRACT",
    "D3D10_BLEND_OP_MIN",
    "D3D10_BLEND_OP_MAX",
])

# 8 entries in the per-render-target arrays: one per simultaneous RT.
D3D10_BLEND_DESC = Struct("D3D10_BLEND_DESC", [
    (BOOL, "AlphaToCoverageEnable"),
    (Array(BOOL, 8), "BlendEnable"),
    (D3D10_BLEND, "SrcBlend"),
    (D3D10_BLEND, "DestBlend"),
    (D3D10_BLEND_OP, "BlendOp"),
    (D3D10_BLEND, "SrcBlendAlpha"),
    (D3D10_BLEND, "DestBlendAlpha"),
    (D3D10_BLEND_OP, "BlendOpAlpha"),
    (Array(UINT8, 8), "RenderTargetWriteMask"),
])

# --- Depth/stencil state ---

D3D10_DEPTH_WRITE_MASK = Enum("D3D10_DEPTH_WRITE_MASK", [
    "D3D10_DEPTH_WRITE_MASK_ZERO",
    "D3D10_DEPTH_WRITE_MASK_ALL",
])

D3D10_COMPARISON_FUNC = Enum("D3D10_COMPARISON_FUNC", [
    "D3D10_COMPARISON_NEVER",
    "D3D10_COMPARISON_LESS",
    "D3D10_COMPARISON_EQUAL",
    "D3D10_COMPARISON_LESS_EQUAL",
    "D3D10_COMPARISON_GREATER",
    "D3D10_COMPARISON_NOT_EQUAL",
    "D3D10_COMPARISON_GREATER_EQUAL",
    "D3D10_COMPARISON_ALWAYS",
])

D3D10_STENCIL_OP = Enum("D3D10_STENCIL_OP", [
    "D3D10_STENCIL_OP_KEEP",
    "D3D10_STENCIL_OP_ZERO",
    "D3D10_STENCIL_OP_REPLACE",
    "D3D10_STENCIL_OP_INCR_SAT",
    "D3D10_STENCIL_OP_DECR_SAT",
    "D3D10_STENCIL_OP_INVERT",
    "D3D10_STENCIL_OP_INCR",
    "D3D10_STENCIL_OP_DECR",
])

D3D10_DEPTH_STENCILOP_DESC = Struct("D3D10_DEPTH_STENCILOP_DESC", [
    (D3D10_STENCIL_OP, "StencilFailOp"),
    (D3D10_STENCIL_OP, "StencilDepthFailOp"),
    (D3D10_STENCIL_OP, "StencilPassOp"),
    (D3D10_COMPARISON_FUNC, "StencilFunc"),
])

D3D10_DEPTH_STENCIL_DESC = Struct("D3D10_DEPTH_STENCIL_DESC", [
    (BOOL, "DepthEnable"),
    (D3D10_DEPTH_WRITE_MASK, "DepthWriteMask"),
    (D3D10_COMPARISON_FUNC, "DepthFunc"),
    (BOOL, "StencilEnable"),
    (UINT8, "StencilReadMask"),
    (UINT8, "StencilWriteMask"),
    (D3D10_DEPTH_STENCILOP_DESC, "FrontFace"),
    (D3D10_DEPTH_STENCILOP_DESC, "BackFace"),
])

# --- Rasterizer state ---

D3D10_FILL_MODE = Enum("D3D10_FILL_MODE", [
    "D3D10_FILL_WIREFRAME",
    "D3D10_FILL_SOLID",
])

D3D10_CULL_MODE = Enum("D3D10_CULL_MODE", [
    "D3D10_CULL_NONE",
    "D3D10_CULL_FRONT",
    "D3D10_CULL_BACK",
])

D3D10_RASTERIZER_DESC = Struct("D3D10_RASTERIZER_DESC", [
    (D3D10_FILL_MODE, "FillMode"),
    (D3D10_CULL_MODE, "CullMode"),
    (BOOL, "FrontCounterClockwise"),
    (INT, "DepthBias"),
    (FLOAT, "DepthBiasClamp"),
    (FLOAT, "SlopeScaledDepthBias"),
    (BOOL, "DepthClipEnable"),
    (BOOL, "ScissorEnable"),
    (BOOL, "MultisampleEnable"),
    (BOOL, "AntialiasedLineEnable"),
])

# --- Sampler state ---

D3D10_FILTER = Enum("D3D10_FILTER", [
    "D3D10_FILTER_MIN_MAG_MIP_POINT",
    "D3D10_FILTER_MIN_MAG_POINT_MIP_LINEAR",
    "D3D10_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D10_FILTER_MIN_POINT_MAG_MIP_LINEAR",
    "D3D10_FILTER_MIN_LINEAR_MAG_MIP_POINT",
    "D3D10_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D10_FILTER_MIN_MAG_LINEAR_MIP_POINT",
    "D3D10_FILTER_MIN_MAG_MIP_LINEAR",
    "D3D10_FILTER_ANISOTROPIC",
    "D3D10_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
    "D3D10_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
    "D3D10_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D10_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
    "D3D10_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
    "D3D10_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D10_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
    "D3D10_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
    "D3D10_FILTER_COMPARISON_ANISOTROPIC",
    "D3D10_FILTER_TEXT_1BIT",
])

D3D10_TEXTURE_ADDRESS_MODE = Enum("D3D10_TEXTURE_ADDRESS_MODE", [
    "D3D10_TEXTURE_ADDRESS_WRAP",
    "D3D10_TEXTURE_ADDRESS_MIRROR",
    "D3D10_TEXTURE_ADDRESS_CLAMP",
    "D3D10_TEXTURE_ADDRESS_BORDER",
    "D3D10_TEXTURE_ADDRESS_MIRROR_ONCE",
])

D3D10_SAMPLER_DESC = Struct("D3D10_SAMPLER_DESC", [
    (D3D10_FILTER, "Filter"),
    (D3D10_TEXTURE_ADDRESS_MODE, "AddressU"),
    (D3D10_TEXTURE_ADDRESS_MODE, "AddressV"),
    (D3D10_TEXTURE_ADDRESS_MODE, "AddressW"),
    (FLOAT, "MipLODBias"),
    (UINT, "MaxAnisotropy"),
    (D3D10_COMPARISON_FUNC, "ComparisonFunc"),
    (Array(FLOAT, 4), "BorderColor"),
    (FLOAT, "MinLOD"),
    (FLOAT, "MaxLOD"),
])

# --- Capability queries ---

D3D10_FORMAT_SUPPORT = Flags(UINT, [
    "D3D10_FORMAT_SUPPORT_BUFFER",
    "D3D10_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
    "D3D10_FORMAT_SUPPORT_IA_INDEX_BUFFER",
    "D3D10_FORMAT_SUPPORT_SO_BUFFER",
    "D3D10_FORMAT_SUPPORT_TEXTURE1D",
    "D3D10_FORMAT_SUPPORT_TEXTURE2D",
    "D3D10_FORMAT_SUPPORT_TEXTURE3D",
    "D3D10_FORMAT_SUPPORT_TEXTURECUBE",
    "D3D10_FORMAT_SUPPORT_SHADER_LOAD",
    "D3D10_FORMAT_SUPPORT_SHADER_SAMPLE",
    "D3D10_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
    "D3D10_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
    "D3D10_FORMAT_SUPPORT_MIP",
    "D3D10_FORMAT_SUPPORT_MIP_AUTOGEN",
    "D3D10_FORMAT_SUPPORT_RENDER_TARGET",
    "D3D10_FORMAT_SUPPORT_BLENDABLE",
    "D3D10_FORMAT_SUPPORT_DEPTH_STENCIL",
    "D3D10_FORMAT_SUPPORT_CPU_LOCKABLE",
    "D3D10_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
    "D3D10_FORMAT_SUPPORT_DISPLAY",
    "D3D10_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
    "D3D10_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
    "D3D10_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
    "D3D10_FORMAT_SUPPORT_SHADER_GATHER",
    "D3D10_FORMAT_SUPPORT_BACK_BUFFER_CAST",
])

# --- Performance counters ---

D3D10_COUNTER = Enum("D3D10_COUNTER", [
    "D3D10_COUNTER_GPU_IDLE",
    "D3D10_COUNTER_VERTEX_PROCESSING",
    "D3D10_COUNTER_GEOMETRY_PROCESSING",
    "D3D10_COUNTER_PIXEL_PROCESSING",
    "D3D10_COUNTER_OTHER_GPU_PROCESSING",
    "D3D10_COUNTER_HOST_ADAPTER_BANDWIDTH_UTILIZATION",
    "D3D10_COUNTER_LOCAL_VIDMEM_BANDWIDTH_UTILIZATION",
    "D3D10_COUNTER_VERTEX_THROUGHPUT_UTILIZATION",
    "D3D10_COUNTER_TRIANGLE_SETUP_THROUGHPUT_UTILIZATION",
    "D3D10_COUNTER_FILLRATE_THROUGHPUT_UTILIZATION",
    "D3D10_COUNTER_VS_MEMORY_LIMITED",
    "D3D10_COUNTER_VS_COMPUTATION_LIMITED",
    "D3D10_COUNTER_GS_MEMORY_LIMITED",
    "D3D10_COUNTER_GS_COMPUTATION_LIMITED",
    "D3D10_COUNTER_PS_MEMORY_LIMITED",
    "D3D10_COUNTER_PS_COMPUTATION_LIMITED",
    "D3D10_COUNTER_POST_TRANSFORM_CACHE_HIT_RATE",
    "D3D10_COUNTER_TEXTURE_CACHE_HIT_RATE",
    "D3D10_COUNTER_DEVICE_DEPENDENT_0",
])

D3D10_COUNTER_DESC = Struct("D3D10_COUNTER_DESC", [
    (D3D10_COUNTER, "Counter"),
    (UINT, "MiscFlags"),
])

D3D10_COUNTER_TYPE = Enum("D3D10_COUNTER_TYPE", [
    "D3D10_COUNTER_TYPE_FLOAT32",
    "D3D10_COUNTER_TYPE_UINT16",
    "D3D10_COUNTER_TYPE_UINT32",
    "D3D10_COUNTER_TYPE_UINT64",
])

D3D10_COUNTER_INFO = Struct("D3D10_COUNTER_INFO", [
    (D3D10_COUNTER, "LastDeviceDependentCounter"),
    (UINT, "NumSimultaneousCounters"),
    (UINT8, "NumDetectableParallelUnits"),
])

# --- Resources ---

D3D10_RESOURCE_DIMENSION = Enum("D3D10_RESOURCE_DIMENSION", [
    "D3D10_RESOURCE_DIMENSION_UNKNOWN",
    "D3D10_RESOURCE_DIMENSION_BUFFER",
    "D3D10_RESOURCE_DIMENSION_TEXTURE1D",
    "D3D10_RESOURCE_DIMENSION_TEXTURE2D",
    "D3D10_RESOURCE_DIMENSION_TEXTURE3D",
])

D3D10_USAGE = Enum("D3D10_USAGE", [
    "D3D10_USAGE_DEFAULT",
    "D3D10_USAGE_IMMUTABLE",
    "D3D10_USAGE_DYNAMIC",
    "D3D10_USAGE_STAGING",
])

D3D10_BIND_FLAG = Flags(UINT, [
    "D3D10_BIND_VERTEX_BUFFER",
    "D3D10_BIND_INDEX_BUFFER",
    "D3D10_BIND_CONSTANT_BUFFER",
    "D3D10_BIND_SHADER_RESOURCE",
    "D3D10_BIND_STREAM_OUTPUT",
    "D3D10_BIND_RENDER_TARGET",
    "D3D10_BIND_DEPTH_STENCIL",
])

D3D10_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D10_CPU_ACCESS_WRITE",
    "D3D10_CPU_ACCESS_READ",
])

D3D10_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D10_RESOURCE_MISC_GENERATE_MIPS",
    "D3D10_RESOURCE_MISC_SHARED",
    "D3D10_RESOURCE_MISC_TEXTURECUBE",
    "D3D10_RESOURCE_MISC_SHARED_KEYEDMUTEX",
    "D3D10_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D10_BUFFER_DESC = Struct("D3D10_BUFFER_DESC", [
    (UINT, "ByteWidth"),
    (D3D10_USAGE, "Usage"),
    (D3D10_BIND_FLAG, "BindFlags"),
    (D3D10_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D10_RESOURCE_MISC_FLAG, "MiscFlags"),
])

D3D10_MAP = Enum("D3D10_MAP", [
    "D3D10_MAP_READ",
    "D3D10_MAP_WRITE",
    "D3D10_MAP_READ_WRITE",
    "D3D10_MAP_WRITE_DISCARD",
    "D3D10_MAP_WRITE_NO_OVERWRITE",
])

D3D10_TEXTURE1D_DESC = Struct("D3D10_TEXTURE1D_DESC", [
    (UINT, "Width"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (D3D10_USAGE, "Usage"),
    (D3D10_BIND_FLAG, "BindFlags"),
    (D3D10_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D10_RESOURCE_MISC_FLAG, "MiscFlags"),
])

D3D10_TEXTURE2D_DESC = Struct("D3D10_TEXTURE2D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D10_USAGE, "Usage"),
    (D3D10_BIND_FLAG, "BindFlags"),
    (D3D10_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D10_RESOURCE_MISC_FLAG, "MiscFlags"),
])

D3D10_TEXTURE3D_DESC = Struct("D3D10_TEXTURE3D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "Depth"),
    (UINT, "MipLevels"),
    (DXGI_FORMAT, "Format"),
    (D3D10_USAGE, "Usage"),
    (D3D10_BIND_FLAG, "BindFlags"),
    (D3D10_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D10_RESOURCE_MISC_FLAG, "MiscFlags"),
])

# --- Depth-stencil views ---

D3D10_DSV_DIMENSION = Enum("D3D10_DSV_DIMENSION", [
    "D3D10_DSV_DIMENSION_UNKNOWN",
    "D3D10_DSV_DIMENSION_TEXTURE1D",
    "D3D10_DSV_DIMENSION_TEXTURE1DARRAY",
    "D3D10_DSV_DIMENSION_TEXTURE2D",
    "D3D10_DSV_DIMENSION_TEXTURE2DARRAY",
    "D3D10_DSV_DIMENSION_TEXTURE2DMS",
    "D3D10_DSV_DIMENSION_TEXTURE2DMSARRAY",
])

D3D10_TEX1D_DSV = Struct("D3D10_TEX1D_DSV", [
    (UINT, "MipSlice"),
])

D3D10_TEX1D_ARRAY_DSV = Struct("D3D10_TEX1D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2D_DSV = Struct("D3D10_TEX2D_DSV", [
    (UINT, "MipSlice"),
])

D3D10_TEX2D_ARRAY_DSV = Struct("D3D10_TEX2D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2DMS_DSV = Struct("D3D10_TEX2DMS_DSV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D10_TEX2DMS_ARRAY_DSV = Struct("D3D10_TEX2DMS_ARRAY_DSV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

# The union member is selected at runtime by ViewDimension.
D3D10_DEPTH_STENCIL_VIEW_DESC = Struct("D3D10_DEPTH_STENCIL_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D10_DSV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D10_DSV_DIMENSION_TEXTURE1D", D3D10_TEX1D_DSV, "Texture1D"),
        ("D3D10_DSV_DIMENSION_TEXTURE1DARRAY", D3D10_TEX1D_ARRAY_DSV, "Texture1DArray"),
        ("D3D10_DSV_DIMENSION_TEXTURE2D", D3D10_TEX2D_DSV, "Texture2D"),
        ("D3D10_DSV_DIMENSION_TEXTURE2DARRAY", D3D10_TEX2D_ARRAY_DSV, "Texture2DArray"),
        ("D3D10_DSV_DIMENSION_TEXTURE2DMS", D3D10_TEX2DMS_DSV, "Texture2DMS"),
        ("D3D10_DSV_DIMENSION_TEXTURE2DMSARRAY", D3D10_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
    ]), None),
])

# --- Render-target views ---

D3D10_RTV_DIMENSION = Enum("D3D10_RTV_DIMENSION", [
    "D3D10_RTV_DIMENSION_UNKNOWN",
    "D3D10_RTV_DIMENSION_BUFFER",
    "D3D10_RTV_DIMENSION_TEXTURE1D",
    "D3D10_RTV_DIMENSION_TEXTURE1DARRAY",
    "D3D10_RTV_DIMENSION_TEXTURE2D",
    "D3D10_RTV_DIMENSION_TEXTURE2DARRAY",
    "D3D10_RTV_DIMENSION_TEXTURE2DMS",
    "D3D10_RTV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D10_RTV_DIMENSION_TEXTURE3D",
])

D3D10_BUFFER_RTV = Struct("D3D10_BUFFER_RTV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
])

D3D10_TEX1D_RTV = Struct("D3D10_TEX1D_RTV", [
    (UINT, "MipSlice"),
])

D3D10_TEX1D_ARRAY_RTV = Struct("D3D10_TEX1D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2D_RTV = Struct("D3D10_TEX2D_RTV", [
    (UINT, "MipSlice"),
])

D3D10_TEX2D_ARRAY_RTV = Struct("D3D10_TEX2D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2DMS_RTV = Struct("D3D10_TEX2DMS_RTV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D10_TEX2DMS_ARRAY_RTV = Struct("D3D10_TEX2DMS_ARRAY_RTV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX3D_RTV = Struct("D3D10_TEX3D_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

# The union member is selected at runtime by ViewDimension.
D3D10_RENDER_TARGET_VIEW_DESC = Struct("D3D10_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D10_RTV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D10_RTV_DIMENSION_BUFFER", D3D10_BUFFER_RTV, "Buffer"),
        ("D3D10_RTV_DIMENSION_TEXTURE1D", D3D10_TEX1D_RTV, "Texture1D"),
        ("D3D10_RTV_DIMENSION_TEXTURE1DARRAY", D3D10_TEX1D_ARRAY_RTV, "Texture1DArray"),
        ("D3D10_RTV_DIMENSION_TEXTURE2D", D3D10_TEX2D_RTV, "Texture2D"),
        ("D3D10_RTV_DIMENSION_TEXTURE2DARRAY", D3D10_TEX2D_ARRAY_RTV, "Texture2DArray"),
        ("D3D10_RTV_DIMENSION_TEXTURE2DMS", D3D10_TEX2DMS_RTV, "Texture2DMS"),
        ("D3D10_RTV_DIMENSION_TEXTURE2DMSARRAY", D3D10_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        ("D3D10_RTV_DIMENSION_TEXTURE3D", D3D10_TEX3D_RTV, "Texture3D"),
    ]), None),
])

# --- Shader-resource views ---

D3D10_SRV_DIMENSION = Enum("D3D10_SRV_DIMENSION", [
    "D3D10_SRV_DIMENSION_UNKNOWN",
    "D3D10_SRV_DIMENSION_BUFFER",
    "D3D10_SRV_DIMENSION_TEXTURE1D",
    "D3D10_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D10_SRV_DIMENSION_TEXTURE2D",
    "D3D10_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D10_SRV_DIMENSION_TEXTURE2DMS",
    "D3D10_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D10_SRV_DIMENSION_TEXTURE3D",
    "D3D10_SRV_DIMENSION_TEXTURECUBE",
])

D3D10_BUFFER_SRV = Struct("D3D10_BUFFER_SRV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
])

D3D10_TEX1D_SRV = Struct("D3D10_TEX1D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D10_TEX1D_ARRAY_SRV = Struct("D3D10_TEX1D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2D_SRV = Struct("D3D10_TEX2D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D10_TEX2D_ARRAY_SRV = Struct("D3D10_TEX2D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX2DMS_SRV = Struct("D3D10_TEX2DMS_SRV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D10_TEX2DMS_ARRAY_SRV = Struct("D3D10_TEX2DMS_ARRAY_SRV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D10_TEX3D_SRV = Struct("D3D10_TEX3D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D10_TEXCUBE_SRV = Struct("D3D10_TEXCUBE_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

# The union member is selected at runtime by ViewDimension.
D3D10_SHADER_RESOURCE_VIEW_DESC = Struct("D3D10_SHADER_RESOURCE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D10_SRV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D10_SRV_DIMENSION_BUFFER", D3D10_BUFFER_SRV, "Buffer"),
        ("D3D10_SRV_DIMENSION_TEXTURE1D", D3D10_TEX1D_SRV, "Texture1D"),
        ("D3D10_SRV_DIMENSION_TEXTURE1DARRAY", D3D10_TEX1D_ARRAY_SRV, "Texture1DArray"),
        ("D3D10_SRV_DIMENSION_TEXTURE2D", D3D10_TEX2D_SRV, "Texture2D"),
        ("D3D10_SRV_DIMENSION_TEXTURE2DARRAY", D3D10_TEX2D_ARRAY_SRV, "Texture2DArray"),
        ("D3D10_SRV_DIMENSION_TEXTURE2DMS", D3D10_TEX2DMS_SRV, "Texture2DMS"),
        ("D3D10_SRV_DIMENSION_TEXTURE2DMSARRAY", D3D10_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        ("D3D10_SRV_DIMENSION_TEXTURE3D", D3D10_TEX3D_SRV, "Texture3D"),
        ("D3D10_SRV_DIMENSION_TEXTURECUBE", D3D10_TEXCUBE_SRV, "TextureCube"),
    ]), None),
])

# --- Miscellaneous structures ---

D3D10_BOX = Struct("D3D10_BOX", [
    (UINT, "left"),
    (UINT, "top"),
    (UINT, "front"),
    (UINT, "right"),
    (UINT, "bottom"),
    (UINT, "back"),
])

# pSysMem's byte size is computed from the resource description by the
# tracer helper _calcSubresourceSize.
D3D10_SUBRESOURCE_DATA = Struct("D3D10_SUBRESOURCE_DATA", [
    (Blob(Const(Void), "_calcSubresourceSize(pDesc, {i}, {self}.SysMemPitch, {self}.SysMemSlicePitch)"), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D10_SO_DECLARATION_ENTRY = Struct("D3D10_SO_DECLARATION_ENTRY", [
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (BYTE, "StartComponent"),
    (BYTE, "ComponentCount"),
    (BYTE, "OutputSlot"),
])

D3D10_INPUT_CLASSIFICATION = Enum("D3D10_INPUT_CLASSIFICATION", [
    "D3D10_INPUT_PER_VERTEX_DATA",
    "D3D10_INPUT_PER_INSTANCE_DATA",
])

D3D10_INPUT_ELEMENT_DESC = Struct("D3D10_INPUT_ELEMENT_DESC", [
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (DXGI_FORMAT, "Format"),
    (UINT, "InputSlot"),
    (UINT, "AlignedByteOffset"),
    (D3D10_INPUT_CLASSIFICATION, "InputSlotClass"),
    (UINT, "InstanceDataStepRate"),
])

# --- Queries ---

D3D10_QUERY = Enum("D3D10_QUERY", [
    "D3D10_QUERY_EVENT",
    "D3D10_QUERY_OCCLUSION",
    "D3D10_QUERY_TIMESTAMP",
    "D3D10_QUERY_TIMESTAMP_DISJOINT",
    "D3D10_QUERY_PIPELINE_STATISTICS",
    "D3D10_QUERY_OCCLUSION_PREDICATE",
    "D3D10_QUERY_SO_STATISTICS",
    "D3D10_QUERY_SO_OVERFLOW_PREDICATE",
])

D3D10_QUERY_MISC_FLAG = Flags(UINT, [
    "D3D10_QUERY_MISC_PREDICATEHINT",
])

D3D10_QUERY_DESC = Struct("D3D10_QUERY_DESC", [
    (D3D10_QUERY, "Query"),
    (D3D10_QUERY_MISC_FLAG, "MiscFlags"),
])

D3D10_RECT = Alias("D3D10_RECT", RECT)

D3D10_VIEWPORT = Struct("D3D10_VIEWPORT", [
    (INT, "TopLeftX"),
    (INT, "TopLeftY"),
    (UINT, "Width"),
    (UINT, "Height"),
    (FLOAT, "MinDepth"),
    (FLOAT, "MaxDepth"),
])

# Mapped-texture results: pData length is recovered via _MappedSize.
D3D10_MAPPED_TEXTURE2D = Struct("D3D10_MAPPED_TEXTURE2D", [
    (LinearPointer(Void, "_MappedSize"), "pData"),
    (UINT, "RowPitch"),
])

D3D10_MAPPED_TEXTURE3D = Struct("D3D10_MAPPED_TEXTURE3D", [
    (LinearPointer(Void, "_MappedSize"), "pData"),
    (UINT, "RowPitch"),
    (UINT, "DepthPitch"),
])

D3D10_MAP_FLAG = Flags(UINT, [
    "D3D10_MAP_FLAG_DO_NOT_WAIT",
])

D3D10_CLEAR_FLAG = Flags(UINT, [
    "D3D10_CLEAR_DEPTH",
    "D3D10_CLEAR_STENCIL",
])

D3D10_COLOR_WRITE_ENABLE = Flags(UINT, [
    "D3D10_COLOR_WRITE_ENABLE_ALL",
    "D3D10_COLOR_WRITE_ENABLE_RED",
    "D3D10_COLOR_WRITE_ENABLE_GREEN",
    "D3D10_COLOR_WRITE_ENABLE_BLUE",
    "D3D10_COLOR_WRITE_ENABLE_ALPHA",
])

D3D10_TEXTURECUBE_FACE = Enum("D3D10_TEXTURECUBE_FACE", [
    "D3D10_TEXTURECUBE_FACE_POSITIVE_X",
    "D3D10_TEXTURECUBE_FACE_NEGATIVE_X",
    "D3D10_TEXTURECUBE_FACE_POSITIVE_Y",
    "D3D10_TEXTURECUBE_FACE_NEGATIVE_Y",
    "D3D10_TEXTURECUBE_FACE_POSITIVE_Z",
    "D3D10_TEXTURECUBE_FACE_NEGATIVE_Z",
])

D3D10_ASYNC_GETDATA_FLAG = Flags(UINT, [
    "D3D10_ASYNC_GETDATA_DONOTFLUSH",
])

D3D10_FILTER_TYPE = Enum("D3D10_FILTER_TYPE", [
    "D3D10_FILTER_TYPE_POINT",
    "D3D10_FILTER_TYPE_LINEAR",
])

# --- Query result payloads ---

D3D10_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D10_QUERY_DATA_TIMESTAMP_DISJOINT", [
    (UINT64, "Frequency"),
    (BOOL, "Disjoint"),
])

D3D10_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D10_QUERY_DATA_PIPELINE_STATISTICS", [
    (UINT64, "IAVertices"),
    (UINT64, "IAPrimitives"),
    (UINT64, "VSInvocations"),
    (UINT64, "GSInvocations"),
    (UINT64, "GSPrimitives"),
    (UINT64, "CInvocations"),
    (UINT64, "CPrimitives"),
    (UINT64, "PSInvocations"),
])

D3D10_QUERY_DATA_SO_STATISTICS = Struct("D3D10_QUERY_DATA_SO_STATISTICS", [
    (UINT64, "NumPrimitivesWritten"),
    (UINT64, "PrimitivesStorageNeeded"),
])

# GetData's output type depends on the query type of the receiving object.
D3D10_QUERY_DATA = Polymorphic("_getQueryType(_this)", [
    ("D3D10_QUERY_EVENT", Pointer(BOOL)),
    ("D3D10_QUERY_OCCLUSION", Pointer(UINT64)),
    ("D3D10_QUERY_TIMESTAMP", Pointer(UINT64)),
    ("D3D10_QUERY_TIMESTAMP_DISJOINT", Pointer(D3D10_QUERY_DATA_TIMESTAMP_DISJOINT)),
    ("D3D10_QUERY_PIPELINE_STATISTICS", Pointer(D3D10_QUERY_DATA_PIPELINE_STATISTICS)),
    ("D3D10_QUERY_OCCLUSION_PREDICATE", Pointer(BOOL)),
    ("D3D10_QUERY_SO_STATISTICS", Pointer(D3D10_QUERY_DATA_SO_STATISTICS)),
    ("D3D10_QUERY_SO_OVERFLOW_PREDICATE", Pointer(BOOL)),
], Blob(Void, "DataSize"), contextLess=False)

# TODO: Handle ID3D10Counter::GetData too.
# Counter results: every counter variant listed here yields a FLOAT32 value;
# the variant is chosen by the counter type of the receiving object.
D3D10_COUNTER_DATA = Polymorphic("_getCounterType(_this)", [
    ("D3D10_COUNTER_GPU_IDLE", Pointer(FLOAT32)),
    ("D3D10_COUNTER_VERTEX_PROCESSING", Pointer(FLOAT32)),
    ("D3D10_COUNTER_GEOMETRY_PROCESSING", Pointer(FLOAT32)),
    ("D3D10_COUNTER_PIXEL_PROCESSING", Pointer(FLOAT32)),
    ("D3D10_COUNTER_OTHER_GPU_PROCESSING", Pointer(FLOAT32)),
    ("D3D10_COUNTER_HOST_ADAPTER_BANDWIDTH_UTILIZATION", Pointer(FLOAT32)),
    ("D3D10_COUNTER_LOCAL_VIDMEM_BANDWIDTH_UTILIZATION", Pointer(FLOAT32)),
    ("D3D10_COUNTER_VERTEX_THROUGHPUT_UTILIZATION", Pointer(FLOAT32)),
    ("D3D10_COUNTER_TRIANGLE_SETUP_THROUGHPUT_UTILIZATION", Pointer(FLOAT32)),
    ("D3D10_COUNTER_FILLRATE_THROUGHPUT_UTILIZATION", Pointer(FLOAT32)),
    ("D3D10_COUNTER_VS_MEMORY_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_VS_COMPUTATION_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_GS_MEMORY_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_GS_COMPUTATION_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_PS_MEMORY_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_PS_COMPUTATION_LIMITED", Pointer(FLOAT32)),
    ("D3D10_COUNTER_POST_TRANSFORM_CACHE_HIT_RATE", Pointer(FLOAT32)),
    ("D3D10_COUNTER_TEXTURE_CACHE_HIT_RATE", Pointer(FLOAT32)),
], Blob(Void, "DataSize"), contextLess=False)

# Device-creation flags.
# FIX: the original list contained D3D10_CREATE_DEVICE_BGRA_SUPPORT and
# D3D10_CREATE_DEVICE_STRICT_VALIDATION twice each; the duplicate entries
# have been removed.
D3D10_CREATE_DEVICE_FLAG = Flags(UINT, [
    "D3D10_CREATE_DEVICE_SINGLETHREADED",
    "D3D10_CREATE_DEVICE_DEBUG",
    "D3D10_CREATE_DEVICE_SWITCH_TO_REF",
    "D3D10_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
    "D3D10_CREATE_DEVICE_ALLOW_NULL_FROM_MAP",
    "D3D10_CREATE_DEVICE_BGRA_SUPPORT",
    "D3D10_CREATE_DEVICE_STRICT_VALIDATION",
    "D3D10_CREATE_DEVICE_PREVENT_ALTERING_LAYER_SETTINGS_FROM_REGISTRY",
    "D3D10_CREATE_DEVICE_DEBUGGABLE",
])

D3D10_RAISE_FLAG = Flags(UINT, [
    "D3D10_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])

D3D10_SAMPLE_MASK = FakeEnum(UINT, [
    "D3D10_DEFAULT_SAMPLE_MASK"
])

# --- Interface hierarchy ---

ID3D10DeviceChild = Interface("ID3D10DeviceChild", IUnknown)
ID3D10Resource = Interface("ID3D10Resource", ID3D10DeviceChild)
ID3D10Buffer = Interface("ID3D10Buffer", ID3D10Resource)
ID3D10Texture1D = Interface("ID3D10Texture1D", ID3D10Resource)
ID3D10Texture2D = Interface("ID3D10Texture2D", ID3D10Resource)
ID3D10Texture3D = Interface("ID3D10Texture3D", ID3D10Resource)
ID3D10View = Interface("ID3D10View", ID3D10DeviceChild)
ID3D10DepthStencilView = Interface("ID3D10DepthStencilView", ID3D10View)
ID3D10RenderTargetView = Interface("ID3D10RenderTargetView", ID3D10View)
ID3D10ShaderResourceView = Interface("ID3D10ShaderResourceView", ID3D10View)
ID3D10BlendState = Interface("ID3D10BlendState", ID3D10DeviceChild)
ID3D10DepthStencilState = Interface("ID3D10DepthStencilState", ID3D10DeviceChild)
ID3D10GeometryShader = Interface("ID3D10GeometryShader", ID3D10DeviceChild)
ID3D10InputLayout = Interface("ID3D10InputLayout", ID3D10DeviceChild)
ID3D10PixelShader = Interface("ID3D10PixelShader", ID3D10DeviceChild)
ID3D10RasterizerState = Interface("ID3D10RasterizerState", ID3D10DeviceChild)
ID3D10SamplerState = Interface("ID3D10SamplerState", ID3D10DeviceChild)
ID3D10VertexShader = Interface("ID3D10VertexShader", ID3D10DeviceChild)
ID3D10Asynchronous = Interface("ID3D10Asynchronous", ID3D10DeviceChild)
ID3D10Counter = Interface("ID3D10Counter", ID3D10Asynchronous)
ID3D10Query = Interface("ID3D10Query", ID3D10Asynchronous)
ID3D10Predicate = Interface("ID3D10Predicate", ID3D10Query)
ID3D10Device = Interface("ID3D10Device", IUnknown)
ID3D10Multithread = Interface("ID3D10Multithread", IUnknown)

# --- Method tables ---

ID3D10DeviceChild.methods += [
    StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D10Device)), "ppDevice")]),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), InOut(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")], sideeffects=False),
]

ID3D10Resource.methods += [
    StdMethod(Void, "GetType", [Out(Pointer(D3D10_RESOURCE_DIMENSION), "rType")], sideeffects=False),
    StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
    StdMethod(UINT, "GetEvictionPriority", [], sideeffects=False),
]

ID3D10Buffer.methods += [
    StdMethod(HRESULT, "Map", [(D3D10_MAP, "MapType"), (D3D10_MAP_FLAG, "MapFlags"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppData")]),
    StdMethod(Void, "Unmap", []),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_BUFFER_DESC), "pDesc")], sideeffects=False),
]

ID3D10Texture1D.methods += [
    StdMethod(HRESULT, "Map", [(UINT, "Subresource"), (D3D10_MAP, "MapType"), (D3D10_MAP_FLAG, "MapFlags"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppData")]),
    StdMethod(Void, "Unmap", [(UINT, "Subresource")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_TEXTURE1D_DESC), "pDesc")], sideeffects=False),
]

ID3D10Texture2D.methods += [
    StdMethod(HRESULT, "Map", [(UINT, "Subresource"), (D3D10_MAP, "MapType"), (D3D10_MAP_FLAG, "MapFlags"), Out(Pointer(D3D10_MAPPED_TEXTURE2D), "pMappedTex2D")]),
    StdMethod(Void, "Unmap", [(UINT, "Subresource")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_TEXTURE2D_DESC), "pDesc")], sideeffects=False),
]

ID3D10Texture3D.methods += [
    StdMethod(HRESULT, "Map", [(UINT, "Subresource"), (D3D10_MAP, "MapType"), (D3D10_MAP_FLAG, "MapFlags"), Out(Pointer(D3D10_MAPPED_TEXTURE3D), "pMappedTex3D")]),
    StdMethod(Void, "Unmap", [(UINT, "Subresource")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_TEXTURE3D_DESC), "pDesc")], sideeffects=False),
]

ID3D10View.methods += [
    StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D10Resource)), "ppResource")]),
]

ID3D10DepthStencilView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_DEPTH_STENCIL_VIEW_DESC), "pDesc")], sideeffects=False),
]

ID3D10RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_RENDER_TARGET_VIEW_DESC), "pDesc")], sideeffects=False),
]
# Remaining per-interface method tables.  `sideeffects=False` marks methods
# that only read state, so the retracer may skip them.
ID3D10ShaderResourceView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_SHADER_RESOURCE_VIEW_DESC), "pDesc")], sideeffects=False),
]

ID3D10BlendState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_BLEND_DESC), "pDesc")], sideeffects=False),
]

ID3D10DepthStencilState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_DEPTH_STENCIL_DESC), "pDesc")], sideeffects=False),
]

ID3D10RasterizerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_RASTERIZER_DESC), "pDesc")], sideeffects=False),
]

ID3D10SamplerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_SAMPLER_DESC), "pDesc")], sideeffects=False),
]

# GetData's blob is decoded through the D3D10_QUERY_DATA polymorphic type.
ID3D10Asynchronous.methods += [
    StdMethod(Void, "Begin", []),
    StdMethod(Void, "End", []),
    StdMethod(HRESULT, "GetData", [Out(D3D10_QUERY_DATA, "pData"), (UINT, "DataSize"), (D3D10_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
    StdMethod(UINT, "GetDataSize", [], sideeffects=False),
]

ID3D10Counter.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_COUNTER_DESC), "pDesc")], sideeffects=False),
]

ID3D10Query.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D10_QUERY_DESC), "pDesc")], sideeffects=False),
]

# The full ID3D10Device vtable, in declaration order (order must match the
# COM vtable layout, so entries must not be reordered).
ID3D10Device.methods += [
    StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D10Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D10ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D10PixelShader), "pPixelShader")]),
    StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D10SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D10VertexShader), "pVertexShader")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D10Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D10InputLayout), "pInputLayout")]),
    StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D10Buffer)), "NumBuffers"), "ppVertexBuffers"), (Array(Const(UINT), "NumBuffers"), "pStrides"), (Array(Const(UINT), "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D10Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
    StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D10Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D10GeometryShader), "pShader")]),
    StdMethod(Void, "IASetPrimitiveTopology", [(D3D10_PRIMITIVE_TOPOLOGY, "Topology")]),
    StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D10ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D10SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "SetPredication", [(ObjPointer(ID3D10Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
    StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D10ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D10SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D10RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D10DepthStencilView), "pDepthStencilView")]),
    StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D10BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (D3D10_SAMPLE_MASK, "SampleMask")]),
    StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D10DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
    StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D10Buffer)), "NumBuffers"), "ppSOTargets"), (Array(Const(UINT), "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "DrawAuto", []),
    StdMethod(Void, "RSSetState", [(ObjPointer(ID3D10RasterizerState), "pRasterizerState")]),
    StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D10_VIEWPORT), "NumViewports"), "pViewports")]),
    StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D10_RECT), "NumRects"), "pRects")]),
    StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D10Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D10Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D10_BOX)), "pSrcBox")]),
    StdMethod(Void, "CopyResource", [(ObjPointer(ID3D10Resource), "pDstResource"), (ObjPointer(ID3D10Resource), "pSrcResource")]),
    # The source blob size is computed from the destination layout by the
    # _calcSubresourceSize helper (defined elsewhere in the project).
    StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D10Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D10_BOX)), "pDstBox"), (Blob(Const(Void), "_calcSubresourceSize(pDstResource, DstSubresource, pDstBox, SrcRowPitch, SrcDepthPitch)"), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
    StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D10RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
    StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D10DepthStencilView), "pDepthStencilView"), (D3D10_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
    StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D10ShaderResourceView), "pShaderResourceView")]),
    StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D10Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D10Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
    StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D10Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D10ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D10PixelShader)), "ppPixelShader")]),
    StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D10SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D10VertexShader)), "ppVertexShader")]),
    StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D10Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D10InputLayout)), "ppInputLayout")]),
    StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D10Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Array(UINT, "NumBuffers"), "pStrides"), Out(Array(UINT, "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D10Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
    StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D10Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D10GeometryShader)), "ppGeometryShader")]),
    StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D10_PRIMITIVE_TOPOLOGY), "pTopology")], sideeffects=False),
    StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D10ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D10SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D10Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
    StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D10ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D10SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), Out(Array(ObjPointer(ID3D10RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D10DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D10BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(D3D10_SAMPLE_MASK), "pSampleMask")]),
    StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D10DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
    StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D10Buffer), "NumBuffers"), "ppSOTargets"), Out(Array(UINT, "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D10RasterizerState)), "ppRasterizerState")]),
    StdMethod(Void, "RSGetViewports", [InOut(Pointer(UINT), "pNumViewports"), Out(Array(D3D10_VIEWPORT, "*pNumViewports"), "pViewports")], sideeffects=False),
    StdMethod(Void, "RSGetScissorRects", [InOut(Pointer(UINT), "pNumRects"), Out(Array(D3D10_RECT, "*pNumRects"), "pRects")], sideeffects=False),
    StdMethod(HRESULT, "GetDeviceRemovedReason", [], sideeffects=False),
    StdMethod(HRESULT, "SetExceptionMode", [(D3D10_RAISE_FLAG, "RaiseFlags")]),
    StdMethod(D3D10_RAISE_FLAG, "GetExceptionMode", [], sideeffects=False),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), InOut(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")], sideeffects=False),
    StdMethod(Void, "ClearState", []),
    StdMethod(Void, "Flush", []),
    StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D10_BUFFER_DESC)), "pDesc"), (Array(Const(D3D10_SUBRESOURCE_DATA), 1), "pInitialData"), Out(Pointer(ObjPointer(ID3D10Buffer)), "ppBuffer")]),
    # Texture initial data arrays are sized by the _getNumSubResources
    # helper (mips x array slices), defined elsewhere in the project.
    StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D10_TEXTURE1D_DESC)), "pDesc"), (Array(Const(D3D10_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D10Texture1D)), "ppTexture1D")]),
    StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D10_TEXTURE2D_DESC)), "pDesc"), (Array(Const(D3D10_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D10Texture2D)), "ppTexture2D")]),
    StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D10_TEXTURE3D_DESC)), "pDesc"), (Array(Const(D3D10_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D10Texture3D)), "ppTexture3D")]),
    StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D10Resource), "pResource"), (Pointer(Const(D3D10_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D10ShaderResourceView)), "ppSRView")]),
    StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D10Resource), "pResource"), (Pointer(Const(D3D10_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D10RenderTargetView)), "ppRTView")]),
    StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D10Resource), "pResource"), (Pointer(Const(D3D10_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D10DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D10_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D10InputLayout)), "ppInputLayout")]),
    StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D10VertexShader)), "ppVertexShader")]),
    StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D10GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D10_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (UINT, "OutputStreamStride"), Out(Pointer(ObjPointer(ID3D10GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D10PixelShader)), "ppPixelShader")]),
    StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D10_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D10BlendState)), "ppBlendState")]),
    StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D10_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D10DepthStencilState)), "ppDepthStencilState")]),
    StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D10_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D10RasterizerState)), "ppRasterizerState")]),
    StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D10_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D10SamplerState)), "ppSamplerState")]),
    StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D10_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D10Query)), "ppQuery")]),
    StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D10_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D10Predicate)), "ppPredicate")]),
    StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D10_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D10Counter)), "ppCounter")]),
    StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D10_FORMAT_SUPPORT), "pFormatSupport")], sideeffects=False),
    StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")], sideeffects=False),
    StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D10_COUNTER_INFO), "pCounterInfo")], sideeffects=False),
    StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D10_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D10_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")], sideeffects=False),
    StdMethod(D3D10_CREATE_DEVICE_FLAG, "GetCreationFlags", [], sideeffects=False),
    StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
    StdMethod(Void, "SetTextFilterSize", [(UINT, "Width"), (UINT, "Height")]),
    StdMethod(Void, "GetTextFilterSize", [Out(Pointer(UINT), "pWidth"), Out(Pointer(UINT), "pHeight")], sideeffects=False),
]

ID3D10Multithread.methods += [
    StdMethod(Void, "Enter", []),
    StdMethod(Void, "Leave", []),
    StdMethod(BOOL, "SetMultithreadProtected", [(BOOL, "bMTProtect")]),
    StdMethod(BOOL, "GetMultithreadProtected", [], sideeffects=False),
]

# Undocumented debug-layer test interface.
ID3D10DebugTest = Interface('ID3D10DebugTest', IUnknown)
ID3D10DebugTest.methods += [
    StdMethod(Void, 'PreventFurtherExecutionOnError', [(BOOL, 'Enable')]),
    StdMethod(Void, 'VSSetConstantBuffer14', [(ObjPointer(ID3D10Buffer), 'pCB14')]),
    StdMethod(Void, 'GSSetConstantBuffer14', [(ObjPointer(ID3D10Buffer), 'pCB14')]),
    StdMethod(Void, 'PSSetConstantBuffer14', [(ObjPointer(ID3D10Buffer), 'pCB14')]),
]

D3D10_DRIVER_TYPE = Enum("D3D10_DRIVER_TYPE", [
    "D3D10_DRIVER_TYPE_HARDWARE",
    "D3D10_DRIVER_TYPE_REFERENCE",
    "D3D10_DRIVER_TYPE_NULL",
    "D3D10_DRIVER_TYPE_SOFTWARE",
    "D3D10_DRIVER_TYPE_WARP",
])

# d3d10.dll entry points and the interfaces whose wrappers it exports.
d3d10 = Module("d3d10")
d3d10.addFunctions([
    StdFunction(HRESULT, "D3D10CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D10_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D10_CREATE_DEVICE_FLAG, "Flags"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D10Device)), "ppDevice")]),
    StdFunction(HRESULT, "D3D10CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D10_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D10_CREATE_DEVICE_FLAG, "Flags"), (UINT, "SDKVersion"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D10Device)), "ppDevice")]),
])
d3d10.addInterfaces([
    ID3D10Debug,
    ID3D10InfoQueue,
    ID3D10Multithread,
    ID3D10SwitchToRef,
    ID3D10DebugTest,
])

#
# D3D10.1
#

D3D10_FEATURE_LEVEL1 = Enum("D3D10_FEATURE_LEVEL1", [
    "D3D10_FEATURE_LEVEL_10_0",
    "D3D10_FEATURE_LEVEL_10_1",
    "D3D10_FEATURE_LEVEL_9_1",
    "D3D10_FEATURE_LEVEL_9_2",
    "D3D10_FEATURE_LEVEL_9_3",
])

D3D10_RENDER_TARGET_BLEND_DESC1 = Struct("D3D10_RENDER_TARGET_BLEND_DESC1", [
    (BOOL, "BlendEnable"),
    (D3D10_BLEND, "SrcBlend"),
    (D3D10_BLEND, "DestBlend"),
    (D3D10_BLEND_OP, "BlendOp"),
    (D3D10_BLEND, "SrcBlendAlpha"),
    (D3D10_BLEND, "DestBlendAlpha"),
    (D3D10_BLEND_OP, "BlendOpAlpha"),
    (UINT8, "RenderTargetWriteMask"),
])

D3D10_SIMULTANEOUS_RENDER_TARGET_COUNT = 8

D3D10_BLEND_DESC1 = Struct("D3D10_BLEND_DESC1", [
    (BOOL, "AlphaToCoverageEnable"),
    (BOOL, "IndependentBlendEnable"),
    (Array(D3D10_RENDER_TARGET_BLEND_DESC1, D3D10_SIMULTANEOUS_RENDER_TARGET_COUNT), "RenderTarget"),
])

ID3D10BlendState1 = Interface("ID3D10BlendState1", ID3D10BlendState)
ID3D10BlendState1.methods += [
    StdMethod(Void, "GetDesc1", [Out(Pointer(D3D10_BLEND_DESC1), "pDesc")], sideeffects=False),
]

D3D10_SRV_DIMENSION1 = Enum("D3D10_SRV_DIMENSION1", [
    "D3D10_1_SRV_DIMENSION_UNKNOWN",
    "D3D10_1_SRV_DIMENSION_BUFFER",
    "D3D10_1_SRV_DIMENSION_TEXTURE1D",
    "D3D10_1_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D10_1_SRV_DIMENSION_TEXTURE2D",
    "D3D10_1_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D10_1_SRV_DIMENSION_TEXTURE2DMS",
    "D3D10_1_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D10_1_SRV_DIMENSION_TEXTURE3D",
    "D3D10_1_SRV_DIMENSION_TEXTURECUBE",
    "D3D10_1_SRV_DIMENSION_TEXTURECUBEARRAY",
])

D3D10_TEXCUBE_ARRAY_SRV1 = Struct("D3D10_TEXCUBE_ARRAY_SRV1", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "First2DArrayFace"),
    (UINT, "NumCubes"),
])

# Union discriminated on ViewDimension; the final None means the union
# itself is anonymous within the struct.
D3D10_SHADER_RESOURCE_VIEW_DESC1 = Struct("D3D10_SHADER_RESOURCE_VIEW_DESC1", [
    (DXGI_FORMAT, "Format"),
    (D3D10_SRV_DIMENSION1, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D10_1_SRV_DIMENSION_BUFFER", D3D10_BUFFER_SRV, "Buffer"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE1D", D3D10_TEX1D_SRV, "Texture1D"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE1DARRAY", D3D10_TEX1D_ARRAY_SRV, "Texture1DArray"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE2D", D3D10_TEX2D_SRV, "Texture2D"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE2DARRAY", D3D10_TEX2D_ARRAY_SRV, "Texture2DArray"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE2DMS", D3D10_TEX2DMS_SRV, "Texture2DMS"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE2DMSARRAY", D3D10_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        ("D3D10_1_SRV_DIMENSION_TEXTURE3D", D3D10_TEX3D_SRV, "Texture3D"),
        ("D3D10_1_SRV_DIMENSION_TEXTURECUBE", D3D10_TEXCUBE_SRV, "TextureCube"),
        ("D3D10_1_SRV_DIMENSION_TEXTURECUBEARRAY", D3D10_TEXCUBE_ARRAY_SRV1, "TextureCubeArray"),
    ]), None),
])

ID3D10ShaderResourceView1 = Interface("ID3D10ShaderResourceView1", ID3D10ShaderResourceView)
ID3D10ShaderResourceView1.methods += [
    StdMethod(Void, "GetDesc1", [Out(Pointer(D3D10_SHADER_RESOURCE_VIEW_DESC1), "pDesc")], sideeffects=False),
]

ID3D10Device1 = Interface("ID3D10Device1", ID3D10Device)
ID3D10Device1.methods += [
    StdMethod(HRESULT, "CreateShaderResourceView1", [(ObjPointer(ID3D10Resource), "pResource"), (Pointer(Const(D3D10_SHADER_RESOURCE_VIEW_DESC1)), "pDesc"), Out(Pointer(ObjPointer(ID3D10ShaderResourceView1)), "ppSRView")]),
    StdMethod(HRESULT, "CreateBlendState1", [(Pointer(Const(D3D10_BLEND_DESC1)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D10BlendState1)), "ppBlendState")]),
    StdMethod(D3D10_FEATURE_LEVEL1, "GetFeatureLevel", [], sideeffects=False),
]

# d3d10_1.dll entry points.
d3d10_1 = Module("d3d10_1")
d3d10_1.addFunctions([
    StdFunction(HRESULT, "D3D10CreateDevice1", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D10_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D10_CREATE_DEVICE_FLAG, "Flags"), (D3D10_FEATURE_LEVEL1, "HardwareLevel"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D10Device1)), "ppDevice")]),
    StdFunction(HRESULT, "D3D10CreateDeviceAndSwapChain1", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D10_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D10_CREATE_DEVICE_FLAG, "Flags"), (D3D10_FEATURE_LEVEL1, "HardwareLevel"), (UINT, "SDKVersion"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D10Device1)), "ppDevice")]),
])
d3d10_1.addInterfaces([
    ID3D10Debug,
    ID3D10InfoQueue,
    ID3D10Multithread,
    ID3D10SwitchToRef,
])
"""
Testing for Support Vector Machine module (sklearn.svm)

TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
import pytest
import re

from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse

from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.multiclass import OneVsRestClassifier

# mypy error: Module 'sklearn.svm' has no attribute '_libsvm'
from sklearn.svm import _libsvm  # type: ignore

# toy sample: linearly separable 2-class problem in 2D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]

# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
# shuffle samples so class labels are not grouped together
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]


def test_libsvm_parameters():
    # Test parameters on classes that make use of libsvm.
    clf = svm.SVC(kernel="linear").fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.support_vectors_, (X[1], X[3]))
    assert_array_equal(clf.intercept_, [0.0])
    assert_array_equal(clf.predict(X), Y)


def test_libsvm_iris():
    # Check consistency on dataset iris.
    # shuffle the dataset so that labels are not ordered
    for k in ("linear", "rbf"):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert np.mean(clf.predict(iris.data) == iris.target) > 0.9
        # coef_ is only exposed for linear kernels
        assert hasattr(clf, "coef_") == (k == "linear")

    assert_array_equal(clf.classes_, np.sort(clf.classes_))

    # check also the low-level API
    # We unpack the values to create a dictionary with some of the return values
    # from Libsvm's fit.
    (
        libsvm_support,
        libsvm_support_vectors,
        libsvm_n_class_SV,
        libsvm_sv_coef,
        libsvm_intercept,
        libsvm_probA,
        libsvm_probB,
        # libsvm_fit_status and libsvm_n_iter won't be used below.
        libsvm_fit_status,
        libsvm_n_iter,
    ) = _libsvm.fit(iris.data, iris.target.astype(np.float64))

    model_params = {
        "support": libsvm_support,
        "SV": libsvm_support_vectors,
        "nSV": libsvm_n_class_SV,
        "sv_coef": libsvm_sv_coef,
        "intercept": libsvm_intercept,
        "probA": libsvm_probA,
        "probB": libsvm_probB,
    }
    pred = _libsvm.predict(iris.data, **model_params)
    assert np.mean(pred == iris.target) > 0.95

    # We unpack the values to create a dictionary with some of the return values
    # from Libsvm's fit.
    (
        libsvm_support,
        libsvm_support_vectors,
        libsvm_n_class_SV,
        libsvm_sv_coef,
        libsvm_intercept,
        libsvm_probA,
        libsvm_probB,
        # libsvm_fit_status and libsvm_n_iter won't be used below.
        libsvm_fit_status,
        libsvm_n_iter,
    ) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear")

    model_params = {
        "support": libsvm_support,
        "SV": libsvm_support_vectors,
        "nSV": libsvm_n_class_SV,
        "sv_coef": libsvm_sv_coef,
        "intercept": libsvm_intercept,
        "probA": libsvm_probA,
        "probB": libsvm_probB,
    }
    pred = _libsvm.predict(iris.data, **model_params, kernel="linear")
    assert np.mean(pred == iris.target) > 0.95

    pred = _libsvm.cross_validation(
        iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
    )
    assert np.mean(pred == iris.target) > 0.95

    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = _libsvm.cross_validation(
        iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
    )
    assert_array_equal(pred, pred2)


def test_precomputed():
    # SVC with a precomputed kernel.
    # We test it with a toy dataset and with iris.
    clf = svm.SVC(kernel="precomputed")
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # predicting on a Gram matrix with the wrong shape must fail
    with pytest.raises(ValueError):
        clf.predict(KT.T)

    assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])

    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)

    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    def kfunc(x, y):
        return np.dot(x, y.T)

    clf = svm.SVC(kernel=kfunc)
    clf.fit(np.array(X), Y)
    pred = clf.predict(T)

    assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel="precomputed")
    clf2 = svm.SVC(kernel="linear")
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])

    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)

    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # NOTE(review): `pred` here is still the prediction of the previous
    # precomputed-kernel classifier, not of the freshly fitted callable-kernel
    # one — presumably the assertion should use clf.predict(iris.data); confirm
    # against upstream before changing.
    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)


def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    # NOTE(review): svm.LinearSVR(C=10.0) appears twice in this tuple, so the
    # same configuration is fitted twice — looks redundant; confirm intent.
    for clf in (
        svm.NuSVR(kernel="linear", nu=0.4, C=1.0),
        svm.NuSVR(kernel="linear", nu=0.4, C=10.0),
        svm.SVR(kernel="linear", C=10.0),
        svm.LinearSVR(C=10.0),
        svm.LinearSVR(C=10.0),
    ):
        clf.fit(diabetes.data, diabetes.target)
        assert clf.score(diabetes.data, diabetes.target) > 0.02

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))


def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)


def test_linearsvr_fit_sampleweight():
    # check correct result when sample_weight is 1
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
        diabetes.data, diabetes.target, sample_weight=unit_weight
    )
    score1 = lsvr.score(diabetes.data, diabetes.target)

    lsvr_no_weight = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
        diabetes.data, diabetes.target
    )
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)

    assert_allclose(
        np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001
    )
    assert_almost_equal(score1, score2, 2)

    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
        diabetes.data, diabetes.target, sample_weight=random_weight
    )
    score3 = lsvr_unflat.score(
        diabetes.data, diabetes.target, sample_weight=random_weight
    )

    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)

    assert_almost_equal(score3, score4, 2)


def test_svr_errors():
    X = [[0.0], [1.0]]
    y = [0.0, 0.5]

    # Bad kernel
    clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
    clf.fit(X, y)
    with pytest.raises(ValueError):
        clf.predict(X)
def test_oneclass():
    # Test OneClassSVM: default model on the module-level toy data X/T.
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)

    assert_array_equal(pred, [1, -1, -1])
    assert pred.dtype == np.dtype("intp")
    assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3)
    assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3)
    # coef_ is undefined for the (default rbf) non-linear kernel
    with pytest.raises(AttributeError):
        (lambda: clf.coef_)()


def test_oneclass_decision_function():
    # Test OneClassSVM decision function
    clf = svm.OneClassSVM()
    rnd = check_random_state(2)

    # Generate train data
    X = 0.3 * rnd.randn(100, 2)
    X_train = np.r_[X + 2, X - 2]

    # Generate some regular novel observations
    X = 0.3 * rnd.randn(20, 2)
    X_test = np.r_[X + 2, X - 2]
    # Generate some abnormal novel observations
    X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))

    # fit the model
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X_train)

    # predict things
    y_pred_test = clf.predict(X_test)
    assert np.mean(y_pred_test == 1) > 0.9
    y_pred_outliers = clf.predict(X_outliers)
    assert np.mean(y_pred_outliers == -1) > 0.9
    # sign of decision_function must agree with the hard prediction
    dec_func_test = clf.decision_function(X_test)
    assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
    dec_func_outliers = clf.decision_function(X_outliers)
    assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)


def test_oneclass_score_samples():
    # score_samples is decision_function shifted by offset_
    X_train = [[1, 1], [1, 2], [2, 1]]
    clf = svm.OneClassSVM(gamma=1).fit(X_train)
    assert_array_equal(
        clf.score_samples([[2.0, 2.0]]),
        clf.decision_function([[2.0, 2.0]]) + clf.offset_,
    )


# TODO: Remove in v1.2
def test_oneclass_fit_params_is_deprecated():
    clf = svm.OneClassSVM()
    params = {
        "unused_param": "",
        "extra_param": None,
    }
    msg = (
        "Passing additional keyword parameters has no effect and is deprecated "
        "in 1.0. An error will be raised from 1.2 and beyond. The ignored "
        f"keyword parameter(s) are: {params.keys()}."
    )
    with pytest.warns(FutureWarning, match=re.escape(msg)):
        clf.fit(X, **params)


def test_tweak_params():
    # Make sure some tweaking of parameters works.
    # We change clf.dual_coef_ at run time and expect .predict() to change
    # accordingly. Notice that this is not trivial since it involves a lot
    # of C/Python copying in the libsvm bindings.
    # The success of this test ensures that the mapping between libsvm and
    # the python classifier is complete.
    clf = svm.SVC(kernel="linear", C=1.0)
    clf.fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
    assert_array_equal(clf.predict([[-0.1, -0.1]]), [1])
    clf._dual_coef_ = np.array([[0.0, 1.0]])
    assert_array_equal(clf.predict([[-0.1, -0.1]]), [2])


def test_probability():
    # Predict probabilities using SVC
    # This uses cross validation, so we use a slightly bigger testing set.

    for clf in (
        svm.SVC(probability=True, random_state=0, C=1.0),
        svm.NuSVC(probability=True, random_state=0),
    ):
        clf.fit(iris.data, iris.target)

        # probabilities sum to one per sample
        prob_predict = clf.predict_proba(iris.data)
        assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        # argmax of probabilities mostly agrees with predict
        assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9

        # predict_log_proba is the log of predict_proba
        assert_almost_equal(
            clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8
        )


def test_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit(
        iris.data, iris.target
    )

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_

    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)]
    )
    expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary: reproduce decision_function via the dual coefficients
    clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo")
    clf.fit(X, Y)
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))


@pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC))
def test_decision_function_shape(SVM):
    # check that decision_function_shape='ovr' or 'ovo' gives
    # correct shape and is consistent with predict
    clf = SVM(kernel="linear", decision_function_shape="ovr").fit(
        iris.data, iris.target
    )
    dec = clf.decision_function(iris.data)
    # ovr: one column per class (iris has 3 classes)
    assert dec.shape == (len(iris.data), 3)
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = SVM(kernel="linear", decision_function_shape="ovr").fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert dec.shape == (len(X_test), 5)
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check shape of ovo decision function: n_classes * (n_classes - 1) / 2
    clf = SVM(kernel="linear", decision_function_shape="ovo").fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert dec.shape == (len(X_train), 10)

    with pytest.raises(ValueError, match="must be either 'ovr' or 'ovo'"):
        SVM(decision_function_shape="bad").fit(X_train, y_train)


def test_svr_predict():
    # Test SVR's decision_function
    # Sanity check, test that predict implemented in python
    # returns the same as the one in libsvm
    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel="linear", C=0.1).fit(X, y)

    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y)

    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())


def test_weight():
    # Test class weights
    clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)

    X_, y_ = make_classification(
        n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2
    )

    for clf in (
        linear_model.LogisticRegression(),
        svm.LinearSVC(random_state=0),
        svm.SVC(),
    ):
        clf.set_params(class_weight={0: 0.1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert f1_score(y_[100:], y_pred) > 0.3


@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()])
def test_svm_classifier_sided_sample_weight(estimator):
    # fit a linear SVM and check that giving more weight to opposed samples
    # in the space will flip the decision toward these samples.
    X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
    estimator.set_params(kernel="linear")

    # check that with unit weights, a sample is supposed to be predicted on
    # the boundary
    sample_weight = [1] * 6
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.decision_function([[-1.0, 1.0]])
    assert y_pred == pytest.approx(0)

    # give more weights to opposed samples
    sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.decision_function([[-1.0, 1.0]])
    assert y_pred < 0

    sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.decision_function([[-1.0, 1.0]])
    assert y_pred > 0


@pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)])
def test_svm_regressor_sided_sample_weight(estimator):
    # similar test to test_svm_classifier_sided_sample_weight but for
    # SVM regressors
    X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
    estimator.set_params(kernel="linear")

    # check that with unit weights, a sample is supposed to be predicted on
    # the boundary
    sample_weight = [1] * 6
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.predict([[-1.0, 1.0]])
    assert y_pred == pytest.approx(1.5)

    # give more weights to opposed samples
    sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.predict([[-1.0, 1.0]])
    assert y_pred < 1.5

    sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
    estimator.fit(X, Y, sample_weight=sample_weight)
    y_pred = estimator.predict([[-1.0, 1.0]])
    assert y_pred > 1.5


def test_svm_equivalence_sample_weight_C():
    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_allclose(dual_coef_no_weight, clf.dual_coef_)


@pytest.mark.parametrize(
    "Estimator, err_msg",
    [
        (svm.SVC, "Invalid input - all samples have zero or negative weights."),
        (svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"),
        (svm.SVR, "Invalid input - all samples have zero or negative weights."),
        (svm.NuSVR, "Invalid input - all samples have zero or negative weights."),
        (svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."),
    ],
    ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"],
)
@pytest.mark.parametrize(
    "sample_weight",
    [[0] * len(Y), [-0.3] * len(Y)],
    ids=["weights-are-zero", "weights-are-negative"],
)
def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight):
    # all-zero / all-negative sample weights leave no usable samples
    est = Estimator(kernel="linear")
    with pytest.raises(ValueError, match=err_msg):
        est.fit(X, Y, sample_weight=sample_weight)


@pytest.mark.parametrize(
    "Classifier, err_msg",
    [
        (
            svm.SVC,
            "Invalid input - all samples with positive weights have the same label",
        ),
        (svm.NuSVC, "specified nu is infeasible"),
    ],
    ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
    "sample_weight",
    [[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]],
    ids=["mask-label-1", "mask-label-2"],
)
def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight):
    # masking out one whole class via weights must raise
    clf = Classifier(kernel="linear")
    with pytest.raises(ValueError, match=err_msg):
        clf.fit(X, Y, sample_weight=sample_weight)


@pytest.mark.parametrize(
    "Classifier, model",
    [
        (svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}),
        (svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}),
    ],
    ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
    "sample_weight, mask_side",
    [([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")],
    ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weights_svc_leave_two_labels(
    Classifier, model, sample_weight, mask_side
):
    # partially masking a class keeps the fit valid; check expected coef_
    clf = Classifier(kernel="linear")
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)


@pytest.mark.parametrize(
    "Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"]
)
@pytest.mark.parametrize(
    "sample_weight",
    [[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
    ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weight_equal_coeffs(Estimator, sample_weight):
    # model generates equal coefficients
    est = Estimator(kernel="linear")
    est.fit(X, Y, sample_weight=sample_weight)
    coef = np.abs(est.coef_).ravel()
    assert coef[0] == pytest.approx(coef[1], rel=1e-3)


@ignore_warnings(category=UndefinedMetricWarning)
def test_auto_weight():
    # Test class weights for imbalanced data
    from sklearn.linear_model import LogisticRegression

    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test:
    # class_weight="balanced"
    # used to work only when the labels where a range [0..K).
    from sklearn.utils import compute_class_weight

    X, y = iris.data[:, :2], iris.target + 1
    # drop every other sample of the classes above 2 to unbalance the data
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])

    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced])
    # the under-represented class gets the largest weight
    assert np.argmax(class_weights) == 2

    for clf in (
        svm.SVC(kernel="linear"),
        svm.LinearSVC(random_state=0),
        LogisticRegression(),
    ):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight="balanced")
        y_pred_balanced = clf.fit(
            X[unbalanced],
            y[unbalanced],
        ).predict(X)
        assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score(
            y, y_pred_balanced, average="macro"
        )


def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    with pytest.raises(ValueError):
        svm.SVC(C=-1).fit(X, Y)

    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    with pytest.raises(ValueError):
        clf.fit(X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    with pytest.raises(ValueError):
        clf.fit(X, Y2)

    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert not Xf.flags["C_CONTIGUOUS"]
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert not yf.flags["F_CONTIGUOUS"]
        assert not yf.flags["C_CONTIGUOUS"]
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels: raw X is not a square Gram matrix
    clf = svm.SVC(kernel="precomputed")
    with pytest.raises(ValueError):
        clf.fit(X, Y)

    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    with pytest.raises(ValueError):
        clf.predict(sparse.lil_matrix(X))

    # wrong number of features at predict time
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    with pytest.raises(ValueError):
        clf.predict(X)

    clf = svm.SVC()
    clf.fit(X, Y)
    with pytest.raises(ValueError):
        clf.predict(Xt)


def test_svc_nonfinite_params():
    # Check SVC throws ValueError when dealing with non-finite parameter values
    rng = np.random.RandomState(0)
    n_samples = 10
    fmax = np.finfo(np.float64).max
    X = fmax * rng.uniform(size=(n_samples, 2))
    y = rng.randint(0, 2, size=n_samples)

    clf = svm.SVC()
    msg = "The dual coefficients or intercepts are not finite"
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)


@pytest.mark.parametrize(
    "Estimator, data",
    [
        (svm.SVC, datasets.load_iris(return_X_y=True)),
        (svm.NuSVC, datasets.load_iris(return_X_y=True)),
        (svm.SVR, datasets.load_diabetes(return_X_y=True)),
        (svm.NuSVR, datasets.load_diabetes(return_X_y=True)),
        (svm.OneClassSVM, datasets.load_iris(return_X_y=True)),
    ],
)
@pytest.mark.parametrize(
    "gamma, err_msg",
    [
        (
            "auto_deprecated",
            "When 'gamma' is a string, it should be either 'scale' or 'auto'",
        ),
        (
            -1,
            "gamma value must be > 0; -1 is invalid. Use"
            " a positive number or use 'auto' to set gamma to a"
            " value of 1 / n_features.",
        ),
        (
            0.0,
            "gamma value must be > 0; 0.0 is invalid. Use"
            " a positive number or use 'auto' to set gamma to a"
            " value of 1 / n_features.",
        ),
        (
            np.array([1.0, 4.0]),
            "The gamma value should be set to 'scale',"
            f" 'auto' or a positive float value. {np.array([1.0, 4.0])!r}"
            " is not a valid option",
        ),
        (
            [],
            "The gamma value should be set to 'scale', 'auto' or a positive"
            f" float value. {[]} is not a valid option",
        ),
        (
            {},
            "The gamma value should be set to 'scale', 'auto' or a positive"
            " float value. {} is not a valid option",
        ),
    ],
)
def test_svm_gamma_error(Estimator, data, gamma, err_msg):
    # every invalid gamma must raise with the exact documented message
    X, y = data
    est = Estimator(gamma=gamma)
    with pytest.raises(ValueError, match=(re.escape(err_msg))):
        est.fit(X, y)


def test_unicode_kernel():
    # Test that a unicode kernel name does not cause a TypeError
    clf = svm.SVC(kernel="linear", probability=True)
    clf.fit(X, Y)
    clf.predict_proba(T)
    _libsvm.cross_validation(
        iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
    )


def test_sparse_precomputed():
    # sparse Gram matrices are not supported with kernel="precomputed"
    clf = svm.SVC(kernel="precomputed")
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    with pytest.raises(TypeError, match="Sparse precomputed"):
        clf.fit(sparse_gram, [0, 1])


def test_sparse_fit_support_vectors_empty():
    # Regression test for #14893
    X_train = sparse.csr_matrix(
        [[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]
    )
    y_train = np.array([0.04, 0.04, 0.10, 0.16])
    model = svm.SVR(kernel="linear")
    model.fit(X_train, y_train)
    assert not model.support_vectors_.data.size
    assert not model.dual_coef_.data.size


def test_linearsvc_parameters():
    # Test possible parameter combinations in LinearSVC
    # Generate list of possible parameter combinations
    losses = ["hinge", "squared_hinge", "logistic_regression", "foo"]
    penalties, duals = ["l1", "l2", "bar"], [True, False]

    X, y = make_classification(n_samples=5, n_features=5)

    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        if (
            (loss, penalty) == ("hinge", "l1")
            or (loss, penalty, dual) == ("hinge", "l2", False)
            or (penalty, dual) == ("l1", True)
            or loss == "foo"
            or penalty == "bar"
        ):
            with pytest.raises(
                ValueError,
                match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s"
                % (penalty, loss, dual),
            ):
                clf.fit(X, y)
        else:
            clf.fit(X, y)

    # Incorrect loss value - test if explicit error message is raised
    with pytest.raises(ValueError, match=".*loss='l3' is not supported.*"):
        svm.LinearSVC(loss="l3").fit(X, y)


def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check if Upper case notation raises error at _fit_liblinear
    # which is called by fit
    X, y = [[0.0], [1.0]], [0, 1]

    msg = "loss='SQuared_hinge' is not supported"
    with pytest.raises(ValueError, match=msg):
        svm.LinearSVC(loss="SQuared_hinge").fit(X, y)

    msg = "The combination of penalty='L2' and loss='squared_hinge' is not supported"
    with pytest.raises(ValueError, match=msg):
        svm.LinearSVC(penalty="L2").fit(X, y)


def test_linearsvc():
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert clf.fit_intercept

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(
        penalty="l1", loss="squared_hinge", dual=False, random_state=0
    ).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty="l2", dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty="l2", loss="hinge", dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(int) + 1
    assert_array_equal(res, true_result)


def test_linearsvc_crammer_singer():
    # Test LinearSVC with crammer_singer multi-class svm
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class="crammer_singer", random_state=0)
    cs_clf.fit(iris.data, iris.target)

    # similar prediction for ovr and crammer-singer:
    assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9

    # classifiers shouldn't be the same
    assert (ovr_clf.coef_ != cs_clf.coef_).all()

    # test decision function
    assert_array_equal(
        cs_clf.predict(iris.data),
        np.argmax(cs_clf.decision_function(iris.data), axis=1),
    )
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))


def test_linearsvc_fit_sampleweight():
    # check correct result when sample_weight is 1
    n_samples = len(X)
    unit_weight = np.ones(n_samples)
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf_unitweight = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
        X, Y, sample_weight=unit_weight
    )

    # check if same as sample_weight=None
    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
    assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)

    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvc_unflat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
        X, Y, sample_weight=random_weight
    )
    pred1 = lsvc_unflat.predict(T)

    X_flat = np.repeat(X, random_weight, axis=0)
    y_flat = np.repeat(Y, random_weight, axis=0)
    lsvc_flat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
        X_flat, y_flat
    )
    pred2 = lsvc_flat.predict(T)

    assert_array_equal(pred1, pred2)
    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)


def test_crammer_singer_binary():
    # Test Crammer-Singer formulation in the binary case
    X, y = make_classification(n_classes=2, random_state=0)

    for fit_intercept in (True, False):
        acc = (
            svm.LinearSVC(
                fit_intercept=fit_intercept,
                multi_class="crammer_singer",
                random_state=0,
            )
            .fit(X, y)
            .score(X, y)
        )
        assert acc > 0.9


def test_linearsvc_iris():
    # Test that LinearSVC gives plausible predictions on the iris dataset
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    # string labels are preserved in classes_
    assert set(clf.classes_) == set(iris.target_names)
    assert np.mean(clf.predict(iris.data) == target) > 0.8

    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))


def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    # Test that dense liblinear honours intercept_scaling param
    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(
        fit_intercept=True,
        penalty="l1",
        loss="squared_hinge",
        dual=False,
        C=4,
        tol=1e-7,
        random_state=0,
    )
    assert clf.intercept_scaling == 1, clf.intercept_scaling
    assert clf.fit_intercept

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert intercept1 < -1

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)


def test_liblinear_set_coef():
    # setting coef_/intercept_ by hand must not change decision_function
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(iris.data)
    assert_array_almost_equal(values, values2)

    # binary-class case
    X = [[2, 1], [3, 1], [1, 3], [2, 3]]
    y = [0, 0, 1, 1]

    clf = svm.LinearSVC().fit(X, y)
    values = clf.decision_function(X)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(X)
    assert_array_equal(values, values2)


def test_immutable_coef_property():
    # Check that primal coef modification are not silently ignored
    svms = [
        svm.SVC(kernel="linear").fit(iris.data, iris.target),
        svm.NuSVC(kernel="linear").fit(iris.data, iris.target),
        svm.SVR(kernel="linear").fit(iris.data, iris.target),
        svm.NuSVR(kernel="linear").fit(iris.data, iris.target),
        svm.OneClassSVM(kernel="linear").fit(iris.data),
    ]
    for clf in svms:
        # coef_ is a computed property: rebinding raises, and the returned
        # array must not be writable in place either
        with pytest.raises(AttributeError):
            clf.__setattr__("coef_", np.arange(3))
        with pytest.raises((RuntimeError, ValueError)):
            clf.coef_.__setitem__((0, 0), 0)


def test_linearsvc_verbose():
    # stdout: redirect
    import os

    stdout = os.dup(1)  # save original stdout
    os.dup2(os.pipe()[1], 1)  # replace it

    # actual call
    clf = svm.LinearSVC(verbose=1)
    clf.fit(X, Y)

    # stdout: restore
    os.dup2(stdout, 1)  # restore original stdout


def test_svc_clone_with_callable_kernel():
    # create SVM with callable linear kernel, check that results are the same
    # as with built-in linear kernel
    svm_callable = svm.SVC(
        kernel=lambda x, y: np.dot(x, y.T),
        probability=True,
        random_state=0,
        decision_function_shape="ovr",
    )
    # clone for checking clonability with lambda functions..
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)

    svm_builtin = svm.SVC(
        kernel="linear", probability=True, random_state=0, decision_function_shape="ovr"
    )
    svm_builtin.fit(iris.data, iris.target)

    # callable linear kernel and built-in "linear" must agree
    assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data))

    assert_array_almost_equal(
        svm_cloned.predict_proba(iris.data),
        svm_builtin.predict_proba(iris.data),
        decimal=4,
    )
    assert_array_almost_equal(
        svm_cloned.decision_function(iris.data),
        svm_builtin.decision_function(iris.data),
    )


def test_svc_bad_kernel():
    # a callable kernel returning the wrong shape must raise
    svc = svm.SVC(kernel=lambda x, y: x)
    with pytest.raises(ValueError):
        svc.fit(X, Y)


def test_libsvm_convergence_warnings():
    a = svm.SVC(
        kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=2
    )
    warning_msg = (
        r"Solver terminated early \(max_iter=2\). Consider pre-processing "
        r"your data with StandardScaler or MinMaxScaler."
    )
    with pytest.warns(ConvergenceWarning, match=warning_msg):
        a.fit(np.array(X), Y)
    assert np.all(a.n_iter_ == 2)


def test_unfitted():
    X = "foo!"
    # input validation not required when SVM not fitted
    clf = svm.SVC()
    with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"):
        clf.predict(X)

    clf = svm.NuSVR()
    with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"):
        clf.predict(X)


# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    # fitting twice with the same random_state yields the same probabilities
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)


def test_linear_svm_convergence_warnings():
    # Test that warnings are raised if model does not converge
    lsvc = svm.LinearSVC(random_state=0, max_iter=2)
    warning_msg = "Liblinear failed to converge, increase the number of iterations."
    with pytest.warns(ConvergenceWarning, match=warning_msg):
        lsvc.fit(X, Y)
    # Check that we have an n_iter_ attribute with int type as opposed to a
    # numpy array or an np.int32 so as to match the docstring.
    assert isinstance(lsvc.n_iter_, int)
    assert lsvc.n_iter_ == 2

    lsvr = svm.LinearSVR(random_state=0, max_iter=2)
    with pytest.warns(ConvergenceWarning, match=warning_msg):
        lsvr.fit(iris.data, iris.target)
    assert isinstance(lsvr.n_iter_, int)
    assert lsvr.n_iter_ == 2


def test_svr_coef_sign():
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel="linear"), svm.NuSVR(kernel="linear"), svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(
            svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_
        )


def test_linear_svc_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = (
            "Intercept scaling is %r but needs to be greater than 0."
            " To disable fitting an intercept,"
            " set fit_intercept=False." % lsvc.intercept_scaling
        )
        with pytest.raises(ValueError, match=msg):
            lsvc.fit(X, Y)


def test_lsvc_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    lsvc = svm.LinearSVC(fit_intercept=False)
    lsvc.fit(X, Y)
    assert lsvc.intercept_ == 0.0


def test_hasattr_predict_proba():
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(probability=True)
    assert hasattr(G, "predict_proba")
    G.fit(iris.data, iris.target)
    assert hasattr(G, "predict_proba")

    G = svm.SVC(probability=False)
    assert not hasattr(G, "predict_proba")
    G.fit(iris.data, iris.target)
    assert not hasattr(G, "predict_proba")

    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert hasattr(G, "predict_proba")
    msg = "predict_proba is not available when fitted with probability=False"

    with pytest.raises(NotFittedError, match=msg):
        G.predict_proba(iris.data)


def test_decision_function_shape_two_class():
    for n_classes in [2, 3]:
        X, y = make_blobs(centers=n_classes, random_state=0)
        for estimator in [svm.SVC, svm.NuSVC]:
            clf = OneVsRestClassifier(estimator(decision_function_shape="ovr")).fit(
                X, y
            )
            assert len(clf.predict(X)) == len(y)


def test_ovr_decision_function():
    # One point from each quadrant represents one class
    X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    y_train = [0, 1, 2, 3]

    # First point is closer to the decision boundaries than the second point
    base_points = np.array([[5, 5], [10, 10]])

    # For all the quadrants (classes)
    X_test = np.vstack(
        (
            base_points * [1, 1],  # Q1
            base_points * [-1, 1],  # Q2
            base_points * [-1, -1],  # Q3
            base_points * [1, -1],  # Q4
        )
    )

    y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2

    clf = svm.SVC(kernel="linear", decision_function_shape="ovr")
    clf.fit(X_train, y_train)

    y_pred = clf.predict(X_test)

    # Test if the prediction is the same as y
    assert_array_equal(y_pred, y_test)

    deci_val = clf.decision_function(X_test)

    # Assert that the predicted class has the maximum value
    assert_array_equal(np.argmax(deci_val, axis=1), y_pred)

    # Get decision value at test points for the predicted class
    pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))

    # Assert pred_class_deci_val > 0 here
    assert np.min(pred_class_deci_val) > 0.0

    # Test if the first point has lower decision value on every quadrant
    # compared to the second point
    assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])


@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_invalid_break_ties_param(SVCClass):
    # break_ties=True is incompatible with decision_function_shape="ovo"
    X, y = make_blobs(random_state=42)

    svm = SVCClass(
        kernel="linear", decision_function_shape="ovo", break_ties=True, random_state=42
    ).fit(X, y)

    with pytest.raises(ValueError, match="break_ties must be False"):
        svm.predict(y)


@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_ovr_tie_breaking(SVCClass):
    """Test if predict breaks ties in OVR mode.

    Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
    """
    X, y = make_blobs(random_state=0, n_samples=20, n_features=2)

    xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
    ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
    xx, yy = np.meshgrid(xs, ys)

    common_params = dict(
        kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr"
    )
    svm = SVCClass(
        break_ties=False,
        **common_params,
    ).fit(X, y)
    pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
    dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
    # without tie breaking, predict and argmax of decision values disagree
    assert not np.all(pred == np.argmax(dv, axis=1))

    svm = SVCClass(
        break_ties=True,
        **common_params,
    ).fit(X, y)
    pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
    dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
    assert np.all(pred == np.argmax(dv, axis=1))


def test_gamma_auto():
    # no deprecation/other warning with an explicit or irrelevant gamma
    X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1]

    with pytest.warns(None) as record:
        svm.SVC(kernel="linear").fit(X, y)
    assert not [w.message for w in record]

    with pytest.warns(None) as
assert_allclose(X_est_no_weight, X_est_with_weight) def test_n_support_oneclass_svr(): # Make n_support is correct for oneclass and SVR (used to be # non-initialized) # this is a non regression test for issue #14774 X = np.array([[0], [0.44], [0.45], [0.46], [1]]) clf = svm.OneClassSVM() assert not hasattr(clf, "n_support_") clf.fit(X) assert clf.n_support_ == clf.support_vectors_.shape[0] assert clf.n_support_.size == 1 assert clf.n_support_ == 3 y = np.arange(X.shape[0]) reg = svm.SVR().fit(X, y) assert reg.n_support_ == reg.support_vectors_.shape[0] assert reg.n_support_.size == 1 assert reg.n_support_ == 4 @pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR]) def test_custom_kernel_not_array_input(Estimator): """Test using a custom kernel that is not fed with array-like for floats""" data = ["A A", "A", "B", "B B", "A B"] X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]]) # count encoding y = np.array([1, 1, 2, 2, 1]) def string_kernel(X1, X2): assert isinstance(X1[0], str) n_samples1 = _num_samples(X1) n_samples2 = _num_samples(X2) K = np.zeros((n_samples1, n_samples2)) for ii in range(n_samples1): for jj in range(ii, n_samples2): K[ii, jj] = X1[ii].count("A") * X2[jj].count("A") K[ii, jj] += X1[ii].count("B") * X2[jj].count("B") K[jj, ii] = K[ii, jj] return K K = string_kernel(data, data) assert_array_equal(np.dot(X, X.T), K) svc1 = Estimator(kernel=string_kernel).fit(data, y) svc2 = Estimator(kernel="linear").fit(X, y) svc3 = Estimator(kernel="precomputed").fit(K, y) assert svc1.score(data, y) == svc3.score(K, y) assert svc1.score(data, y) == svc2.score(X, y) if hasattr(svc1, "decision_function"): # classifier assert_allclose(svc1.decision_function(data), svc2.decision_function(X)) assert_allclose(svc1.decision_function(data), svc3.decision_function(K)) assert_array_equal(svc1.predict(data), svc2.predict(X)) assert_array_equal(svc1.predict(data), svc3.predict(K)) else: # regressor assert_allclose(svc1.predict(data), svc2.predict(X)) 
assert_allclose(svc1.predict(data), svc3.predict(K)) def test_svc_raises_error_internal_representation(): """Check that SVC raises error when internal representation is altered. Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975 """ clf = svm.SVC(kernel="linear").fit(X, Y) clf._n_support[0] = 1000000 msg = "The internal representation of SVC was altered" with pytest.raises(ValueError, match=msg): clf.predict(X) @pytest.mark.parametrize( "estimator, expected_n_iter_type", [ (svm.SVC, np.ndarray), (svm.NuSVC, np.ndarray), (svm.SVR, int), (svm.NuSVR, int), (svm.OneClassSVM, int), ], ) @pytest.mark.parametrize( "dataset", [ make_classification(n_classes=2, n_informative=2, random_state=0), make_classification(n_classes=3, n_informative=3, random_state=0), make_classification(n_classes=4, n_informative=4, random_state=0), ], ) def test_n_iter_libsvm(estimator, expected_n_iter_type, dataset): # Check that the type of n_iter_ is correct for the classes that inherit # from BaseSVC. # Note that for SVC, and NuSVC this is an ndarray; while for SVR, NuSVR, and # OneClassSVM, it is an int. # For SVC and NuSVC also check the shape of n_iter_. X, y = dataset n_iter = estimator(kernel="linear").fit(X, y).n_iter_ assert type(n_iter) == expected_n_iter_type if estimator in [svm.SVC, svm.NuSVC]: n_classes = len(np.unique(y)) assert n_iter.shape == (n_classes * (n_classes - 1) // 2,)
#!/usr/bin/python -tt # # DESCRIPTION: # Collect everything that can be collected out of jstat (shells out 5 times) # and spits to STDOUT in a graphite ready format, thus meant to be used with a # graphite metric tcp handler. # Since it shells out to jps(1) you will need the user running the sensu client # executing this script to be able to run jps as the same user running the JVM # you are trying to get stats from. # In addition it will also need to be able to run jstat(2) against the JVM # This can be all achieved by allowing the script to be ran as the same user # running the JVM, for instance by prepending "sudo -u <jvm_process_owner>" # in the command check definition (with the proper sudoers config to allow this # with no password being asked) # # The graphite node is composed of an optional root node (defaults to 'metrics') # the specified FQDN "reversed" ('foo.bar.com' becomes 'com.bar.foo') and an # optional scheme (defaults to 'jstat') # # (1) http://docs.oracle.com/javase/8/docs/technotes/tools/share/jps.html # (2) http://docs.oracle.com/javase/8/docs/technotes/tools/share/jstat.html # # OUTPUT: # Graphite plain-text format (name value timestamp\n) # # DEPENDENCIES: # Python 2.7 (untested on python 3 but should work fine) # Java 8 # # # Released under the same terms as Sensu (the MIT license); see LICENSE # for details. # #RED import logging import logging.handlers import optparse import sys import time """ Python 2.6 support for check_output: http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5 """ try: from subprocess import STDOUT, check_output, CalledProcessError except ImportError: # pragma: no cover # python 2.6 doesn't include check_output # monkey patch it in! 
import subprocess STDOUT = subprocess.STDOUT def check_output(*popenargs, **kwargs): if 'stdout' in kwargs: # pragma: no cover raise ValueError('stdout argument not allowed, ' 'it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, _ = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output subprocess.check_output = check_output # overwrite CalledProcessError due to `output` # keyword not being available (in 2.6) class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % ( self.cmd, self.returncode) subprocess.CalledProcessError = CalledProcessError class JstatMetricsToGraphiteFormat(object): '''Prints jstat metrics to stdout in graphite format Shells out to run jstat using the JVM id found via jps (also shelled out) and passed argument to print to STDOUT (for use with sensu) the metrics value. Jstat column titles are replaced with more explanatory names. 
Requires to be ran as a user that can get the JVM id via jps and run jstat on that JVM''' def main(self): # Setting up logging to syslog try: logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(pathname)s: %(message)s") handler = logging.handlers.SysLogHandler(address = '/dev/log') handler.setFormatter(formatter) logger.addHandler(handler) except Exception: # booting is more important than logging logging.critical("Failed to configure syslog handler") parser = optparse.OptionParser() parser.add_option('-g', '--graphite-base', default = 'metrics', dest = 'graphite_base', help = 'The base graphite node', metavar = 'NODE') parser.add_option('-D', '--debug', action = 'store_true', default = False, dest = 'debug', help = 'Debug output (NOISY!)') parser.add_option('-H', '--host', default = None, dest = 'hostname', help = 'The name of the host to run jstat on', metavar = 'HOST') parser.add_option('-j', '--java-name', default = None, dest = 'java_app_name', help = 'The name of the Java app to call jstat on', metavar = 'JAVANAME') parser.add_option('-s', '--scheme', default = 'jstat', dest = 'service', help = 'Metric naming scheme, text to prepend to metric', metavar = 'SERVICE') (options, args) = parser.parse_args() if not options.java_app_name: parser.error('A Java app name is required') if not options.hostname: parser.error('A host name is required') # Replace jstat colums titles with more explicit ones # Stats coming from -gc metric_maps_gc = { "S0U": "survivor_space_0_utilization_KB", "S1U": "survivor_space_1_utilization_KB", "EC": "current_eden_space_capacity_KB", "EU": "eden_space_utilization_KB", "OC": "current_old_space_capacity_KB", "OU": "old_space_utilization_KB", "MC": "metaspace_capacity_KB", "MU": "metacspace_utilization_KB", "CCSC": "compressed_class_space_capacity_KB", "CCSU": "compressed_class_space_used_KB", "YGC": "number_of_young_generation_GC_events", "YGCT": "young_generation_garbage_collection_time", 
"FGC": "number_of_stop_the_world_events", "FGCT": "full_garbage_collection_time", "GCT": "total_garbage_collection_time" } # Stats coming from -gccapacity metric_maps_gccapacity = { "NGCMN": "minimum_size_of_new_area", "NGCMX": "maximum_size_of_new_area", "NGC": "current_size_of_new_area", "OGCMN": "minimum_size_of_old_area", "OGCMX": "maximum_size_of_old_area", "OGC": "current_size_of_old_area", "MCMN": "minimum_metaspace_capacity", "MCMX": "maximum_metaspace_capacity", "MC": "metaspace_capacity", "CCSMN": "compressed_class_space_minimum_capacity", "CCSMX": "compressed_class_space_maximum_capacity", "CCSC": "compressed_class_space_capacity" } # Stats coming from -gcnew metric_maps_gcnew = { "TT" : "tenuring_threshold", "MTT": "maximum_tenuring_threshold", "DSS": "adequate_size_of_survivor" } # Stats coming from -compiler metric_maps_compiler = { "Compiled": "compilation_tasks_performed", "Failed": "compilation_tasks_failed", "Invalid": "compilation_tasks_invalidated", "Time": "time_spent_on_compilation_tasks" } # Stats coming from -class ## Note that since "Bytes" appears twice in jstat -class output we need ## to differentiate them by colum number metric_maps_class = { "Loaded": "loaded_classes", "Bytes_column2": "loaded_KB", "Unloaded": "unloaded_classes", "Bytes_column4": "unloaded_KB", "Time": "time_spent_on_class_load_unload" } def get_jstat_metrics(jstat_option, lvmid, metric_maps): '''Runs jstat with provided option on provided host, returns mapped stats''' def is_number(s): '''returns true if string is a number''' try: float(s) return True except ValueError: pass try: import unicodedata unicodedata.numeric(s) return True except (TypeError, ValueError): pass return False # Get stats from jstat stdout try : jstat_gc_out = check_output(["jstat", jstat_option, lvmid]) except Exception as e: if options.debug: print e sys.exit(1) logger.critical(e) sys.exit(1) values_all = jstat_gc_out.split("\n")[1].split() # Remove non number strings values = [ jstat_val for 
jstat_val in values_all if is_number(jstat_val) ] # Transform float strings to integers values = map(int, map(float, values)) # Change stats titles to long names titles = jstat_gc_out.split("\n")[0].split() # Deal with -class special "double Bytes" output if jstat_option == "-class": titles[2] = "Bytes_column2" titles[4] = "Bytes_column4" return dict([(metric_maps[title], values[position]) for position, title in enumerate(titles) if title in metric_maps]) # Get lvmid (JVM id) try : jps_out = check_output(["jps", "-v"]) except Exception as e: if options.debug: print e sys.exit(1) logger.critical(e) sys.exit(1) lvmid = False for line in jps_out.split("\n"): if options.java_app_name in line: lvmid = line.split()[0] if not lvmid: if options.debug: print "Could not get an LVM id" sys.exit(1) logger.critical("Could not get an LVM id") sys.exit(1) # Get stats from -gc gc_stats = get_jstat_metrics("-gc", lvmid, metric_maps_gc) if options.debug: print gc_stats # Get stats from -gccapacity gccapacity_stats = get_jstat_metrics("-gccapacity", lvmid, metric_maps_gccapacity) if options.debug: print gccapacity_stats # Get stats from -gcnew gcnew_stats = get_jstat_metrics("-gcnew", lvmid, metric_maps_gcnew) if options.debug: print gccapacity_stats # Put all GC related stats to the same dict gc_stats.update(gccapacity_stats) gc_stats.update(gcnew_stats) # Get stats from -compiler compiler_stats = get_jstat_metrics("-compiler", lvmid, metric_maps_compiler) if options.debug: print compiler_stats # Get stats from -class class_stats = get_jstat_metrics("-class", lvmid, metric_maps_class) if options.debug: print class_stats # Print to stdout in graphite format now = time.time() graphite_base = '.'.join([options.graphite_base, '.'.join(reversed(options.hostname.split('.')))]) for metric in gc_stats: print "%s.%s.jvm.gc.%s %s %d" % (graphite_base, options.service, metric, gc_stats[metric], now) for metric in compiler_stats: print "%s.%s.jvm.compiler.%s %s %d" % (graphite_base, 
options.service, metric, compiler_stats[metric], now) for metric in class_stats: print "%s.%s.jvm.class.%s %s %d" % (graphite_base, options.service, metric, class_stats[metric], now) sys.exit(0) if '__main__' == __name__: JstatMetricsToGraphiteFormat().main()
from PySide import QtGui


class Page:
    virtAddr = None  # Address in virtual memory
    data = []        # Data within memory

    # Constructor
    def __init__(self, page_size, p_virtAddr):
        self.data = [0] * page_size
        self.virtAddr = p_virtAddr


class Model(object):
    """State of the simulated page table / physical memory.

    Views register callbacks via subscribe_update_func(); every state change
    is pushed to them through announce_update().
    """

    def __init__(self):
        self._update_funcs = []
        self.config_section = 'settings'

        #### model variables ####
        # Page table: frame number (as string) for each of the 16 pages,
        # "None" meaning the page is not resident.
        vm_defaults = ["1", "None", "None", "None",
                       "0", "None", "None", "None",
                       "2", "None", "None", "None",
                       "None", "None", "3", "None"]
        for idx, val in enumerate(vm_defaults):
            setattr(self, 'VMAddr%d' % idx, val)

        # Presence/valid bit for each page ("1" = resident).
        pa_defaults = ["1", "0", "0", "0",
                       "1", "0", "0", "0",
                       "1", "0", "0", "0",
                       "0", "0", "1", "0"]
        for idx, val in enumerate(pa_defaults):
            setattr(self, 'PABit%d' % idx, val)

        # Physical memory: page number (as string) held by each of 4 frames.
        for idx, val in enumerate(["4", "0", "8", "14"]):
            setattr(self, 'PMAddr%d' % idx, val)

        self.Connect = True
        self.Disconnect = False
        self.curPHAddr = "None"
        self.curVMAddr = "None"

        self.announce_update()

    def subscribe_update_func(self, func):
        # Register a view callback at most once.
        if func not in self._update_funcs:
            self._update_funcs.append(func)

    def unsubscribe_update_func(self, func):
        if func in self._update_funcs:
            self._update_funcs.remove(func)

    def announce_update(self):
        # Push the current state to every subscribed view.
        for func in self._update_funcs:
            #print(func)
            func()

    def __setVMAddr(self, index, val):
        # Set page-table entry `index`; returns -1 on a bad index.
        if not 0 <= index <= 15:
            return -1
        setattr(self, 'VMAddr%d' % index, val)

    def __getVMAddr(self, index):
        # Read page-table entry `index`; returns -1 on a bad index.
        if not 0 <= index <= 15:
            return -1
        return getattr(self, 'VMAddr%d' % index)

    def __setPMAddr(self, index, val):
        # Set physical-frame entry `index` (0-3); returns -1 on a bad index.
        if not 0 <= index <= 3:
            return -1
        setattr(self, 'PMAddr%d' % index, val)

    def __setPABit(self, index, val):
        # Set presence bit of page `index`; returns -1 on a bad index.
        if not 0 <= index <= 15:
            return -1
        setattr(self, 'PABit%d' % index, val)

    # Load page from virtual memory to physical memory
    def _pagein(self, pagenum, framenum):
        # Place page into physical memory
        self.__setPMAddr(framenum, str(pagenum))
        # Update page table
        self.__setVMAddr(pagenum, str(framenum))
        self.__setPABit(pagenum, "1")

    # Remove page from physical memory
    def _pageout(self, pagenum, framenum):
        # Remove page from physical memory
        self.__setPMAddr(framenum, "None")
        # Update page table
        self.__setPABit(pagenum, "0")

    # Parse input
    def parsein(self, msg):
        """Decode a two-byte device message and update the model.

        Returns 0 on success, -1 on an unrecognised message type.
        Note: msg is mutated in place (bytes -> ints).
        """
        #print(str(msg))
        msg[0] = int.from_bytes(msg[0], byteorder='big')
        msg[1] = int.from_bytes(msg[1], byteorder='big')

        kind = (msg[0] & 0xF0) >> 4      # message type nibble
        phys = msg[0] & 0x0F             # physical address nibble
        frame = (phys & 0x0C) >> 2       # frame number (upper 2 bits)

        if kind == (1 << 3):
            # Valid Read
            print('Valid Read')
            virt = (msg[1] & 0xFC) >> 2
            self.curPHAddr = str(phys)
            self.curVMAddr = str(virt)
        elif kind == (1 << 2):
            # Page Fault: evict one page, load another into the same frame.
            print('Page Fault')
            evicted = (msg[1] & 0xF0) >> 4
            loaded = msg[1] & 0x0F
            offset = phys & 0x03
            virt = (loaded << 2) | offset
            #print('Page out:', evicted, 'Page in:', loaded, 'Frame num:', frame)
            self._pageout(evicted, frame)
            self._pagein(loaded, frame)
            self.curPHAddr = str(phys)
            self.curVMAddr = str(virt)
        else:
            print("Rx'd", msg)
            return -1

        self.announce_update()
        return 0
""" :Authors: - Wilker Aziz """ import logging import argparse import sys """ :Authors: - Wilker Aziz """ from os.path import splitext import subprocess as sp import shlex import argparse import logging import sys import itertools import os import numpy as np import traceback from multiprocessing import Pool from functools import partial from collections import deque from grasp.loss.fast_bleu import DecodingBLEU from grasp.loss.fast_bleu import doc_bleu, stream_doc_bleu import grasp.ptypes as ptypes from grasp.recipes import smart_ropen, smart_wopen, make_unique_directory, pickle_it, unpickle_it, traceit from grasp.scoring.scorer import TableLookupScorer, StatelessScorer, StatefulScorer from grasp.scoring.util import make_models from grasp.scoring.util import read_weights from grasp.mt.cdec_format import load_grammar from grasp.mt.util import GoalRuleMaker from grasp.mt.util import save_forest, save_ffs, load_ffs, make_dead_srule, make_batches, number_of_batches from grasp.mt.segment import SegmentMetaData from grasp.mt.input import make_pass_grammar import grasp.semiring as semiring from grasp.semiring.operator import FixedLHS, FixedRHS from grasp.formal.scfgop import output_projection from grasp.formal.fsa import make_dfa, make_dfa_set, make_dfa_set2 from grasp.formal.scfgop import make_hypergraph_from_input_view, output_projection from grasp.formal.scfgop import lookup_components, stateless_components from grasp.formal.topsort import AcyclicTopSortTable from grasp.formal.traversal import bracketed_string, yield_string from grasp.formal.wfunc import TableLookupFunction, ConstantFunction, derivation_weight from grasp.cfg.model import DummyConstant from grasp.cfg.symbol import Nonterminal from grasp.cfg.symbol import Terminal from grasp.cfg.srule import OutputView from grasp.alg.deduction import NederhofParser, EarleyParser, EarleyRescorer from grasp.alg.inference import viterbi_derivation, AncestralSampler from grasp.alg.value import acyclic_value_recursion, 
acyclic_reversed_value_recursion, compute_edge_expectation from grasp.alg.rescoring import weight_edges from grasp.alg.rescoring import SlicedRescoring from grasp.alg.rescoring import stateless_rescoring from grasp.alg.chain import apply_filters, group_by_identity, group_by_projection from grasp.alg.expectation import expected_components from grasp.scoring.frepr import FComponents from grasp.io.results import save_mcmc_yields, save_mcmc_derivations, save_markov_chain from random import shuffle from numpy import linalg as LA from scipy.optimize import minimize from time import time, strftime from types import SimpleNamespace import grasp.mt.pipeline2 as pipeline def npvec2str(nparray, fnames=None, separator=' '): """converts an array of feature values into a string (fnames can be provided)""" if fnames is None: return separator.join(repr(fvalue) for fvalue in nparray) else: return separator.join('{0}={1}'.format(fname, repr(fvalue)) for fname, fvalue in zip(fnames, nparray)) def cmd_optimisation(parser): # Optimisation parser.add_argument("--maxiter", '-M', type=int, default=10, help="Maximum number of iterations") parser.add_argument('--mode', type=str, default='10', help="use 'all' for all data, use 'online' for online updates, " "use 0-100 to specify batch size in percentage") parser.add_argument('--shuffle', action='store_true', help='shuffle training instances') parser.add_argument('--temperature', type=float, default=1.0, help='scales the initial model') parser.add_argument('--proxy-init', type=str, default='uniform', help="use 'uniform' for uniform weights, 'random' for random weights, or choose a default weight") parser.add_argument('--target-init', type=str, default='uniform', help="use 'uniform' for uniform weights, 'random' for random weights, or choose a default weight") parser.add_argument("--resume", type=int, default=0, help="Resume from a certain iteration (requires the config file of the preceding run)") parser.add_argument('--merge', type=int, 
default=0, help="how many iterations should we consider in estimating Z(x) (use 0 or less for all)") parser.add_argument("--sgd", type=int, nargs=2, default=[10, 10], help="Number of iterations and function evaluations for target optimisation") parser.add_argument("--tol", type=float, nargs=2, default=[1e-9, 1e-9], help="f-tol and g-tol in target optimisation") parser.add_argument("--L2", type=float, default=0.0, help="Weight of L2 regulariser in target optimisation") def cmd_logging(parser): parser.add_argument('--save-d', action='store_true', default=0, help='store sampled derivations (after MCMC filters apply)') parser.add_argument('--save-y', action='store_true', default=0, help='store sampled translations (after MCMC filters apply)') parser.add_argument('--verbose', '-v', action='count', default=0, help='increase the verbosity level') def cmd_loss(group): group.add_argument('--bleu-order', type=int, default=4, metavar='N', help="longest n-gram feature for sentence-level IBM-BLEU") group.add_argument('--bleu-smoothing', type=float, default=1.0, metavar='F', help="add-p smoothing for sentence-level IBM-BLEU") def cmd_parser(group): group.add_argument('--goal', type=str, default='GOAL', metavar='LABEL', help='default goal symbol (root after parsing/intersection)') group.add_argument('--framework', type=str, default='exact', choices=['exact', 'slice'], metavar='FRAMEWORK', help="inference framework: 'exact', 'slice' sampling") def cmd_grammar(group): group.add_argument('--start', '-S', type=str, default='S', metavar='LABEL', help='default start symbol') group.add_argument("--dev-grammars", type=str, help="grammars for the dev set") group.add_argument("--devtest-grammars", type=str, help="grammars for the devtest set") group.add_argument('--extra-grammar', action='append', default=[], metavar='PATH', help="path to an additional grammar (multiple allowed)") group.add_argument('--glue-grammar', action='append', default=[], metavar='PATH', help="glue rules are only 
applied to initial states (multiple allowed)") group.add_argument('--pass-through', action='store_true', help="add pass-through rules for every input word (and an indicator feature for unknown words)") group.add_argument('--default-symbol', '-X', type=str, default='X', metavar='LABEL', help='default nonterminal (used for pass-through rules and automatic glue rules)') def cmd_sampler(group): group.add_argument('--samples', type=int, default=100, metavar='N', help="number of samples from proxy") def get_argparser(): parser = argparse.ArgumentParser(description='Training by MLE', formatter_class=argparse.ArgumentDefaultsHelpFormatter) #parser.add_argument('config', type=str, help="configuration file") parser.add_argument("workspace", type=str, default=None, help="where samples can be found and where decisions are placed") parser.add_argument("proxy", type=str, help="proxy model description") parser.add_argument("target", type=str, help="target model description") parser.add_argument("dev", type=str, help="development set") parser.add_argument('--experiment', type=str, help='folder within the workspace where results are stored' 'by default we use a timestamp and a random suffix') parser.add_argument("--proxy-weights", '-Q', type=str, help="proxy weights") parser.add_argument("--target-weights", '-P', type=str, help="target weights") parser.add_argument("--proxy-temperature", '-Tq', type=float, default=1.0, help="scales the model (the bigger the more uniform)") parser.add_argument("--target-temperature", '-Tp', type=float, default=1.0, help="scales the model (the bigger the more uniform)") parser.add_argument("--jobs", type=int, default=2, help="number of processes") parser.add_argument('--dev-alias', type=str, default='dev', help='Change the alias of the dev set') parser.add_argument("--devtest", type=str, help="devtest set") parser.add_argument('--devtest-alias', type=str, default='devtest', help='Change the alias of the devtest set') parser.add_argument('--redo', 
action='store_true', help='overwrite already computed files (by default we do not repeat computation)') cmd_parser(parser.add_argument_group('Parser')) cmd_grammar(parser.add_argument_group('Grammar')) cmd_optimisation(parser.add_argument_group('Parameter optimisation by SGD')) cmd_loss(parser.add_argument_group('Loss')) cmd_sampler(parser.add_argument_group('Importance sampler')) cmd_logging(parser.add_argument_group('Logging')) # General return parser def make_dirs(args, exist_ok=True): """ Make output directories and saves the command line arguments for documentation purpose. :param args: command line arguments :return: main output directory within workspace (prefix is a timestamp and suffix is a unique random string) """ # create the workspace if missing logging.info('Workspace: %s', args.workspace) if not os.path.exists(args.workspace): os.makedirs(args.workspace, exist_ok=exist_ok) # create a unique experiment area or reuse a given one if not args.experiment: outdir = make_unique_directory(args.workspace) else: outdir = '{0}/{1}'.format(args.workspace, args.experiment) os.makedirs(outdir, exist_ok=exist_ok) logging.info('Writing files to: %s', outdir) devdir = '{0}/{1}'.format(outdir, args.dev_alias) os.makedirs(devdir, exist_ok=exist_ok) if args.devtest: devtestdir = '{0}/{1}'.format(outdir, args.devtest_alias) os.makedirs(devtestdir, exist_ok=exist_ok) dynamicdir = '{0}/iterations'.format(outdir) os.makedirs(dynamicdir, exist_ok=exist_ok) return outdir, devdir @traceit def pass0_to_pass2(seg, options, workingdir, model, redo, log): saving = {'pass2.forest': '{0}/{1}.q-forest'.format(workingdir, seg.id), 'pass2.components': '{0}/{1}.q-components'.format(workingdir, seg.id)} if pipeline.all_steps_complete(saving, redo): return True forest, components = pipeline.pass0_to_pass2(seg, options, model.lookup, model.stateless, model.stateful, saving=saving, redo=redo, log=log) return forest.n_nodes() > 0 def make_pass0_to_pass2_options(args): options = 
@traceit
def importance_sample(seg, options, staticdir, workingdir, proxy, target, redo, log):
    """
    Importance-sample derivations for one segment, estimate the posterior over
    translations, rank candidates by consensus BLEU loss, and return the best one.

    :param seg: input segment
    :param options: namespace with sampling/BLEU options (see make_impsamp_options)
    :param staticdir: directory holding the pass2 forest/components for the segment
    :param workingdir: directory where samples and the ranking file are written
    :param proxy: proxy model q (the importance distribution)
    :param target: target model p
    :param redo: whether to recompute files that already exist
    :param log: logging callable
    :return: (best translation string, its estimated posterior, its consensus loss)
    """
    saving = {'is.samples': '{0}/samples/{1}.is'.format(workingdir, seg.id),
              'pass2.forest': '{0}/{1}.q-forest'.format(staticdir, seg.id),
              'pass2.components': '{0}/{1}.q-components'.format(staticdir, seg.id)}
    # TODO: 1. normalise q(d) \propto g(d) exactly?
    #       2. use sample frequency for q(d)?
    #       3. use unnormalised g(d)
    samples = pipeline.importance_sample(seg, options, proxy, target,
                                         saving=saving, redo=redo, log=log)
    # support: each unique translation string, tokenized
    Y = [None] * len(samples)
    # estimated proposal (Q) and target (P) posteriors over the support
    Q = np.zeros(len(samples), dtype=ptypes.weight)
    P = np.zeros(len(samples), dtype=ptypes.weight)
    for i, sample in enumerate(samples):
        Y[i] = sample.y.split()
        qy = 0.0
        py = 0.0
        for d in sample.D:
            f = target.score(d.p_comps)
            g = proxy.score(d.q_comps)  # TODO: consider normalising g exactly
            # importance weight w = p(d)/q(d) in the inside semiring
            w = semiring.inside.divide(f, g)
            qy += float(d.count) / len(samples)
            py += d.count * semiring.inside.as_real(w)
        Q[i] = qy
        P[i] = py
    # normalise the importance-weighted estimate of p(y)
    P /= P.sum()
    # consensus (MBR-style) BLEU loss of each candidate under the estimated posterior
    bleu = DecodingBLEU(Y, P, max_order=options.bleu_order, smoothing=options.bleu_smoothing)
    L = [bleu.loss(y) for y in Y]
    # best first: lowest loss, ties broken by highest posterior
    ranking = sorted(range(len(Y)), key=lambda i: (L[i], -P[i]))
    with smart_wopen('{0}/samples/{1}.ranking.gz'.format(workingdir, seg.id)) as fo:
        print('# L ||| p(y) ||| q(y) ||| y', file=fo)
        for i in ranking:
            print('{0} ||| {1} ||| {2} ||| {3}'.format(L[i], P[i], Q[i], samples[i].y), file=fo)
    # BUG FIX: this used to return samples[i] with the leftover loop index, i.e.
    # the *last* (worst-ranked) candidate; the decision must be the top-ranked one.
    best = ranking[0]
    return samples[best].y, P[best], L[best]
def sanity_checks(args):
    """
    Verify that every required input file exists, logging an error for each
    missing one.

    :param args: command line arguments (dev, devtest, proxy, target,
        proxy_weights, target_weights)
    :return: True if all required files are present, False otherwise
    """
    # (enabled, path, error message) — optional entries are only checked when set
    checks = [
        (True, args.dev, 'Training set not found: %s'),
        (bool(args.devtest), args.devtest, 'Validation set not found: %s'),
        (True, args.proxy, 'Proxy model description not found: %s'),
        (True, args.target, 'Target model description not found: %s'),
        (bool(args.proxy_weights), args.proxy_weights, 'Proxy model weights not found: %s'),
        (bool(args.target_weights), args.target_weights, 'Target model weights not found: %s'),
    ]
    ok = True
    for enabled, path, message in checks:
        if enabled and not os.path.exists(path):
            logging.error(message, path)
            ok = False
    return ok
def main():
    """Entry point: parse command line arguments, configure logging verbosity,
    and run the training pipeline."""
    args = get_argparser().parse_args()
    # shared logging layout; -v enables INFO, -vv (or more) enables DEBUG
    log_format = '%(asctime)s %(levelname)s %(message)s'
    log_datefmt = '%H:%M:%S'
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO, format=log_format, datefmt=log_datefmt)
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=log_datefmt)
    core(args)


if __name__ == '__main__':
    main()
@mock.patch('paasta_tools.api.views.instance.marathon_job_status', autospec=True)
@mock.patch('paasta_tools.api.views.instance.marathon_tools.get_matching_appids', autospec=True)
@mock.patch('paasta_tools.api.views.instance.marathon_tools.load_marathon_service_config', autospec=True)
@mock.patch('paasta_tools.api.views.instance.validate_service_instance', autospec=True)
@mock.patch('paasta_tools.api.views.instance.get_actual_deployments', autospec=True)
def test_instances_status_marathon(
    mock_get_actual_deployments,
    mock_validate_service_instance,
    mock_load_marathon_service_config,
    mock_get_matching_appids,
    mock_marathon_job_status,
):
    """instance_status for a marathon instance should surface the service
    config's bounce method and desired state.

    Note: the mock arguments arrive in reverse order of the decorators above.
    """
    settings.cluster = 'fake_cluster'
    # deployments for several cluster.instance pairs; only
    # fake_cluster.fake_instance is relevant to this request
    mock_get_actual_deployments.return_value = {
        'fake_cluster.fake_instance': 'GIT_SHA',
        'fake_cluster.fake_instance2': 'GIT_SHA',
        'fake_cluster2.fake_instance': 'GIT_SHA',
        'fake_cluster2.fake_instance2': 'GIT_SHA',
    }
    mock_validate_service_instance.return_value = 'marathon'
    settings.marathon_clients = mock.Mock()
    mock_get_matching_appids.return_value = ['a', 'b']
    # a real MarathonServiceConfig so bounce_method/desired_state come from
    # actual accessor logic rather than a mock
    mock_service_config = marathon_tools.MarathonServiceConfig(
        service='fake_service',
        cluster='fake_cluster',
        instance='fake_instance',
        config_dict={'bounce_method': 'fake_bounce'},
        branch_dict=None,
    )
    mock_load_marathon_service_config.return_value = mock_service_config
    mock_marathon_job_status.return_value = 'fake_marathon_status'
    request = testing.DummyRequest()
    request.swagger_data = {'service': 'fake_service', 'instance': 'fake_instance'}
    response = instance.instance_status(request)
    assert response['marathon']['bounce_method'] == 'fake_bounce'
    # NOTE(review): 'start' is presumably the default desired_state when
    # branch_dict is None — confirm against MarathonServiceConfig defaults.
    assert response['marathon']['desired_state'] == 'start'
@mock.patch('paasta_tools.api.views.instance.adhoc_instance_status', autospec=True)
@mock.patch('paasta_tools.api.views.instance.validate_service_instance', autospec=True)
@mock.patch('paasta_tools.api.views.instance.get_actual_deployments', autospec=True)
def test_instances_status_adhoc(
    mock_get_actual_deployments,
    mock_validate_service_instance,
    mock_adhoc_instance_status,
):
    """An adhoc instance should delegate to adhoc_instance_status and report
    its (here empty) payload under the 'adhoc' key."""
    settings.cluster = 'fake_cluster'
    # deployments for every cluster/instance combination under test
    deployments = {}
    for cluster_name in ('fake_cluster', 'fake_cluster2'):
        for instance_name in ('fake_instance', 'fake_instance2'):
            deployments['{}.{}'.format(cluster_name, instance_name)] = 'GIT_SHA'
    mock_get_actual_deployments.return_value = deployments
    mock_validate_service_instance.return_value = 'adhoc'
    mock_adhoc_instance_status.return_value = {}
    fake_request = testing.DummyRequest()
    fake_request.swagger_data = {'service': 'fake_service', 'instance': 'fake_instance'}
    response = instance.instance_status(fake_request)
    assert mock_adhoc_instance_status.called
    assert response == {
        'service': 'fake_service',
        'instance': 'fake_instance',
        'git_sha': 'GIT_SHA',
        'adhoc': {},
    }
@mock.patch('paasta_tools.api.views.instance.add_executor_info', autospec=True)
@mock.patch('paasta_tools.api.views.instance.add_slave_info', autospec=True)
@mock.patch('paasta_tools.api.views.instance.instance_status', autospec=True)
@mock.patch('paasta_tools.api.views.instance.get_tasks_from_app_id', autospec=True)
def test_instance_tasks(mock_get_tasks_from_app_id, mock_instance_status, mock_add_slave_info, mock_add_executor_info):
    """instance_tasks should enrich tasks with executor/slave info only in
    verbose mode, and raise ApiFailure for non-marathon instances."""
    mock_request = mock.Mock(swagger_data={'task_id': '123', 'slave_hostname': 'host1'})
    mock_instance_status.return_value = {'marathon': {'app_id': 'app1'}}
    mock_task_1 = mock.Mock()
    mock_task_2 = mock.Mock()
    mock_get_tasks_from_app_id.return_value = [mock_task_1, mock_task_2]
    # non-verbose: tasks are returned without any enrichment
    ret = instance.instance_tasks(mock_request)
    assert not mock_add_slave_info.called
    assert not mock_add_executor_info.called
    # verbose: every task goes through add_executor_info, then add_slave_info
    mock_request = mock.Mock(swagger_data={'task_id': '123', 'slave_hostname': 'host1', 'verbose': True})
    ret = instance.instance_tasks(mock_request)
    mock_add_executor_info.assert_has_calls([mock.call(mock_task_1), mock.call(mock_task_2)])
    mock_add_slave_info.assert_has_calls([
        mock.call(mock_add_executor_info.return_value),
        mock.call(mock_add_executor_info.return_value),
    ])
    # both expected entries are the same mock attribute object by construction
    expected = [
        mock_add_slave_info.return_value._Task__items,
        mock_add_slave_info.return_value._Task__items,
    ]

    def ids(l):
        # compare by object identity, since mock items are not equality-friendly
        return {id(x) for x in l}
    assert len(ret) == len(expected) and ids(expected) == ids(ret)
    # a non-marathon (chronos) instance cannot serve tasks
    mock_instance_status.return_value = {'chronos': {}}
    with raises(ApiFailure):
        ret = instance.instance_tasks(mock_request)
def test_add_executor_info():
    """add_executor_info should copy the task items and attach the executor
    dict with its task lists stripped out."""
    fake_mesos_task = mock.Mock()
    executor_dict = {
        'tasks': [fake_mesos_task],
        'some': 'thing',
        'completed_tasks': [fake_mesos_task],
        'queued_tasks': [fake_mesos_task],
    }
    fake_task = mock.Mock(
        _Task__items={'a': 'thing'},
        executor=executor_dict,
    )
    result = instance.add_executor_info(fake_task)
    assert result._Task__items == {'a': 'thing', 'executor': {'some': 'thing'}}
    # all task-list keys must have been removed from the attached executor
    for stripped_key in ('completed_tasks', 'tasks', 'queued_tasks'):
        with raises(KeyError):
            result._Task__items['executor'][stripped_key]
def test_interval_identity():
    """Equality, containment, and string formatting of basic intervals."""
    iv.dps = 15
    # a point interval equals its degenerate [a, a] form
    assert mpi(2) == mpi(2, 2)
    assert mpi(2) != mpi(-2, 2)
    assert not (mpi(2) != mpi(2, 2))
    assert mpi(-1, 1) == mpi(-1, 1)
    # str/repr expose the outward-rounded endpoints enclosing decimal 0.1
    assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]"
    assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')"
    u = mpi(-1, 3)
    # scalar containment is endpoint-inclusive
    assert -1 in u
    assert 2 in u
    assert 3 in u
    assert -1.1 not in u
    assert 3.1 not in u
    # interval containment means subset (endpoints included)
    assert mpi(-1, 3) in u
    assert mpi(0, 1) in u
    assert mpi(-1.1, 2) not in u
    assert mpi(2.5, 3.1) not in u
    # the full line contains every finite and semi-infinite interval
    w = mpi(-inf, inf)
    assert mpi(-5, 5) in w
    assert mpi(2, inf) in w
    assert mpi(0, 2) in mpi(0, 10)
    assert not (3 in mpi(-inf, 0))
def test_interval_mul():
    """Interval multiplication, including signed-infinity and 0*inf corner cases."""
    assert mpi(-1, 0) * inf == mpi(-inf, 0)
    assert mpi(-1, 0) * -inf == mpi(0, inf)
    assert mpi(0, 1) * inf == mpi(0, inf)
    assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf)
    assert mpi(-1, 1) * inf == mpi(-inf, inf)
    assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf)
    assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf)
    assert mpi(-inf, 0) * mpi(0, 1) == mpi(-inf, 0)
    # BUG FIX: this assertion was missing its expected value, so it only checked
    # truthiness of the product; 0 * (-inf) is indeterminate, so the result is
    # the whole line (consistent with the mpi(0) * mpi(-inf,2) cases below).
    assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0) == mpi(-inf, inf)
    assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf)
    assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160)
    assert mpi(2,3) * mpi(-1,2) == mpi(-3,6)
    # Should be undefined?
    assert mpi(inf, inf) * 0 == mpi(-inf, inf)
    assert mpi(-inf, -inf) * 0 == mpi(-inf, inf)
    assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf)
    assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf)
    assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf)
    assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf)
mpi(0, 1) / mpi(0, 1) == mpi(0, inf) assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf) assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf) assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf) assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf) assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf) assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf) assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8) assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16) assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16) assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf) assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf) assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf) assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0) assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0) assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0) assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0) assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0) assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf) assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf) assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf) assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf) assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf) assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf) assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0) assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25) assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf) assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf) assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(-1, 0) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(-1, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) assert mpi(-4, -0.5) / mpi(-2, 0.5) 
== mpi(-inf, +inf) assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf) assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0) assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf) assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf) assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5) assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0) assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0) assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0) assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25) assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0) assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0) assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5) assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5) assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0) assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0) assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0) assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0) assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25) assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25) assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0) assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0) # Should be undefined? 
assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf) assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf) def test_interval_cos_sin(): iv.dps = 15 cos = iv.cos sin = iv.sin tan = iv.tan pi = iv.pi # Around 0 assert cos(mpi(0)) == 1 assert sin(mpi(0)) == 0 assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0) assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159) assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501) assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0) assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0) assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0) assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0) assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0) assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159) assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538) assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100)) assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100)) # Same interval assert cos(mpi(2, 2.5)) assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946) assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168) assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5)) assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646) assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306) # Higher roots iv.dps = 55 w = 4*10**50 + mpi(0.5) for p in [15, 40, 80]: iv.dps = p assert 0 in sin(4*mpi(pi)) assert 0 in sin(4*10**50*mpi(pi)) assert 0 in cos((4+0.5)*mpi(pi)) assert 0 in cos(w*mpi(pi)) assert 1 in cos(4*mpi(pi)) assert 1 in cos(4*10**50*mpi(pi)) iv.dps = 15 assert cos(mpi(2,inf)) == mpi(-1,1) assert sin(mpi(2,inf)) == mpi(-1,1) assert cos(mpi(-inf,2)) == mpi(-1,1) assert sin(mpi(-inf,2)) == mpi(-1,1) u = tan(mpi(0.5,1)) assert mpf(u.a).ae(mp.tan(0.5)) assert 
mpf(u.b).ae(mp.tan(1)) v = iv.cot(mpi(0.5,1)) assert mpf(v.a).ae(mp.cot(1)) assert mpf(v.b).ae(mp.cot(0.5)) # Sanity check of evaluation at n*pi and (n+1/2)*pi for n in range(-5,7,2): x = iv.cos(n*iv.pi) assert -1 in x assert x >= -1 assert x != -1 x = iv.sin((n+0.5)*iv.pi) assert -1 in x assert x >= -1 assert x != -1 for n in range(-6,8,2): x = iv.cos(n*iv.pi) assert 1 in x assert x <= 1 if n: assert x != 1 x = iv.sin((n+0.5)*iv.pi) assert 1 in x assert x <= 1 assert x != 1 for n in range(-6,7): x = iv.cos((n+0.5)*iv.pi) assert x.a < 0 < x.b x = iv.sin(n*iv.pi) if n: assert x.a < 0 < x.b def test_interval_complex(): # TODO: many more tests iv.dps = 15 mp.dps = 15 assert iv.mpc(2,3) == 2+3j assert iv.mpc(2,3) != 2+4j assert iv.mpc(2,3) != 1+3j assert 1+3j in iv.mpc([1,2],[3,4]) assert 2+5j not in iv.mpc([1,2],[3,4]) assert iv.mpc(1,2) + 1j == 1+3j assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6]) assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4]) assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4]) assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1]) assert iv.exp(2+3j).ae(mp.exp(2+3j)) assert iv.log(2+3j).ae(mp.log(2+3j)) assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2)) assert 1j in (iv.mpf(-1) ** 0.5) assert 1j in (iv.mpc(-1) ** 0.5) assert abs(iv.mpc(0)) == 0 assert abs(iv.mpc(inf)) == inf assert abs(iv.mpc(3,4)) == 5 assert abs(iv.mpc(4)) == 4 assert abs(iv.mpc(0,4)) == 4 assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3]) assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3]) assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13]) assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13]) assert iv.mpc(2,3) ** 0 == 1 assert iv.mpc(2,3) ** 1 == (2+3j) assert iv.mpc(2,3) ** 2 == (2+3j)**2 assert iv.mpc(2,3) ** 3 == (2+3j)**3 assert iv.mpc(2,3) ** 4 == (2+3j)**4 assert iv.mpc(2,3) ** 5 == (2+3j)**5 assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1) assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2) assert iv.cos(2).ae(mp.cos(2)) assert iv.sin(2).ae(mp.sin(2)) 
def test_interval_complex_arg():
    """iv.arg endpoints should bracket the true argument of each rectangle;
    reference values come from the non-interval (mp) context."""
    mp.dps = 15
    iv.dps = 15
    assert iv.arg(3) == 0
    assert iv.arg(0) == 0
    assert iv.arg([0,3]) == 0
    assert iv.arg(-3).ae(pi)
    # BUG FIX: this previously compared iv.arg(2+3j) against itself, which is
    # trivially true; the reference must be the scalar mp.arg value.
    assert iv.arg(2+3j).ae(mp.arg(2+3j))
    # second quadrant
    z = iv.mpc([-2,-1],[3,4])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(-1+4j))
    assert t.b.ae(mp.arg(-2+3j))
    # straddling the imaginary axis (upper half plane)
    z = iv.mpc([-2,1],[3,4])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(1+3j))
    assert t.b.ae(mp.arg(-2+3j))
    # first quadrant
    z = iv.mpc([1,2],[3,4])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(2+3j))
    assert t.b.ae(mp.arg(1+4j))
    # straddling the real axis (right half plane)
    z = iv.mpc([1,2],[-2,3])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(1-2j))
    assert t.b.ae(mp.arg(1+3j))
    # fourth quadrant
    z = iv.mpc([1,2],[-4,-3])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(1-4j))
    assert t.b.ae(mp.arg(2-3j))
    z = iv.mpc([-1,2],[-4,-3])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(-1-3j))
    assert t.b.ae(mp.arg(2-3j))
    # third quadrant
    z = iv.mpc([-2,-1],[-4,-3])
    t = iv.arg(z)
    assert t.a.ae(mp.arg(-2-3j))
    assert t.b.ae(mp.arg(-1-4j))
    # rectangles containing the branch cut get the full range [-pi, pi]
    z = iv.mpc([-2,-1],[-3,3])
    t = iv.arg(z)
    assert t.a.ae(-mp.pi)
    assert t.b.ae(mp.pi)
    z = iv.mpc([-2,2],[-3,3])
    t = iv.arg(z)
    assert t.a.ae(-mp.pi)
    assert t.b.ae(mp.pi)
mpi('5.2582327113062393041', '5.2582327113062749951') assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]' assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]' assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]' exp = iv.exp assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171' iv.dps = 15 def test_mpi_from_str(): iv.dps = 15 assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0')) assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)') assert iv.convert('[1, 2]') == mpi(1, 2) assert iv.convert('1[2, 3]') == mpi(12, 13) assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8') assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4') def test_interval_gamma(): mp.dps = 15 iv.dps = 15 # TODO: need many more tests assert iv.rgamma(0) == 0 assert iv.fac(0) == 1 assert iv.fac(1) == 1 assert iv.fac(2) == 2 assert iv.fac(3) == 6 assert iv.gamma(0) == [-inf,inf] assert iv.gamma(1) == 1 assert iv.gamma(2) == 1 assert iv.gamma(3) == 2 assert -3.5449077018110320546 in iv.gamma(-0.5) assert iv.loggamma(1) == 0 assert iv.loggamma(2) == 0 assert 0.69314718055994530942 in iv.loggamma(3) # Test tight log-gamma endpoints based on monotonicity xs = [iv.mpc([2,3],[1,4]), iv.mpc([2,3],[-4,-1]), iv.mpc([2,3],[-1,4]), iv.mpc([2,3],[-4,1]), iv.mpc([2,3],[-4,4]), iv.mpc([-3,-2],[2,4]), iv.mpc([-3,-2],[-4,-2])] for x in xs: ys = [mp.loggamma(mp.mpc(x.a,x.c)), mp.loggamma(mp.mpc(x.b,x.c)), mp.loggamma(mp.mpc(x.a,x.d)), mp.loggamma(mp.mpc(x.b,x.d))] if 0 in x.imag: ys += [mp.loggamma(x.a), mp.loggamma(x.b)] min_real = min([y.real for y in ys]) max_real = max([y.real for y in ys]) min_imag = min([y.imag for y in ys]) max_imag = max([y.imag for y in ys]) z = iv.loggamma(x) assert z.a.ae(min_real) assert z.b.ae(max_real) assert z.c.ae(min_imag) assert z.d.ae(max_imag)
import asynchat
import asyncore
import logging
import util
import socket
import json


class ExternalRequestStage(asyncore.dispatcher):
    """
    Listens for external connections from clients and routes requests internally.
    """

    def __init__(self, server=None, external_port=None, hostname="0.0.0.0"):
        """
        Args
        ----------
        server : PynamoServer object.
            object through which internal stages can be accessed.
        external_port : str or int
            port on which server will be listening for external communications.
        hostname : str
            public dns name through which server will be contacted.
        """
        # NOTE: a stray Py2 `print hostname, external_port` used to live here,
        # before the logger existed; the same info is logged just below.
        self.logger = logging.getLogger('{}'.format(self.__class__.__name__))
        self.logger.debug('__init__')
        self.logger.debug('__init__. hostname, external_port: {}, {}'.format(hostname, external_port))

        # socket stuff
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            self.bind((hostname, int(external_port)))
            self.logger.info('__init__. successfully listening to {}:{}'.format(hostname, external_port))
        except socket.error:
            # best-effort, as in the original: log the failure and carry on
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            self.logger.error('__init__. unable to bind to {}:{}'.format(hostname, external_port))
        self.listen(5)

        # protected properties
        self._server = server
        self._hostname = hostname
        self._external_port = int(external_port)
        self._channels = []
        self._coordinators = []
        self._processor = self._request_handler()

    def handle_accept(self):
        """
        Implements asyncore.dispatcher's handle_accept method.

        Accepts the connection only while the server is accepting external
        requests; otherwise the new socket is closed immediately.
        """
        self.logger.info('handle_accept')
        sock, client_address = self.accept()
        if self._server.is_accepting_external_requests:
            external_channel = ExternalChannel(server=self._server, sock=sock)
            self._channels.append(external_channel)
            self.logger.info('handle_accept. accepting connection from: {}'.format(client_address))
        else:
            # BUGFIX: original referenced the undefined name `self_server`
            # here, which raised NameError on this code path.
            self.logger.debug('handle_accept. external shutdown flag: {}. closing connection.'.format(
                self._server._external_shutdown_flag))
            sock.close()

    def handle_close(self):
        """
        Implements asyncore.dispatcher's handle_close method.

        Closes each asynchat channel and then closes self.
        """
        self.logger.debug('handle_close')
        for channel in self._channels:
            channel.close_when_done()
        self.close()
        self.logger.info('handle_close. channels and self closed')

    def process(self):
        """
        Signals coroutine to cycle through its next loop.
        """
        self.logger.debug('process')
        # next() builtin instead of .next() for Py2.6+/Py3 compatibility
        return next(self._processor)

    @util.coroutine
    def _request_handler(self):
        """
        Coroutine that cycles through self._channels and processes each one
        in turn.  Yields False once shutdown is complete, True otherwise.
        """
        yield  # do nothing when first called
        while True:
            if self._server._external_shutdown_flag:
                # if flagged for shutdown, handle closing of self.
                if not self._channels:
                    self.handle_close()
                    yield False
            to_be_removed = []
            for channel in self._channels:
                if channel.process():
                    to_be_removed.append(channel)
            # implement channel removal here
            yield True

    def _immediate_shutdown(self):
        """
        Stops self from listening on self._external_port immediately.
        """
        self.logger.debug('_immediate_shutdown')
        self.handle_close()


class ExternalChannel(asynchat.async_chat):
    """
    asyncore channel that handles external communication with a client.
    """

    def __init__(self, server=None, sock=None):
        """
        Args:
        ----------
        server : PynamoServer
            object through which internal stages can be accessed.
        sock : socket
            socket connected to client.
        """
        self.logger = logging.getLogger('{}'.format(self.__class__.__name__))
        self.logger.debug('__init__')

        # internal variables
        self._server = server
        self._coordinators = list()
        self._timeout = None

        # async_chat
        asynchat.async_chat.__init__(self, sock)
        self._read_buffer = list()
        # NOTE(review): terminator is read from `server.terminator` here but
        # `_send_message` uses `server._terminator` — confirm which attribute
        # the server actually exposes; both are preserved as-is.
        self.set_terminator(self._server.terminator)

    def collect_incoming_data(self, data):
        """
        Implements asynchat.async_chat's collect_incoming_data method.

        Appends data to internal read buffer.

        Args:
        ----------
        data : str
            incoming data read by the async_chat channel.
        """
        self.logger.debug('collect_incoming_data')
        self.logger.debug('collect_incoming_data. node_hash: {}'.format(self._server.node_hash))
        self.logger.debug('collect_incoming_data. collected data: {}'.format(data))
        self._read_buffer.append(data)

    def found_terminator(self):
        """
        Implements asynchat.async_chat's found_terminator method.

        Handler for when a message's terminator is found: the buffered text is
        parsed as JSON, the read buffer flushed, and the message passed to the
        message handler.
        """
        request = json.loads(''.join(self._read_buffer))
        self.logger.debug('found_terminator. request: {}'.format(request))
        self._read_buffer = []
        self._process_message(request)

    def _process_message(self, request):
        """
        Handles request messages passed from async_chat's found_terminator
        handler.

        Marks the message as an 'external request', timestamps it, and hashes
        its key before passing it internally via an ExternalRequestCoordinator.

        Args:
        ----------
        request : dict
            incoming request object (decoded JSON).
        """
        self.logger.debug('_request_handler.')
        request['type'] = 'external request'
        request['timestamp'] = util.current_time()
        # BUGFIX: original read `try request['key'] = ...` — the missing colon
        # was a SyntaxError.  Best-effort hashing is preserved: requests
        # without a usable key pass through untouched.
        try:
            request['key'] = util.get_hash(request['key'])
        except Exception:
            pass
        coordinator = ExternalRequestCoordinator(server=self._server, request=request)
        self._coordinators.append(coordinator)
        self.logger.debug('_request_handler. coordinator appended: {}'.format(coordinator))

    def _send_message(self, message):
        """
        Sends message back to client.
        """
        self.logger.debug('_send_message.')
        self.logger.debug('_send_message. message {}'.format(message))
        self.push(util.pack_message(message, self._server._terminator))
        # set timeout to be 30 seconds after last request received
        self._timeout = util.add_time(util.current_time(), 30)

    def process(self):
        """
        Processes request queue and returns replies in the correct order when
        they are ready.
        """
        self.logger.debug('process')
        # if timeout has been set and it's past the time
        if self._timeout and (util.current_time() > self._timeout):
            self.close_when_done()
        # process requests
        for coordinator in self._coordinators:
            coordinator.process()
        # Send replies strictly in FIFO order, stopping at the first request
        # that is not yet complete.
        # BUGFIX: the original enumerated self._coordinators while popping
        # index 0 inside the loop, which skipped entries and could send one
        # coordinator's reply while discarding a different coordinator.
        while self._coordinators and self._coordinators[0].completed:
            self._send_message(self._coordinators.pop(0).reply)


class ExternalRequestCoordinator(object):
    """
    Receives external requests and processes them internally.  Instructs
    InternalRequestStage to handle the request and listens for a reply, which
    it then forwards back to the client.
    """

    def __init__(self, server=None, request=None):
        """
        Args:
        ----------
        server : PynamoServer object.
            object through which internal stages can be accessed.
        request : dict
            request message.
        """
        self.logger = logging.getLogger('{}'.format(self.__class__.__name__))
        self.logger.debug('__init__')

        self._server = server
        self._reply = None
        # rebinds the method name to its (primed) generator instance,
        # as in the original
        self._reply_listener = self._reply_listener()
        self._processor = self._request_handler(request)

    @property
    def completed(self):
        """
        Returns:
            True if request has returned a reply, False otherwise.

        NOTE(review): a falsy-but-valid reply (e.g. an empty dict) would read
        as "not completed" here; preserved as-is — confirm replies are always
        truthy.
        """
        return bool(self._reply)

    @property
    def reply(self):
        # the reply message received from the internal stage (None until then)
        return self._reply

    def process(self):
        """
        Steps processor through next cycle.
        """
        return next(self._processor)

    @util.coroutine
    def _request_handler(self, request):
        """
        Coroutine that acts as the ExternalRequestCoordinator's processor.

        Yields:
            True when request is complete.
            False when request is not yet complete.
        """
        self.logger.debug('_request_handler')
        self.logger.debug('_request_handler. request: {}'.format(request))
        self._server.internal_request_stage.handle_internal_message(
            message=request, reply_listener=self._reply_listener)
        self.logger.debug('_request_handler. handle_internal_message called.')
        while True:
            yield self.completed

    @util.coroutine
    def _reply_listener(self):
        """
        Coroutine for listening to the reply of a request.
        """
        self.logger.debug('_reply_listener')
        self._reply = (yield)
        self.logger.debug('_reply_listener. reply received: {}'.format(self._reply))
        yield True
#!/usr/bin/env python # -*- coding: utf-8 -*- ######################################################################### # Copyright/License Notice (BSD License) # ######################################################################### ######################################################################### # Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: - # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the author nor the names of its contributors # # may be used to endorse or promote products derived from this # # software without specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# #########################################################################

###################################################
#        Danny's Digital Packet                   #
#        Danny's Data Packet                      #
#        Danny's DDP Project                      #
###################################################
# Specification ID: 0                             #
# Specification Name: 7-bit                       #
###################################################

from ddp import DanRSA

import os


# NOTE: these module-level functions take `self` — they are injected into a
# DDP protocol object elsewhere (mixin-style), so `self.*` attributes such as
# SECTION_SEPERATOR, sha1(), scramble() etc. are provided by that host object.

def constructPacket(self, callsign_from, via, callsign_to, flags, data, application_id, signature):
    """Assemble a complete DDP packet and return it as a str.

    Layout: header/version (p1), then the addressed payload (p2) of
    source, via, destination, flags (as hex), application id, packet id
    and encoded data, then signature, SHA1 checksum and footer — all
    joined with SECTION_SEPERATOR.
    """
    tx = bytearray()
    p1 = bytearray()
    p2 = bytearray()

    p1.extend(self.PROTOCOL_HEADER)
    p1.extend(self.SECTION_SEPERATOR)
    p1.extend(self.PROTOCOL_VERSION)
    p1.extend(self.SECTION_SEPERATOR)

    p2.extend(callsign_from)
    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(via)
    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(callsign_to)
    p2.extend(self.SECTION_SEPERATOR)
    # flags travel as hex: the binary-string flags are parsed base-2,
    # rendered as hex and the "0x" prefix stripped
    p2.extend(str(hex(int(str(flags), 2))).replace("0x", ""))
    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(application_id)
    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(self.generatePacketID())
    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(self.encodeData(data, str(flags)))

    # Check the signature to see if it's full of nulls
    if len(str(signature).replace("\x00", "")) == 0:
        signature = ""

    # sign the payload built so far when crypto is on and no signature
    # was supplied by the caller
    if self.CRYPTO_AVAILABLE and signature == "":
        if self.DEBUG_MODE:
            self.log.info("Signature generation is required.")

        signature = self.crypto.signMessage(str(p2))

    p2.extend(self.SECTION_SEPERATOR)
    p2.extend(self.encodeStreamToBase128(signature, 0, True))

    # checksum covers the payload INCLUDING the encoded signature
    checksum = self.sha1(str(p2))

    tx.extend(str(p1))
    tx.extend(str(p2))
    tx.extend(self.SECTION_SEPERATOR)
    tx.extend(checksum)
    tx.extend(self.SECTION_SEPERATOR)
    tx.extend(self.PROTOCOL_FOOTER)

    # Now we need apply the extra rules to the packet e.g. scrambling, reed-solomon, etc
    if self.DEBUG_MODE:
        self.log.info("Constructed: %s" % repr(str(tx)))
        self.log.info("Applying rules to packet...")

    d = str(tx)
    tx2 = bytearray()

    # when driving fldigi, append newline + "^r" (fldigi TX-toggle macro)
    if self.fldigi is not None:
        tx2.extend(d)
        tx2.extend("\n")
        tx2.extend("^r")

    else:
        tx2.extend(d)

    if self.DEBUG_MODE:
        a = float(len(str(tx)))
        b = float(len(str(tx2)))
        c = ((b - a) / a) * 100.

        self.log.info("Original packet size %d bytes, including overhead %d bytes - %.2f%% increase." % (a, b, c))

    return str(tx2)

def decodeData(self, data, flags):
    """Reverse encodeData(): base128-decode, descramble and, when the
    compression flag bit is set, decompress the stream."""
    # flags string is reversed so bit positions index from the LSB side
    f = str(flags)[::-1]

    din = self.decodeBase128ToStream(str(data), 0, True)
    din = self.descramble(din)

    if int(f[self.FLAG_COMPRESSION]) == 1:
        return self.decompressStream(din)

    else:
        return din

def encodeData(self, data, flags):
    """Optionally compress, then scramble and base128-encode the data."""
    din = None
    # flags string is reversed so bit positions index from the LSB side
    f = str(flags)[::-1]

    if int(f[self.FLAG_COMPRESSION]) == 1:
        din = self.compressStream(str(data))

    else:
        din = str(data)

    din = self.scramble(din)

    return self.encodeStreamToBase128(din, 0, True)

def init(self):
    """Specification entry point: announce the active mode."""
    self.log.info("Running in 7-bit text mode.")

def isCompressionAllowed(self):
    """This specification permits payload compression."""
    return True

def parsePacket(self, packet):
    """No extra parse step for this specification — returned unchanged."""
    return packet

def splitPacket(self, packet):
    """Split a raw packet string into its sections."""
    return str(packet).split(self.SECTION_SEPERATOR)

def verifyPacket(self, packet):
    """Validate a split packet (list of sections).

    Checks, in order: section count, header, exact protocol version,
    SHA1 checksum, application id, replay (packet id seen before), and
    finally the RSA signature when crypto is available.  Returns True
    when the packet is acceptable, False otherwise.  Side effects: may
    rewrite packet[SECTION_FLAGS] from hex back to a binary string, may
    record the packet id in the replay database (self.prd), and in
    repeater mode adopts the packet's application id.
    """
    plen = len(packet)

    # Check the number of sections
    if plen == self.MAX_SECTIONS:
        # Check the header
        if packet[self.SECTION_HEADER] == self.PROTOCOL_HEADER:
            # Check the version number (must be exact versions for now)
            if packet[self.SECTION_VERSION] == self.PROTOCOL_VERSION:
                # Check the checksum
                verify = packet[self.SECTION_SOURCE] + self.SECTION_SEPERATOR + packet[self.SECTION_VIA] + self.SECTION_SEPERATOR + packet[self.SECTION_DESTINATION] + self.SECTION_SEPERATOR + packet[self.SECTION_FLAGS] + self.SECTION_SEPERATOR + packet[self.SECTION_APPLICATION_ID] + self.SECTION_SEPERATOR + packet[self.SECTION_PACKET_ID] + self.SECTION_SEPERATOR + packet[self.SECTION_DATA]
                checksum = self.sha1(verify + self.SECTION_SEPERATOR + packet[self.SECTION_SIGNATURE])

                # Convert the flags to old-style (hex -> 16-char binary string)
                packet[self.SECTION_FLAGS] = bin(int(packet[self.SECTION_FLAGS], 16)).replace("0b", "").rjust(16, "0")

                if checksum == packet[self.SECTION_CHECKSUM]:
                    # Application ID
                    if self.REPEATER_MODE:
                        # Set the application ID to the one we are verifying
                        self.APPLICATION_ID = packet[self.SECTION_APPLICATION_ID]

                    if packet[self.SECTION_APPLICATION_ID] == self.APPLICATION_ID:
                        # Packet replay detection
                        if not packet[self.SECTION_PACKET_ID] in self.prd:
                            if self.DEBUG_MODE:
                                self.log.info("Packet ID %s not present in PRD database, adding it in." % packet[self.SECTION_PACKET_ID])

                            self.prd[packet[self.SECTION_PACKET_ID]] = packet[self.SECTION_SOURCE]

                            # Callsign signature
                            if self.CRYPTO_AVAILABLE:
                                if len(str(packet[self.SECTION_SIGNATURE]).replace("\x00", "")) > 0:
                                    # public keys are stored by SHA1 of the source callsign
                                    client_key = os.path.join(self.CRYPTO_REMOTE_DIRECTORY, "%s.key" % self.sha1(packet[self.SECTION_SOURCE]))

                                    if os.path.exists(client_key):
                                        if self.DEBUG_MODE:
                                            self.log.info("Verifying the packet using the public key for %s..." % packet[self.SECTION_SOURCE])

                                        test = DanRSA(client_key, None, None)
                                        result = test.verifyMessage(verify, self.decodeBase128ToStream(packet[self.SECTION_SIGNATURE], 0, True))
                                        test = None

                                        if result:
                                            if self.DEBUG_MODE:
                                                self.log.info("The signature has validated using the public key for %s." % packet[self.SECTION_SOURCE])

                                        else:
                                            if self.DEBUG_MODE:
                                                self.log.warn("The signature did NOT validate using the public key for %s." % packet[self.SECTION_SOURCE])

                                        return result

                                    else:
                                        if self.DEBUG_MODE:
                                            self.log.warn("We do not have the public key for %s (%s.key), cannot verify the packet." % (packet[self.SECTION_SOURCE], self.sha1(packet[self.SECTION_SOURCE])))

                                        if self.CRYPTO_ALLOW_UNSIGNED_PACKETS:
                                            return True

                                else:
                                    if self.DEBUG_MODE:
                                        self.log.warn("Packet contained no signature.")

                                    if self.CRYPTO_ALLOW_UNSIGNED_PACKETS:
                                        return True

                            else:
                                if self.DEBUG_MODE:
                                    self.log.warn("Crypto isn't available so we can't validate the signature.")

                                if self.CRYPTO_ALLOW_UNSIGNED_PACKETS:
                                    return True

                        else:
                            if self.DEBUG_MODE:
                                self.log.warn("Packet replay detected by %s." % self.prd[packet[self.SECTION_PACKET_ID]])

                    else:
                        if self.DEBUG_MODE:
                            self.log.warn("The packet received application ID (%s) isn't for this application." % packet[self.SECTION_APPLICATION_ID])

                else:
                    if self.DEBUG_MODE:
                        self.log.warn("Checksum mismatch (%s != %s)." % (checksum, packet[self.SECTION_CHECKSUM]))

            else:
                if self.DEBUG_MODE:
                    self.log.warn("Version number mismatch (%s != %s)." % (self.PROTOCOL_VERSION, packet[self.SECTION_VERSION]))

        else:
            if self.DEBUG_MODE:
                self.log.warn("Invalid header (%s != %s)." % (self.PROTOCOL_HEADER, packet[self.SECTION_HEADER]))

    else:
        if self.DEBUG_MODE:
            self.log.warn("Wrong number of sections (%d != %d)." % (plen, self.MAX_SECTIONS))

    return False
# -*-coding:utf-8-*-

"""
    security
    ~~~~~~~~
    Use this model to encrypt string.

    Usage
    =====
    >>> d = DES()
    >>> d.input_key("123456789")
    >>> s = "/static/hello.js"
    >>> a = d.encode(s)
    >>> print a
    b14f1453ceddc91e492fbe883d552a2e
    >>> b = d.decode(a)
    >>> print b
    /static/hello.js
"""
from functools import partial

__author__ = 'karlvorndoenitz@gmail.com'


class DES(object):
    """
    DES encrypt method
    interface: input_key(s, base=10), encode(s), decode(s)

    Internally every value is a string of '0'/'1' characters; the standard
    DES tables below are 1-based bit-selection indices (FIPS 46-3).
    """
    # initial permutation (IP)
    __ip = [
        58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4,
        62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8,
        57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3,
        61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7,
    ]
    # final permutation (IP^-1)
    __ip1 = [
        40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31,
        38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29,
        36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27,
        34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25,
    ]
    # expansion table E (32 -> 48 bits)
    __e = [
        32, 1, 2, 3, 4, 5,
        4, 5, 6, 7, 8, 9,
        8, 9, 10, 11, 12, 13,
        12, 13, 14, 15, 16, 17,
        16, 17, 18, 19, 20, 21,
        20, 21, 22, 23, 24, 25,
        24, 25, 26, 27, 28, 29,
        28, 29, 30, 31, 32, 1,
    ]
    # permutation P applied after the S-boxes
    __p = [
        16, 7, 20, 21, 29, 12, 28, 17,
        1, 15, 23, 26, 5, 18, 31, 10,
        2, 8, 24, 14, 32, 27, 3, 9,
        19, 13, 30, 6, 22, 11, 4, 25,
    ]
    # the eight S-boxes, each flattened to 4 rows x 16 columns
    __s = [
        [
            0xe, 0x4, 0xd, 0x1, 0x2, 0xf, 0xb, 0x8, 0x3, 0xa, 0x6, 0xc, 0x5, 0x9, 0x0, 0x7,
            0x0, 0xf, 0x7, 0x4, 0xe, 0x2, 0xd, 0x1, 0xa, 0x6, 0xc, 0xb, 0x9, 0x5, 0x3, 0x8,
            0x4, 0x1, 0xe, 0x8, 0xd, 0x6, 0x2, 0xb, 0xf, 0xc, 0x9, 0x7, 0x3, 0xa, 0x5, 0x0,
            0xf, 0xc, 0x8, 0x2, 0x4, 0x9, 0x1, 0x7, 0x5, 0xb, 0x3, 0xe, 0xa, 0x0, 0x6, 0xd,
        ],
        [
            0xf, 0x1, 0x8, 0xe, 0x6, 0xb, 0x3, 0x4, 0x9, 0x7, 0x2, 0xd, 0xc, 0x0, 0x5, 0xa,
            0x3, 0xd, 0x4, 0x7, 0xf, 0x2, 0x8, 0xe, 0xc, 0x0, 0x1, 0xa, 0x6, 0x9, 0xb, 0x5,
            0x0, 0xe, 0x7, 0xb, 0xa, 0x4, 0xd, 0x1, 0x5, 0x8, 0xc, 0x6, 0x9, 0x3, 0x2, 0xf,
            0xd, 0x8, 0xa, 0x1, 0x3, 0xf, 0x4, 0x2, 0xb, 0x6, 0x7, 0xc, 0x0, 0x5, 0xe, 0x9,
        ],
        [
            0xa, 0x0, 0x9, 0xe, 0x6, 0x3, 0xf, 0x5, 0x1, 0xd, 0xc, 0x7, 0xb, 0x4, 0x2, 0x8,
            0xd, 0x7, 0x0, 0x9, 0x3, 0x4, 0x6, 0xa, 0x2, 0x8, 0x5, 0xe, 0xc, 0xb, 0xf, 0x1,
            0xd, 0x6, 0x4, 0x9, 0x8, 0xf, 0x3, 0x0, 0xb, 0x1, 0x2, 0xc, 0x5, 0xa, 0xe, 0x7,
            0x1, 0xa, 0xd, 0x0, 0x6, 0x9, 0x8, 0x7, 0x4, 0xf, 0xe, 0x3, 0xb, 0x5, 0x2, 0xc,
        ],
        [
            0x7, 0xd, 0xe, 0x3, 0x0, 0x6, 0x9, 0xa, 0x1, 0x2, 0x8, 0x5, 0xb, 0xc, 0x4, 0xf,
            0xd, 0x8, 0xb, 0x5, 0x6, 0xf, 0x0, 0x3, 0x4, 0x7, 0x2, 0xc, 0x1, 0xa, 0xe, 0x9,
            0xa, 0x6, 0x9, 0x0, 0xc, 0xb, 0x7, 0xd, 0xf, 0x1, 0x3, 0xe, 0x5, 0x2, 0x8, 0x4,
            0x3, 0xf, 0x0, 0x6, 0xa, 0x1, 0xd, 0x8, 0x9, 0x4, 0x5, 0xb, 0xc, 0x7, 0x2, 0xe,
        ],
        [
            0x2, 0xc, 0x4, 0x1, 0x7, 0xa, 0xb, 0x6, 0x8, 0x5, 0x3, 0xf, 0xd, 0x0, 0xe, 0x9,
            0xe, 0xb, 0x2, 0xc, 0x4, 0x7, 0xd, 0x1, 0x5, 0x0, 0xf, 0xa, 0x3, 0x9, 0x8, 0x6,
            0x4, 0x2, 0x1, 0xb, 0xa, 0xd, 0x7, 0x8, 0xf, 0x9, 0xc, 0x5, 0x6, 0x3, 0x0, 0xe,
            0xb, 0x8, 0xc, 0x7, 0x1, 0xe, 0x2, 0xd, 0x6, 0xf, 0x0, 0x9, 0xa, 0x4, 0x5, 0x3,
        ],
        [
            0xc, 0x1, 0xa, 0xf, 0x9, 0x2, 0x6, 0x8, 0x0, 0xd, 0x3, 0x4, 0xe, 0x7, 0x5, 0xb,
            0xa, 0xf, 0x4, 0x2, 0x7, 0xc, 0x9, 0x5, 0x6, 0x1, 0xd, 0xe, 0x0, 0xb, 0x3, 0x8,
            0x9, 0xe, 0xf, 0x5, 0x2, 0x8, 0xc, 0x3, 0x7, 0x0, 0x4, 0xa, 0x1, 0xd, 0xb, 0x6,
            0x4, 0x3, 0x2, 0xc, 0x9, 0x5, 0xf, 0xa, 0xb, 0xe, 0x1, 0x7, 0x6, 0x0, 0x8, 0xd,
        ],
        [
            0x4, 0xb, 0x2, 0xe, 0xf, 0x0, 0x8, 0xd, 0x3, 0xc, 0x9, 0x7, 0x5, 0xa, 0x6, 0x1,
            0xd, 0x0, 0xb, 0x7, 0x4, 0x9, 0x1, 0xa, 0xe, 0x3, 0x5, 0xc, 0x2, 0xf, 0x8, 0x6,
            0x1, 0x4, 0xb, 0xd, 0xc, 0x3, 0x7, 0xe, 0xa, 0xf, 0x6, 0x8, 0x0, 0x5, 0x9, 0x2,
            0x6, 0xb, 0xd, 0x8, 0x1, 0x4, 0xa, 0x7, 0x9, 0x5, 0x0, 0xf, 0xe, 0x2, 0x3, 0xc,
        ],
        [
            0xd, 0x2, 0x8, 0x4, 0x6, 0xf, 0xb, 0x1, 0xa, 0x9, 0x3, 0xe, 0x5, 0x0, 0xc, 0x7,
            0x1, 0xf, 0xd, 0x8, 0xa, 0x3, 0x7, 0x4, 0xc, 0x5, 0x6, 0xb, 0x0, 0xe, 0x9, 0x2,
            0x7, 0xb, 0x4, 0x1, 0x9, 0xc, 0xe, 0x2, 0x0, 0x6, 0xa, 0xd, 0xf, 0x3, 0x5, 0x8,
            0x2, 0x1, 0xe, 0x7, 0x4, 0xa, 0x8, 0xd, 0xf, 0xc, 0x9, 0x0, 0x3, 0x5, 0x6, 0xb,
        ],
    ]
    # key permutation PC-1 (64 -> 56 bits)
    __k1 = [
        57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18,
        10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36,
        63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22,
        14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4,
    ]
    # key permutation PC-2 (56 -> 48 bits)
    __k2 = [
        14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10,
        23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2,
        41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48,
        44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32,
    ]
    # per-round left-rotation amounts for the key schedule
    __k0 = [
        1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1,
    ]
    # hex digit -> 4-bit string; ' ' maps to '0000' for pad characters
    __hex_bin = {
        '0': '0000', '1': '0001', '2': '0010', '3': '0011',
        '4': '0100', '5': '0101', '6': '0110', '7': '0111',
        '8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
        'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111',
        ' ': '0000'
    }

    # generic 1-based bit selection: pick s[i-1] for every index i in table t
    __re = lambda t, s: ''.join(s[i - 1] for i in t)
    __IP = partial(__re, __ip)
    __IP1 = partial(__re, __ip1)
    __E = partial(__re, __e)
    __P = partial(__re, __p)
    __K1 = partial(__re, __k1)
    __K2 = partial(__re, __k2)
    # string -> bit string (via per-char hex), and bit string -> string
    __B = partial(lambda hex_bin, s: ''.join(hex_bin[w] for w in ''.join('%2x' % ord(w) for w in s)), __hex_bin)
    __DB = partial(lambda s: ''.join(chr(int(s[i:i + 8], 2)) for i in range(0, len(s), 8)))
    # S-box substitution: 48-bit string -> 32-bit string; for each 6-bit
    # group, outer bits select the row and inner 4 bits the column
    __S = partial(lambda hex_bin, __s, s: ''.join(
        hex_bin['%x' % __s[i][int(s[i * 6] + s[i * 6 + 5], 2) * 16 + int(s[i * 6 + 1:i * 6 + 5], 2)]]
        for i in range(8)), __hex_bin, __s)
    # bitwise XOR of two equal-length bit strings
    __F = partial(lambda s, k: ''.join('0' if s[i] == k[i] else '1' for i in range(len(s))))
    # key schedule: rotate both 28-bit halves by the round amount, then PC-2.
    # BUGFIX/compat: wrapped in list() so the schedule is a real (indexable,
    # reversible) list — identical on Py2, where map() already returns a list.
    __K0 = partial(
        lambda k0, K2, k: list(map(K2, (k[k0[i]:28] + k[0:k0[i]] + k[k0[i] + 28:56] + k[28:k0[i] + 28]
                                        for i in range(16)))),
        __k0, __K2)
    __K = partial(lambda K1, K0, k: K0(K1(k)), __K1, __K0)

    def __init__(self):
        # the 16 round subkeys; set by input_key()
        self.__k = None

    def input_key(self, key, base=10):
        """Install the cipher key.

        :param key: the key, as a bit string (base=2), hex string (base=16)
                    or plain text (any other base value).
        :param base: interpretation of *key*; defaults to plain text.
        """
        if base == 2:
            pass
        elif base == 16:
            key = ''.join(self.__class__.__hex_bin[w] for w in key)
        else:
            key = self.__class__.__B(key)
        self.__k = self.__class__.__K(key)

    def __code(self, s, k):
        """Run the 16 Feistel rounds on a 64-bit block string *s* with the
        subkey sequence *k* (forward for encrypt, reversed for decrypt)."""
        s = self.__IP(s)
        l, r = s[0:32], s[32:64]
        for i in range(16):
            r_t = r
            r = self.__E(r)
            r = self.__F(r, k[i])
            r = self.__S(r)
            r = self.__P(r)
            r = self.__F(r, l)
            l = r_t
        # halves are swapped before the final permutation
        return self.__class__.__IP1(r + l)

    def encode(self, s):
        """Encrypt *s* and return the ciphertext as a lowercase hex string.

        The input is right-padded with spaces to a multiple of 8 bytes;
        each 8-byte block becomes 16 hex characters.
        """
        s = str(s)
        a = ''
        s += ' ' * ((8 - len(s) % 8) % 8)
        for i in range(0, len(s), 8):
            before = self.__class__.__B(s[i:i + 8])
            after = self.__code(before, self.__k)
            a += '%16x' % int(after, 2)
        # '%16x' left-pads with spaces; render those as '0'
        return ''.join(w if w != ' ' else '0' for w in a)

    def decode(self, s):
        """Decrypt a hex ciphertext produced by encode() and return the
        original text (padding spaces stripped).

        NOTE: Python 2 only as written — the final ``.decode('utf-8')``
        assumes byte strings.
        """
        a = ""
        # BUGFIX: the original called `s.lower()` and discarded the result,
        # so uppercase hex input raised KeyError in the __hex_bin lookup.
        s = s.lower()
        for i in range(0, len(s), 16):
            before = ''.join(self.__class__.__hex_bin[s[j]] for j in range(i, i + 16))
            # reversed subkey order decrypts
            after = self.__code(before, self.__k[::-1])
            a += self.__class__.__DB(after)
        return a.rstrip().decode('utf-8')
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (C) 2012 Midokura Japan K.K. # Copyright (C) 2013 Midokura PTE LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Rossella Sblendido, Midokura Europe SARL # @author: Ryu Ishimoto, Midokura Japan KK # @author: Tomoe Sugihara, Midokura Japan KK import sys import uuid import mock from webob import exc as w_exc import quantum.common.test_lib as test_lib import quantum.tests.unit.midonet as midonet import quantum.tests.unit.test_db_plugin as test_plugin MIDOKURA_PKG_PATH = "quantum.plugins.midonet.plugin" # Need to mock the midonetclient module since the plugin will try to load it. 
sys.modules["midonetclient"] = mock.Mock() class MidonetPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase): _plugin_name = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH) def setUp(self): self.mock_api = mock.patch('midonetclient.api.MidonetApi') self.instance = self.mock_api.start() super(MidonetPluginV2TestCase, self).setUp(self._plugin_name) def tearDown(self): super(MidonetPluginV2TestCase, self).tearDown() self.mock_api.stop() def _setup_bridge_mock(self, bridge_id=str(uuid.uuid4()), name='net'): # Set up mocks needed for the parent network() method bridge = mock.Mock() bridge.get_id.return_value = bridge_id bridge.get_name.return_value = name self.instance.return_value.add_bridge.return_value.name.return_value\ .tenant_id.return_value.create.return_value = bridge self.instance.return_value.get_bridges.return_value = [bridge] self.instance.return_value.get_bridge.return_value = bridge return bridge def _setup_subnet_mocks(self, subnet_id=str(uuid.uuid4()), subnet_prefix='10.0.0.0', subnet_len=int(24)): # Set up mocks needed for the parent subnet() method bridge = self._setup_bridge_mock() subnet = mock.Mock() subnet.get_subnet_prefix.return_value = subnet_prefix subnet.get_subnet_length.return_value = subnet_len subnet.get_id.return_value = subnet_prefix + '/' + str(subnet_len) bridge.add_dhcp_subnet.return_value.default_gateway\ .return_value.subnet_prefix.return_value.subnet_length\ .return_value.create.return_value = subnet bridge.get_dhcp_subnets.return_value = [subnet] return (bridge, subnet) def _setup_port_mocks(self, port_id=str(uuid.uuid4())): # Set up mocks needed for the parent port() method bridge, subnet = self._setup_subnet_mocks() port = mock.Mock() port.get_id.return_value = port_id self.instance.return_value.create_port.return_value = port self.instance.return_value.get_port.return_value = port bridge.add_exterior_port.return_value.create.return_value = ( port ) dhcp_host = mock.Mock() rv1 = 
subnet.add_dhcp_host.return_value.ip_addr.return_value rv1.mac_addr.return_value.create.return_value = dhcp_host subnet.get_dhcp_hosts.return_value = [dhcp_host] return (bridge, subnet, port, dhcp_host) class TestMidonetNetworksV2(test_plugin.TestNetworksV2, MidonetPluginV2TestCase): def test_create_network(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_create_network() def test_create_public_network(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_create_public_network() def test_create_public_network_no_admin_tenant(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_create_public_network_no_admin_tenant() def test_update_network(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_update_network() def test_list_networks(self): bridge = self._setup_bridge_mock() with self.network(name='net1') as net1: req = self.new_list_request('networks') res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['networks'][0]['name'], net1['network']['name']) def test_show_network(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_show_network() def test_update_shared_network_noadmin_returns_403(self): self._setup_bridge_mock() super(TestMidonetNetworksV2, self).test_update_shared_network_noadmin_returns_403() def test_update_network_set_shared(self): pass def test_update_network_with_subnet_set_shared(self): pass def test_update_network_set_not_shared_single_tenant(self): pass def test_update_network_set_not_shared_other_tenant_returns_409(self): pass def test_update_network_set_not_shared_multi_tenants_returns_409(self): pass def test_update_network_set_not_shared_multi_tenants2_returns_409(self): pass def test_create_networks_bulk_native(self): pass def test_create_networks_bulk_native_quotas(self): pass def test_create_networks_bulk_tenants_and_quotas(self): pass def test_create_networks_bulk_tenants_and_quotas_fail(self): pass def 
test_create_networks_bulk_emulated(self): pass def test_create_networks_bulk_wrong_input(self): pass def test_create_networks_bulk_emulated_plugin_failure(self): pass def test_create_networks_bulk_native_plugin_failure(self): pass def test_list_networks_with_parameters(self): pass def test_list_networks_with_fields(self): pass def test_list_networks_with_parameters_invalid_values(self): pass def test_list_shared_networks_with_non_admin_user(self): pass def test_show_network_with_subnet(self): pass def test_invalid_admin_status(self): pass def test_list_networks_with_pagination_emulated(self): pass def test_list_networks_with_pagination_reverse_emulated(self): pass def test_list_networks_with_parameters(self): pass def test_list_networks_with_parameters_invalid_values(self): pass def test_list_networks_with_sort_emulated(self): pass def test_list_networks_without_pk_in_fields_pagination_emulated(self): pass class TestMidonetSubnetsV2(test_plugin.TestSubnetsV2, MidonetPluginV2TestCase): def test_create_subnet(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet() def test_create_two_subnets(self): pass def test_create_two_subnets_same_cidr_returns_400(self): pass def test_create_two_subnets_same_cidr_returns_400(self): pass def test_create_subnet_bad_V4_cidr(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_V4_cidr() def test_create_subnet_bad_V6_cidr(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_V4_cidr() def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): pass def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): pass def test_create_subnets_bulk_native(self): pass def test_create_subnets_bulk_emulated(self): pass def test_create_subnets_bulk_emulated_plugin_failure(self): pass def test_create_subnets_bulk_native_plugin_failure(self): pass def test_delete_subnet(self): _bridge, subnet = 
self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_subnet() subnet.delete.assert_called_once_with() def test_delete_subnet_port_exists_owned_by_network(self): _bridge, subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_subnet_port_exists_owned_by_network() def test_delete_subnet_port_exists_owned_by_other(self): pass def test_delete_network(self): bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_network() bridge.delete.assert_called_once_with() def test_create_subnet_bad_tenant(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_tenant() def test_create_subnet_bad_ip_version(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_ip_version() def test_create_subnet_bad_ip_version_null(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_ip_version_null() def test_create_subnet_bad_uuid(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_uuid() def test_create_subnet_bad_boolean(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_boolean() def test_create_subnet_bad_pools(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_pools() def test_create_subnet_bad_nameserver(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_nameserver() def test_create_subnet_bad_hostroutes(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_bad_hostroutes() def test_create_subnet_defaults(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_defaults() def test_create_subnet_gw_values(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_gw_values() def test_create_force_subnet_gw_values(self): self._setup_subnet_mocks() 
super(TestMidonetSubnetsV2, self).test_create_force_subnet_gw_values() def test_create_subnet_with_allocation_pool(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_allocation_pool() def test_create_subnet_with_none_gateway(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_none_gateway() def test_create_subnet_with_none_gateway_fully_allocated(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_none_gateway_fully_allocated() def test_subnet_with_allocation_range(self): pass def test_create_subnet_with_none_gateway_allocation_pool(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_none_gateway_allocation_pool() def test_create_subnet_with_v6_allocation_pool(self): pass def test_create_subnet_with_large_allocation_pool(self): pass def test_create_subnet_multiple_allocation_pools(self): pass def test_create_subnet_with_dhcp_disabled(self): pass def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( self): pass def test_create_subnet_gateway_in_allocation_pool_returns_409(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self)\ .test_create_subnet_gateway_in_allocation_pool_returns_409() def test_create_subnet_overlapping_allocation_pools_returns_409(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self)\ .test_create_subnet_overlapping_allocation_pools_returns_409() def test_create_subnet_invalid_allocation_pool_returns_400(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_invalid_allocation_pool_returns_400() def test_create_subnet_out_of_range_allocation_pool_returns_400(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self)\ .test_create_subnet_out_of_range_allocation_pool_returns_400() def test_create_subnet_shared_returns_400(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, 
self).test_create_subnet_shared_returns_400() def test_create_subnet_inconsistent_ipv6_cidrv4(self): pass def test_create_subnet_inconsistent_ipv4_cidrv6(self): pass def test_create_subnet_inconsistent_ipv4_gatewayv6(self): pass def test_create_subnet_inconsistent_ipv6_gatewayv4(self): pass def test_create_subnet_inconsistent_ipv6_dns_v4(self): pass def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): pass def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): pass def test_update_subnet(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet() def test_update_subnet_shared_returns_400(self): self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_shared_returns_400() def test_update_subnet_inconsistent_ipv4_gatewayv6(self): pass def test_update_subnet_inconsistent_ipv6_gatewayv4(self): pass def test_update_subnet_inconsistent_ipv4_dns_v6(self): pass def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): pass def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): pass def test_show_subnet(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_show_subnet() def test_list_subnets(self): pass def test_list_subnets_shared(self): pass def test_list_subnets_with_parameter(self): pass def test_invalid_ip_version(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_invalid_ip_version() def test_invalid_subnet(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_invalid_subnet() def test_invalid_ip_address(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_invalid_ip_address() def test_invalid_uuid(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_invalid_uuid() def test_create_subnet_with_one_dns(self): _bridge, _subnet = self._setup_subnet_mocks() 
super(TestMidonetSubnetsV2, self).test_create_subnet_with_one_dns() def test_create_subnet_with_two_dns(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_two_dns() def test_create_subnet_with_too_many_dns(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_too_many_dns() def test_create_subnet_with_one_host_route(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_one_host_route() def test_create_subnet_with_two_host_routes(self): self.skipTest("Disabled by Ubuntu packaging") _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_two_host_routes() def test_create_subnet_with_too_many_routes(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_create_subnet_with_too_many_routes() def test_update_subnet_dns(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_dns() def test_update_subnet_dns_to_None(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_dns_to_None() def test_update_subnet_dns_with_too_many_entries(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_dns_with_too_many_entries() def test_update_subnet_route(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_route() def test_update_subnet_route_to_None(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_route_to_None() def test_update_subnet_route_with_too_many_entries(self): _bridge, _subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_update_subnet_route_with_too_many_entries() def test_delete_subnet_with_dns(self): _bridge, subnet = 
self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_subnet_with_dns() subnet.delete.assert_called_once_with() def test_delete_subnet_with_route(self): _bridge, subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_subnet_with_route() subnet.delete.assert_called_once_with() def test_delete_subnet_with_dns_and_route(self): _bridge, subnet = self._setup_subnet_mocks() super(TestMidonetSubnetsV2, self).test_delete_subnet_with_dns_and_route() subnet.delete.assert_called_once_with() def test_update_subnet_gateway_in_allocation_pool_returns_409(self): self._setup_port_mocks() super(TestMidonetSubnetsV2, self)\ .test_update_subnet_gateway_in_allocation_pool_returns_409() def test_list_subnets_with_pagination_emulated(self): pass def test_list_subnets_with_pagination_reverse_emulated(self): pass def test_list_subnets_with_sort_emulated(self): pass class TestMidonetPortsV2(test_plugin.TestPortsV2, MidonetPluginV2TestCase): def test_create_port_json(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_create_port_json() def test_create_port_bad_tenant(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_create_port_bad_tenant() def test_create_port_public_network(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_create_port_public_network() def test_create_port_public_network_with_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_create_port_public_network_with_ip() def test_create_ports_bulk_native(self): pass def test_create_ports_bulk_emulated(self): pass def test_create_ports_bulk_wrong_input(self): pass def test_create_ports_bulk_emulated_plugin_failure(self): pass def test_create_ports_bulk_native_plugin_failure(self): pass def test_list_ports(self): pass def test_list_ports_filtered_by_fixed_ip(self): pass def test_list_ports_public_network(self): pass def test_show_port(self): self._setup_port_mocks() super(TestMidonetPortsV2, 
self).test_show_port() def test_delete_port(self): _bridge, _subnet, port, _dhcp = self._setup_port_mocks() super(TestMidonetPortsV2, self).test_delete_port() port.delete.assert_called_once_with() def test_delete_port_public_network(self): _bridge, _subnet, port, _dhcp = self._setup_port_mocks() super(TestMidonetPortsV2, self).test_delete_port_public_network() port.delete.assert_called_once_with() def test_update_port(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port() def test_update_device_id_null(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_device_id_null() def test_delete_network_if_port_exists(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_delete_network_if_port_exists() def test_delete_network_port_exists_owned_by_network(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_delete_network_port_exists_owned_by_network() def test_update_port_delete_ip(self): pass def test_no_more_port_exception(self): pass def test_update_port_update_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port_update_ip() def test_update_port_update_ip_address_only(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port_update_ip_address_only() def test_update_port_update_ips(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port_update_ips() def test_update_port_add_additional_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port_add_additional_ip() def test_requested_duplicate_mac(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_requested_duplicate_mac() def test_mac_generation(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_mac_generation() def test_mac_generation_4octet(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_mac_generation_4octet() def test_bad_mac_format(self): self._setup_port_mocks() 
super(TestMidonetPortsV2, self).test_bad_mac_format() def test_mac_exhaustion(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_mac_exhaustion() def test_requested_duplicate_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_requested_duplicate_ip() def test_requested_subnet_delete(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_requested_subnet_delete() def test_requested_subnet_id(self): pass def test_requested_subnet_id_not_on_network(self): pass def test_overlapping_subnets(self): pass def test_requested_subnet_id_v4_and_v6(self): pass def test_range_allocation(self): pass def test_requested_invalid_fixed_ips(self): pass def test_invalid_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_invalid_ip() def test_requested_split(self): pass def test_duplicate_ips(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_duplicate_ips() def test_fixed_ip_invalid_subnet_id(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_fixed_ip_invalid_subnet_id() def test_fixed_ip_invalid_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_fixed_ip_invalid_ip() def test_requested_ips_only(self): pass def test_recycling(self): pass def test_invalid_admin_state(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_invalid_admin_state() def test_invalid_mac_address(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_invalid_mac_address() def test_default_allocation_expiration(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_default_allocation_expiration() def test_update_fixed_ip_lease_expiration(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_fixed_ip_lease_expiration() def test_port_delete_holds_ip(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_port_delete_holds_ip() def test_update_fixed_ip_lease_expiration_invalid_address(self): 
self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_fixed_ip_lease_expiration_invalid_address() def test_hold_ip_address(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_hold_ip_address() def test_recycle_held_ip_address(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_recycle_held_ip_address() def test_recycle_expired_previously_run_within_context(self): pass def test_update_port_not_admin(self): self._setup_port_mocks() super(TestMidonetPortsV2, self).test_update_port_not_admin() def test_list_ports_with_pagination_emulated(self): pass def test_list_ports_with_pagination_reverse_emulated(self): pass def test_list_ports_with_sort_emulated(self): pass def test_max_fixed_ips_exceeded(self): pass def test_update_max_fixed_ips_exceeded(self): pass
"""Clowder config class .. codeauthor:: Joe DeCapo <joe@polka.cat> """ from configparser import ConfigParser from enum import auto, unique from functools import wraps from typing import Any, List, Optional, Tuple import clowder.util.formatting as fmt from clowder.util.enum import AutoLowerName from clowder.environment import ENVIRONMENT from clowder.git import GitProtocol from clowder.util.console import CONSOLE from clowder.util.error import MissingFileError, UnknownProjectError from clowder.util.file_system import remove_file def print_config(func): """Print config after wrapped function returrns""" @wraps(func) def wrapper(*args, **kwargs): """Wrapper""" retval = func(*args, **kwargs) Config().print_config() return retval return wrapper @unique class CommandConfigType(AutoLowerName): JOBS = auto() PROJECTS = auto() @staticmethod def section_name() -> str: return 'command' @unique class GitConfigType(AutoLowerName): FETCH = auto() PROTOCOL = auto() REBASE = auto() @classmethod def section_name(cls) -> str: return 'git' class Config(object): """Config class :ivar str name: Name of clowder :ivar Optional[Tuple[str, ...]] projects: Default projects :ivar Optional[GitProtocol] protocol: Default protocol :ivar Optional[bool] rebase: Default rebase :ivar Optional[int] jobs: Default number of jobs """ def __init__(self): """Config __init__""" self._config: ConfigParser = ConfigParser() if ENVIRONMENT.clowder_config is not None and ENVIRONMENT.clowder_config.exists(): self._config.read(ENVIRONMENT.clowder_config) git_section = GitConfigType.section_name() if git_section not in self._config: self._config[git_section] = {} self._git_config = self._config[git_section] command_section = CommandConfigType.section_name() if command_section not in self._config: self._config[command_section] = {} self._command_config = self._config[command_section] self._validate_config_projects_defined(self.projects) # if defaults is not None: # projects = defaults.get('projects', None) # 
self.projects: Optional[Tuple[str, ...]] = None if projects is None else tuple(sorted(projects)) # protocol = defaults.get('protocol', None) # self.protocol: Optional[GitProtocol] = None if protocol is None else GitProtocol(protocol) # self.rebase: Optional[bool] = defaults.get('rebase', None) # self.jobs: Optional[int] = defaults.get('jobs', None) @property def jobs(self) -> Optional[int]: jobs = str(CommandConfigType.JOBS.value) return self._command_config.getint(jobs) @jobs.setter def jobs(self, jobs: Optional[int]): self._set_command_option(CommandConfigType.JOBS, jobs) @property def projects(self) -> Optional[Tuple[str, ...]]: projects = self._command_config.get(str(CommandConfigType.PROJECTS.value)) if projects is None: return None projects = [p for p in projects.strip().split(", ")] return tuple(sorted(projects)) @projects.setter def projects(self, projects: Optional[List[str]]): if not projects: self._set_command_option(CommandConfigType.PROJECTS, None) return self._set_command_option(CommandConfigType.PROJECTS, ", ".join(projects)) @property def protocol(self) -> Optional[GitProtocol]: protocol = str(GitConfigType.PROTOCOL.value) protocol = self._git_config.get(protocol) if protocol is None: return None return GitProtocol(protocol) @protocol.setter def protocol(self, protocol: Optional[GitProtocol]): self._set_git_option(GitConfigType.PROTOCOL, protocol.value) @property def rebase(self) -> Optional[bool]: rebase = str(GitConfigType.REBASE.value) return self._git_config.getboolean(rebase) @rebase.setter def rebase(self, rebase: Optional[bool]): self._set_git_option(GitConfigType.REBASE, rebase) @staticmethod def clear() -> None: """Clear all config settings""" if ENVIRONMENT.clowder_config is not None and ENVIRONMENT.clowder_config.exists(): remove_file(ENVIRONMENT.clowder_config) @staticmethod def print_config() -> None: """Print current config file contents""" if ENVIRONMENT.clowder_config is None or not ENVIRONMENT.clowder_config.exists(): 
CONSOLE.stdout(' - No config file found') return CONSOLE.stdout(fmt.bold('Current config\n')) text = ENVIRONMENT.clowder_config.read_text() CONSOLE.stdout(fmt.escape(f"{text.strip()}\n")) def process_projects_arg(self, projects: List[str]) -> Tuple[str, ...]: """Process project args based on parameters and config :param List[str] projects: Projects to filter :return: Projects in groups matching given names """ if projects != ['default']: return tuple(sorted(projects)) if not self.projects: return ('all',) # noqa return self.projects def save(self) -> None: """Save configuration to file""" # if not ENVIRONMENT.clowder_config_dir.exists(): # make_dir(ENVIRONMENT.clowder_config_dir) if ENVIRONMENT.clowder_config is None: raise MissingFileError('No clowder config file path found') with open(ENVIRONMENT.clowder_config, 'w') as configfile: self._config.write(configfile) def _set_command_option(self, option: CommandConfigType, value: Optional[Any]) -> None: if value is None: self._config.remove_option(CommandConfigType.section_name(), option.value) else: self._command_config[option.value] = str(value) def _set_git_option(self, option: GitConfigType, value: Optional[Any]) -> None: if value is None: self._config.remove_option(GitConfigType.section_name(), option.value) else: self._git_config[option.value] = str(value) def _validate_config_projects_defined(self, project_options: Tuple[str, ...]) -> None: """Validate all projects were defined in clowder yaml file :param Tuple[str, ...] project_options: Projects to validate against :raise UnknownProjectError: """ if self.projects is None: return for project in self.projects: if project not in project_options: message = f"{fmt.path(ENVIRONMENT.clowder_config)}\n" \ f"Clowder config file appears to be invalid" \ f"Unknown project {fmt.project_name(project)}" raise UnknownProjectError(message) # FIXME: Assemble all undefined projects in message rather than raising on first instance not found
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import os
import StringIO

from hashlib import md5

from swift.common import direct_client
from swiftclient import json_loads


def mock_http_connect(status, fake_headers=None, body=None):
    """Build a stand-in for direct_client.http_connect.

    Returns a callable producing FakeConn objects that report ``status``,
    serve ``body`` from read(), and — when ``fake_headers`` is a list —
    record the headers of POST requests into it for later inspection.
    """

    class FakeConn(object):

        def __init__(self, status, fake_headers, body, *args, **kwargs):
            self.status = status
            self.reason = 'Fake'
            self.body = body
            self.host = args[0]
            self.port = args[1]
            self.method = args[4]
            self.path = args[5]
            self.with_exc = False
            self.headers = kwargs.get('headers', {})
            self.fake_headers = fake_headers
            self.etag = md5()

        def getresponse(self):
            if self.with_exc:
                raise Exception('test')
            # Capture the headers sent with a POST so tests can assert on them
            if self.fake_headers is not None and self.method == 'POST':
                self.fake_headers.append(self.headers)
            return self

        def getheader(self, header, default=None):
            return self.headers.get(header.lower(), default)

        def getheaders(self):
            if self.fake_headers is not None:
                for key in self.fake_headers:
                    self.headers.update({key: self.fake_headers[key]})
            return self.headers.items()

        def read(self):
            return self.body

        def send(self, data):
            # Track an etag over everything written, like a real server would
            self.etag.update(data)
            self.headers['etag'] = str(self.etag.hexdigest())

        def close(self):
            return

    return lambda *args, **kwargs: FakeConn(status, fake_headers, body,
                                            *args, **kwargs)


class TestDirectClient(unittest.TestCase):

    def _patch_http_connect(self, status, fake_headers=None, body=None):
        """Swap in mock_http_connect, restoring the real connector afterwards.

        Using addCleanup() guarantees restoration even when an assertion
        fails mid-test; the previous manual save/restore leaked the patch
        into subsequent tests on failure.
        """
        was_http_connector = direct_client.http_connect
        self.addCleanup(setattr, direct_client, 'http_connect',
                        was_http_connector)
        direct_client.http_connect = mock_http_connect(status, fake_headers,
                                                       body)

    def test_quote(self):
        res = direct_client.quote('123')
        assert res == '123'

        res = direct_client.quote('1&2&/3')
        assert res == '1%262%26/3'

        res = direct_client.quote('1&2&3', safe='&')
        assert res == '1&2&3'

    def test_gen_headers(self):
        # Default: only a user-agent header
        hdrs = direct_client.gen_headers()
        assert 'user-agent' in hdrs
        assert hdrs['user-agent'] == 'direct-client %s' % os.getpid()
        assert len(hdrs.keys()) == 1

        # add_ts adds an x-timestamp header
        hdrs = direct_client.gen_headers(add_ts=True)
        assert 'user-agent' in hdrs
        assert 'x-timestamp' in hdrs
        assert len(hdrs.keys()) == 2

        # Passed-in headers are preserved
        hdrs = direct_client.gen_headers(hdrs_in={'foo-bar': '47'})
        assert 'user-agent' in hdrs
        assert 'foo-bar' in hdrs
        assert hdrs['foo-bar'] == '47'
        assert len(hdrs.keys()) == 2

        # ...except user-agent, which is always overridden
        hdrs = direct_client.gen_headers(hdrs_in={'user-agent': '47'})
        assert 'user-agent' in hdrs
        assert hdrs['user-agent'] == 'direct-client %s' % os.getpid()
        assert len(hdrs.keys()) == 1

    def test_direct_get_account(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        headers = {
            'X-Account-Container-Count': '1',
            'X-Account-Object-Count': '1',
            'X-Account-Bytes-Used': '1',
            'X-Timestamp': '1234567890',
            'X-PUT-Timestamp': '1234567890'}
        body = '[{"count": 1, "bytes": 20971520, "name": "c1"}]'

        fake_headers = {}
        for header, value in headers.items():
            fake_headers[header.lower()] = value

        self._patch_http_connect(200, fake_headers, body)
        resp_headers, resp = direct_client.direct_get_account(node, part,
                                                              account)
        fake_headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(fake_headers, resp_headers)
        self.assertEqual(json_loads(body), resp)

        # A 204 response carries no listing body
        self._patch_http_connect(204, fake_headers, body)
        resp_headers, resp = direct_client.direct_get_account(node, part,
                                                              account)
        fake_headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(fake_headers, resp_headers)
        self.assertEqual([], resp)

    def test_direct_head_container(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        headers = {'key': 'value'}

        self._patch_http_connect(200, headers)
        resp = direct_client.direct_head_container(node, part, account,
                                                   container)
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp)

    def test_direct_get_container(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        headers = {'key': 'value'}
        body = '[{"hash": "8f4e3", "last_modified": "317260", "bytes": 209}]'

        self._patch_http_connect(200, headers, body)
        resp_headers, resp = (
            direct_client.direct_get_container(node, part, account,
                                               container))
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp_headers)
        self.assertEqual(json_loads(body), resp)

        # A 204 response carries no listing body
        self._patch_http_connect(204, headers, body)
        resp_headers, resp = (
            direct_client.direct_get_container(node, part, account,
                                               container))
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp_headers)
        self.assertEqual([], resp)

    def test_direct_delete_container(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'

        self._patch_http_connect(200)
        direct_client.direct_delete_container(node, part, account, container)

    def test_direct_head_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        headers = {'key': 'value'}

        self._patch_http_connect(200, headers)
        resp = direct_client.direct_head_object(node, part, account,
                                                container, name)
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp)

    def test_direct_get_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        contents = StringIO.StringIO('123456')

        self._patch_http_connect(200, body=contents)
        resp_header, obj_body = (
            direct_client.direct_get_object(node, part, account, container,
                                            name))
        self.assertEqual(obj_body, contents)

    def test_direct_post_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        headers = {'Key': 'value'}

        # mock_http_connect appends POSTed headers onto this list
        fake_headers = []
        self._patch_http_connect(200, fake_headers)
        direct_client.direct_post_object(node, part, account, container,
                                         name, headers)
        self.assertEqual(headers['Key'], fake_headers[0].get('Key'))

    def test_direct_delete_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'

        self._patch_http_connect(200)
        direct_client.direct_delete_object(node, part, account, container,
                                           name)

    def test_direct_put_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        contents = StringIO.StringIO('123456')

        self._patch_http_connect(200)
        resp = direct_client.direct_put_object(node, part, account,
                                               container, name, contents, 6)
        # With a content length, the etag covers the raw body
        self.assertEqual(md5('123456').hexdigest(), resp)

    def test_direct_put_object_fail(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        contents = StringIO.StringIO('123456')

        self._patch_http_connect(500)
        self.assertRaises(direct_client.ClientException,
                          direct_client.direct_put_object, node, part,
                          account, container, name, contents)

    def test_direct_put_object_chunked(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        contents = StringIO.StringIO('123456')

        self._patch_http_connect(200)
        resp = direct_client.direct_put_object(node, part, account,
                                               container, name, contents)
        # Without a content length the body is sent chunked, so the etag
        # covers the chunked encoding
        self.assertEqual(md5('6\r\n123456\r\n0\r\n\r\n').hexdigest(), resp)

    def test_retry(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        headers = {'key': 'value'}

        self._patch_http_connect(200, headers)
        attempts, resp = direct_client.retry(direct_client.direct_head_object,
                                             node, part, account, container,
                                             name)
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp)
        self.assertEqual(attempts, 1)


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
#
# Use the raw transactions API to spend dashs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a dashd or Dash-Qt running
# on localhost.
#
# Depends on jsonrpc
#

from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json

BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def determine_db_dir():
    """Return the default location of the Dash Core data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/DashCore/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "DashCore")
    return os.path.expanduser("~/.dashcore")

def read_bitcoin_config(dbdir):
    """Read the dash.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        """Wrap a section-less conf file so SafeConfigParser can parse it"""
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                # Strip trailing comments
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "dash.conf"))))
    return dict(config_parser.items("all"))

def connect_JSON(config):
    """Connect to a Dash Core JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19998 if testnet else 9998
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the dashd we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    # except Exception (not bare except) so the sys.exit(1) above isn't
    # swallowed and misreported as a connection error
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)

def unlock_wallet(dashd):
    """Prompt for a passphrase if the wallet is locked; return True when unlocked"""
    info = dashd.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            dashd.walletpassphrase(passphrase, 5)
        except Exception:
            sys.stderr.write("Wrong passphrase\n")

    info = dashd.getinfo()
    return int(info['unlocked_until']) > time.time()

def list_available(dashd):
    """Return {address: {"total", "outputs", "account"}} for all unspent outputs"""
    address_summary = dict()

    address_to_account = dict()
    for info in dashd.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = dashd.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = dashd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-dash-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }

    return address_summary

def select_coins(needed, inputs):
    """Greedily pick inputs until 'needed' is covered; return (outputs, change)"""
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    n = 0
    while have < needed and n < len(inputs):
        outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
        have += inputs[n]["amount"]
        n += 1
    return (outputs, have-needed)

def create_tx(dashd, fromaddresses, toaddress, amount, fee):
    """Create and sign a raw transaction; return its hex data"""
    all_coins = list_available(dashd)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to dashd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = dashd.createrawtransaction(inputs, outputs)
    signed_rawtx = dashd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata

def compute_amount_in(dashd, txinfo):
    """Sum the values of all inputs referenced by txinfo"""
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = dashd.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result

def compute_amount_out(txinfo):
    """Sum the values of all outputs of txinfo"""
    result = Decimal("0.0")
    for vout in txinfo['vout']:
        result = result + vout['value']
    return result

def sanity_test_fee(dashd, txdata_hex, max_fee):
    """Exit with an error if the transaction's implied fee is unreasonable.

    The fee is total inputs minus total outputs; it must not exceed max_fee,
    and large or tiny-amount transactions must pay at least BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = dashd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(dashd, txinfo)
        total_out = compute_amount_out(txinfo)
        # Bug fix: 'fee' was previously referenced without being defined,
        # raising NameError for transactions over 1000 bytes
        fee = total_in-total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)

def main():
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get dashs from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send dashs to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of dash.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    dashd = connect_JSON(config)

    if options.amount is None:
        address_summary = list_available(dashd)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(dashd) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(dashd, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(dashd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = dashd.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
# coding: utf-8

"""
    Cloudbreak API

    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>

    OpenAPI spec version: 2.9.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
from six import iteritems
import re


class RecipeResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type for each python attribute; to_dict iterates this.
    swagger_types = {
        'name': 'str',
        'description': 'str',
        'recipe_type': 'str',
        'content': 'str',
        'workspace': 'WorkspaceResourceResponse',
        'id': 'int'
    }

    # JSON payload key for each python attribute.
    attribute_map = {
        'name': 'name',
        'description': 'description',
        'recipe_type': 'recipeType',
        'content': 'content',
        'workspace': 'workspace',
        'id': 'id'
    }

    def __init__(self, name=None, description=None, recipe_type=None, content=None, workspace=None, id=None):
        """
        RecipeResponse - a model defined in Swagger

        recipe_type is the only required attribute: it is always assigned,
        and its setter raises ValueError when given None.  All other
        attributes are assigned only when a non-None value is supplied.
        """

        self._name = None
        self._description = None
        self._recipe_type = None
        self._content = None
        self._workspace = None
        self._id = None

        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        self.recipe_type = recipe_type
        if content is not None:
            self.content = content
        if workspace is not None:
            self.workspace = workspace
        if id is not None:
            self.id = id

    @property
    def name(self):
        """
        Gets the name of this RecipeResponse.
        name of the resource

        :return: The name of this RecipeResponse.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this RecipeResponse.
        name of the resource

        :param name: The name of this RecipeResponse.
        :type: str
        """
        # Validation mirrors the swagger spec: length 5..100, lowercase
        # alphanumeric with dashes, starting with a letter.
        if name is not None and len(name) > 100:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `100`")
        if name is not None and len(name) < 5:
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `5`")
        if name is not None and not re.search('(^[a-z][-a-z0-9]*[a-z0-9]$)', name):
            raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/(^[a-z][-a-z0-9]*[a-z0-9]$)/`")

        self._name = name

    @property
    def description(self):
        """
        Gets the description of this RecipeResponse.
        description of the resource

        :return: The description of this RecipeResponse.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this RecipeResponse.
        description of the resource

        :param description: The description of this RecipeResponse.
        :type: str
        """
        if description is not None and len(description) > 1000:
            raise ValueError("Invalid value for `description`, length must be less than or equal to `1000`")
        # NOTE(review): len() is never negative, so this lower-bound check is
        # vacuous; kept as generated from the spec's minLength of 0.
        if description is not None and len(description) < 0:
            raise ValueError("Invalid value for `description`, length must be greater than or equal to `0`")

        self._description = description

    @property
    def recipe_type(self):
        """
        Gets the recipe_type of this RecipeResponse.
        type of recipe

        :return: The recipe_type of this RecipeResponse.
        :rtype: str
        """
        return self._recipe_type

    @recipe_type.setter
    def recipe_type(self, recipe_type):
        """
        Sets the recipe_type of this RecipeResponse.
        type of recipe

        :param recipe_type: The recipe_type of this RecipeResponse.
        :type: str
        """
        # Required field: None is rejected, and the value must be one of the
        # spec's enum members.
        if recipe_type is None:
            raise ValueError("Invalid value for `recipe_type`, must not be `None`")
        allowed_values = ["PRE_AMBARI_START", "PRE_TERMINATION", "POST_AMBARI_START", "POST_CLUSTER_INSTALL"]
        if recipe_type not in allowed_values:
            raise ValueError(
                "Invalid value for `recipe_type` ({0}), must be one of {1}"
                .format(recipe_type, allowed_values)
            )

        self._recipe_type = recipe_type

    @property
    def content(self):
        """
        Gets the content of this RecipeResponse.
        content of recipe

        :return: The content of this RecipeResponse.
        :rtype: str
        """
        return self._content

    @content.setter
    def content(self, content):
        """
        Sets the content of this RecipeResponse.
        content of recipe

        :param content: The content of this RecipeResponse.
        :type: str
        """

        self._content = content

    @property
    def workspace(self):
        """
        Gets the workspace of this RecipeResponse.
        workspace of the resource

        :return: The workspace of this RecipeResponse.
        :rtype: WorkspaceResourceResponse
        """
        return self._workspace

    @workspace.setter
    def workspace(self, workspace):
        """
        Sets the workspace of this RecipeResponse.
        workspace of the resource

        :param workspace: The workspace of this RecipeResponse.
        :type: WorkspaceResourceResponse
        """

        self._workspace = workspace

    @property
    def id(self):
        """
        Gets the id of this RecipeResponse.
        id of the resource

        :return: The id of this RecipeResponse.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this RecipeResponse.
        id of the resource

        :param id: The id of this RecipeResponse.
        :type: int
        """

        self._id = id

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models (anything with to_dict) so the
        # result contains only plain python containers.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, RecipeResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
import io
import time
import hashlib
from unittest import mock

import furl
import pytest
import multidict
import aiohttpretty

from waterbutler.core import streams, exceptions
from waterbutler.core.path import WaterButlerPath

from waterbutler.providers.cloudfiles import CloudFilesProvider
# Imported under an alias because a `settings` fixture is defined below.
from waterbutler.providers.cloudfiles import settings as cloud_settings


@pytest.fixture
def auth():
    return {
        'name': 'cat',
        'email': 'cat@cat.com',
    }


@pytest.fixture
def credentials():
    return {
        'username': 'prince',
        'token': 'revolutionary',
        'region': 'iad',
    }


@pytest.fixture
def settings():
    return {'container': 'purple rain'}


@pytest.fixture
def provider(auth, credentials, settings):
    # Provider under test; not yet "connected" (no token/endpoint set).
    return CloudFilesProvider(auth, credentials, settings)


# Canned Rackspace identity response used to fake authentication.
@pytest.fixture
def auth_json():
    return {
        "access": {
            "serviceCatalog": [
                {
                    "name": "cloudFiles",
                    "type": "object-store",
                    "endpoints": [
                        {
                            "publicURL": "https://fakestorage",
                            "internalURL": "https://internal_fake_storage",
                            "region": "IAD",
                            "tenantId": "someid_123456"
                        },
                    ]
                }
            ],
            "token": {
                "RAX-AUTH:authenticatedBy": [
                    "APIKEY"
                ],
                "tenant": {
                    "name": "12345",
                    "id": "12345"
                },
                "id": "2322f6b2322f4dbfa69802baf50b0832",
                "expires": "2014-12-17T09:12:26.069Z"
            },
            "user": {
                "name": "osf-production",
                "roles": [
                    {
                        "name": "object-store:admin",
                        "id": "10000256",
                        "description": "Object Store Admin Role for Account User"
                    },
                    {
                        "name": "compute:default",
                        "description": "A Role that allows a user access to keystone Service methods",
                        "id": "6",
                        "tenantId": "12345"
                    },
                    {
                        "name": "object-store:default",
                        "description": "A Role that allows a user access to keystone Service methods",
                        "id": "5",
                        "tenantId": "some_id_12345"
                    },
                    {
                        "name": "identity:default",
                        "id": "2",
                        "description": "Default Role."
} ], "id": "secret", "RAX-AUTH:defaultRegion": "IAD" } } } @pytest.fixture def token(auth_json): return auth_json['access']['token']['id'] @pytest.fixture def endpoint(auth_json): return auth_json['access']['serviceCatalog'][0]['endpoints'][0]['publicURL'] @pytest.fixture def temp_url_key(): return 'temporary beret' @pytest.fixture def mock_auth(auth_json): aiohttpretty.register_json_uri( 'POST', settings.AUTH_URL, body=auth_json, ) @pytest.fixture def mock_temp_key(endpoint, temp_url_key): aiohttpretty.register_uri( 'HEAD', endpoint, status=204, headers={'X-Account-Meta-Temp-URL-Key': temp_url_key}, ) @pytest.fixture def mock_time(monkeypatch): mock_time = mock.Mock() mock_time.return_value = 10 monkeypatch.setattr(time, 'time', mock_time) @pytest.fixture def connected_provider(provider, token, endpoint, temp_url_key, mock_time): provider.token = token provider.endpoint = endpoint provider.temp_url_key = temp_url_key.encode() return provider @pytest.fixture def file_content(): return b'sleepy' @pytest.fixture def file_like(file_content): return io.BytesIO(file_content) @pytest.fixture def file_stream(file_like): return streams.FileStreamReader(file_like) @pytest.fixture def file_metadata(): return multidict.CIMultiDict([ ('LAST-MODIFIED', 'Thu, 25 Dec 2014 02:54:35 GMT'), ('CONTENT-LENGTH', '0'), ('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'), ('CONTENT-TYPE', 'text/html; charset=UTF-8'), ('X-TRANS-ID', 'txf876a4b088e3451d94442-00549b7c6aiad3'), ('DATE', 'Thu, 25 Dec 2014 02:54:34 GMT') ]) # Metadata Test Scenarios # / (folder_root_empty) # / (folder_root) # /level1/ (folder_root_level1) # /level1/level2/ (folder_root_level1_level2) # /level1/level2/file2.file - (file_root_level1_level2_file2_txt) # /level1_empty/ (folder_root_level1_empty) # /similar (file_similar) # /similar.name (file_similar_name) # /does_not_exist (404) # /does_not_exist/ (404) @pytest.fixture def folder_root_empty(): return [] @pytest.fixture def folder_root(): return [ { 'last_modified': 
                '2014-12-19T22:08:23.006360',
            'content_type': 'application/directory',
            'hash': 'd41d8cd98f00b204e9800998ecf8427e',
            'name': 'level1',
            'bytes': 0
        },
        {
            'subdir': 'level1/'
        },
        {
            'last_modified': '2014-12-19T23:22:23.232240',
            'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
            'hash': 'edfa12d00b779b4b37b81fe5b61b2b3f',
            'name': 'similar',
            'bytes': 190
        },
        {
            'last_modified': '2014-12-19T23:22:14.728640',
            'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
            'hash': 'edfa12d00b779b4b37b81fe5b61b2b3f',
            'name': 'similar.file',
            'bytes': 190
        },
        {
            'last_modified': '2014-12-19T23:20:16.718860',
            'content_type': 'application/directory',
            'hash': 'd41d8cd98f00b204e9800998ecf8427e',
            'name': 'level1_empty',
            'bytes': 0
        }
    ]


@pytest.fixture
def folder_root_level1():
    return [
        {
            'last_modified': '2014-12-19T22:08:26.958830',
            'content_type': 'application/directory',
            'hash': 'd41d8cd98f00b204e9800998ecf8427e',
            'name': 'level1/level2',
            'bytes': 0
        },
        {
            'subdir': 'level1/level2/'
        }
    ]


@pytest.fixture
def folder_root_level1_level2():
    return [
        {
            'name': 'level1/level2/file2.txt',
            'content_type': 'application/x-www-form-urlencoded;charset=utf-8',
            'last_modified': '2014-12-19T23:25:22.497420',
            'bytes': 1365336,
            'hash': 'ebc8cdd3f712fd39476fb921d43aca1a'
        }
    ]


# Canned HEAD-response headers for individual objects.
@pytest.fixture
def file_root_level1_level2_file2_txt():
    return multidict.CIMultiDict([
        ('ORIGIN', 'https://mycloud.rackspace.com'),
        ('CONTENT-LENGTH', '216945'),
        ('ACCEPT-RANGES', 'bytes'),
        ('LAST-MODIFIED', 'Mon, 22 Dec 2014 19:01:02 GMT'),
        ('ETAG', '44325d4f13b09f3769ede09d7c20a82c'),
        ('X-TIMESTAMP', '1419274861.04433'),
        ('CONTENT-TYPE', 'text/plain'),
        ('X-TRANS-ID', 'tx836375d817a34b558756a-0054987deeiad3'),
        ('DATE', 'Mon, 22 Dec 2014 20:24:14 GMT')
    ])


@pytest.fixture
def folder_root_level1_empty():
    return multidict.CIMultiDict([
        ('ORIGIN', 'https://mycloud.rackspace.com'),
        ('CONTENT-LENGTH', '0'),
        ('ACCEPT-RANGES', 'bytes'),
        ('LAST-MODIFIED', 'Mon, 22 Dec 2014 18:58:56 GMT'),
        ('ETAG', 'd41d8cd98f00b204e9800998ecf8427e'),
        ('X-TIMESTAMP', '1419274735.03160'),
        ('CONTENT-TYPE', 'application/directory'),
        ('X-TRANS-ID', 'txd78273e328fc4ba3a98e3-0054987eeeiad3'),
        ('DATE', 'Mon, 22 Dec 2014 20:28:30 GMT')
    ])


@pytest.fixture
def file_root_similar():
    return multidict.CIMultiDict([
        ('ORIGIN', 'https://mycloud.rackspace.com'),
        ('CONTENT-LENGTH', '190'),
        ('ACCEPT-RANGES', 'bytes'),
        ('LAST-MODIFIED', 'Fri, 19 Dec 2014 23:22:24 GMT'),
        ('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'),
        ('X-TIMESTAMP', '1419031343.23224'),
        ('CONTENT-TYPE', 'application/x-www-form-urlencoded;charset=utf-8'),
        ('X-TRANS-ID', 'tx7cfeef941f244807aec37-005498754diad3'),
        ('DATE', 'Mon, 22 Dec 2014 19:47:25 GMT')
    ])


@pytest.fixture
def file_root_similar_name():
    return multidict.CIMultiDict([
        ('ORIGIN', 'https://mycloud.rackspace.com'),
        ('CONTENT-LENGTH', '190'),
        ('ACCEPT-RANGES', 'bytes'),
        ('LAST-MODIFIED', 'Mon, 22 Dec 2014 19:07:12 GMT'),
        ('ETAG', 'edfa12d00b779b4b37b81fe5b61b2b3f'),
        ('X-TIMESTAMP', '1419275231.66160'),
        ('CONTENT-TYPE', 'application/x-www-form-urlencoded;charset=utf-8'),
        ('X-TRANS-ID', 'tx438cbb32b5344d63b267c-0054987f3biad3'),
        ('DATE', 'Mon, 22 Dec 2014 20:29:47 GMT')
    ])


# Download / upload / delete / copy behaviour against faked HTTP endpoints.
class TestCRUD:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download(self, connected_provider):
        body = b'dearly-beloved'
        path = WaterButlerPath('/lets-go-crazy')
        url = connected_provider.sign_url(path)
        aiohttpretty.register_uri('GET', url, body=body, auto_length=True)
        result = await connected_provider.download(path)
        content = await result.read()

        assert content == body

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_accept_url(self, connected_provider):
        # accept_url=True should return a signed URL rather than a stream.
        path = WaterButlerPath('/lets-go-crazy')
        url = connected_provider.sign_url(path)
        parsed_url = furl.furl(url)
        parsed_url.args['filename'] = 'lets-go-crazy'
        result = await connected_provider.download(path, accept_url=True)

        assert result == parsed_url.url

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    @pytest.mark.parametrize("display_name_arg,expected_name", [
        ('meow.txt', 'meow.txt'),
        ('', 'lets-go-crazy'),
        (None, 'lets-go-crazy'),
    ])
    async def test_download_file_with_display_name(self, connected_provider, display_name_arg, expected_name):
        # An empty/None display_name falls back to the path's own name.
        path = WaterButlerPath('/lets-go-crazy')
        url = connected_provider.sign_url(path)
        parsed_url = furl.furl(url)
        parsed_url.args['filename'] = expected_name
        result = await connected_provider.download(path, accept_url=True, display_name=display_name_arg)

        assert result == parsed_url.url

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_not_found(self, connected_provider):
        path = WaterButlerPath('/lets-go-crazy')
        url = connected_provider.sign_url(path)
        aiohttpretty.register_uri('GET', url, status=404)

        with pytest.raises(exceptions.DownloadError):
            await connected_provider.download(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload(self, connected_provider, file_content, file_stream, file_metadata):
        path = WaterButlerPath('/foo.bar')
        content_md5 = hashlib.md5(file_content).hexdigest()
        metadata_url = connected_provider.build_url(path.path)
        url = connected_provider.sign_url(path, 'PUT')
        # First HEAD 404s (file doesn't exist yet), second returns metadata.
        aiohttpretty.register_uri(
            'HEAD',
            metadata_url,
            responses=[
                {'status': 404},
                {'headers': file_metadata},
            ]
        )
        aiohttpretty.register_uri('PUT', url, status=200,
                                  headers={'ETag': '"{}"'.format(content_md5)})
        metadata, created = await connected_provider.upload(file_stream, path)

        assert created is True
        assert metadata.kind == 'file'
        assert aiohttpretty.has_call(method='PUT', uri=url)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload_check_none(self, connected_provider, file_content, file_stream, file_metadata):
        # With check_created/fetch_metadata disabled both results are None.
        path = WaterButlerPath('/foo.bar')
        content_md5 = hashlib.md5(file_content).hexdigest()
        metadata_url = connected_provider.build_url(path.path)
        url = connected_provider.sign_url(path, 'PUT')
        aiohttpretty.register_uri(
            'HEAD',
            metadata_url,
            responses=[
                {'status': 404},
                {'headers': file_metadata},
            ]
        )
        aiohttpretty.register_uri('PUT', url, status=200,
                                  headers={'ETag': '"{}"'.format(content_md5)})
        metadata, created = await connected_provider.upload(
            file_stream, path, check_created=False, fetch_metadata=False)

        assert created is None
        assert metadata is None
        assert aiohttpretty.has_call(method='PUT', uri=url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_upload_checksum_mismatch(self, connected_provider, file_stream, file_metadata):
        # Server-reported ETag not matching the uploaded MD5 must raise.
        path = WaterButlerPath('/foo.bar')
        metadata_url = connected_provider.build_url(path.path)
        url = connected_provider.sign_url(path, 'PUT')
        aiohttpretty.register_uri(
            'HEAD',
            metadata_url,
            responses=[
                {'status': 404},
                {'headers': file_metadata},
            ]
        )
        aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"Bogus MD5"'})

        with pytest.raises(exceptions.UploadChecksumMismatchError):
            await connected_provider.upload(file_stream, path)

        assert aiohttpretty.has_call(method='PUT', uri=url)
        assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)

    # @pytest.mark.asyncio
    # @pytest.mark.aiohttpretty
    # async def test_delete_folder(self, connected_provider, folder_root_empty, file_metadata):
    #     # This test will probably fail on a live
    #     # version of the provider because build_url is called wrong.
    #     # Will comment out parts of this test till that is fixed.
    #     path = WaterButlerPath('/delete/')
    #     query = {'prefix': path.path}
    #     url = connected_provider.build_url('', **query)
    #     body = json.dumps(folder_root_empty).encode('utf-8')
    #     delete_query = {'bulk-delete': ''}
    #     delete_url = connected_provider.build_url('', **delete_query)
    #     file_url = connected_provider.build_url(path.path)
    #     aiohttpretty.register_uri('GET', url, body=body)
    #     aiohttpretty.register_uri('HEAD', file_url, headers=file_metadata)
    #     aiohttpretty.register_uri('DELETE', delete_url)
    #     await connected_provider.delete(path)
    #     assert aiohttpretty.has_call(method='DELETE', uri=delete_url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_delete_file(self, connected_provider):
        path = WaterButlerPath('/delete.file')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('DELETE', url, status=204)
        await connected_provider.delete(path)

        assert aiohttpretty.has_call(method='DELETE', uri=url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_intra_copy(self, connected_provider, file_metadata):
        src_path = WaterButlerPath('/delete.file')
        dest_path = WaterButlerPath('/folder1/delete.file')
        dest_url = connected_provider.build_url(dest_path.path)
        aiohttpretty.register_uri('HEAD', dest_url, headers=file_metadata)
        aiohttpretty.register_uri('PUT', dest_url, status=201)
        result = await connected_provider.intra_copy(connected_provider, src_path, dest_path)

        assert result[0].path == '/folder1/delete.file'
        assert result[0].name == 'delete.file'
        assert result[0].etag == 'edfa12d00b779b4b37b81fe5b61b2b3f'


# Listing and per-object metadata, following the scenario map above.
class TestMetadata:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root_empty(self, connected_provider, folder_root_empty):
        path = WaterButlerPath('/')
        body = folder_root_empty
        url = connected_provider.build_url(path.path, prefix=path.path, delimiter='/')
        aiohttpretty.register_json_uri('GET', url, status=200, body=body)
        result = await connected_provider.metadata(path)

        assert len(result) == 0
        assert result == []

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root(self, connected_provider, folder_root):
        path = WaterButlerPath('/')
        body = folder_root
        url = connected_provider.build_url('', prefix=path.path, delimiter='/')
        aiohttpretty.register_json_uri('GET', url, status=200, body=body)
        result = await connected_provider.metadata(path)

        # The bare 'subdir' entry duplicates 'level1' and is not re-listed.
        assert len(result) == 4
        assert result[0].name == 'level1'
        assert result[0].path == '/level1/'
        assert result[0].kind == 'folder'
        assert result[1].name == 'similar'
        assert result[1].path == '/similar'
        assert result[1].kind == 'file'
        assert result[2].name == 'similar.file'
        assert result[2].path == '/similar.file'
        assert result[2].kind == 'file'
        assert result[3].name == 'level1_empty'
        assert result[3].path == '/level1_empty/'
        assert result[3].kind == 'folder'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root_level1(self, connected_provider, folder_root_level1):
        path = WaterButlerPath('/level1/')
        body = folder_root_level1
        url = connected_provider.build_url('', prefix=path.path, delimiter='/')
        aiohttpretty.register_json_uri('GET', url, status=200, body=body)
        result = await connected_provider.metadata(path)

        assert len(result) == 1
        assert result[0].name == 'level2'
        assert result[0].path == '/level1/level2/'
        assert result[0].kind == 'folder'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root_level1_level2(self, connected_provider, folder_root_level1_level2):
        path = WaterButlerPath('/level1/level2/')
        body = folder_root_level1_level2
        url = connected_provider.build_url('', prefix=path.path, delimiter='/')
        aiohttpretty.register_json_uri('GET', url, status=200, body=body)
        result = await connected_provider.metadata(path)

        assert len(result) == 1
        assert result[0].name == 'file2.txt'
        assert result[0].path == '/level1/level2/file2.txt'
        assert result[0].kind == 'file'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_root_level1_level2_file2_txt(self, connected_provider, file_root_level1_level2_file2_txt):
        path = WaterButlerPath('/level1/level2/file2.txt')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_level1_level2_file2_txt)
        result = await connected_provider.metadata(path)

        assert result.name == 'file2.txt'
        assert result.path == '/level1/level2/file2.txt'
        assert result.kind == 'file'
        assert result.content_type == 'text/plain'
        assert result.extra == {'hashes': {'md5': '44325d4f13b09f3769ede09d7c20a82c'}}

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root_level1_empty(self, connected_provider, folder_root_level1_empty):
        # An existing-but-empty folder lists as [] (the HEAD confirms it exists).
        path = WaterButlerPath('/level1_empty/')
        folder_url = connected_provider.build_url('', prefix=path.path, delimiter='/')
        folder_body = []
        file_url = connected_provider.build_url(path.path.rstrip('/'))
        aiohttpretty.register_json_uri('GET', folder_url, status=200, body=folder_body)
        aiohttpretty.register_uri('HEAD', file_url, status=200, headers=folder_root_level1_empty)
        result = await connected_provider.metadata(path)

        assert result == []

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_root_similar(self, connected_provider, file_root_similar):
        path = WaterButlerPath('/similar')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_similar)
        result = await connected_provider.metadata(path)

        assert result.name == 'similar'
        assert result.path == '/similar'
        assert result.kind == 'file'
        assert result.extra == {'hashes': {'md5': 'edfa12d00b779b4b37b81fe5b61b2b3f'}}

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_root_similar_name(self, connected_provider, file_root_similar_name):
        path = WaterButlerPath('/similar.file')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('HEAD', url, status=200, headers=file_root_similar_name)
        result = await connected_provider.metadata(path)

        assert result.name == 'similar.file'
        assert result.path == '/similar.file'
        assert result.kind == 'file'
        assert result.extra == {'hashes': {'md5': 'edfa12d00b779b4b37b81fe5b61b2b3f'}}

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_does_not_exist(self, connected_provider):
        path = WaterButlerPath('/does_not.exist')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('HEAD', url, status=404)

        with pytest.raises(exceptions.MetadataError):
            await connected_provider.metadata(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_does_not_exist(self, connected_provider):
        path = WaterButlerPath('/does_not_exist/')
        folder_url = connected_provider.build_url('', prefix=path.path, delimiter='/')
        folder_body = []
        file_url = connected_provider.build_url(path.path.rstrip('/'))
        aiohttpretty.register_json_uri('GET', folder_url, status=200, body=folder_body)
        aiohttpretty.register_uri('HEAD', file_url, status=404)

        with pytest.raises(exceptions.MetadataError):
            await connected_provider.metadata(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_bad_content_type(self, connected_provider, file_metadata):
        # A file path whose HEAD reports a directory content type must error.
        item = file_metadata
        item['Content-Type'] = 'application/directory'
        path = WaterButlerPath('/does_not.exist')
        url = connected_provider.build_url(path.path)
        aiohttpretty.register_uri('HEAD', url, headers=item)

        with pytest.raises(exceptions.MetadataError):
            await connected_provider.metadata(path)


class TestV1ValidatePath:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_v1_validate_path(self, connected_provider):
        path = '/ab4x3'
        result = await connected_provider.validate_v1_path(path)

        assert result.path == path.strip('/')


# Connection bootstrap and capability flags.
class TestOperations:

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_ensure_connection(self, provider, auth_json, mock_temp_key):
        token_url = cloud_settings.AUTH_URL
        aiohttpretty.register_json_uri('POST', token_url, body=auth_json)

        await provider._ensure_connection()
        assert aiohttpretty.has_call(method='POST', uri=token_url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_ensure_connection_not_public(self, provider, auth_json, temp_url_key):
        # use_public=False should route through the internal endpoint.
        token_url = cloud_settings.AUTH_URL
        provider.use_public = False
        internal_endpoint = "https://internal_fake_storage"
        aiohttpretty.register_json_uri('POST', token_url, body=auth_json)
        aiohttpretty.register_uri(
            'HEAD',
            internal_endpoint,
            status=204,
            headers={'X-Account-Meta-Temp-URL-Key': temp_url_key},
        )

        await provider._ensure_connection()
        assert aiohttpretty.has_call(method='POST', uri=token_url)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_ensure_connection_bad_url(self, provider, auth_json, endpoint):
        # Missing temp-URL-key header is surfaced as a 503 ProviderError.
        token_url = cloud_settings.AUTH_URL
        aiohttpretty.register_json_uri('POST', token_url, body=auth_json)
        aiohttpretty.register_uri(
            'HEAD',
            endpoint,
            status=204,
            headers={'bad': 'yes'}
        )

        with pytest.raises(exceptions.ProviderError) as e:
            await provider._ensure_connection()

        assert e.value.code == 503
        assert aiohttpretty.has_call(method='POST', uri=token_url)
        assert aiohttpretty.has_call(method='HEAD', uri=endpoint)

    def test_can_duplicate_names(self, connected_provider):
        assert connected_provider.can_duplicate_names() is False

    def test_can_intra_copy(self, connected_provider):
        assert connected_provider.can_intra_copy(connected_provider)

    def test_can_intra_move(self, connected_provider):
        assert connected_provider.can_intra_move(connected_provider)
########################################################################## # # Copyright (c) 2011, John Haddon. All rights reserved. # Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import GafferUI QtGui = GafferUI._qtImport( "QtGui" ) QtCore = GafferUI._qtImport( "QtCore" ) ## The ListContainer holds a series of Widgets either in a column or a row. 
# It attempts to provide a list like interface for manipulation of the widgets.
class ListContainer( GafferUI.ContainerWidget ) :

	# Layout direction chosen at construction time.
	Orientation = IECore.Enum.create( "Vertical", "Horizontal" )
	HorizontalAlignment = GafferUI.Enums.HorizontalAlignment
	VerticalAlignment = GafferUI.Enums.VerticalAlignment

	def __init__( self, orientation=Orientation.Vertical, spacing=0, borderWidth=0, **kw ) :

		GafferUI.ContainerWidget.__init__( self, QtGui.QWidget(), **kw )

		# A QVBoxLayout or QHBoxLayout backs the container depending on
		# orientation; spacing/borderWidth map directly to Qt layout settings.
		if orientation==self.Orientation.Vertical :
			self.__qtLayout = QtGui.QVBoxLayout()
		else :
			self.__qtLayout = QtGui.QHBoxLayout()

		self.__qtLayout.setSpacing( spacing )
		self.__qtLayout.setContentsMargins( borderWidth, borderWidth, borderWidth, borderWidth )
		self.__qtLayout.setSizeConstraint( QtGui.QLayout.SetMinAndMaxSize )

		self._qtWidget().setLayout( self.__qtLayout )

		self.__orientation = orientation
		# Python-side record of children, kept in sync with the Qt layout.
		self.__widgets = []

	def orientation( self ) :

		return self.__orientation

	# Appends child to the end of the container, reparenting it away from any
	# previous parent. expand maps to a Qt stretch factor of 1.
	def append( self, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :

		assert( isinstance( child, GafferUI.Widget ) )

		oldParent = child.parent()
		if oldParent is not None :
			oldParent.removeChild( child )

		self.__widgets.append( child )
		stretch = 1 if expand else 0
		self.__qtLayout.addWidget( child._qtWidget(), stretch, self.__convertToQtAlignment( horizontalAlignment, verticalAlignment ) )
		child._applyVisibility()

	def remove( self, child ) :

		self.removeChild( child )

	# Inserts child at index; an index beyond the end is clamped to append.
	def insert( self, index, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :

		l = len( self.__widgets )
		if index > l :
			index = l

		oldParent = child.parent()
		if oldParent is not None :
			oldParent.removeChild( child )

		self.__widgets.insert( index, child )
		stretch = 1 if expand else 0
		self.__qtLayout.insertWidget( index, child._qtWidget(), stretch, self.__convertToQtAlignment( horizontalAlignment, verticalAlignment ) )
		child._applyVisibility()

	def index( self, child ) :

		return self.__widgets.index( child )

	# Supports both single-index and slice assignment; preserves the expand
	# (stretch) state of the slots being replaced.
	def __setitem__( self, index, child ) :

		# Shortcut if there would be no change. Rearranging
		# things in Qt is extremely costly and this test is
		# trivial in comparison, so this is well worth doing.
		if self.__widgets[index] == child :
			return

		if isinstance( index, slice ) :
			assert( isinstance( child, list ) )
			children = child
			insertionIndex = index.start if index.start is not None else 0
		else :
			children = [ child ]
			insertionIndex = index

		expands = []
		for i in range( insertionIndex, insertionIndex + len( children ) ) :
			if i < len( self ) :
				expands.append( self.__qtLayout.stretch( i ) > 0 )
			else :
				expands.append( False )

		del self[index]

		# It's very important that we insert widgets in the order in which
		# they are to appear visually, because qt will define the tab-focus
		# chain order based on order of insertion, and not based on the order
		# of visual appearance. It's still possible to make several calls to
		# __setitem__ out of sequence and end up with bad focus orders, but
		# at least this way a slice set at one time will be in the correct order.
		#
		# Investigation into a method of achieving perfect ordering all the
		# time didn't yield anything better than this. One attempt called
		# setTabOrder() for every child, starting with the last child - this
		# worked when the children of the ListContainer where childless,
		# but not when they had children. Another possibility was to reimplement
		# QWidget.focusNextPrevChild() at the GafferUI.Window level, iterating
		# through the focus chain as for QApplicationPrivate::focusNextPrevChild_helper(),
		# but using knowledge of Container order to override the sequence where
		# necessary. This seemed like it might have promise, but is not straightforward.
		for i in range( 0, len( children ) ) :
			self.insert( insertionIndex + i, children[i], expands[i] )

	def __getitem__( self, index ) :

		return self.__widgets[index]

	# Detaches the indexed widget(s) from Qt before dropping our reference.
	def __delitem__( self, index ) :

		if isinstance( index, slice ) :
			indices = range( *(index.indices( len( self ) )) )
			for i in indices :
				self[i]._qtWidget().setParent( None )
				self[i]._applyVisibility()
			del self.__widgets[index]
		else :
			self.__widgets[index]._qtWidget().setParent( None )
			self.__widgets[index]._applyVisibility()
			del self.__widgets[index]

	def __len__( self ) :

		return len( self.__widgets )

	# Combines the optional horizontal and vertical alignments into a single
	# Qt alignment flag; None contributes no bits.
	def __convertToQtAlignment( self, horizontalAlignment, verticalAlignment):

		if not horizontalAlignment and not verticalAlignment:
			return QtCore.Qt.Alignment( 0 )

		if verticalAlignment:
			qtVerticalAlignment = GafferUI.VerticalAlignment._toQt( verticalAlignment )
		else:
			qtVerticalAlignment = QtCore.Qt.Alignment( 0 )

		if horizontalAlignment:
			qtHorizontalAlignment = GafferUI.HorizontalAlignment._toQt( horizontalAlignment )
		else:
			qtHorizontalAlignment = QtCore.Qt.Alignment( 0 )

		return qtHorizontalAlignment | qtVerticalAlignment

	def addSpacer( self, width=0, height=0, expand=False, horizontalAlignment=None, verticalAlignment=None):

		self.append( GafferUI.Spacer( IECore.V2i( width, height ) ), expand=expand, horizontalAlignment=horizontalAlignment, verticalAlignment=verticalAlignment )

	def addChild( self, child, expand=False, horizontalAlignment=None, verticalAlignment=None ) :

		self.append( child, expand=expand, horizontalAlignment=horizontalAlignment, verticalAlignment=verticalAlignment )

	def removeChild( self, child ) :

		self.__widgets.remove( child )
		child._qtWidget().setParent( None )
		child._applyVisibility()

	def setExpand( self, child, expand ) :

		self.__qtLayout.setStretchFactor( child._qtWidget(), 1 if expand else 0 )

	def getExpand( self, child ) :

		stretch = self.__qtLayout.stretch( self.index( child ) )
		return stretch > 0
"""The tests for the MQTT component.""" from collections import namedtuple import unittest from unittest import mock import socket import voluptuous as vol from homeassistant.bootstrap import _setup_component import homeassistant.components.mqtt as mqtt from homeassistant.const import ( EVENT_CALL_SERVICE, ATTR_DOMAIN, ATTR_SERVICE, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP) from tests.common import ( get_test_home_assistant, mock_mqtt_component, fire_mqtt_message) class TestMQTT(unittest.TestCase): """Test the MQTT component.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant(1) mock_mqtt_component(self.hass) self.calls = [] def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() def record_calls(self, *args): """Helper for recording calls.""" self.calls.append(args) def test_client_starts_on_home_assistant_start(self): """"Test if client start on HA launch.""" self.hass.bus.fire(EVENT_HOMEASSISTANT_START) self.hass.block_till_done() self.assertTrue(mqtt.MQTT_CLIENT.start.called) def test_client_stops_on_home_assistant_start(self): """Test if client stops on HA launch.""" self.hass.bus.fire(EVENT_HOMEASSISTANT_START) self.hass.block_till_done() self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP) self.hass.block_till_done() self.assertTrue(mqtt.MQTT_CLIENT.stop.called) def test_setup_fails_if_no_connect_broker(self): """Test for setup failure if connection to broker is missing.""" with mock.patch('homeassistant.components.mqtt.MQTT', side_effect=socket.error()): self.hass.config.components = [] assert not _setup_component(self.hass, mqtt.DOMAIN, { mqtt.DOMAIN: { mqtt.CONF_BROKER: 'test-broker', } }) def test_setup_protocol_validation(self): """Test for setup failure if connection to broker is missing.""" with mock.patch('paho.mqtt.client.Client'): self.hass.config.components = [] assert _setup_component(self.hass, 
mqtt.DOMAIN, { mqtt.DOMAIN: { mqtt.CONF_BROKER: 'test-broker', mqtt.CONF_PROTOCOL: 3.1, } }) def test_publish_calls_service(self): """Test the publishing of call to services.""" self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls) mqtt.publish(self.hass, 'test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual( 'test-topic', self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC]) self.assertEqual( 'test-payload', self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD]) def test_service_call_without_topic_does_not_publish(self): """Test the service call if topic is missing.""" self.hass.bus.fire(EVENT_CALL_SERVICE, { ATTR_DOMAIN: mqtt.DOMAIN, ATTR_SERVICE: mqtt.SERVICE_PUBLISH }) self.hass.block_till_done() self.assertTrue(not mqtt.MQTT_CLIENT.publish.called) def test_service_call_with_template_payload_renders_template(self): """Test the service call with rendered template. If 'payload_template' is provided and 'payload' is not, then render it. """ mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}") self.hass.block_till_done() self.assertTrue(mqtt.MQTT_CLIENT.publish.called) self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], "2") def test_service_call_with_payload_doesnt_render_template(self): """Test the service call with unrendered template. If both 'payload' and 'payload_template' are provided then fail. """ payload = "not a template" payload_template = "a template" self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, { mqtt.ATTR_TOPIC: "test/topic", mqtt.ATTR_PAYLOAD: payload, mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template }, blocking=True) self.assertFalse(mqtt.MQTT_CLIENT.publish.called) def test_service_call_with_ascii_qos_retain_flags(self): """Test the service call with args that can be misinterpreted. Empty payload message and ascii formatted qos and retain flags. 
""" self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, { mqtt.ATTR_TOPIC: "test/topic", mqtt.ATTR_PAYLOAD: "", mqtt.ATTR_QOS: '2', mqtt.ATTR_RETAIN: 'no' }, blocking=True) self.assertTrue(mqtt.MQTT_CLIENT.publish.called) self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][2], 2) self.assertFalse(mqtt.MQTT_CLIENT.publish.call_args[0][3]) def test_subscribe_topic(self): """Test the subscription of a topic.""" unsub = mqtt.subscribe(self.hass, 'test-topic', self.record_calls) fire_mqtt_message(self.hass, 'test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual('test-topic', self.calls[0][0]) self.assertEqual('test-payload', self.calls[0][1]) unsub() fire_mqtt_message(self.hass, 'test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) def test_subscribe_topic_not_match(self): """Test if subscribed topic is not a match.""" mqtt.subscribe(self.hass, 'test-topic', self.record_calls) fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(0, len(self.calls)) def test_subscribe_topic_level_wildcard(self): """Test the subscription of wildcard topics.""" mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls) fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual('test-topic/bier/on', self.calls[0][0]) self.assertEqual('test-payload', self.calls[0][1]) def test_subscribe_topic_level_wildcard_no_subtree_match(self): """Test the subscription of wildcard topics.""" mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls) fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload') self.hass.block_till_done() self.assertEqual(0, len(self.calls)) def test_subscribe_topic_subtree_wildcard_subtree_topic(self): """Test the subscription of wildcard topics.""" mqtt.subscribe(self.hass, 'test-topic/#', 
self.record_calls) fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual('test-topic/bier/on', self.calls[0][0]) self.assertEqual('test-payload', self.calls[0][1]) def test_subscribe_topic_subtree_wildcard_root_topic(self): """Test the subscription of wildcard topics.""" mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls) fire_mqtt_message(self.hass, 'test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual('test-topic', self.calls[0][0]) self.assertEqual('test-payload', self.calls[0][1]) def test_subscribe_topic_subtree_wildcard_no_match(self): """Test the subscription of wildcard topics.""" mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls) fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload') self.hass.block_till_done() self.assertEqual(0, len(self.calls)) class TestMQTTCallbacks(unittest.TestCase): """Test the MQTT callbacks.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant(1) # mock_mqtt_component(self.hass) with mock.patch('paho.mqtt.client.Client'): self.hass.config.components = [] assert _setup_component(self.hass, mqtt.DOMAIN, { mqtt.DOMAIN: { mqtt.CONF_BROKER: 'mock-broker', } }) def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() def test_receiving_mqtt_message_fires_hass_event(self): """Test if receiving triggers an event.""" calls = [] def record(event): """Helper to record calls.""" calls.append(event) self.hass.bus.listen_once(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, record) MQTTMessage = namedtuple('MQTTMessage', ['topic', 'qos', 'payload']) message = MQTTMessage('test_topic', 1, 'Hello World!'.encode('utf-8')) mqtt.MQTT_CLIENT._mqtt_on_message(None, {'hass': self.hass}, message) self.hass.block_till_done() self.assertEqual(1, 
len(calls)) last_event = calls[0] self.assertEqual('Hello World!', last_event.data['payload']) self.assertEqual(message.topic, last_event.data['topic']) self.assertEqual(message.qos, last_event.data['qos']) def test_mqtt_failed_connection_results_in_disconnect(self): """Test if connection failure leads to disconnect.""" for result_code in range(1, 6): mqtt.MQTT_CLIENT._mqttc = mock.MagicMock() mqtt.MQTT_CLIENT._mqtt_on_connect(None, {'topics': {}}, 0, result_code) self.assertTrue(mqtt.MQTT_CLIENT._mqttc.disconnect.called) def test_mqtt_subscribes_topics_on_connect(self): """Test subscription to topic on connect.""" from collections import OrderedDict prev_topics = OrderedDict() prev_topics['topic/test'] = 1, prev_topics['home/sensor'] = 2, prev_topics['still/pending'] = None mqtt.MQTT_CLIENT.topics = prev_topics mqtt.MQTT_CLIENT.progress = {1: 'still/pending'} # Return values for subscribe calls (rc, mid) mqtt.MQTT_CLIENT._mqttc.subscribe.side_effect = ((0, 2), (0, 3)) mqtt.MQTT_CLIENT._mqtt_on_connect(None, None, 0, 0) self.assertFalse(mqtt.MQTT_CLIENT._mqttc.disconnect.called) expected = [(topic, qos) for topic, qos in prev_topics.items() if qos is not None] self.assertEqual( expected, [call[1] for call in mqtt.MQTT_CLIENT._mqttc.subscribe.mock_calls]) self.assertEqual({ 1: 'still/pending', 2: 'topic/test', 3: 'home/sensor', }, mqtt.MQTT_CLIENT.progress) def test_mqtt_disconnect_tries_no_reconnect_on_stop(self): """Test the disconnect tries.""" mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 0) self.assertFalse(mqtt.MQTT_CLIENT._mqttc.reconnect.called) @mock.patch('homeassistant.components.mqtt.time.sleep') def test_mqtt_disconnect_tries_reconnect(self, mock_sleep): """Test the re-connect tries.""" mqtt.MQTT_CLIENT.topics = { 'test/topic': 1, 'test/progress': None } mqtt.MQTT_CLIENT.progress = { 1: 'test/progress' } mqtt.MQTT_CLIENT._mqttc.reconnect.side_effect = [1, 1, 1, 0] mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 1) 
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.reconnect.called) self.assertEqual(4, len(mqtt.MQTT_CLIENT._mqttc.reconnect.mock_calls)) self.assertEqual([1, 2, 4], [call[1][0] for call in mock_sleep.mock_calls]) self.assertEqual({'test/topic': 1}, mqtt.MQTT_CLIENT.topics) self.assertEqual({}, mqtt.MQTT_CLIENT.progress) def test_invalid_mqtt_topics(self): self.assertRaises(vol.Invalid, mqtt.valid_publish_topic, 'bad+topic') self.assertRaises(vol.Invalid, mqtt.valid_subscribe_topic, 'bad\0one')
# vi: ts=8 sts=4 sw=4 et
#
# control.py: form controls
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $

import re
import time
import decimal
import datetime

from draco2.locale.locale import tr
from draco2.util.http import FileUpload
from draco2.form.exception import *


class Control(object):
    """Base class for all form controls.

    A control converts between the string representation used in form
    arguments and the Python representation via parse()/unparse().
    """

    name = None
    label = None

    def __init__(self, form):
        self.m_form = form

    def parse(self, args):
        """Parse `args' to Python format."""
        raise NotImplementedError

    def unparse(self, object):
        """Parse `object' to string format."""
        raise NotImplementedError


class ScalarControl(Control):
    """A form control that reads one single value from a form and
    outputs that single value.
    """

    type = None
    default = None
    nullok = False
    null_value = ''

    def parse(self, args):
        """Parse `args' to Python format.

        Raises FormError when the value is not a scalar, is missing and
        no default is set, or cannot be converted to `type'.
        """
        value = args.get(self.name, '')
        if not isinstance(value, basestring):
            m = tr('Expecting scalar value for field: %s') % tr(self.label)
            fields = [self.name]
            raise FormError(m, fields)
        if self.nullok and value == self.null_value:
            value = None
        elif not value:
            if self.default is None:
                m = tr('A value is required for field: %s') % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
            value = self.default
        if value is not None and self.type is not None:
            try:
                value = self.type(value)
            except (ValueError, decimal.InvalidOperation):
                # BUGFIX: decimal.Decimal raises InvalidOperation (not
                # ValueError) on malformed input; without catching it a bad
                # value in a DecimalControl crashed instead of raising a
                # FormError like the other controls.
                m = tr('Illegal value for field: %s') % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
        result = {}
        result[self.name] = value
        return result

    def unparse(self, object):
        """Parse `object' to string format."""
        value = object[self.name]
        if value is None:
            if self.nullok:
                value = self.null_value
            else:
                # Be lenient when unparsing null values. A null value here
                # means an incomplete db record, which this form is possibly
                # trying to address.
                value = ''
        else:
            value = str(value)
        result = {}
        result[self.name] = value
        return result


class BooleanControl(ScalarControl):
    """Boolean control.

    This control accepts the strings 'true' and 'false' as its input
    (plus the null value when `nullok' is set).
    """

    def parse(self, args):
        """Parse `args' to Python format."""
        data = super(BooleanControl, self).parse(args)
        value = data[self.name]
        if value == 'true':
            value = True
        elif value == 'false':
            value = False
        elif value is None:
            value = None
        else:
            m = tr('Illegal value for field: %s') % tr(self.label)
            fields = [self.name]
            raise FormError(m, fields)
        data[self.name] = value
        return data

    def unparse(self, object):
        """Parse `object' to string format ('true'/'false')."""
        value = object[self.name]
        if value is None:
            if self.nullok:
                value = self.null_value
            else:
                value = ''
        else:
            value = str(value).lower()
        result = {}
        result[self.name] = value
        return result


class NumericControl(ScalarControl):
    """Numeric control.

    This control is the base class for all numeric controls. It adds
    optional `minval'/`maxval' range validation.
    """

    minval = None
    maxval = None

    def parse(self, args):
        """Parse `args' to Python format."""
        result = super(NumericControl, self).parse(args)
        value = result[self.name]
        if value is not None:
            if self.minval is not None and value < self.minval:
                m = tr('Value of field %s is too low (minimum value is %s).') \
                        % (tr(self.label), self.minval)
                fields = [self.name]
                raise FormError(m, fields)
            elif self.maxval is not None and value > self.maxval:
                # BUGFIX: the message interpolated self.minval here, telling
                # the user the minimum instead of the maximum value.
                m = tr('Value of field %s is too high (maximum value is %s).') \
                        % (tr(self.label), self.maxval)
                fields = [self.name]
                raise FormError(m, fields)
        return result


class IntegerControl(NumericControl):
    """Integer control.

    This control accepts a regular integer as its input.
    """

    type = int


class DecimalControl(NumericControl):
    """Decimal control.

    This control accepts an arbitrary precision decimal number as its
    input.
    """

    type = decimal.Decimal


class FloatControl(NumericControl):
    """Floating point control.

    This control accepts a floating point number as its input.
    """

    type = float


class CharacterControl(ScalarControl):
    """Base class for text controls."""

    minlen = None
    maxlen = None
    strip = None  # compiled regex whose matches are removed before validation
    regex = None  # compiled regex the (stripped) value must match

    def parse(self, args):
        """Parse `args' to Python format."""
        data = super(CharacterControl, self).parse(args)
        value = data[self.name]
        if value is not None:
            if self.strip is not None:
                value = self.strip.sub('', value)
                data[self.name] = value
            if self.minlen is not None and len(value) < self.minlen:
                m = tr('Input for field %s is too short (minimum length is %d).') \
                        % (tr(self.label), self.minlen)
                fields = [self.name]
                raise FormError(m, fields)
            if self.maxlen is not None and len(value) > self.maxlen:
                m = tr('Input for field %s is too long (maximum length is %d).') \
                        % (tr(self.label), self.maxlen)
                fields = [self.name]
                raise FormError(m, fields)
            if self.regex is not None and not self.regex.match(value):
                m = tr('The input for field %s does not match the required ' \
                       'format.') % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
        return data

    def unparse(self, object):
        """Parse `object' to string format."""
        value = object[self.name]
        if value is None:
            if self.nullok:
                value = self.null_value
            else:
                value = ''
        else:
            value = unicode(value)
        result = {}
        result[self.name] = value
        return result


class StringControl(CharacterControl):
    """A control that accepts a single-line of text."""

    re_eol = re.compile('[\r\n]')

    def parse(self, args):
        """Parse `args', rejecting embedded end-of-line characters."""
        data = super(StringControl, self).parse(args)
        value = data.get(self.name)
        if value is not None:
            value = value.strip()
            if self.re_eol.search(value):
                m = tr('Illegal value for field %s (no end-of-lines allowed).') \
                        % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
            data[self.name] = value
        return data


class TextControl(CharacterControl):
    """A control that accepts an arbitrary amount of text."""


class EnumControl(StringControl):
    """Enumerated control.

    This is a string control that only accepts values from a limited
    set of possibilities.
    """

    values = []

    def parse(self, args):
        """Parse `args' to Python format."""
        data = super(EnumControl, self).parse(args)
        value = data[self.name]
        if value is not None and value not in self.values:
            m = tr('Illegal value for field: %s.') % tr(self.label)
            fields = [self.name]
            raise FormError(m, fields)
        return data


class DateTimeBaseControl(ScalarControl):
    """Date base control.

    This is a base class for all date, time and datetime controls.
    Subclasses define `formats' (accepted strptime formats, the first
    of which is used for output), `date_type' and `tm_slice'.
    """

    formats = None
    date_type = None
    tm_slice = None

    def parse(self, args):
        """Parse `args' to Python format."""
        data = super(DateTimeBaseControl, self).parse(args)
        value = data[self.name]
        if value is not None:
            # Try each accepted format in turn; first match wins.
            for fmt in self.formats:
                try:
                    tm = time.strptime(value, fmt)
                    break
                except ValueError:
                    pass
            else:
                m = tr('Illegal format for field: %s') % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
            value = self.date_type(*tuple(tm)[self.tm_slice])
        data[self.name] = value
        return data

    def unparse(self, object):
        """Parse `object' to string format using the primary format."""
        value = object[self.name]
        if value is None:
            if self.nullok:
                value = self.null_value
            else:
                value = ''
        else:
            value = value.strftime(self.formats[0])
        result = {}
        result[self.name] = value
        return result


class DateControl(DateTimeBaseControl):
    """Date control.

    This control accepts a date as its input.
    """

    formats = ('%Y-%m-%d', '%y-%m-%d')
    date_type = datetime.date
    tm_slice = slice(0, 3)


class TimeControl(DateTimeBaseControl):
    """Time control.

    This control accepts a time as its input.
    """

    formats = ('%H:%M:%S',)
    date_type = datetime.time
    tm_slice = slice(3, 6)


class DateTimeControl(DateTimeBaseControl):
    """Date/Time control.

    This control accepts a date/time as its input.
    """

    formats = ('%Y-%m-%d %H:%M:%S',)
    date_type = datetime.datetime
    tm_slice = slice(0, 6)


class IntervalControl(IntegerControl):
    """Date/Time interval.

    This control accepts a number of days as its input and yields a
    `datetime.timedelta'.
    """

    def parse(self, args):
        """Parse `args' to Python format."""
        # BUGFIX: was super(IntegerControl, ...); resolve from this class.
        data = super(IntervalControl, self).parse(args)
        value = data[self.name]
        if value is not None:
            value = datetime.timedelta(value)
        data[self.name] = value
        return data

    def unparse(self, object):
        """Parse `object' to string format (the number of days)."""
        result = super(IntervalControl, self).unparse(object)
        value = result[self.name]
        if value:
            # BUGFIX: str(timedelta) is 'N days, H:MM:SS' for a day or more
            # but just 'H:MM:SS' below a day; find(' ') then returned -1 and
            # the old unconditional slice mangled the string.
            pos = value.find(' ')
            value = value[:pos] if pos != -1 else '0'
        result[self.name] = value
        return result


class ArrayControl(Control):
    """Array control.

    This control accepts one or multiple values for a single variable
    name. The variables are passed as-is and not parsed further.
    """

    def parse(self, args):
        """Parse `args', normalising the value to a list."""
        value = args.get(self.name)
        if not value:
            value = []
        elif not isinstance(value, list):
            value = [value]
        result = {}
        result[self.name] = value
        return result

    def unparse(self, object):
        """Pass the value through unchanged."""
        value = object[self.name]
        result = {}
        result[self.name] = value
        return result


class FileUploadControl(Control):
    """File upload control.

    This control accepts a FileUpload object as its input.
    """

    nullok = False

    def parse(self, args):
        """Parse `args', requiring a FileUpload unless `nullok' is set."""
        value = args.get(self.name)
        if value:
            if not isinstance(value, FileUpload):
                m = tr('Expecting file upload for field: %s') % tr(self.label)
                fields = [self.name]
                raise FormError(m, fields)
        elif self.nullok:
            value = None
        else:
            m = tr('No file selected for field: %s') % tr(self.label)
            fields = [self.name]
            raise FormError(m, fields)
        result = {}
        result[self.name] = value
        return result

    def unparse(self, args):
        # no postback possible for file upload fields
        return {}
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import util as distribution_util import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging du = distribution_util def try_import(name): # pylint: disable=invalid-name module = None try: module = importlib.import_module(name) except ImportError as e: tf_logging.warning("Could not import %s: %s" % (name, str(e))) return module special = try_import("scipy.special") def _logit(x): x = np.asarray(x) return np.log(x) - np.log1p(-x) class AssertCloseTest(test.TestCase): def testAssertCloseIntegerDtype(self): x = array_ops.placeholder(dtypes.int32) y = x z = array_ops.placeholder(dtypes.int32) feed_dict = {x: [1, 5, 
10, 15, 20], z: [2, 5, 10, 15, 20]} with self.test_session(): with ops.control_dependencies([du.assert_close(x, y)]): array_ops.identity(x).eval(feed_dict=feed_dict) with ops.control_dependencies([du.assert_close(y, x)]): array_ops.identity(x).eval(feed_dict=feed_dict) with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(x, z)]): array_ops.identity(x).eval(feed_dict=feed_dict) with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(y, z)]): array_ops.identity(y).eval(feed_dict=feed_dict) def testAssertCloseNonIntegerDtype(self): x = array_ops.placeholder(dtypes.float32) y = x + 1e-8 z = array_ops.placeholder(dtypes.float32) feed_dict = {x: [1., 5, 10, 15, 20], z: [2., 5, 10, 15, 20]} with self.test_session(): with ops.control_dependencies([du.assert_close(x, y)]): array_ops.identity(x).eval(feed_dict=feed_dict) with ops.control_dependencies([du.assert_close(y, x)]): array_ops.identity(x).eval(feed_dict=feed_dict) with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(x, z)]): array_ops.identity(x).eval(feed_dict=feed_dict) with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(y, z)]): array_ops.identity(y).eval(feed_dict=feed_dict) def testAssertCloseEpsilon(self): x = [0., 5, 10, 15, 20] # x != y y = [0.1, 5, 10, 15, 20] # x = z z = [1e-8, 5, 10, 15, 20] with self.test_session(): with ops.control_dependencies([du.assert_close(x, z)]): array_ops.identity(x).eval() with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(x, y)]): array_ops.identity(x).eval() with self.assertRaisesOpError("Condition x ~= y"): with ops.control_dependencies([du.assert_close(y, z)]): array_ops.identity(y).eval() def testAssertIntegerForm(self): # This should only be detected as an integer. 
x = array_ops.placeholder(dtypes.float32) y = array_ops.placeholder(dtypes.float32) # First component isn't less than float32.eps = 1e-7 z = array_ops.placeholder(dtypes.float32) # This shouldn"t be detected as an integer. w = array_ops.placeholder(dtypes.float32) feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20], z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]} with self.test_session(): with ops.control_dependencies([du.assert_integer_form(x)]): array_ops.identity(x).eval(feed_dict=feed_dict) with self.assertRaisesOpError("has non-integer components"): with ops.control_dependencies( [du.assert_integer_form(y)]): array_ops.identity(y).eval(feed_dict=feed_dict) with self.assertRaisesOpError("has non-integer components"): with ops.control_dependencies( [du.assert_integer_form(z)]): array_ops.identity(z).eval(feed_dict=feed_dict) with self.assertRaisesOpError("has non-integer components"): with ops.control_dependencies( [du.assert_integer_form(w)]): array_ops.identity(w).eval(feed_dict=feed_dict) class GetLogitsAndProbsTest(test.TestCase): def testImproperArguments(self): with self.test_session(): with self.assertRaises(ValueError): du.get_logits_and_probs(logits=None, probs=None) with self.assertRaises(ValueError): du.get_logits_and_probs(logits=[0.1], probs=[0.1]) def testLogits(self): p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32) logits = _logit(p) with self.test_session(): new_logits, new_p = du.get_logits_and_probs( logits=logits, validate_args=True) self.assertAllClose(p, new_p.eval(), rtol=1e-5, atol=0.) self.assertAllClose(logits, new_logits.eval(), rtol=1e-5, atol=0.) 
  def testLogitsMultidimensional(self):
    p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
    logits = np.log(p)

    with self.test_session():
      new_logits, new_p = du.get_logits_and_probs(
          logits=logits, multidimensional=True, validate_args=True)

      self.assertAllClose(new_p.eval(), p)
      self.assertAllClose(new_logits.eval(), logits)

  def testProbability(self):
    p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)

    with self.test_session():
      new_logits, new_p = du.get_logits_and_probs(
          probs=p, validate_args=True)

      self.assertAllClose(_logit(p), new_logits.eval())
      self.assertAllClose(p, new_p.eval())

  def testProbabilityMultidimensional(self):
    p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)

    with self.test_session():
      new_logits, new_p = du.get_logits_and_probs(
          probs=p, multidimensional=True, validate_args=True)

      self.assertAllClose(np.log(p), new_logits.eval())
      self.assertAllClose(p, new_p.eval())

  def testProbabilityValidateArgs(self):
    p = [0.01, 0.2, 0.5, 0.7, .99]
    # Component less than 0.
    p2 = [-1, 0.2, 0.5, 0.3, .2]
    # Component greater than 1.
    p3 = [2, 0.2, 0.5, 0.3, .2]

    with self.test_session():
      _, prob = du.get_logits_and_probs(
          probs=p, validate_args=True)
      prob.eval()

      with self.assertRaisesOpError("Condition x >= 0"):
        _, prob = du.get_logits_and_probs(
            probs=p2, validate_args=True)
        prob.eval()

      # With validation off, out-of-range probabilities pass through.
      _, prob = du.get_logits_and_probs(
          probs=p2, validate_args=False)
      prob.eval()

      with self.assertRaisesOpError("probs has components greater than 1"):
        _, prob = du.get_logits_and_probs(
            probs=p3, validate_args=True)
        prob.eval()

      _, prob = du.get_logits_and_probs(
          probs=p3, validate_args=False)
      prob.eval()

  def testProbabilityValidateArgsMultidimensional(self):
    p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component less than 0. Still sums to 1.
    p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component greater than 1. Does not sum to 1.
    p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Does not sum to 1.
    p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)

    with self.test_session():
      _, prob = du.get_logits_and_probs(
          probs=p, multidimensional=True)
      prob.eval()

      with self.assertRaisesOpError("Condition x >= 0"):
        _, prob = du.get_logits_and_probs(
            probs=p2, multidimensional=True, validate_args=True)
        prob.eval()

      _, prob = du.get_logits_and_probs(
          probs=p2, multidimensional=True, validate_args=False)
      prob.eval()

      with self.assertRaisesOpError(
          "(probs has components greater than 1|probs does not sum to 1)"):
        _, prob = du.get_logits_and_probs(
            probs=p3, multidimensional=True, validate_args=True)
        prob.eval()

      _, prob = du.get_logits_and_probs(
          probs=p3, multidimensional=True, validate_args=False)
      prob.eval()

      with self.assertRaisesOpError("probs does not sum to 1"):
        _, prob = du.get_logits_and_probs(
            probs=p4, multidimensional=True, validate_args=True)
        prob.eval()

      _, prob = du.get_logits_and_probs(
          probs=p4, multidimensional=True, validate_args=False)
      prob.eval()

  def testProbsMultidimShape(self):
    # float16 cannot exactly represent integers above 2**11, so a class
    # count of 2**11 + 1 must be rejected.
    with self.test_session():
      with self.assertRaises(ValueError):
        p = array_ops.ones([int(2**11+1)], dtype=np.float16)
        du.get_logits_and_probs(
            probs=p, multidimensional=True, validate_args=True)

      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        p = array_ops.placeholder(dtype=dtypes.float16)
        _, prob = du.get_logits_and_probs(
            probs=p, multidimensional=True, validate_args=True)
        prob.eval(feed_dict={p: np.ones([int(2**11+1)])})

  def testLogitsMultidimShape(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        l = array_ops.ones([int(2**11+1)], dtype=np.float16)
        du.get_logits_and_probs(
            logits=l, multidimensional=True, validate_args=True)

      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        l = array_ops.placeholder(dtype=dtypes.float16)
        logit, _ = du.get_logits_and_probs(
            logits=l, multidimensional=True, validate_args=True)
        logit.eval(feed_dict={l: np.ones([int(2**11+1)])})


# Checks for du.embed_check_categorical_event_shape.
class EmbedCheckCategoricalEventShapeTest(test.TestCase):

  def testTooSmall(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        param = array_ops.ones([1], dtype=np.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)

      with self.assertRaisesOpError(
          "must have at least 2 events"):
        param = array_ops.placeholder(dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([1])})

  def testTooLarge(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)

      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        param = array_ops.placeholder(dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})

  def testUnsupportedDtype(self):
    with self.test_session():
      with self.assertRaises(TypeError):
        param = array_ops.ones([int(2**11+1)], dtype=dtypes.qint16)
        du.embed_check_categorical_event_shape(param)


# Checks for du.embed_check_integer_casting_closed.
class EmbedCheckIntegerCastingClosedTest(test.TestCase):

  def testCorrectlyAssertsNonnegative(self):
    with self.test_session():
      with self.assertRaisesOpError("Elements must be non-negative"):
        x = array_ops.placeholder(dtype=dtypes.float16)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})

  def testCorrectlyAssersIntegerForm(self):
    with self.test_session():
      with self.assertRaisesOpError("Elements must be int16-equivalent."):
        x = array_ops.placeholder(dtype=dtypes.float16)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})

  def testCorrectlyAssertsLargestPossibleInteger(self):
    with self.test_session():
      # 2**15 is one past int16 max (32767).
      with self.assertRaisesOpError("Elements cannot exceed 32767."):
        x = array_ops.placeholder(dtype=dtypes.int32)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})

  def testCorrectlyAssertsSmallestPossibleInteger(self):
    with self.test_session():
      with self.assertRaisesOpError("Elements cannot be smaller than 0."):
        x = array_ops.placeholder(dtype=dtypes.int32)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.uint16, assert_nonnegative=False)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})


class LogCombinationsTest(test.TestCase):

  def testLogCombinationsBinomial(self):
    n = [2, 5, 12, 15]
    k = [1, 2, 4, 11]
    # `special` (scipy.special) is imported conditionally at module top;
    # skip silently when scipy is unavailable.
    if not special:
      return

    log_combs = np.log(special.binom(n, k))

    with self.test_session():
      n = np.array(n, dtype=np.float32)
      counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
      log_binom = du.log_combinations(n, counts)
      self.assertEqual([4], log_binom.get_shape())
      self.assertAllClose(log_combs, log_binom.eval())

  def testLogCombinationsShape(self):
    # Shape [2, 2]
    n = [[2, 5], [12, 15]]

    with self.test_session():
      n = np.array(n, dtype=np.float32)
      # Shape [2, 2, 4]
      counts = [[[1., 1, 0, 0], [2., 2, 1, 0]],
                [[4., 4, 1, 3], [10, 1, 1, 4]]]
      log_binom = du.log_combinations(n, counts)
      self.assertEqual([2, 2], log_binom.get_shape())


class DynamicShapeTest(test.TestCase):

  def testSameDynamicShape(self):
    with self.test_session():
      scalar = constant_op.constant(2.0)
      scalar1 = array_ops.placeholder(dtype=dtypes.float32)

      vector = [0.3, 0.4, 0.5]
      vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
      vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])

      multidimensional = [[0.3, 0.4], [0.2, 0.6]]
      multidimensional1 = array_ops.placeholder(
          dtype=dtypes.float32, shape=[None, None])
      multidimensional2 = array_ops.placeholder(
          dtype=dtypes.float32, shape=[None, None])

      # Scalar
      self.assertTrue(
          du.same_dynamic_shape(scalar, scalar1).eval({
              scalar1: 2.0
          }))

      # Vector
      self.assertTrue(
          du.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertTrue(
          du.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [2.0, 3.5, 6.0]
          }))

      # Multidimensional
      self.assertTrue(
          du.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))
      self.assertTrue(
          du.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
              }))

      # Scalar, X
      self.assertFalse(
          du.same_dynamic_shape(scalar, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar1, vector1).eval({
              scalar1: 2.0,
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar1, multidimensional1).eval(
              {
                  scalar1: 2.0,
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))

      # Vector, X
      self.assertFalse(
          du.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [6.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector1, multidimensional1).eval(
              {
                  vector1: [2.0, 3.0, 4.0],
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))

      # Multidimensional, X
      self.assertFalse(
          du.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))
      self.assertFalse(
          du.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))


class RotateTransposeTest(test.TestCase):

  def _np_rotate_transpose(self, x, shift):
    # Reference implementation: rotate the permutation of axes by `shift`.
    if not isinstance(x, np.ndarray):
      x = np.array(x)
    return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))

  def testRollStatic(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "None values not supported."):
        du.rotate_transpose(None, 1)
      for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
        for shift in np.arange(-5, 5):
          y = du.rotate_transpose(x, shift)
          self.assertAllEqual(self._np_rotate_transpose(x, shift), y.eval())
          self.assertAllEqual(np.roll(x.shape, shift),
                              y.get_shape().as_list())

  def testRollDynamic(self):
    with self.test_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      shift = array_ops.placeholder(dtypes.int32)
      for x_value in (np.ones(
          1, dtype=x.dtype.as_numpy_dtype()), np.ones(
              (2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(
                  (3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
        for shift_value in np.arange(-5, 5):
          self.assertAllEqual(
              self._np_rotate_transpose(x_value, shift_value),
              sess.run(du.rotate_transpose(x, shift),
                       feed_dict={x: x_value,
                                  shift: shift_value}))


class PickVectorTest(test.TestCase):

  def testCorrectlyPicksVector(self):
    with self.test_session():
      x = np.arange(10, 12)
      y = np.arange(15, 18)
      self.assertAllEqual(x, du.pick_vector(
          math_ops.less(0, 5), x, y).eval())
      self.assertAllEqual(y, du.pick_vector(
          math_ops.less(5, 0), x, y).eval())
      # With a statically-known predicate, pick_vector returns the ndarray
      # itself rather than building a graph op.
      self.assertAllEqual(x, du.pick_vector(
          constant_op.constant(True), x, y))  # No eval.
      self.assertAllEqual(y, du.pick_vector(
          constant_op.constant(False), x, y))  # No eval.


class PreferStaticRankTest(test.TestCase):

  def testNonEmptyConstantTensor(self):
    x = array_ops.zeros((2, 3, 4))
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(3, rank)

  def testEmptyConstantTensor(self):
    x = constant_op.constant([])
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(1, rank)

  def testScalarTensor(self):
    x = constant_op.constant(1.)
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(0, rank)

  def testDynamicRankEndsUpBeingNonEmpty(self):
    x = array_ops.placeholder(np.float64, shape=None)
    rank = du.prefer_static_rank(x)
    with self.test_session():
      self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicRankEndsUpBeingEmpty(self):
    x = array_ops.placeholder(np.int32, shape=None)
    rank = du.prefer_static_rank(x)
    with self.test_session():
      self.assertAllEqual(1, rank.eval(feed_dict={x: []}))

  def testDynamicRankEndsUpBeingScalar(self):
    x = array_ops.placeholder(np.int32, shape=None)
    rank = du.prefer_static_rank(x)
    with self.test_session():
      self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))


class PreferStaticShapeTest(test.TestCase):

  def testNonEmptyConstantTensor(self):
    x = array_ops.zeros((2, 3, 4))
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([2, 3, 4]), shape)

  def testEmptyConstantTensor(self):
    x = constant_op.constant([])
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([0]), shape)

  def testScalarTensor(self):
    x = constant_op.constant(1.)
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([]), shape)

  def testDynamicShapeEndsUpBeingNonEmpty(self):
    x = array_ops.placeholder(np.float64, shape=None)
    shape = du.prefer_static_shape(x)
    with self.test_session():
      self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicShapeEndsUpBeingEmpty(self):
    x = array_ops.placeholder(np.int32, shape=None)
    shape = du.prefer_static_shape(x)
    with self.test_session():
      self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))

  def testDynamicShapeEndsUpBeingScalar(self):
    x = array_ops.placeholder(np.int32, shape=None)
    shape = du.prefer_static_shape(x)
    with self.test_session():
      self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))


class PreferStaticValueTest(test.TestCase):

  def testNonEmptyConstantTensor(self):
    x = array_ops.zeros((2, 3, 4))
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.zeros((2, 3, 4)), value)

  def testEmptyConstantTensor(self):
    x = constant_op.constant([])
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array([]), value)

  def testScalarTensor(self):
    x = constant_op.constant(1.)
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array(1.), value)

  def testDynamicValueEndsUpBeingNonEmpty(self):
    x = array_ops.placeholder(np.float64, shape=None)
    value = du.prefer_static_value(x)
    with self.test_session():
      self.assertAllEqual(np.zeros((2, 3)),
                          value.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicValueEndsUpBeingEmpty(self):
    x = array_ops.placeholder(np.int32, shape=None)
    value = du.prefer_static_value(x)
    with self.test_session():
      self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))

  def testDynamicValueEndsUpBeingScalar(self):
    x = array_ops.placeholder(np.int32, shape=None)
    value = du.prefer_static_value(x)
    with self.test_session():
      self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))


class FillTriangularTest(test.TestCase):

  def setUp(self):
    self._rng = np.random.RandomState(42)

  def _fill_triangular(self, x, upper=False):
    """Numpy implementation of `fill_triangular`."""
    x = np.asarray(x)
    # Formula derived by solving for n: m = n(n+1)/2.
    m = np.int32(x.shape[-1])
    n = np.sqrt(0.25 + 2. * m) - 0.5
    if n != np.floor(n):
      raise ValueError("Invalid shape.")
    n = np.int32(n)
    # We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
    # `m == n == 1`. Hence, we do absolute indexing.
    x_tail = x[..., (m - (n * n - m)):]
    y = np.concatenate(
        [x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
        axis=-1)
    y = y.reshape(np.concatenate([
        np.int32(x.shape[:-1]),
        np.int32([n, n]),
    ], axis=0))
    return np.triu(y) if upper else np.tril(y)

  def _run_test(self, x_, use_deferred_shape=False, **kwargs):
    x_ = np.asarray(x_)
    with self.test_session() as sess:
      static_shape = None if use_deferred_shape else x_.shape
      x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
      # Add `zeros_like(x)` such that x's value and gradient are identical. We
      # do this so we can ensure each gradient value is mapped to the right
      # gradient location.  (Not doing this means the gradient wrt `x` is
      # simple `ones_like(x)`.)
      # Note:
      #   zeros_like_x_pl == zeros_like(x_pl)
      #   gradient(zeros_like_x_pl, x_pl) == x_pl - 1
      zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
                         - array_ops.stop_gradient(x_pl * (x_pl - 1.)))
      x = x_pl + zeros_like_x_pl
      actual = du.fill_triangular(x, **kwargs)
      grad_actual = gradients_impl.gradients(actual, x_pl)[0]
      [actual_, grad_actual_] = sess.run([actual, grad_actual],
                                         feed_dict={x_pl: x_})
      expected = self._fill_triangular(x_, **kwargs)
      if use_deferred_shape:
        self.assertEqual(None, actual.shape)
      else:
        self.assertAllEqual(expected.shape, actual.shape)
      self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
      self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)

  def testCorrectlyMakes1x1TriLower(self):
    self._run_test(self._rng.randn(3, int(1*2/2)))

  def testCorrectlyMakesNoBatchTriLower(self):
    self._run_test(self._rng.randn(int(4*5/2)))

  def testCorrectlyMakesBatchTriLower(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)))

  def testCorrectlyMakesBatchTriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)

  def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)

  def testCorrectlyMakesBatch7x7TriLower(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)))

  def testCorrectlyMakes1x1TriUpper(self):
    self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)

  def testCorrectlyMakesNoBatchTriUpper(self):
    self._run_test(self._rng.randn(int(4*5/2)), upper=True)

  def testCorrectlyMakesBatchTriUpper(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)

  def testCorrectlyMakesBatchTriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)),
                   use_deferred_shape=True,
                   upper=True)

  def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)),
                   use_deferred_shape=True,
                   upper=True)

  def testCorrectlyMakesBatch7x7TriUpper(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)


class ReduceWeightedLogSumExp(test.TestCase):

  def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):
    # Numpy reference: max-shifted weighted logsumexp with sign tracking.
    m = np.max(logx, axis=axis, keepdims=True)
    sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)
    sgn = np.sign(sum_)
    if not keep_dims:
      m = np.squeeze(m, axis=axis)
    return m + np.log(sgn * sum_), sgn

  def testNoWeights(self):
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    with self.test_session() as sess:
      logx = constant_op.constant(logx_)
      expected = math_ops.reduce_logsumexp(logx, axis=-1)
      grad_expected = gradients_impl.gradients(expected, logx)[0]
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, axis=-1, return_sign=True)
      grad_actual = gradients_impl.gradients(actual, logx)[0]
      [actual_, actual_sgn_, grad_actual_,
       expected_, grad_expected_] = sess.run([
           actual, actual_sgn, grad_actual,
           expected, grad_expected])
    self.assertAllEqual(expected_, actual_)
    self.assertAllEqual(grad_expected_, grad_actual_)
    self.assertAllEqual([1., 1, 1], actual_sgn_)

  def testNegativeWeights(self):
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)
    with self.test_session() as sess:
      logx = constant_op.constant(logx_)
      w = constant_op.constant(w_)
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True)
      [actual_, actual_sgn_] = sess.run([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([-1., -1, 1], actual_sgn_)

  def testKeepDims(self):
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(
        logx_, w_, axis=-1, keep_dims=True)
    with self.test_session() as sess:
      logx = constant_op.constant(logx_)
      w = constant_op.constant(w_)
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True, keep_dims=True)
      [actual_, actual_sgn_] = sess.run([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)

  def testDocString(self):
    """This test verifies the correctness of the docstring examples."""
    with self.test_session():
      x = constant_op.constant([[0., 0, 0],
                                [0, 0, 0]])
      w = constant_op.constant([[-1., 1, 1],
                                [1, 1, 1]])
      self.assertAllClose(
          np.log(4), du.reduce_weighted_logsumexp(x, w).eval())

      with np.errstate(divide="ignore"):
        self.assertAllClose(
            np.log([0, 2, 2]),
            du.reduce_weighted_logsumexp(x, w, axis=0).eval())

      self.assertAllClose(
          np.log([1, 3]),
          du.reduce_weighted_logsumexp(x, w, axis=1).eval())

      self.assertAllClose(
          np.log([[1], [3]]),
          du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True).eval())

      self.assertAllClose(
          np.log(4),
          du.reduce_weighted_logsumexp(x, w, axis=[0, 1]).eval())


class GenNewSeedTest(test.TestCase):

  def testOnlyNoneReturnsNone(self):
    self.assertFalse(du.gen_new_seed(0, "salt") is None)
    self.assertTrue(du.gen_new_seed(None, "salt") is None)


# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
class SoftplusTest(test.TestCase):

  def _npSoftplus(self, np_features):
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu) as sess:
      softplus = nn_ops.softplus(np_features)
      softplus_inverse = du.softplus_inverse(softplus)
      [tf_softplus, tf_softplus_inverse] = sess.run([
          softplus, softplus_inverse])
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    # The inverse is less accurate in lower precision; loosen tolerances
    # per dtype.
    rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
        str(np_features.dtype), 1e-6)
    # This will test that we correctly computed the inverse by verifying we
    # recovered the original input.
    self.assertAllCloseAccordingToType(
        np_features, tf_softplus_inverse,
        atol=0., rtol=rtol)
    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        tf_softplus > 0)

    self.assertShapeEqual(np_softplus, softplus)
    self.assertShapeEqual(np_softplus, softplus_inverse)

    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        np.isfinite(tf_softplus))
    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                        np.isfinite(tf_softplus_inverse))

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
      upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=False)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=True)
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      # NOTE(review): `log_eps + ten - log_eps` below collapses to `ten`,
      # whereas the use_gpu=False call above passes `log_eps + ten` and
      # `-log_eps` as two separate entries -- this looks like a missing
      # comma; confirm against the CPU case before changing.
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten
              - log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
              -log_eps + ten
          ],
          use_gpu=True)

  def testGradient(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    tf_logging.vlog(2, "softplus (float) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testInverseSoftplusGradientNeverNan(self):
    with self.test_session():
      # Note that this range contains both zero and inf.
      x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
      y = du.softplus_inverse(x)
      grads = gradients_impl.gradients(y, x)[0].eval()
      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads).astype(np.bool),
                          np.isnan(grads))

  def testInverseSoftplusGradientFinite(self):
    with self.test_session():
      # This range of x is all finite, and so is 1 / x.  So the
      # gradient and its approximations should be finite as well.
      x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))
      y = du.softplus_inverse(x)
      grads = gradients_impl.gradients(y, x)[0].eval()
      # Equivalent to `assertAllTrue` (if it existed).
      self.assertAllEqual(
          np.ones_like(grads).astype(np.bool), np.isfinite(grads))


if __name__ == "__main__":
  test.main()
"""Read and preprocess video data.

Video processing occurs on a single video at a time. Videos are read and
preprocessed in parallel across multiple threads. The resulting videos are
concatenated together to form a single batch for training or evaluation.

-- Provide processed video data for a network:
inputs: Construct batches of evaluation examples of videos.
distorted_inputs: Construct batches of training examples of videos.
batch_inputs: Construct batches of training or evaluation examples of videos.

-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
  of a video.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS


def decode_jpeg(image_buffer, scope=None):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor holding one JPEG-encoded image.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor of shape [FLAGS.image_height, FLAGS.image_width, 3]
      with values ranging from [0, 1).
    """
    with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
        # Decode the string as an RGB JPEG. The height and width of the image
        # are unknown at graph-construction time and are set dynamically by
        # decode_jpeg.
        image = tf.image.decode_jpeg(image_buffer, channels=3)

        # Convert to float in [0, 1); the various tf.image adjust_* ops all
        # require this range for dtype float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)

        # Crop the central region of the image with an area containing 87.5%
        # of the original image.
        image = tf.image.central_crop(image, central_fraction=0.875)

        # Resize to the height/width the network expects. resize_bilinear
        # operates on batches, so temporarily add a leading batch dimension.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(
            image, [FLAGS.image_height, FLAGS.image_width],
            align_corners=False)
        image = tf.squeeze(image, [0])
        return image


def decode_video(video_buffer):
    """Decode a list of string Tensors into a list of 3-D float image Tensors.

    Args:
      video_buffer: 1-D string tensor, shape [num_steps], holding one
        JPEG-encoded image per time step.
    Returns:
      4-D float Tensor [num_steps, height, width, 3] with values in [0, 1).
    """
    # Decode every frame of the video independently.
    return tf.map_fn(decode_jpeg, video_buffer, dtype=tf.float32)


def inputs(dataset, config, num_preprocess_threads=4):
    """Generate batches of videos for evaluation.

    Use this function as the inputs for evaluating a network. Note that some
    (minimal) video preprocessing occurs during evaluation including central
    cropping and resizing of the video to fit the network.

    Args:
      dataset: instance of Dataset class specifying the dataset.
      config: dict-like configuration settings ('batch_size', 'num_steps').
      num_preprocess_threads: integer, total number of preprocessing threads,
        defaults to 4.
    Returns:
      videos: 2-D string Tensor of [batch_size, num_steps]; each row holds
        the JPEG-encoded frames of one video clip.
      labels_one_hot: 2-D integer Tensor of [batch_size, num_classes],
        the one-hot encoded labels.
      filenames: 1-D string Tensor of [batch_size].
    """
    # Force all input processing onto CPU in order to reserve the GPU for
    # the forward inference and back-propagation.
    with tf.device('/cpu:0'):
        videos, labels_one_hot, filenames = batch_inputs(
            dataset, config, train=False,
            num_preprocess_threads=num_preprocess_threads)
    return videos, labels_one_hot, filenames


def distorted_inputs(dataset, config, num_preprocess_threads=4):
    """Generate batches of distorted versions of videos.

    Use this function as the inputs for training a network. Distorting videos
    provides a useful technique for augmenting the data set during training
    in order to make the network invariant to aspects of the video that do
    not affect the label.

    Args:
      dataset: instance of Dataset class specifying the dataset.
      config: dict-like configuration settings ('batch_size', 'num_steps').
      num_preprocess_threads: integer, total number of preprocessing threads,
        defaults to 4.
    Returns:
      videos: 2-D string Tensor of [batch_size, num_steps]; each row holds
        the JPEG-encoded frames of one video clip.
      labels_one_hot: 2-D integer Tensor of [batch_size, num_classes],
        the one-hot encoded labels.
      filenames: 1-D string Tensor of [batch_size].
    """
    # NOTE(review): unlike `inputs`, this path does not pin the input
    # pipeline to /cpu:0 -- confirm whether that asymmetry is intentional.
    videos, labels_one_hot, filenames = batch_inputs(
        dataset, config, train=True,
        num_preprocess_threads=num_preprocess_threads)
    return videos, labels_one_hot, filenames


def video_preprocessing(image_features):
    """Order the frames of one video clip and pack them into a single tensor.

    Args:
      image_features: dict mapping keys of the form 'raw/image/NNN' to scalar
        tf.string Tensors, each holding one JPEG-encoded frame. The last
        three characters of the key are the zero-padded frame index.
    Returns:
      result: 1-D tf.string Tensor of shape [num_steps] containing the
        encoded frames in frame order (decoding to floats happens later in
        `decode_video`).
    """
    # Index each frame tensor by its integer frame number taken from the
    # last three characters of the key (e.g. 'raw/image/007' -> 7).
    frames_by_index = {int(key[-3:]): value
                       for key, value in image_features.items()}
    # Preserve the original function's side effect of emptying the dict.
    image_features.clear()

    # Re-assemble the frames in ascending frame order.
    images = [frames_by_index[index] for index in range(len(frames_by_index))]

    # Give every scalar string a length-1 leading axis, then concatenate
    # into one [num_steps] tensor.
    images = [tf.expand_dims(image, 0) for image in images]
    result = tf.concat(images, 0)
    return result


def parse_example_proto(example_serialized, num_steps):
    """Parses an Example proto containing a training example of a video clip.

    The output of the convert_to_records.py video preprocessing script is a
    dataset containing serialized Example protocol buffers. Each Example
    proto contains the following fields:

      image/height: 200
      image/width: 100
      image/colorspace: 'RGB'
      image/channels: 3
      image/class/label: 2
      image/class/text: 'walking'
      image/format: 'JPEG'
      image/filename: '00001.JPEG'
      raw/image/001: <JPEG encoded string>
      ...
      raw/image/n: <JPEG encoded string>

    Args:
      example_serialized: scalar Tensor tf.string containing a serialized
        Example protocol buffer.
      num_steps: integer, number of frames stored per clip.
    Returns:
      image_features: dict mapping 'raw/image/NNN' keys to scalar tf.string
        Tensors with the JPEG encoding of every frame of the clip.
      label: Tensor tf.int32 containing the zero-based label.
      text: Tensor tf.string containing the human-readable label.
      filename: Tensor tf.string, the filename of the clip.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                               default_value=''),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value='')
    }

    features = tf.parse_single_example(example_serialized, feature_map)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    # Subtract 1 from the label because the stored label values range
    # from 1..n while the network expects 0..n-1.
    label = tf.subtract(label, tf.constant(1))

    # Frame data in the Example proto, one feature per time step.
    image_map = {}
    for index in range(num_steps):
        image_map['raw/image/%03d' % index] = tf.FixedLenFeature(
            [], dtype=tf.string, default_value='')

    image_features = tf.parse_single_example(example_serialized, image_map)

    return (image_features, label, features['image/class/text'],
            features['image/filename'])


def batch_inputs(dataset, config, train, num_preprocess_threads=4):
    """Construct batches of training or evaluation examples from the dataset.

    Args:
      dataset: instance of Dataset class specifying the dataset.
        See dataset.py for details.
      config: dict-like configuration ('batch_size', 'num_steps').
      train: boolean; shuffle the input files when True.
      num_preprocess_threads: integer, total number of preprocessing threads.
        Must be a multiple of 4.
    Returns:
      videos: 2-D string Tensor of [batch_size, num_steps]; each row holds
        the JPEG-encoded frames of one video clip.
      labels_one_hot: 2-D integer Tensor of [batch_size, num_classes],
        the one-hot encoded labels.
      filenames: 1-D string Tensor of [batch_size].
    Raises:
      ValueError: if no data files are found or num_preprocess_threads is
        not a multiple of 4.
    """
    with tf.name_scope('batch_processing'):
        batch_size = config['batch_size']
        data_files = dataset.data_files()
        if data_files is None:
            raise ValueError('No data files found for this dataset')
        # Validate the thread count before building any graph state.
        # (Original code passed the count as a second ValueError argument,
        # logging-style, so the message was never formatted; '%%' escapes
        # the literal percent sign.)
        if num_preprocess_threads % 4:
            raise ValueError('Please make num_preprocess_threads a multiple '
                             'of 4 (%d %% 4 != 0).' % num_preprocess_threads)

        # Create filename_queue: shuffled with extra capacity for training,
        # deterministic single-capacity order for evaluation.
        if train:
            filename_queue = tf.train.string_input_producer(data_files,
                                                            shuffle=True,
                                                            capacity=16)
        else:
            filename_queue = tf.train.string_input_producer(data_files,
                                                            shuffle=False,
                                                            capacity=1)

        reader = dataset.reader()
        _, example_serialized = reader.read(filename_queue)

        videos_and_labels_and_filenames = []
        # Parse a serialized Example proto to extract the video and metadata.
        image_features, label_index, _, filename = parse_example_proto(
            example_serialized, config['num_steps'])
        video = video_preprocessing(image_features)
        videos_and_labels_and_filenames.append([video, label_index, filename])

        videos, label_index_batch, filename_batch = tf.train.batch_join(
            videos_and_labels_and_filenames,
            batch_size=batch_size,
            capacity=2 * num_preprocess_threads * batch_size)

        # Convert the label indices to one-hot vectors.
        labels = tf.reshape(label_index_batch, [batch_size])
        labels_one_hot = tf.one_hot(labels, dataset.num_classes(), 1, 0)

        return (videos, labels_one_hot,
                tf.reshape(filename_batch, [batch_size]))
""" Test to verify the functionality of the OpenGL backends. This test sets up a real visualization with shaders and all. This tests setting source code, setting texture and buffer data, and we touch many other functions of the API too. The end result is an image with four colored quads. The result is tested for pixel color. The visualization ----------------- We create a visualization where the screen is divided in 4 quadrants, and each quadrant is drawn a different color (black, red, green, blue). The drawing is done for 50% using attribute data, and 50% using a texture. The end result should be fully saturated colors. Remember: the bottom left is (-1, -1) and the first quadrant. """ import sys import numpy as np from vispy.app import Canvas from vispy.testing import (requires_application, requires_pyopengl, SkipTest, run_tests_if_main, assert_equal, assert_true) from vispy.gloo import gl import pytest # All these tests require a working backend. # # High level tests # def teardown_module(): gl.use_gl() # Reset to default @pytest.mark.xfail(sys.platform == 'darwin', reason='functionality fails on OSX (see #1178)') @requires_application() def test_functionality_desktop(): """ Test desktop GL backend for full functionality. """ _test_functionality('gl2') @pytest.mark.xfail(sys.platform == 'darwin', reason='functionality fails on OSX (see #1178)') @requires_application() def test_functionality_proxy(): """ Test GL proxy class for full functionality. """ # By using debug mode, we are using the proxy class _test_functionality('gl2 debug') @pytest.mark.xfail(sys.platform == 'darwin', reason='functionality fails on OSX (see #1178)') @requires_application() @requires_pyopengl() def test_functionality_pyopengl(): """ Test pyopengl GL backend for full functionality. """ _test_functionality('pyopengl2') @requires_application() def test_functionality_es2(): """ Test es2 GL backend for full functionality. 
""" if True: raise SkipTest('Skip es2 functionality test for now.') if not sys.platform.startswith('win'): raise SkipTest('Can only test es2 functionality on Windows.') _test_functionality('es2') def _clear_screen(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glFinish() def _test_functionality(backend): """ Create app and canvas so we have a context. Then run tests. """ # use the backend gl.use_gl(backend) with Canvas() as canvas: _clear_screen() # Prepare w, h = canvas.size gl.glViewport(0, 0, w, h) gl.glScissor(0, 0, w, h) # touch gl.glClearColor(0.0, 0.0, 0.0, 1.0) # Setup visualization, ensure to do it in a draw event objects = _prepare_vis() _clear_screen() _draw1() _clear_screen() _draw2() _clear_screen() _draw3() # Clean up for delete_func, handle in objects: delete_func(handle) gl.glFinish() # # Create CPU data # # Create vertex and fragments shader. They are designed to that all # OpenGL func can be tested, i.e. all types of uniforms are present. # Most variables are nullified however, but we must make sure we do this # in a way that the compiler won't optimize out :) VERT = """ #version 120 attribute float a_1; attribute vec2 a_2; attribute vec3 a_3; attribute vec4 a_4; uniform float u_f1; uniform vec2 u_f2; uniform vec3 u_f3; uniform vec4 u_f4; uniform int u_i1; uniform ivec2 u_i2; uniform ivec3 u_i3; uniform ivec4 u_i4; uniform mat2 u_m2; uniform mat3 u_m3; uniform mat4 u_m4; varying vec2 v_2; // tex coords varying vec4 v_4; // attr colors void main() { float zero = float(u_i1); // Combine int with float uniforms (i.e. 
ints are "used") float u1 = u_f1 + float(u_i1); vec2 u2 = u_f2 + vec2(u_i2); vec3 u3 = u_f3 + vec3(u_i3); vec4 u4 = u_f4 + vec4(u_i4); // Set varyings (use every 2D and 4D variable, and u1) v_2 = a_1 * a_2 + zero*u_m2 * a_2 * u2 * u1; v_4 = u_m4 * a_4 * u4; // Set position (use 3D variables) gl_Position = vec4(u_m3* a_3* u3, 1.0); } """ FRAG = """ #version 120 uniform sampler2D s_1; uniform int u_i1; varying vec2 v_2; // rex coords varying vec4 v_4; // attr colors void main() { float zero = float(u_i1); gl_FragColor = (texture2D(s_1, v_2) + v_4); } """ # Color texture texquad = 5 im1 = np.zeros((texquad*2, texquad*2, 3), np.uint8) im1[texquad:, :texquad, 0] = 128 im1[texquad:, texquad:, 1] = 128 im1[:texquad, texquad:, 2] = 128 # Grayscale texture (uploaded but not used) im2 = im1[:, :, 0] # A non-contiguous view assert im2.flags['C_CONTIGUOUS'] is False # Vertex Buffers # Create coordinates for upper left quad quad = np.array([[0, 0, 0], [-1, 0, 0], [-1, -1, 0], [0, 0, 0], [-1, -1, 0], [0, -1, 0]], np.float32) N = quad.shape[0] * 4 # buf3 contains coordinates in device coordinates for four quadrants buf3 = np.row_stack([quad + (0, 0, 0), quad + (0, 1, 0), quad + (1, 1, 0), quad + (1, 0, 0)]).astype(np.float32) # buf2 is texture coords. 
Note that this is a view on buf2 buf2 = ((buf3+1.0)*0.5)[:, :2] # not C-contiguous assert buf2.flags['C_CONTIGUOUS'] is False # Array of colors buf4 = np.zeros((N, 5), np.float32) buf4[6:12, 0] = 0.5 buf4[12:18, 1] = 0.5 buf4[18:24, 2] = 0.5 buf4[:, 3] = 1.0 # alpha buf4 = buf4[:, :4] # make non-contiguous # Element buffer # elements = np.arange(N, dtype=np.uint8) # C-contiguous elements = np.arange(0, N, 0.5).astype(np.uint8)[::2] # not C-contiguous helements = None # the OpenGL object ref # # The GL calls # def _prepare_vis(): objects = [] # --- program and shaders # Create program and shaders hprog = gl.glCreateProgram() hvert = gl.glCreateShader(gl.GL_VERTEX_SHADER) hfrag = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) objects.append((gl.glDeleteProgram, hprog)) objects.append((gl.glDeleteShader, hvert)) objects.append((gl.glDeleteShader, hfrag)) # Compile source code gl.glShaderSource(hvert, VERT) gl.glShaderSource(hfrag, FRAG) gl.glCompileShader(hvert) gl.glCompileShader(hfrag) # Check assert gl.glGetShaderInfoLog(hvert) == '' assert gl.glGetShaderInfoLog(hfrag) == '' assert gl.glGetShaderParameter(hvert, gl.GL_COMPILE_STATUS) == 1 assert gl.glGetShaderParameter(hfrag, gl.GL_COMPILE_STATUS) == 1 # Attach and link gl.glAttachShader(hprog, hvert) gl.glAttachShader(hprog, hfrag) # touch glDetachShader gl.glDetachShader(hprog, hvert) gl.glAttachShader(hprog, hvert) # Bind all attributes - we could let this occur automatically, but some # implementations bind an attribute to index 0, which has the unfortunate # property of being unable to be modified. 
gl.glBindAttribLocation(hprog, 1, 'a_1') gl.glBindAttribLocation(hprog, 2, 'a_2') gl.glBindAttribLocation(hprog, 3, 'a_3') gl.glBindAttribLocation(hprog, 4, 'a_4') gl.glLinkProgram(hprog) # Test that indeed these shaders are attached attached_shaders = gl.glGetAttachedShaders(hprog) assert_equal(set(attached_shaders), set([hvert, hfrag])) # Check assert_equal(gl.glGetProgramInfoLog(hprog), '') assert_equal(gl.glGetProgramParameter(hprog, gl.GL_LINK_STATUS), 1) gl.glValidateProgram(hprog) assert_equal(gl.glGetProgramParameter(hprog, gl.GL_VALIDATE_STATUS), 1) # Use it! gl.glUseProgram(hprog) # Check if all is ok assert_equal(gl.glGetError(), 0) # Check source vert_source = gl.glGetShaderSource(hvert) assert_true('attribute vec2 a_2;' in vert_source) # --- get information on attributes and uniforms # Count attributes and uniforms natt = gl.glGetProgramParameter(hprog, gl.GL_ACTIVE_ATTRIBUTES) nuni = gl.glGetProgramParameter(hprog, gl.GL_ACTIVE_UNIFORMS) assert_equal(natt, 4) assert_equal(nuni, 4+4+3+1) # Get names names = {} for i in range(natt): name, count, type = gl.glGetActiveAttrib(hprog, i) names[name] = type assert_equal(count, 1) for i in range(nuni): name, count, type = gl.glGetActiveUniform(hprog, i) names[name] = type assert_equal(count, 1) # Check assert_equal(names['a_1'], gl.GL_FLOAT) assert_equal(names['a_2'], gl.GL_FLOAT_VEC2) assert_equal(names['a_3'], gl.GL_FLOAT_VEC3) assert_equal(names['a_4'], gl.GL_FLOAT_VEC4) assert_equal(names['s_1'], gl.GL_SAMPLER_2D) # for i, type in enumerate([gl.GL_FLOAT, gl.GL_FLOAT_VEC2, gl.GL_FLOAT_VEC3, gl.GL_FLOAT_VEC4]): assert_equal(names['u_f%i' % (i+1)], type) for i, type in enumerate([gl.GL_INT, gl.GL_INT_VEC2, gl.GL_INT_VEC3, gl.GL_INT_VEC4]): assert_equal(names['u_i%i' % (i+1)], type) for i, type in enumerate([gl.GL_FLOAT_MAT2, gl.GL_FLOAT_MAT3, gl.GL_FLOAT_MAT4]): assert_equal(names['u_m%i' % (i+2)], type) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- texture # Create, bind, activate htex = 
gl.glCreateTexture() objects.append((gl.glDeleteTexture, htex)) gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1) gl.glBindTexture(gl.GL_TEXTURE_2D, htex) # Allocate data and upload # This data is luminance and not C-contiguous gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_LUMINANCE, gl.GL_LUMINANCE, gl.GL_UNSIGNED_BYTE, im2) # touch gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_LUMINANCE, gl.GL_LUMINANCE, gl.GL_UNSIGNED_BYTE, im2.shape[:2]) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, gl.GL_LUMINANCE, gl.GL_UNSIGNED_BYTE, im2) # Set texture parameters (use f and i to touch both) T = gl.GL_TEXTURE_2D gl.glTexParameterf(T, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(T, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) # Re-allocate data and upload gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, im1.shape[:2]) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, im1) # Attach! loc = gl.glGetUniformLocation(hprog, 's_1') unit = 0 gl.glActiveTexture(gl.GL_TEXTURE0+unit) gl.glUniform1i(loc, unit) # Mipmaps (just to touch this function) gl.glGenerateMipmap(gl.GL_TEXTURE_2D) # Check min filter (touch getTextParameter) minfilt = gl.glGetTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER) assert_equal(minfilt, gl.GL_LINEAR) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- buffer vec2 (contiguous VBO) # Create buffer hbuf2 = gl.glCreateBuffer() objects.append((gl.glDeleteBuffer, hbuf2)) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, hbuf2) # Allocate and set data gl.glBufferData(gl.GL_ARRAY_BUFFER, buf2.nbytes, gl.GL_DYNAMIC_DRAW) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, buf2) # Attach! 
loc = gl.glGetAttribLocation(hprog, 'a_2') gl.glDisableVertexAttribArray(loc) # touch gl.glEnableVertexAttribArray(loc) gl.glVertexAttribPointer(loc, 2, gl.GL_FLOAT, False, 2*4, 0) # Check (touch glGetBufferParameter, glGetVertexAttrib and # glGetVertexAttribOffset) size = gl.glGetBufferParameter(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) assert_equal(size, buf2.nbytes) stride = gl.glGetVertexAttrib(loc, gl.GL_VERTEX_ATTRIB_ARRAY_STRIDE) assert_equal(stride, 2*4) offset = gl.glGetVertexAttribOffset(loc, gl.GL_VERTEX_ATTRIB_ARRAY_POINTER) assert_equal(offset, 0) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- buffer vec3 (non-contiguous VBO) # Create buffer hbuf3 = gl.glCreateBuffer() objects.append((gl.glDeleteBuffer, hbuf3)) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, hbuf3) # Allocate and set data gl.glBufferData(gl.GL_ARRAY_BUFFER, buf3.nbytes, gl.GL_DYNAMIC_DRAW) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, buf3) # Attach! loc = gl.glGetAttribLocation(hprog, 'a_3') gl.glEnableVertexAttribArray(loc) gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, 3*4, 0) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- buffer vec4 (client vertex data) # Select no FBO gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) # Attach! 
loc = gl.glGetAttribLocation(hprog, 'a_4') gl.glEnableVertexAttribArray(loc) gl.glVertexAttribPointer(loc, 4, gl.GL_FLOAT, False, 4*4, buf4) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- element buffer # Create buffer global helements helements = gl.glCreateBuffer() objects.append((gl.glDeleteBuffer, helements)) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, helements) # Allocate and set data gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, elements, gl.GL_DYNAMIC_DRAW) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, elements) # Turn off gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, 0) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- uniforms # Set integer uniforms to 0 # We set them twice just to touch both i and iv functions for i, fun1, fun2 in [(1, gl.glUniform1i, gl.glUniform1iv), (2, gl.glUniform2i, gl.glUniform2iv), (3, gl.glUniform3i, gl.glUniform3iv), (4, gl.glUniform4i, gl.glUniform4iv)]: name = 'u_i%i' % i value = [0] * i loc = gl.glGetUniformLocation(hprog, name) fun1(loc, *value) # e.g. glUniform4i fun2(loc, 1, value) # e.g. glUniform4iv # Set float uniforms to 1.0 # We set them twice just to touch both i and iv functions for i, fun1, fun2 in [(1, gl.glUniform1f, gl.glUniform1fv), (2, gl.glUniform2f, gl.glUniform2fv), (3, gl.glUniform3f, gl.glUniform3fv), (4, gl.glUniform4f, gl.glUniform4fv)]: name = 'u_f%i' % i value = [1.0] * i loc = gl.glGetUniformLocation(hprog, name) fun1(loc, *value) # e.g. glUniform4f fun2(loc, 1, value) # e.g. 
glUniform4fv # Set matrix uniforms m = np.eye(5, dtype='float32') loc = gl.glGetUniformLocation(hprog, 'u_m2') gl.glUniformMatrix2fv(loc, 1, False, m[:2, :2]) # loc = gl.glGetUniformLocation(hprog, 'u_m3') m = np.eye(3, dtype='float32') gl.glUniformMatrix3fv(loc, 1, False, m[:3, :3]) # loc = gl.glGetUniformLocation(hprog, 'u_m4') m = np.eye(4, dtype='float32') gl.glUniformMatrix4fv(loc, 1, False, m[:4, :4]) # Check some uniforms loc = gl.glGetUniformLocation(hprog, 'u_i1') assert_equal(gl.glGetUniform(hprog, loc), 0) loc = gl.glGetUniformLocation(hprog, 'u_i2') assert_equal(gl.glGetUniform(hprog, loc), (0, 0)) loc = gl.glGetUniformLocation(hprog, 'u_f2') assert_equal(gl.glGetUniform(hprog, loc), (1.0, 1.0)) # Check if all is ok assert_equal(gl.glGetError(), 0) # --- attributes # Constant values for attributes. We do not even use this ... loc = gl.glGetAttribLocation(hprog, 'a_1') gl.glVertexAttrib1f(loc, 1.0) loc = gl.glGetAttribLocation(hprog, 'a_2') gl.glVertexAttrib2f(loc, 1.0, 1.0) loc = gl.glGetAttribLocation(hprog, 'a_3') gl.glVertexAttrib3f(loc, 1.0, 1.0, 1.0) loc = gl.glGetAttribLocation(hprog, 'a_4') gl.glVertexAttrib4f(loc, 1.0, 1.0, 1.0, 1.0) # --- flush and finish # Not really necessary, but we want to touch the functions gl.glFlush() gl.glFinish() # print([i[1] for i in objects]) return objects def _draw1(): # Draw using arrays gl.glDrawArrays(gl.GL_TRIANGLES, 0, N) gl.glFinish() _check_result() def _draw2(): # Draw using elements via buffer gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, helements) gl.glDrawElements(gl.GL_TRIANGLES, elements.size, gl.GL_UNSIGNED_BYTE, 0) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, 0) gl.glFinish() _check_result() def _draw3(): # Draw using elements via numpy array gl.glDrawElements(gl.GL_TRIANGLES, elements.size, gl.GL_UNSIGNED_BYTE, elements) gl.glFinish() _check_result() def _check_result(assert_result=True): """ Test the color of each quadrant by picking the center pixel of each quadrant and comparing it with the 
reference color. """ # Take screenshot x, y, w, h = gl.glGetParameter(gl.GL_VIEWPORT) data = gl.glReadPixels(x, y, w, h, gl.GL_RGB, gl.GL_UNSIGNED_BYTE) im = np.frombuffer(data, np.uint8) im.shape = h, w, 3 # Get center pixel from each quadrant pix1 = tuple(im[int(1*h/4), int(1*w/4)]) pix2 = tuple(im[int(3*h/4), int(1*w/4)]) pix3 = tuple(im[int(3*h/4), int(3*w/4)]) pix4 = tuple(im[int(1*h/4), int(3*w/4)]) # print(pix1, pix2, pix3, pix4) if assert_result: # Test their value assert_equal(pix1, (0, 0, 0)) assert_equal(pix2, (255, 0, 0)) assert_equal(pix3, (0, 255, 0)) assert_equal(pix4, (0, 0, 255)) run_tests_if_main()
# Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# contextlib2 provides ExitStack on Python 2.
try:
    from contextlib import ExitStack
except ImportError:
    from contextlib2 import ExitStack

from touchdown.core.action import Action
from touchdown.core.resource import Resource
from touchdown.core.plan import Plan, Present
from touchdown.core import argument, errors, serializers
from touchdown.provisioner import Provisioner
from touchdown import ssh

from ..account import BaseAccount
from ..common import SimpleDescribe, SimpleApply, SimpleDestroy


class Image(Resource):
    """An AWS AMI built by booting a source AMI, provisioning it over SSH,
    and snapshotting the result with ``create_image``."""

    resource_name = "image"
    immutable_tags = True

    name = argument.String(min=3, max=128, field="Name")
    description = argument.String(field="Description")
    source_ami = argument.String()
    instance_type = argument.String(default="m3.medium")
    username = argument.String()
    provisioner = argument.Resource(Provisioner)

    # architecture = argument.String(field="Architecture", default="x86_64", choices=["x86_64", "i386"])
    # kernel = argument.String(field="KernelId")
    # ramdisk = argument.String(field="RamdiskId")
    # root_device_name = argument.String(field="RootDeviceName")
    # virtualization_type = argument.String(choices=["paravirtual", "hvm"], field="VirtualizationType")
    # sriov_net_support = argument.String(choices=["simple"], field="SriovNetSupport")
    # location = argument.String()
    # snapshot_id = argument.String()

    launch_permissions = argument.List()
    tags = argument.Dict()
    account = argument.Resource(BaseAccount)


class BuildInstance(Action):
    """Action that builds the AMI: create temporary keypair + security group,
    boot a source instance, provision it, image it, then tear everything down
    via an ExitStack of cleanup callbacks."""

    @property
    def description(self):
        # NOTE(review): this property is a generator (yields one line of
        # description) — callers presumably iterate it.
        yield "Build new AMI '{}' from '{}'".format(self.resource.name, self.resource.source_ami)

    def create_security_group(self):
        # Temporary group that allows SSH from anywhere; destroyed by the
        # ExitStack callback registered below.
        self.plan.echo("Creating temporary security group")
        security_group = self.plan.client.create_security_group(
            GroupName="temporary-security-group",
            Description="Temporary security group",
        )
        self.stack.callback(self.destroy_security_group, security_group)

        self.plan.echo("Granting SSH access")
        self.plan.client.authorize_security_group_ingress(
            GroupId=security_group['GroupId'],
            IpProtocol="tcp",
            FromPort=22,
            ToPort=22,
            CidrIp="0.0.0.0/0",
        )

        return security_group

    def destroy_security_group(self, security_group):
        self.plan.echo("Deleting temporary security group")
        self.plan.client.delete_security_group(
            GroupId=security_group["GroupId"],
        )

    def create_keypair(self):
        # Temporary keypair; the private key material is used to SSH in.
        self.plan.echo("Creating temporary keypair")
        keypair = self.plan.client.create_key_pair(
            KeyName="temporary-key-pair",
        )
        self.stack.callback(self.destroy_keypair, keypair)
        return keypair

    def destroy_keypair(self, keypair):
        self.plan.echo("Deleting temporary keypair")
        self.plan.client.delete_key_pair(
            KeyName=keypair["KeyName"],
        )

    def create_instance(self, keypair, security_group):
        # Boot one instance from the source AMI with a public IP so we can
        # reach it over SSH; register termination as a cleanup callback.
        self.plan.echo("Creating a source instance from {}".format(self.resource.source_ami))
        reservations = self.plan.client.run_instances(
            ImageId=self.resource.source_ami,
            InstanceType=self.plan.resource.instance_type,
            MaxCount=1,
            MinCount=1,
            KeyName=keypair['KeyName'],
            NetworkInterfaces=[{
                "DeviceIndex": 0,
                "AssociatePublicIpAddress": True,
                "Groups": [security_group['GroupId']],
            }],
        )

        if len(reservations.get("Instances", [])) == 0:
            raise errors.Error("No instances were started")
        elif len(reservations["Instances"]) > 1:
            raise errors.Error("Somehow multiple instances were started!?")

        instance = reservations["Instances"][0]
        self.stack.callback(self.terminate_instance, instance)

        self.plan.echo("Waiting for instance {} to boot...".format(instance["InstanceId"]))
        self.plan.client.get_waiter("instance_running").wait(InstanceIds=[instance["InstanceId"]])

        # We have to now get the info about the isntance again so we know
        # it's public ip address
        reservation = self.plan.client.describe_instances(
            InstanceIds=[instance["InstanceId"]]
        )['Reservations'][0]

        return reservation["Instances"][0]

    def deploy_instance(self, keypair, instance):
        # SSH in with the temporary key and run the configured provisioner.
        cli = ssh.Client(self.plan)
        cli.connect(
            hostname=instance['PublicIpAddress'],
            username=self.resource.username,
            pkey=ssh.private_key_from_string(keypair['KeyMaterial']),
            look_for_keys=False,
        )
        cli.run_script(**serializers.Resource().render(self.runner, self.resource.provisioner))

    def terminate_instance(self, instance):
        self.plan.echo("Terminating instance")
        self.plan.client.terminate_instances(
            InstanceIds=[instance["InstanceId"]],
        )
        self.plan.echo("Waiting for instance to go away")
        self.plan.client.get_waiter("instance_terminated").wait(InstanceIds=[instance["InstanceId"]])

    def run(self):
        # The ExitStack guarantees keypair/security-group/instance cleanup in
        # reverse order of creation, even if a step fails.
        self.stack = ExitStack()
        with self.stack:
            keypair = self.create_keypair()
            security_group = self.create_security_group()
            instance = self.create_instance(keypair, security_group)

            self.plan.echo("Deploying instance")
            self.deploy_instance(keypair, instance)

            self.plan.echo("Creating image")
            image = self.plan.client.create_image(
                Name=self.resource.name,
                InstanceId=instance['InstanceId'],
            )

            self.plan.echo("Waiting for image to become available")
            self.plan.client.get_waiter("image_available").wait(ImageIds=[image["ImageId"]])


class Describe(SimpleDescribe, Plan):
    """Find an existing AMI by its Name tag via describe_images."""

    resource = Image
    service_name = 'ec2'
    describe_action = "describe_images"
    describe_envelope = "Images"
    key = 'ImageId'

    def get_describe_filters(self):
        return {"Filters": [{"Name": "name", "Values": [self.resource.name]}]}


class Apply(SimpleApply, Describe):
    """Create the AMI (by running BuildInstance) and keep its launch
    permissions in sync with the declared ``launch_permissions`` list."""

    create_action = "create_image"
    create_response = "not-that-useful"

    signature = (
        Present("name"),
    )

    def create_object(self):
        return BuildInstance(self)

    def update_object(self):
        for change in super(Apply, self).update_object():
            yield change

        description = ["Update who can launch this image"]

        # Current launch permissions on the remote image (if it exists).
        remote_userids = []
        if self.object:
            results = self.client.describe_image_attribute(
                ImageId=self.object['ImageId'],
                Attribute="launchPermission",
            ).get("LaunchPermissions", [])
            remote_userids = [r['UserId'] for r in results]

        # Users declared locally but missing remotely -> grant.
        add = []
        for userid in self.resource.launch_permissions:
            if userid not in remote_userids:
                description.append("Add launch permission for '{}'".format(userid))
                add.append({"UserId": userid})

        # Users present remotely but no longer declared -> revoke.
        remove = []
        for userid in remote_userids:
            if userid not in self.resource.launch_permissions:
                description.append("Remove launch permission for '{}'".format(userid))
                remove.append({"UserId": userid})

        if add or remove:
            yield self.generic_action(
                description,
                self.client.modify_image_attribute,
                ImageId=serializers.Identifier(),
                Attribute="launchPermission",
                LaunchPermission=dict(
                    Add=add,
                    Remove=remove,
                ),
            )


class Destroy(SimpleDestroy, Describe):
    """Deregister the AMI."""

    destroy_action = "deregister_image"
import re

from behave import *


# ==============================================================================
# Helpers

def page_exists(table, slug):
    """True if the admin page-list *table* (BeautifulSoup node) has a row
    whose view-link URL ends in *slug*."""
    return get_page_row(table, slug) is not None


def get_page_row(table, slug):
    """Return the <tr> for the page with *slug*, or None.

    The slug is taken from the last path segment of the view link in the
    4th column of each row.
    """
    tbody = table.find("tbody")
    rows = tbody.find_all("tr")
    for row in rows:
        cols = row.find_all("td")
        view_link = cols[3].find("a")
        view_url = view_link["href"]
        url_parts = view_url.split("/")
        page_slug = url_parts[len(url_parts) - 1]
        if page_slug == slug:
            return row
    return None


def user_exists(table, email):
    """True if the admin user-list *table* has a row for *email*."""
    return get_user_row(table, email) is not None


def get_user_row(table, email):
    """Return the <tr> for the user with *email* (2nd column), or None."""
    tbody = table.find("tbody")
    rows = tbody.find_all("tr")
    for row in rows:
        cols = row.find_all("td")
        curr_email = cols[1]
        if curr_email.text == email:
            return row
    return None


def login(context):
    """Log in as the fixture test user; no-op if already on the Admin page."""
    b = context.browser
    context.open_url("auth/login")
    soup = context.get_soup()
    if soup.title.string == "Admin":
        return  # already authenticated
    b.select_form(nr=0)
    b.form["email"] = "tester@test.com"
    b.form["password"] = "devpass"
    b.submit()


def logout(context):
    """Log out via the admin page's logout form; no-op if not logged in."""
    b = context.browser
    context.open_url("admin")
    soup = context.get_soup()
    if soup.title.string != "Admin":
        return  # not authenticated
    b.select_form(name="logout")
    b.submit()


# ==============================================================================
# General

@given(u'I am authorized')
def step_impl(context):
    login(context)


@given(u'I am not authorized')
def step_impl(context):
    logout(context)


@when(u'I logout')
def step_impl(context):
    logout(context)


@given(u'a page with slug "{slug}" does not exist')
def step_impl(context, slug):
    curs = context.db.cursor()
    # SECURITY FIX: bind *slug* as a query parameter instead of interpolating
    # it into the SQL string (the original used "'%s'" % slug, which is
    # injectable and breaks on slugs containing quotes). Uses the DB-API
    # 'pyformat'/'format' paramstyle (psycopg2).
    curs.execute("DELETE FROM text_page WHERE slug = %s;", (slug,))
    context.db.commit()
    curs.close()


@given(u'a user with email "{email}" does not exist')
def step_impl(context, email):
    curs = context.db.cursor()
    # SECURITY FIX: parameterized, see note above.
    curs.execute("DELETE FROM public.user WHERE email = %s;", (email,))
    context.db.commit()
    curs.close()


@when(u'I visit "{path}"')
def step_impl(context, path):
    context.open_url(path)


@then(u'I should see the "{page_identifier}" page')
def step_impl(context, page_identifier):
    soup = context.get_soup()
    wrapper = soup.find(
        "div",
        {"id": re.compile(r'.*' + page_identifier + '.*')}
    )
    assert wrapper is not None


# ==============================================================================
# User related

@when(u'I login')
def step_impl(context):
    login(context)


@when(u'I login with wrong credentials')
def step_impl(context):
    b = context.browser
    context.open_url("auth/login")
    soup = context.get_soup()
    assert soup.title.string == "Login"
    b.select_form(nr=0)
    b.form["email"] = "blablabla@asd.com"
    b.form["password"] = "wrongpassword"
    b.submit()


@when(u'I create a user with email "{email}"')
def step_impl(context, email):
    b = context.browser
    context.open_url("user/create")
    b.select_form(name="user_create")
    fields = context.get_hident_fields()
    context.assign_hident(fields, "email", email)
    b.submit()


@when(u'I delete user with email "{email}"')
def step_impl(context, email):
    b = context.browser
    context.open_url("user")
    soup = context.get_soup()
    table = soup.find("table")
    row = get_user_row(table, email)
    cols = row.find_all("td")
    remove_link = cols[4].find("form")
    remove_url = remove_link["action"]
    # Select the per-row remove form by its action URL, then confirm.
    for form in b.forms():
        if form.action == remove_url:
            b.form = form
            b.submit()
    b.select_form(name="user_remove")
    b.submit()


@then(u'I should see the user list {inclusion} the user "{email}"')
def step_impl(context, inclusion, email):
    b = context.browser
    context.open_url("user")
    soup = context.get_soup()
    table = soup.find("table")
    if inclusion == "with":
        assert user_exists(table, email)
    else:
        assert not user_exists(table, email)


# ==============================================================================
# Page related

@when(u'I create a {visibility} page with slug "{slug}"')
def step_impl(context, visibility, slug):
    b = context.browser
    context.open_url("page/create")
    b.select_form(name="page")
    fields = context.get_hident_fields()
    context.assign_hident(fields, "name", slug)
    context.assign_hident(fields, "slug", slug)
    context.assign_hident(fields, "body", "Is this my body?")
    if visibility == "public":
        context.assign_hident(fields, "is_public", ["yes"])
    else:
        context.assign_hident(fields, "is_public", ["no"])
    b.submit()


@when(u'I delete page with slug "{slug}"')
def step_impl(context, slug):
    b = context.browser
    context.open_url("page")
    soup = context.get_soup()
    table = soup.find("table")
    row = get_page_row(table, slug)
    cols = row.find_all("td")
    remove_link = cols[5].find("form")
    remove_url = remove_link["action"]
    # Select the per-row remove form by its action URL, then confirm.
    for form in b.forms():
        if form.action == remove_url:
            b.form = form
            b.submit()
    b.select_form(name="page_remove")
    b.submit()


@then(u'I should see the page list {inclusion} the page "{slug}"')
def step_impl(context, inclusion, slug):
    b = context.browser
    context.open_url("page")
    soup = context.get_soup()
    table = soup.find("table")
    if inclusion == "with":
        assert page_exists(table, slug)
    else:
        assert not page_exists(table, slug)


@then(u'I should see a duplicate error message')
def step_impl(context):
    soup = context.get_soup()
    alert = soup.find("div", {"id": "alert-holder"})
    assert "exists already" in alert.text
# -*- coding: utf-8 -*-
import numpy as np

from ..Qt import QtGui, QtCore
from ..python2_3 import asUnicode, basestring
from .. import metaarray


__all__ = ['TableWidget']


def _defersort(fn):
    # Decorator: disable table sorting for the duration of *fn* and restore
    # it afterwards, so bulk row insertion is not re-sorted per item.
    def defersort(self, *args, **kwds):
        # may be called recursively; only the first call needs to block sorting
        setSorting = False
        if self._sorting is None:
            self._sorting = self.isSortingEnabled()
            setSorting = True
            self.setSortingEnabled(False)
        try:
            return fn(self, *args, **kwds)
        finally:
            if setSorting:
                self.setSortingEnabled(self._sorting)
                self._sorting = None

    return defersort


class TableWidget(QtGui.QTableWidget):
    """Extends QTableWidget with some useful functions for automatic data handling
    and copy / export context menu. Can automatically format and display a variety
    of data types (see :func:`setData() <pyqtgraph.TableWidget.setData>` for more
    information.
    """

    def __init__(self, *args, **kwds):
        """
        All positional arguments are passed to QTableWidget.__init__().

        ===================== =================================================
        **Keyword Arguments**
        editable              (bool) If True, cells in the table can be edited
                              by the user. Default is False.
        sortable              (bool) If True, the table may be soted by
                              clicking on column headers. Note that this also
                              causes rows to appear initially shuffled until
                              a sort column is selected. Default is True.
                              *(added in version 0.9.9)*
        ===================== =================================================
        """
        QtGui.QTableWidget.__init__(self, *args)

        self.itemClass = TableWidgetItem

        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.setSelectionMode(QtGui.QAbstractItemView.ContiguousSelection)
        self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        self.clear()

        kwds.setdefault('sortable', True)
        kwds.setdefault('editable', False)
        self.setEditable(kwds.pop('editable'))
        self.setSortingEnabled(kwds.pop('sortable'))

        if len(kwds) > 0:
            raise TypeError("Invalid keyword arguments '%s'" % list(kwds.keys()))

        self._sorting = None  # used when temporarily disabling sorting

        self._formats = {None: None}  # stores per-column formats and entire table format
        self.sortModes = {}  # stores per-column sort mode

        self.itemChanged.connect(self.handleItemChanged)

        self.contextMenu = QtGui.QMenu()
        self.contextMenu.addAction('Copy Selection').triggered.connect(self.copySel)
        self.contextMenu.addAction('Copy All').triggered.connect(self.copyAll)
        self.contextMenu.addAction('Save Selection').triggered.connect(self.saveSel)
        self.contextMenu.addAction('Save All').triggered.connect(self.saveAll)

    def clear(self):
        """Clear all contents from the table."""
        QtGui.QTableWidget.clear(self)
        self.verticalHeadersSet = False
        self.horizontalHeadersSet = False
        self.items = []
        self.setRowCount(0)
        self.setColumnCount(0)
        self.sortModes = {}

    def setData(self, data):
        """Set the data displayed in the table.
        Allowed formats are:

        * numpy arrays
        * numpy record arrays
        * metaarrays
        * list-of-lists  [[1,2,3], [4,5,6]]
        * dict-of-lists  {'x': [1,2,3], 'y': [4,5,6]}
        * list-of-dicts  [{'x': 1, 'y': 4}, {'x': 2, 'y': 5}, ...]
        """
        self.clear()
        self.appendData(data)
        self.resizeColumnsToContents()

    @_defersort
    def appendData(self, data):
        """
        Add new rows to the table.

        See :func:`setData() <pyqtgraph.TableWidget.setData>` for accepted
        data types.
        """
        startRow = self.rowCount()

        # fn0/header0 iterate rows; fn1/header1 iterate columns of a row.
        fn0, header0 = self.iteratorFn(data)
        if fn0 is None:
            self.clear()
            return
        it0 = fn0(data)
        try:
            first = next(it0)
        except StopIteration:
            return
        fn1, header1 = self.iteratorFn(first)
        if fn1 is None:
            self.clear()
            return

        # Use the first row to determine the column count.
        firstVals = [x for x in fn1(first)]
        self.setColumnCount(len(firstVals))

        if not self.verticalHeadersSet and header0 is not None:
            labels = [self.verticalHeaderItem(i).text() for i in range(self.rowCount())]
            self.setRowCount(startRow + len(header0))
            self.setVerticalHeaderLabels(labels + header0)
            self.verticalHeadersSet = True
        if not self.horizontalHeadersSet and header1 is not None:
            self.setHorizontalHeaderLabels(header1)
            self.horizontalHeadersSet = True

        i = startRow
        self.setRow(i, firstVals)
        for row in it0:
            i += 1
            self.setRow(i, [x for x in fn1(row)])

        # self._sorting here is the pre-deferral sorting state saved by the
        # _defersort decorator wrapping this method.
        if (self._sorting and self.horizontalHeadersSet and
                self.horizontalHeader().sortIndicatorSection() >= self.columnCount()):
            self.sortByColumn(0, QtCore.Qt.AscendingOrder)

    def setEditable(self, editable=True):
        # Apply the editable flag to all existing items; new items pick it up
        # from self.editable.
        self.editable = editable
        for item in self.items:
            item.setEditable(editable)

    def setFormat(self, format, column=None):
        """
        Specify the default text formatting for the entire table, or for a
        single column if *column* is specified.

        If a string is specified, it is used as a format string for converting
        float values (and all other types are converted using str). If a
        function is specified, it will be called with the item as its only
        argument and must return a string. Setting format = None causes the
        default formatter to be used instead.

        Added in version 0.9.9.

        """
        if format is not None and not isinstance(format, basestring) and not callable(format):
            raise ValueError("Format argument must string, callable, or None. (got %s)" % format)

        self._formats[column] = format

        if column is None:
            # update format of all items that do not have a column format
            # specified
            for c in range(self.columnCount()):
                if self._formats.get(c, None) is None:
                    for r in range(self.rowCount()):
                        item = self.item(r, c)
                        if item is None:
                            continue
                        item.setFormat(format)
        else:
            # set all items in the column to use this format, or the default
            # table format if None was specified.
            if format is None:
                format = self._formats[None]
            for r in range(self.rowCount()):
                item = self.item(r, column)
                if item is None:
                    continue
                item.setFormat(format)

    def iteratorFn(self, data):
        ## Return 1) a function that will provide an iterator for data and 2) a list of header strings
        if isinstance(data, list) or isinstance(data, tuple):
            return lambda d: d.__iter__(), None
        elif isinstance(data, dict):
            return lambda d: iter(d.values()), list(map(asUnicode, data.keys()))
        elif (hasattr(data, 'implements') and data.implements('MetaArray')):
            if data.axisHasColumns(0):
                header = [asUnicode(data.columnName(0, i)) for i in range(data.shape[0])]
            elif data.axisHasValues(0):
                header = list(map(asUnicode, data.xvals(0)))
            else:
                header = None
            return self.iterFirstAxis, header
        elif isinstance(data, np.ndarray):
            return self.iterFirstAxis, None
        elif isinstance(data, np.void):
            return self.iterate, list(map(asUnicode, data.dtype.names))
        elif data is None:
            return (None,None)
        elif np.isscalar(data):
            return self.iterateScalar, None
        else:
            msg = "Don't know how to iterate over data type: {!s}".format(type(data))
            raise TypeError(msg)

    def iterFirstAxis(self, data):
        # Yield successive slices along the first axis of an ndarray-like.
        for i in range(data.shape[0]):
            yield data[i]

    def iterate(self, data):
        # for numpy.void, which can be iterated but mysteriously
        # has no __iter__ (??)
for x in data: yield x def iterateScalar(self, data): yield data def appendRow(self, data): self.appendData([data]) @_defersort def addRow(self, vals): row = self.rowCount() self.setRowCount(row + 1) self.setRow(row, vals) @_defersort def setRow(self, row, vals): if row > self.rowCount() - 1: self.setRowCount(row + 1) for col in range(len(vals)): val = vals[col] item = self.itemClass(val, row) item.setEditable(self.editable) sortMode = self.sortModes.get(col, None) if sortMode is not None: item.setSortMode(sortMode) format = self._formats.get(col, self._formats[None]) item.setFormat(format) self.items.append(item) self.setItem(row, col, item) item.setValue(val) # Required--the text-change callback is invoked # when we call setItem. def setSortMode(self, column, mode): """ Set the mode used to sort *column*. ============== ======================================================== **Sort Modes** value Compares item.value if available; falls back to text comparison. text Compares item.text() index Compares by the order in which items were inserted. 
============== ======================================================== Added in version 0.9.9 """ for r in range(self.rowCount()): item = self.item(r, column) if hasattr(item, 'setSortMode'): item.setSortMode(mode) self.sortModes[column] = mode def sizeHint(self): # based on http://stackoverflow.com/a/7195443/54056 width = sum(self.columnWidth(i) for i in range(self.columnCount())) width += self.verticalHeader().sizeHint().width() width += self.verticalScrollBar().sizeHint().width() width += self.frameWidth() * 2 height = sum(self.rowHeight(i) for i in range(self.rowCount())) height += self.verticalHeader().sizeHint().height() height += self.horizontalScrollBar().sizeHint().height() return QtCore.QSize(width, height) def serialize(self, useSelection=False): """Convert entire table (or just selected area) into tab-separated text values""" if useSelection: selection = self.selectedRanges()[0] rows = list(range(selection.topRow(), selection.bottomRow() + 1)) columns = list(range(selection.leftColumn(), selection.rightColumn() + 1)) else: rows = list(range(self.rowCount())) columns = list(range(self.columnCount())) data = [] if self.horizontalHeadersSet: row = [] if self.verticalHeadersSet: row.append(asUnicode('')) for c in columns: row.append(asUnicode(self.horizontalHeaderItem(c).text())) data.append(row) for r in rows: row = [] if self.verticalHeadersSet: row.append(asUnicode(self.verticalHeaderItem(r).text())) for c in columns: item = self.item(r, c) if item is not None: row.append(asUnicode(item.value)) else: row.append(asUnicode('')) data.append(row) s = '' for row in data: s += ('\t'.join(row) + '\n') return s def copySel(self): """Copy selected data to clipboard.""" QtGui.QApplication.clipboard().setText(self.serialize(useSelection=True)) def copyAll(self): """Copy all data to clipboard.""" QtGui.QApplication.clipboard().setText(self.serialize(useSelection=False)) def saveSel(self): """Save selected data to file.""" 
self.save(self.serialize(useSelection=True)) def saveAll(self): """Save all data to file.""" self.save(self.serialize(useSelection=False)) def save(self, data): fileName = QtGui.QFileDialog.getSaveFileName(self, "Save As..", "", "Tab-separated values (*.tsv)") if fileName == '': return open(fileName, 'w').write(data) def contextMenuEvent(self, ev): self.contextMenu.popup(ev.globalPos()) def keyPressEvent(self, ev): if ev.key() == QtCore.Qt.Key_C and ev.modifiers() == QtCore.Qt.ControlModifier: ev.accept() self.copySel() else: QtGui.QTableWidget.keyPressEvent(self, ev) def handleItemChanged(self, item): item.itemChanged() class TableWidgetItem(QtGui.QTableWidgetItem): def __init__(self, val, index, format=None): QtGui.QTableWidgetItem.__init__(self, '') self._blockValueChange = False self._format = None self._defaultFormat = '%0.3g' self.sortMode = 'value' self.index = index flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled self.setFlags(flags) self.setValue(val) self.setFormat(format) def setEditable(self, editable): """ Set whether this item is user-editable. """ if editable: self.setFlags(self.flags() | QtCore.Qt.ItemIsEditable) else: self.setFlags(self.flags() & ~QtCore.Qt.ItemIsEditable) def setSortMode(self, mode): """ Set the mode used to sort this item against others in its column. ============== ======================================================== **Sort Modes** value Compares item.value if available; falls back to text comparison. text Compares item.text() index Compares by the order in which items were inserted. ============== ======================================================== """ modes = ('value', 'text', 'index', None) if mode not in modes: raise ValueError('Sort mode must be one of %s' % str(modes)) self.sortMode = mode def setFormat(self, fmt): """Define the conversion from item value to displayed text. 
If a string is specified, it is used as a format string for converting float values (and all other types are converted using str). If a function is specified, it will be called with the item as its only argument and must return a string. Added in version 0.9.9. """ if fmt is not None and not isinstance(fmt, basestring) and not callable(fmt): raise ValueError("Format argument must string, callable, or None. (got %s)" % fmt) self._format = fmt self._updateText() def _updateText(self): self._blockValueChange = True try: self._text = self.format() self.setText(self._text) finally: self._blockValueChange = False def setValue(self, value): self.value = value self._updateText() def itemChanged(self): """Called when the data of this item has changed.""" if self.text() != self._text: self.textChanged() def textChanged(self): """Called when this item's text has changed for any reason.""" self._text = self.text() if self._blockValueChange: # text change was result of value or format change; do not # propagate. 
return import traceback print("====================", self.text()) traceback.print_stack() try: self.value = type(self.value)(self.text()) except ValueError: self.value = str(self.text()) def format(self): if callable(self._format): return self._format(self) if isinstance(self.value, (float, np.floating)): if self._format is None: return self._defaultFormat % self.value else: return self._format % self.value else: return asUnicode(self.value) def __lt__(self, other): if self.sortMode == 'index' and hasattr(other, 'index'): return self.index < other.index if self.sortMode == 'value' and hasattr(other, 'value'): return self.value < other.value else: return self.text() < other.text() if __name__ == '__main__': app = QtGui.QApplication([]) win = QtGui.QMainWindow() t = TableWidget() win.setCentralWidget(t) win.resize(800,600) win.show() ll = [[1,2,3,4,5]] * 20 ld = [{'x': 1, 'y': 2, 'z': 3}] * 20 dl = {'x': list(range(20)), 'y': list(range(20)), 'z': list(range(20))} a = np.ones((20, 5)) ra = np.ones((20,), dtype=[('x', int), ('y', int), ('z', int)]) t.setData(ll) ma = metaarray.MetaArray(np.ones((20, 3)), info=[ {'values': np.linspace(1, 5, 20)}, {'cols': [ {'name': 'x'}, {'name': 'y'}, {'name': 'z'}, ]} ]) t.setData(ma)
#####################################################################################
#
#  Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the  Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################

# NOTE(review): Python 2 / IronPython test module — relies on Python 2
# `__metaclass__` semantics, old-style classes, and `dict.iteritems`.

from iptest.assert_util import *

# ref: http://docs.python.org/ref/metaclasses.html

# Old-style class (no explicit base) — behaves differently from New under
# the Python 2 metaclass machinery exercised below.
class Old:
    def method(self):
        return 10

# Equivalent new-style class.
class New(object):
    def method(self):
        return 10

def g_f_modify(new_base=None, new_name=None):
    # Returns a *function* usable as a metaclass: it rewrites the class name,
    # prepends extra bases, and injects a 'version' attribute.
    def f_modify(name, bases, dict):
        if new_name:
            name = new_name
        if new_base:
            bases = new_base + bases
        dict['version'] = 2.4
        return type(name, bases, dict)
    return f_modify

def g_c_modify(new_base=None, new_name=None):
    # Returns a *type subclass* usable as a metaclass, with the same
    # rewriting behavior as g_f_modify.
    class c_modify(type):
        def __new__(cls, name, bases, dict):
            if new_name:
                name = new_name
            if new_base:
                bases = new_base + bases
            dict['version'] = 2.4
            return super(c_modify, cls).__new__(cls, name, bases, dict)
    return c_modify

# Modifying the class dictionary prior to the class being created.
def test_modify():
    def _check(T):
        x = T()
        AreEqual(x.version, 2.4)
        AreEqual(x.method(), 10)
        AreEqual(x.__class__.__name__, "D")

    for f in [g_f_modify, g_c_modify]:
        class C(object):
            __metaclass__ = f((New,), "D")
        _check(C)

        class C:
            __metaclass__ = f((New,), "D")
        _check(C)

        class C(object):
            __metaclass__ = f((Old,), "D")
        _check(C)

        # mixing an old-style extra base into an old-style class must fail
        try:
            class C:
                __metaclass__ = f((Old,), "D")
        except TypeError:
            pass
        else:
            Fail("Should have thrown")

# Metaclass that renames CamelCase attributes to dash_case (snake_case).
class dash_attributes(type):
    def __new__(metaclass, name, bases, dict):
        new_dict = {}
        for key, val in dict.iteritems():
            new_key = key[0].lower()
            for x in key[1:]:
                if not x.islower():
                    new_key += "_" + x.lower()
                else:
                    new_key += x
            new_dict[new_key] = val
        return super(dash_attributes, metaclass).__new__(metaclass, name, bases, new_dict)

def test_dash_attribute():
    class C(object):
        __metaclass__ = dash_attributes
        def WriteLine(self, *arg):
            return 4

    x = C()
    Assert(not hasattr(C, "WriteLine"))
    AreEqual(x.write_line(), 4)

def test_basic():
    def try_metaclass(t):
        class C(object):
            __metaclass__ = t
            def method(self):
                return 10
        x = C()
        AreEqual(x.method(), 10)

    try_metaclass(type)
    #try_metaclass(type(Old))  # bug 364938
    try_metaclass(dash_attributes)
    try_metaclass(sub_type1)

    ## subclassing
    class C1(object):
        __metaclass__ = g_c_modify()
    class C2:
        __metaclass__ = g_f_modify()

    # not defining __metaclass__ for C — the metaclass is inherited
    for C in [C1, C2]:
        class D(C):
            pass
        Assert(hasattr(D, "version"))
        AreEqual(D().version, 2.4)

    # redefining __metaclass__
    try:
        class D(C1):
            __metaclass__ = dash_attributes
    except TypeError:
        pass
    else:
        Fail("metaclass conflict expected")

    class D(C2):
        __metaclass__ = dash_attributes
        def StartSomethingToday(self):
            pass
    Assert(hasattr(D, "version"))
    Assert(hasattr(D, "start_something_today"))

def test_find_metaclass():
    class A1:
        pass
    class A2(object):
        pass
    AreEqual(A2.__class__, type)

    class B1:
        __metaclass__ = dash_attributes
    class B2(object):
        __metaclass__ = dash_attributes

    # a module-level __metaclass__ applies to old-style class statements
    global __metaclass__
    __metaclass__ = lambda *args: 100

    # a __metaclass__ defined in the class body takes precedence
    class C1:
        def __metaclass__(*args):
            return 200
    AreEqual(C1, 200)

    class C2(object):
        def __metaclass__(*args):
            return 200
    AreEqual(C2, 200)

    class D1:
        pass
    AreEqual(D1, 100)

    # new-style classes ignore the module-level __metaclass__
    class D2(object):
        pass
    AreEqual(D2.__class__, type)

    # base order: how to see the effect of the order???
    for x in [
        A1,
        #A2,  # bug 364991
    ]:
        for y in [B1, B2]:
            class E(x, y):
                def PythonMethod(self):
                    pass
            Assert(hasattr(E, "python_method"))

            class E(y, x):
                def PythonMethod(self):
                    pass
            Assert(hasattr(E, "python_method"))

    del __metaclass__
    class F1:
        pass
    Assert(F1 != 100)

global flag  # to track which __metaclass__'es get invoked
flag = 0

class sub_type1(type):
    def __new__(cls, name, bases, dict):
        global flag
        flag += 1
        return super(sub_type1, cls).__new__(cls, name, bases, dict)

class sub_type2(type):
    def __new__(cls, name, bases, dict):
        global flag
        flag += 10
        return super(sub_type2, cls).__new__(cls, name, bases, dict)

class sub_type3(sub_type2):  # subclass
    def __new__(cls, name, bases, dict):
        global flag
        flag += 100
        return super(sub_type3, cls).__new__(cls, name, bases, dict)

def test_conflict():
    global flag
    class C1(object):
        pass
    class C2(object):
        __metaclass__ = sub_type1
    class C3(object):
        __metaclass__ = sub_type2
    class C4(object):
        __metaclass__ = sub_type3

    flag = 0
    class D(C1, C2):
        pass
    #AreEqual(flag, 1)  # bug 364991

    flag = 0
    class D(C2, C1):
        pass
    #AreEqual(flag, 1)

    flag = 0
    class D(C3, C4):
        pass  # C4 derive from C3
    #AreEqual(flag, 120)

    flag = 0
    class D(C3, C1, C4):
        pass
    #AreEqual(flag, 120)

    flag = 0
    class D(C4, C1):
        pass
    #AreEqual(flag, 110)

    # unrelated metaclasses among the bases must raise TypeError
    def f1():
        class D(C2, C3):
            pass
    def f2():
        class D(C1, C2, C3):
            pass
    def f3():
        class D(C2, C1, C3):
            pass

    for f in [
        f1,
        #f2,  # bug 364991
        f3,
    ]:
        AssertError(TypeError, f)

def test_bad_choices():
    # non-callable or otherwise invalid __metaclass__ values must raise
    def create(x):
        class C(object):
            __metaclass__ = x

    for x in [
        #None,  # bug 364967
        1,
        [],
        lambda name, bases, dict, extra: 1,
        lambda name, bases: 1,
        Old,
        New,
    ]:
        AssertError(TypeError, create, x)

# copied from test_class.py
def test_metaclass_call_override():
    """overriding __call__ on a metaclass should work"""
    class mytype(type):
        def __call__(self, *args):
            return args

    class myclass(object):
        __metaclass__ = mytype

    AreEqual(myclass(1, 2, 3), (1, 2, 3))

def test_metaclass():
    global __metaclass__, recvArgs

    # verify we can use a function as a metaclass in the dictionary
    recvArgs = None
    def funcMeta(*args):
        global recvArgs
        recvArgs = args

    class foo:
        __metaclass__ = funcMeta
    AreEqual(recvArgs, ('foo', (), {'__module__' : __name__, '__metaclass__' : funcMeta}))

    class foo(object):
        __metaclass__ = funcMeta
    AreEqual(recvArgs, ('foo', (object, ), {'__module__' : __name__, '__metaclass__' : funcMeta}))

    # verify setting __metaclass__ to default old-style type works
    class classType:
        pass
    classType = type(classType)  # get classObj for tests
    __metaclass__ = classType
    class c:
        pass
    AreEqual(type(c), classType)
    del(__metaclass__)

    # verify setting __metaclass__ to default new-style type works
    __metaclass__ = type
    class c:
        pass
    AreEqual(type(c), type)
    del(__metaclass__)

    # try setting it a different way - by getting it from a type
    class c(object):
        pass
    __metaclass__ = type(c)
    class xyz:
        pass
    AreEqual(type(xyz), type(c))
    del(__metaclass__)

    # verify setting __metaclass__ at module scope to a function works
    __metaclass__ = funcMeta
    recvArgs = None
    class foo:
        pass
    # note no __metaclass__ because it's not in our dict
    AreEqual(recvArgs, ('foo', (), {'__module__' : __name__}))

    # clean up __metaclass__ for other tests
    del(__metaclass__)

def test_arguments():
    class MetaType(type):
        def __init__(cls, name, bases, dict):
            super(MetaType, cls).__init__(name, bases, dict)

    class Base(object):
        __metaclass__ = MetaType

    class A(Base):
        def __init__(self, a, b='b', c='12', d='', e=''):
            self.val = a + b + c + d + e

    a = A('hello')
    AreEqual(a.val, 'hellob12')
    b = ('there',)
    a = A('hello', *b)
    AreEqual(a.val, 'hellothere12')
    c = ['42','23']
    a = A('hello', *c)
    AreEqual(a.val, 'hello4223')
    x = ()
    y = {'d': 'boom'}
    a = A('hello', *x, **y)
    AreEqual(a.val, 'hellob12boom')

run_test(__name__)
"""Support for Android IP Webcam.""" import asyncio from datetime import timedelta from pydroid_ipcam import PyDroidIPCam import voluptuous as vol from homeassistant.components.mjpeg.camera import CONF_MJPEG_URL, CONF_STILL_IMAGE_URL from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PLATFORM, CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SWITCHES, CONF_TIMEOUT, CONF_USERNAME, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import discovery from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.helpers.typing import ConfigType from homeassistant.util.dt import utcnow ATTR_AUD_CONNS = "Audio Connections" ATTR_HOST = "host" ATTR_VID_CONNS = "Video Connections" CONF_MOTION_SENSOR = "motion_sensor" DATA_IP_WEBCAM = "android_ip_webcam" DEFAULT_NAME = "IP Webcam" DEFAULT_PORT = 8080 DEFAULT_TIMEOUT = 10 DOMAIN = "android_ip_webcam" SCAN_INTERVAL = timedelta(seconds=10) SIGNAL_UPDATE_DATA = "android_ip_webcam_update" KEY_MAP = { "audio_connections": "Audio Connections", "adet_limit": "Audio Trigger Limit", "antibanding": "Anti-banding", "audio_only": "Audio Only", "battery_level": "Battery Level", "battery_temp": "Battery Temperature", "battery_voltage": "Battery Voltage", "coloreffect": "Color Effect", "exposure": "Exposure Level", "exposure_lock": "Exposure Lock", "ffc": "Front-facing Camera", "flashmode": "Flash Mode", "focus": "Focus", "focus_homing": "Focus Homing", "focus_region": "Focus Region", "focusmode": "Focus Mode", "gps_active": "GPS Active", "idle": "Idle", "ip_address": "IPv4 Address", "ipv6_address": "IPv6 Address", "ivideon_streaming": "Ivideon Streaming", "light": "Light Level", 
"mirror_flip": "Mirror Flip", "motion": "Motion", "motion_active": "Motion Active", "motion_detect": "Motion Detection", "motion_event": "Motion Event", "motion_limit": "Motion Limit", "night_vision": "Night Vision", "night_vision_average": "Night Vision Average", "night_vision_gain": "Night Vision Gain", "orientation": "Orientation", "overlay": "Overlay", "photo_size": "Photo Size", "pressure": "Pressure", "proximity": "Proximity", "quality": "Quality", "scenemode": "Scene Mode", "sound": "Sound", "sound_event": "Sound Event", "sound_timeout": "Sound Timeout", "torch": "Torch", "video_connections": "Video Connections", "video_chunk_len": "Video Chunk Length", "video_recording": "Video Recording", "video_size": "Video Size", "whitebalance": "White Balance", "whitebalance_lock": "White Balance Lock", "zoom": "Zoom", } ICON_MAP = { "audio_connections": "mdi:speaker", "battery_level": "mdi:battery", "battery_temp": "mdi:thermometer", "battery_voltage": "mdi:battery-charging-100", "exposure_lock": "mdi:camera", "ffc": "mdi:camera-front-variant", "focus": "mdi:image-filter-center-focus", "gps_active": "mdi:crosshairs-gps", "light": "mdi:flashlight", "motion": "mdi:run", "night_vision": "mdi:weather-night", "overlay": "mdi:monitor", "pressure": "mdi:gauge", "proximity": "mdi:map-marker-radius", "quality": "mdi:quality-high", "sound": "mdi:speaker", "sound_event": "mdi:speaker", "sound_timeout": "mdi:speaker", "torch": "mdi:white-balance-sunny", "video_chunk_len": "mdi:video", "video_connections": "mdi:eye", "video_recording": "mdi:record-rec", "whitebalance_lock": "mdi:white-balance-auto", } SWITCHES = [ "exposure_lock", "ffc", "focus", "gps_active", "motion_detect", "night_vision", "overlay", "torch", "whitebalance_lock", "video_recording", ] SENSORS = [ "audio_connections", "battery_level", "battery_temp", "battery_voltage", "light", "motion", "pressure", "proximity", "sound", "video_connections", ] CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ 
vol.Schema( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional( CONF_TIMEOUT, default=DEFAULT_TIMEOUT ): cv.positive_int, vol.Optional( CONF_SCAN_INTERVAL, default=SCAN_INTERVAL ): cv.time_period, vol.Inclusive(CONF_USERNAME, "authentication"): cv.string, vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string, vol.Optional(CONF_SWITCHES): vol.All( cv.ensure_list, [vol.In(SWITCHES)] ), vol.Optional(CONF_SENSORS): vol.All( cv.ensure_list, [vol.In(SENSORS)] ), vol.Optional(CONF_MOTION_SENSOR): cv.boolean, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up the IP Webcam component.""" webcams = hass.data[DATA_IP_WEBCAM] = {} websession = async_get_clientsession(hass) async def async_setup_ipcamera(cam_config): """Set up an IP camera.""" host = cam_config[CONF_HOST] username = cam_config.get(CONF_USERNAME) password = cam_config.get(CONF_PASSWORD) name = cam_config[CONF_NAME] interval = cam_config[CONF_SCAN_INTERVAL] switches = cam_config.get(CONF_SWITCHES) sensors = cam_config.get(CONF_SENSORS) motion = cam_config.get(CONF_MOTION_SENSOR) # Init ip webcam cam = PyDroidIPCam( hass.loop, websession, host, cam_config[CONF_PORT], username=username, password=password, timeout=cam_config[CONF_TIMEOUT], ) if switches is None: switches = [ setting for setting in cam.enabled_settings if setting in SWITCHES ] if sensors is None: sensors = [sensor for sensor in cam.enabled_sensors if sensor in SENSORS] sensors.extend(["audio_connections", "video_connections"]) if motion is None: motion = "motion_active" in cam.enabled_sensors async def async_update_data(now): """Update data from IP camera in SCAN_INTERVAL.""" await cam.update() async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host) async_track_point_in_utc_time(hass, async_update_data, utcnow() + interval) await async_update_data(None) # Load platforms 
webcams[host] = cam mjpeg_camera = { CONF_PLATFORM: "mjpeg", CONF_MJPEG_URL: cam.mjpeg_url, CONF_STILL_IMAGE_URL: cam.image_url, CONF_NAME: name, } if username and password: mjpeg_camera.update({CONF_USERNAME: username, CONF_PASSWORD: password}) hass.async_create_task( discovery.async_load_platform(hass, "camera", "mjpeg", mjpeg_camera, config) ) if sensors: hass.async_create_task( discovery.async_load_platform( hass, "sensor", DOMAIN, {CONF_NAME: name, CONF_HOST: host, CONF_SENSORS: sensors}, config, ) ) if switches: hass.async_create_task( discovery.async_load_platform( hass, "switch", DOMAIN, {CONF_NAME: name, CONF_HOST: host, CONF_SWITCHES: switches}, config, ) ) if motion: hass.async_create_task( discovery.async_load_platform( hass, "binary_sensor", DOMAIN, {CONF_HOST: host, CONF_NAME: name}, config, ) ) tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]] if tasks: await asyncio.wait(tasks) return True class AndroidIPCamEntity(Entity): """The Android device running IP Webcam.""" def __init__(self, host, ipcam): """Initialize the data object.""" self._host = host self._ipcam = ipcam async def async_added_to_hass(self): """Register update dispatcher.""" @callback def async_ipcam_update(host): """Update callback.""" if self._host != host: return self.async_schedule_update_ha_state(True) self.async_on_remove( async_dispatcher_connect(self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update) ) @property def should_poll(self): """Return True if entity has to be polled for state.""" return False @property def available(self): """Return True if entity is available.""" return self._ipcam.available @property def extra_state_attributes(self): """Return the state attributes.""" state_attr = {ATTR_HOST: self._host} if self._ipcam.status_data is None: return state_attr state_attr[ATTR_VID_CONNS] = self._ipcam.status_data.get("video_connections") state_attr[ATTR_AUD_CONNS] = self._ipcam.status_data.get("audio_connections") return state_attr
"""The tests for the google calendar component.""" # pylint: disable=protected-access import logging import unittest from unittest.mock import patch, Mock import pytest import homeassistant.components.calendar as calendar_base import homeassistant.components.calendar.google as calendar import homeassistant.util.dt as dt_util from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON from homeassistant.helpers.template import DATE_STR_FORMAT from tests.common import get_test_home_assistant, MockDependency TEST_PLATFORM = {calendar_base.DOMAIN: {CONF_PLATFORM: 'test'}} _LOGGER = logging.getLogger(__name__) class TestComponentsGoogleCalendar(unittest.TestCase): """Test the Google calendar.""" hass = None # HomeAssistant # pylint: disable=invalid-name def setUp(self): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.hass.http = Mock() # Set our timezone to CST/Regina so we can check calculations # This keeps UTC-6 all year round dt_util.set_default_time_zone(dt_util.get_time_zone('America/Regina')) # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" dt_util.set_default_time_zone(dt_util.get_time_zone('UTC')) self.hass.stop() @patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_all_day_event(self, mock_next_event): """Test that we can create an event trigger on device.""" week_from_today = dt_util.dt.date.today() \ + dt_util.dt.timedelta(days=7) event = { 'summary': 'Test All Day Event', 'start': { 'date': week_from_today.isoformat() }, 'end': { 'date': (week_from_today + dt_util.dt.timedelta(days=1)) .isoformat() }, 'location': 'Test Cases', 'description': 'We\'re just testing that all day events get setup ' 'correctly', 'kind': 'calendar#event', 'created': '2016-06-23T16:37:57.000Z', 'transparency': 'transparent', 'updated': '2016-06-24T01:57:21.045Z', 'reminders': {'useDefault': True}, 'organizer': { 'email': 
'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'id': '_c8rinwq863h45qnucyoi43ny8', 'etag': '"2933466882090000"', 'htmlLink': 'https://www.google.com/calendar/event?eid=*******', 'iCalUID': 'cydrevtfuybguinhomj@google.com', 'status': 'confirmed' } mock_next_event.return_value.event = event device_name = 'Test All Day' cal = calendar.GoogleCalendarEventDevice(self.hass, None, '', {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_OFF) self.assertFalse(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event['summary'], 'all_day': True, 'offset_reached': False, 'start_time': '{} 00:00:00'.format(event['start']['date']), 'end_time': '{} 00:00:00'.format(event['end']['date']), 'location': event['location'], 'description': event['description'], }) @patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_future_event(self, mock_next_event): """Test that we can create an event trigger on device.""" one_hour_from_now = dt_util.now() \ + dt_util.dt.timedelta(minutes=30) event = { 'start': { 'dateTime': one_hour_from_now.isoformat() }, 'end': { 'dateTime': (one_hour_from_now + dt_util.dt.timedelta(minutes=60)) .isoformat() }, 'summary': 'Test Event in 30 minutes', 'reminders': {'useDefault': True}, 'id': 'aioehgni435lihje', 'status': 'confirmed', 'updated': '2016-11-05T15:52:07.329Z', 'organizer': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True, }, 'created': '2016-11-05T15:52:07.000Z', 'iCalUID': 'dsfohuygtfvgbhnuju@google.com', 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', }, 'etag': '"2956722254658000"', 'kind': 'calendar#event', 'htmlLink': 
'https://www.google.com/calendar/event?eid=*******', } mock_next_event.return_value.event = event device_name = 'Test Future Event' device_id = 'test_future_event' cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id, {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_OFF) self.assertFalse(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event['summary'], 'all_day': False, 'offset_reached': False, 'start_time': one_hour_from_now.strftime(DATE_STR_FORMAT), 'end_time': (one_hour_from_now + dt_util.dt.timedelta(minutes=60)) .strftime(DATE_STR_FORMAT), 'location': '', 'description': '', }) @patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_in_progress_event(self, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() \ - dt_util.dt.timedelta(minutes=30) event = { 'start': { 'dateTime': middle_of_event.isoformat() }, 'end': { 'dateTime': (middle_of_event + dt_util.dt .timedelta(minutes=60)) .isoformat() }, 'summary': 'Test Event in Progress', 'reminders': {'useDefault': True}, 'id': 'aioehgni435lihje', 'status': 'confirmed', 'updated': '2016-11-05T15:52:07.329Z', 'organizer': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True, }, 'created': '2016-11-05T15:52:07.000Z', 'iCalUID': 'dsfohuygtfvgbhnuju@google.com', 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', }, 'etag': '"2956722254658000"', 'kind': 'calendar#event', 'htmlLink': 'https://www.google.com/calendar/event?eid=*******', } mock_next_event.return_value.event = event device_name = 'Test Event in Progress' device_id = 'test_event_in_progress' cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id, {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_ON) 
self.assertFalse(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event['summary'], 'all_day': False, 'offset_reached': False, 'start_time': middle_of_event.strftime(DATE_STR_FORMAT), 'end_time': (middle_of_event + dt_util.dt.timedelta(minutes=60)) .strftime(DATE_STR_FORMAT), 'location': '', 'description': '', }) @patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_offset_in_progress_event(self, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() \ + dt_util.dt.timedelta(minutes=14) event_summary = 'Test Event in Progress' event = { 'start': { 'dateTime': middle_of_event.isoformat() }, 'end': { 'dateTime': (middle_of_event + dt_util.dt .timedelta(minutes=60)) .isoformat() }, 'summary': '{} !!-15'.format(event_summary), 'reminders': {'useDefault': True}, 'id': 'aioehgni435lihje', 'status': 'confirmed', 'updated': '2016-11-05T15:52:07.329Z', 'organizer': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True, }, 'created': '2016-11-05T15:52:07.000Z', 'iCalUID': 'dsfohuygtfvgbhnuju@google.com', 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', }, 'etag': '"2956722254658000"', 'kind': 'calendar#event', 'htmlLink': 'https://www.google.com/calendar/event?eid=*******', } mock_next_event.return_value.event = event device_name = 'Test Event in Progress' device_id = 'test_event_in_progress' cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id, {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_OFF) self.assertTrue(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event_summary, 'all_day': False, 'offset_reached': True, 'start_time': middle_of_event.strftime(DATE_STR_FORMAT), 'end_time': (middle_of_event + dt_util.dt.timedelta(minutes=60)) 
.strftime(DATE_STR_FORMAT), 'location': '', 'description': '', }) @pytest.mark.skip @patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_all_day_offset_in_progress_event(self, mock_next_event): """Test that we can create an event trigger on device.""" tomorrow = dt_util.dt.date.today() \ + dt_util.dt.timedelta(days=1) event_summary = 'Test All Day Event Offset In Progress' event = { 'summary': '{} !!-25:0'.format(event_summary), 'start': { 'date': tomorrow.isoformat() }, 'end': { 'date': (tomorrow + dt_util.dt.timedelta(days=1)) .isoformat() }, 'location': 'Test Cases', 'description': 'We\'re just testing that all day events get setup ' 'correctly', 'kind': 'calendar#event', 'created': '2016-06-23T16:37:57.000Z', 'transparency': 'transparent', 'updated': '2016-06-24T01:57:21.045Z', 'reminders': {'useDefault': True}, 'organizer': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'id': '_c8rinwq863h45qnucyoi43ny8', 'etag': '"2933466882090000"', 'htmlLink': 'https://www.google.com/calendar/event?eid=*******', 'iCalUID': 'cydrevtfuybguinhomj@google.com', 'status': 'confirmed' } mock_next_event.return_value.event = event device_name = 'Test All Day Offset In Progress' device_id = 'test_all_day_offset_in_progress' cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id, {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_OFF) self.assertTrue(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event_summary, 'all_day': True, 'offset_reached': True, 'start_time': '{} 06:00:00'.format(event['start']['date']), 'end_time': '{} 06:00:00'.format(event['end']['date']), 'location': event['location'], 'description': event['description'], }) 
@patch('homeassistant.components.calendar.google.GoogleCalendarData') def test_all_day_offset_event(self, mock_next_event): """Test that we can create an event trigger on device.""" tomorrow = dt_util.dt.date.today() \ + dt_util.dt.timedelta(days=2) offset_hours = (1 + dt_util.now().hour) event_summary = 'Test All Day Event Offset' event = { 'summary': '{} !!-{}:0'.format(event_summary, offset_hours), 'start': { 'date': tomorrow.isoformat() }, 'end': { 'date': (tomorrow + dt_util.dt.timedelta(days=1)) .isoformat() }, 'location': 'Test Cases', 'description': 'We\'re just testing that all day events get setup ' 'correctly', 'kind': 'calendar#event', 'created': '2016-06-23T16:37:57.000Z', 'transparency': 'transparent', 'updated': '2016-06-24T01:57:21.045Z', 'reminders': {'useDefault': True}, 'organizer': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'sequence': 0, 'creator': { 'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com', 'displayName': 'Organizer Name', 'self': True }, 'id': '_c8rinwq863h45qnucyoi43ny8', 'etag': '"2933466882090000"', 'htmlLink': 'https://www.google.com/calendar/event?eid=*******', 'iCalUID': 'cydrevtfuybguinhomj@google.com', 'status': 'confirmed' } mock_next_event.return_value.event = event device_name = 'Test All Day Offset' device_id = 'test_all_day_offset' cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id, {'name': device_name}) self.assertEqual(cal.name, device_name) self.assertEqual(cal.state, STATE_OFF) self.assertFalse(cal.offset_reached()) self.assertEqual(cal.device_state_attributes, { 'message': event_summary, 'all_day': True, 'offset_reached': False, 'start_time': '{} 00:00:00'.format(event['start']['date']), 'end_time': '{} 00:00:00'.format(event['end']['date']), 'location': event['location'], 'description': event['description'], }) @MockDependency("httplib2") def test_update_false(self, mock_httplib2): """Test that the update returns 
False upon Error.""" mock_service = Mock() mock_service.get = Mock( side_effect=mock_httplib2.ServerNotFoundError("unit test")) cal = calendar.GoogleCalendarEventDevice(self.hass, mock_service, None, {'name': "test"}) result = cal.data.update() self.assertFalse(result)
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualHubRouteTableV2SOperations: """VirtualHubRouteTableV2SOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_11_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def get( self, resource_group_name: str, virtual_hub_name: str, route_table_name: str, **kwargs: Any ) -> "_models.VirtualHubRouteTableV2": """Retrieves the details of a VirtualHubRouteTableV2. :param resource_group_name: The resource group name of the VirtualHubRouteTableV2. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param route_table_name: The name of the VirtualHubRouteTableV2. :type route_table_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualHubRouteTableV2, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2 :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, virtual_hub_name: str, route_table_name: str, virtual_hub_route_table_v2_parameters: "_models.VirtualHubRouteTableV2", **kwargs: Any ) -> "_models.VirtualHubRouteTableV2": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } url = 
self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(virtual_hub_route_table_v2_parameters, 'VirtualHubRouteTableV2') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, virtual_hub_name: str, route_table_name: str, virtual_hub_route_table_v2_parameters: "_models.VirtualHubRouteTableV2", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualHubRouteTableV2"]: """Creates a VirtualHubRouteTableV2 resource if it doesn't exist else updates 
the existing VirtualHubRouteTableV2. :param resource_group_name: The resource group name of the VirtualHub. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param route_table_name: The name of the VirtualHubRouteTableV2. :type route_table_name: str :param virtual_hub_route_table_v2_parameters: Parameters supplied to create or update VirtualHubRouteTableV2. :type virtual_hub_route_table_v2_parameters: ~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2 :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either VirtualHubRouteTableV2 or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, virtual_hub_name=virtual_hub_name, route_table_name=route_table_name, virtual_hub_route_table_v2_parameters=virtual_hub_route_table_v2_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) 
else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, virtual_hub_name: str, route_table_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, response) raise HttpResponseError(response=response, model=error, 
error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore async def begin_delete( self, resource_group_name: str, virtual_hub_name: str, route_table_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes a VirtualHubRouteTableV2. :param resource_group_name: The resource group name of the VirtualHubRouteTableV2. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param route_table_name: The name of the VirtualHubRouteTableV2. :type route_table_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, virtual_hub_name=virtual_hub_name, route_table_name=route_table_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore def list( 
self, resource_group_name: str, virtual_hub_name: str, **kwargs: Any ) -> AsyncIterable["_models.ListVirtualHubRouteTableV2SResult"]: """Retrieves the details of all VirtualHubRouteTableV2s. :param resource_group_name: The resource group name of the VirtualHub. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ListVirtualHubRouteTableV2SResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVirtualHubRouteTableV2SResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubRouteTableV2SResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = 
self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ListVirtualHubRouteTableV2SResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables'} # type: ignore
# tests.test_cluster.test_silhouette
# Tests for the SilhouetteVisualizer
#
# Author:   Benjamin Bengfort
# Created:  Mon Mar 27 10:01:37 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_silhouette.py [57b563b] benjamin@bengfort.com $

"""
Tests for the SilhouetteVisualizer
"""

##########################################################################
## Imports
##########################################################################

import sys
import pytest
import matplotlib.pyplot as plt

from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans, MiniBatchKMeans

from unittest import mock
from tests.base import VisualTestCase

from yellowbrick.datasets import load_nfl
from yellowbrick.cluster.silhouette import SilhouetteVisualizer, silhouette_visualizer


##########################################################################
## SilhouetteVisualizer Test Cases
##########################################################################


class TestSilhouetteVisualizer(VisualTestCase):
    """
    Silhouette Visualizer Tests

    NOTE: the ``try/except Exception: self.fail(...)`` wrappers that used to
    surround each test body have been removed. pytest already reports any
    uncaught exception as a test failure with the full traceback, whereas the
    wrapper discarded the traceback and also masked genuine image-comparison
    assertion failures behind a generic "error during silhouette" message.
    """

    @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
    def test_integrated_kmeans_silhouette(self):
        """
        Test no exceptions for kmeans silhouette visualizer on blobs dataset
        """
        # NOTE see #182: cannot use occupancy dataset because of memory usage
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        fig = plt.figure()
        ax = fig.add_subplot()

        visualizer = SilhouetteVisualizer(KMeans(random_state=0), ax=ax)
        visualizer.fit(X)
        visualizer.finalize()

        self.assert_images_similar(visualizer, remove_legend=True)

    @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
    def test_integrated_mini_batch_kmeans_silhouette(self):
        """
        Test no exceptions for mini-batch kmeans silhouette visualizer
        """
        # NOTE see #182: cannot use occupancy dataset because of memory usage
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        fig = plt.figure()
        ax = fig.add_subplot()

        visualizer = SilhouetteVisualizer(MiniBatchKMeans(random_state=0), ax=ax)
        visualizer.fit(X)
        visualizer.finalize()

        self.assert_images_similar(visualizer, remove_legend=True)

    @pytest.mark.skip(reason="no negative silhouette example available yet")
    def test_negative_silhouette_score(self):
        """
        Ensure negative silhouette scores are correctly displayed by the visualizer.
        """
        raise NotImplementedError("no negative silhouette example available")

    @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
    def test_colormap_silhouette(self):
        """
        Test no exceptions for modifying the colormap in a silhouette visualizer
        """
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        fig = plt.figure()
        ax = fig.add_subplot()

        visualizer = SilhouetteVisualizer(
            MiniBatchKMeans(random_state=0), ax=ax, colormap="gnuplot"
        )
        visualizer.fit(X)
        visualizer.finalize()

        self.assert_images_similar(visualizer, remove_legend=True)

    @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
    def test_colors_silhouette(self):
        """
        Test no exceptions for modifying the colors in a silhouette visualizer
        with a list of color names
        """
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        fig = plt.figure()
        ax = fig.add_subplot()

        visualizer = SilhouetteVisualizer(
            MiniBatchKMeans(random_state=0),
            ax=ax,
            colors=["red", "green", "blue", "indigo", "cyan", "lavender"],
        )
        visualizer.fit(X)
        visualizer.finalize()

        self.assert_images_similar(visualizer, remove_legend=True)

    def test_colormap_as_colors_silhouette(self):
        """
        Test no exceptions for modifying the colors in a silhouette visualizer
        by using a matplotlib colormap as colors
        """
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        fig = plt.figure()
        ax = fig.add_subplot()

        visualizer = SilhouetteVisualizer(
            MiniBatchKMeans(random_state=0), ax=ax, colors="cool"
        )
        visualizer.fit(X)
        visualizer.finalize()

        # Fails on AppVeyor with RMS 3.143, so a looser tolerance on Windows
        tol = 3.2 if sys.platform == "win32" else 0.01
        self.assert_images_similar(visualizer, remove_legend=True, tol=tol)

    def test_quick_method(self):
        """
        Test the quick method producing a valid visualization
        """
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )

        # n_clusters made keyword-explicit: scikit-learn deprecates positional
        # estimator arguments; behavior is identical.
        model = MiniBatchKMeans(n_clusters=3, random_state=343)
        oz = silhouette_visualizer(model, X, show=False)
        assert isinstance(oz, SilhouetteVisualizer)

        self.assert_images_similar(oz)

    @pytest.mark.xfail(
        reason="""third test fails with AssertionError: Expected fit
        to be called once. Called 0 times."""
    )
    def test_with_fitted(self):
        """
        Test that visualizer properly handles an already-fitted model
        """
        X, y = load_nfl(return_dataset=True).to_numpy()

        model = MiniBatchKMeans().fit(X, y)

        # Already-fitted model: the visualizer must not refit it.
        with mock.patch.object(model, "fit") as mockfit:
            oz = SilhouetteVisualizer(model)
            oz.fit(X, y)
            mockfit.assert_not_called()

        # Explicit is_fitted=True: likewise, no refit.
        with mock.patch.object(model, "fit") as mockfit:
            oz = SilhouetteVisualizer(model, is_fitted=True)
            oz.fit(X, y)
            mockfit.assert_not_called()

        # Explicit is_fitted=False: the visualizer must refit exactly once.
        with mock.patch.object(model, "fit") as mockfit:
            oz = SilhouetteVisualizer(model, is_fitted=False)
            oz.fit(X, y)
            mockfit.assert_called_once_with(X, y)
""" This file is a modification of the pygear project (https://sourceforge.net/projects/pygear/) and is distribuited under the same license of his parent license: This code is published under the terms of the GNU General Public License v3 http://www.gnu.org/licenses/gpl-3.0.html """ from copy import deepcopy from math import tan, radians, atan, pi, sin, cos, degrees, acos, asin, sqrt import numpy as np from gearbox.libs.maths import involute as inv from gearbox.libs.maths import rotate, CartesianCoordinatesToPolarCoordinates, sign class GearWheel(object): """ Parent Class for all gear wheels. """ # Attributes: settings for geometry construction points_flank = 100 # points along the involute from root form to tip form circle points_fillet = 50 # points on fillet from root to root form circle points_tip = 20 # points along tip circle (half tooth) points_root = 5 # points along root circle from center of gap to # beginning of fillet (half tooth) points_chamfer = 20 # points on tip chamfer points_ext = 100 # points on extension of involute beyond base circle (if applicable)(should be at least 8) points_width = 10 # resolution along width # Attributes: default settings for parameters _x_default = 0.0 # default value for addendum modification _alpha_n_default = 20.0 # default value for pressure angle (DIN 867) _beta_default = 0.0 # default value for helix angle (spur gear) _rho_f_default = 0.38 # default value for fillet radius (divided bei module)(DIN 867) _c_default = 0.25 # default value for tip clearance (divided bei module)(DIN 867) _k_default = 0.0 # default value for tip height modification _d_s_default = 0.0 # default value for shaft diameter (inner gear wheel diameter) _h_k_default = 0.0 # default value for radial value of tip chamfer _tol_default = 1e-6 # default tolerance for comparisons _A_s_default = 0.0 # default value for tooth thickness allowance (DIN 3967) # Attributes: gear data data = None # dictionary containing all gear parameters for macro-geometry 
modifications = None # dictionary containing all gear parameters for micro-geometry (flank profile modifications) formcoords = None # list of 2D-coordinates of points of half a tooth profile (TColgp_Array1OfPnt2d, pythonOCC) _formwire = None # wire of half a tooth profile (TopoDS_Wire, pythonOCC) def set_resolution(self, curvename, value): """ Set resolution for tooth form representation INPUT parameters: curvename : segment of tooth flank (string) one of the following: flank, fillet, tip, root, shaft, width value : new value for number of points to represent segment """ if curvename == 'flank': self.points_flank = value elif curvename == 'fillet': self.points_fillet = value elif curvename == 'tip': self.points_tip = value elif curvename == 'root': self.points_root = value elif curvename == 'shaft': self.points_shaft = value elif curvename == 'width': self.points_width = value def get_resolution(self, curvename): """ Get resolution for tooth form representation INPUT parameters: curvename : segment of tooth flank (string) one of the following: flank, fillet, tip, root, shaft, width OUTPUT: number of points used to represent requested segment """ if curvename == 'flank': return self.points_flank elif curvename == 'fillet': return self.points_fillet elif curvename == 'tip': return self.points_tip elif curvename == 'root': return self.points_root elif curvename == 'shaft': return self.points_shaft elif curvename == 'width': return self.points_width def _make_unique(self, coords): """ Remove redundant entries from coordinate array INPUT parameter: coords : list of 2d-coordinate points (TColgp_Array1OfPnt2d, pythonOCC) OUTPUT: unique_coords : list of unique coordinates (TColgp_Array1OfPnt2d, pythonOCC) """ # tolerance for comparisons index = None tol = self._tol_default * self.data.get('m_n') # upper and lower index of point-array upper_index = len(coords) lower_index = 0 # remove redundant entries uniques = list() for index in range(lower_index, upper_index): unique = 
True for unique_point in uniques: if abs(coords[index][0] - unique_point[0]) < tol and abs(coords[index][1] - unique_point[1]) < tol: unique = False if unique: uniques.append([coords[index][0], coords[index][1]]) # copy list entries into coordinate array length_uniques = len(uniques) unique_coords = {} for index in range(lower_index, lower_index + length_uniques): if abs(uniques[index - 1][0]) > tol: unique_x = uniques[index - 1][0] else: unique_x = 0.0 if abs(uniques[index - 1][1]) > tol: unique_y = uniques[index - 1][1] else: unique_y = 0.0 if unique_x and unique_y: unique_coords.update({index: [unique_x, unique_y]}) unique_coords.update({index + 1: [0.0, self.data['d_a'] / 2]}) return unique_coords def __str__(self): """ Define string conversion of GearWheel objects INPUT parameter: - OUTPUT: string representation of class """ outstr = 'gear wheel data:\n' # output gear data for date in self.data: outstr += date.ljust(10) + ':\t' + str(self.data.get(date)) + '\n' # output modification data if self.modifications: outstr += '\nflank modifications:\n' for date in self.modifications: outstr += date.ljust(10) + ':\t' + str(self.modifications.get(date)) + '\n' # output tooth form coordinates if self.formcoords: # upper and lower index of point-array outstr += '\ntooth form coordinates:\n' for coord in self.formcoords: outstr += str(coord[0]) + '\t' + str(coord[1]) + '\n' return outstr def __init__(self, geardata, flankmods=None): """ Initialization of GearWheel-object Should be overwritten in derived classes INPUT parameter: geardata : data of gear wheel (dictionary) flankmods : data of flank modifications (dictionary) formcoords : list of 2d-coordinate points (list, list(len=2), numeric) """ self.points_shaft = None self.data = deepcopy(geardata) self.modifications = deepcopy(flankmods) def get_gear_data(self): """ Return data-attribute of class OUTPUT: data attribute of class (dictionary) """ return self.data def set_gear_data(self, geardata): """ Set data-attribute 
of class, overwrite current value INPUT parameter: geardata : dictionary, containing geometric data of gear for content, see method __init__ """ self.__init__(geardata, self.modifications) def update_gear_data(self, geardata): """ Set data-attribute of class, update current value INPUT parameter: geardata : dictionary, containing geometric data of gear for content, see method __init__ """ tempdata = self.data.copy() tempdata.update(geardata) self.__init__(geardata, self.modifications) def get_flank_modifications(self): """ Return modifications-attribute of class OUTPUT: data attribute of class (dictionary) """ return self.modifications def set_flank_modifications(self, flankmods): """ Set modifications-attribute of class, overwrite current value INPUT parameter: flankmods : dictionary, containing flank modification data of gear for content, see method __init__ """ self.__init__(self.data, flankmods) def update_flank_modifications(self, flankmods): """ Set modifications-attribute of class, update current value INPUT parameter: flankmods : dictionary, containing flank modification data of gear for content, see method __init__ """ tempmods = self.modifications.copy() tempmods.update(flankmods) self.__init__(self.data, tempmods) class CylindricalGearWheel(GearWheel): """ Class representing a spur wheel or a helical gear wheel. Applicable for external and internal gears. 
Derived from GearWheel-class """ def _tooth_thickness(self, d_y): """ Tooth thickness in transverse cross-section (chord-length) INPUT parameter: d_y : two times coordinate of tooth flank in radial direction (diameter of y-cylinder) OUTPUT: s_y : chord length of tooth thickness at d_y (numeric) d_yc : cutting point of diameter through tooth center and chord (numeric) """ # necessary due to numerical rounding errors if self.data.get('d') / d_y * cos(radians(self.data.get('alpha_t'))) > 1.0: alpha_yt = 0.0 else: alpha_yt = degrees(acos(self.data.get('d') / d_y * cos(radians(self.data.get('alpha_t'))))) s_yt = d_y * ( (pi + 4 * self.data.get('x_E') * tan(radians(self.data.get('alpha_n')))) / 2 / self.data.get( 'z') + inv(self.data.get('alpha_t')) - inv(alpha_yt)) s_y = d_y * (sin(s_yt / d_y)) # tooth thickness (chord-length) d_yc = d_y * (cos(s_yt / d_y)) # diameter at center of tooth (cut with chord) return s_y, d_yc def _analyze_formcoords(self): # ONLY FOR EXTERNAL GEARS SO FAR !!! """ analyze tooth form coordinates in order to get necessary information for geometry generator. 
INPUT parameters: formcoords : 2D cartesian coordinates of points on the toothflank, describing a half tooth (TColgp_Array1OfPnt2d, pythonOCC) OUTPUT: suppdata : supplement data for geardata dictionary (dictionary) the dictionary contains at least the following keys: d_f : root circle diameter (numeric) d_a : tip diameter (numeric) d_ff : root form diameter (numeric) d_fa : tip form diameter (numeric) z : number of teeth (numeric, integer) """ # transform formcoords to NumPy-array point = None half_tooth = self.formcoords # convert to polar coordinates half_tooth_polar = np.zeros([np.size(half_tooth, 0) - 1, 2]) for index in range(0, np.size(half_tooth, 0) - 1): [r, phi] = CartesianCoordinatesToPolarCoordinates(half_tooth[index + 1, 0], half_tooth[index + 1, 1]) half_tooth_polar[index, 0] = r half_tooth_polar[index, 1] = phi d_f = 2 * min(half_tooth_polar[:, 0]) # minimum radius --> root circle d_a = 2 * max(half_tooth_polar[:, 0]) # maximum radius --> tip circle tau = 2 * (max(half_tooth_polar[:, 1]) - min(half_tooth_polar[:, 1])) # pitch angle [radians] z = int(round(2 * pi / tau)) # number of teeth from pitch angle # for finding form diameters, it is checked if the points are part of the flank involute # the limiting points of the flank involute define the form diameters if 'alpha_n' in self.data and 'alpha_t' in self.data and 'x_E' in self.data: tol = self._tol_default * self.data.get('m_n') # tolerance for comparisons point_on_flank = False first_limit_diameter = None second_limit_diameter = None for point in range(0, np.size(half_tooth_polar, 0)): [x, y] = self._tooth_thickness(2 * half_tooth_polar[point, 0]) if abs(x + 2 * half_tooth[point + 1, 0]) < tol and abs(y - 2 * half_tooth[point + 1, 1]) < tol: if not point_on_flank: first_limit_diameter = 2 * half_tooth_polar[point, 0] point_on_flank = True else: if point_on_flank: second_limit_diameter = 2 * half_tooth_polar[point, 0] point_on_flank = False if second_limit_diameter is None: second_limit_diameter = 
2 * half_tooth_polar[point, 0] if first_limit_diameter == second_limit_diameter: raise ValueError('tooth form coordinate analysis failed') if first_limit_diameter > second_limit_diameter: d_fa = first_limit_diameter d_ff = second_limit_diameter else: d_fa = second_limit_diameter d_ff = first_limit_diameter if 'd_ff' in self.data: # use user-parameter if supplied d_ff = self.data.get('d_ff') if 'd_fa' in self.data: d_ff = self.data.get('d_fa') return {'d_f': d_f, 'd_a': d_a, 'd_ff': d_ff, 'd_fa': d_fa, 'z': z} else: return {'d_f': d_f, 'd_a': d_a, 'z': z} def __init__(self, geardata, flankmods=None, formcoords=None): """ Initialization of GearWheel-object. All parameters in accordance to DIN 3960 and DIN 3967. INPUT parameters: z : number of teeth (numeric, integer) m_n : normal module (numeric, positive) d : pitch diameter (numeric) two of the three parameters z, m_n, d, must be supplied b : tooth width (numeric, positive) d_f : root circle diameter (numeric) optional - calculated if not supplied d_a : tip diameter (numeric) optional - calculated if not supplied d_Ff : root form diameter (numeric) optional - will be estimated if not supplied d_Fa : tip form diameter (numeric) optional - set equal da if not supplied (no chamfer) rho_f : fillet radius (numeric) optional - set equal 0.38*mn if not supplied x : addendum modification factor (numeric) optional - set equal 0.0 if not supplied alpha_n : pressure angle (numeric, positive)[degrees] optional - set equal 20.0 if not supplied beta : helix angle (numeric)[degrees] optional - set equal 0.0 if not supplied a : addendum (numeric) optional - no estimation c : tip clearance (numeric, positive, 0.1...0.3*mn) optional - set equal 0.25*mn if not supplied alpha_wt : service pressure angle (numeric, positive)[degrees] optional - calculated from z_2 or d_w d_w : service pitch diameter (numeric) optional - calculated from alpha_wt or z_2 h_k : radial size of tip chamfer (numeric) optional - set equal d_a-d_Fa or 0.0 if not 
supplied s_aK : remaining tooth thickness at tip, chord-length (numeric) optional - set equal s_a-2*h_k if not supplied z_2 : number of teeth of counter gear (numeric, integer) optional - calculated from alpha_wt or d_w d_s : shaft diameter, inner gear wheel diameter (numeric) optional - set equal 0.0 if not supplied A_s : tooth thickness allowance in normal cross-section (numeric, negative) optional - set equal 0.0 if not supplied All input parameters above are arranged in a dictionary. The keys are the names of the parameters as listed above. formcoords : 2D cartesian coordinates of points on the toothflank, describing a half tooth (TColgp_Array1OfPnt2d, pythonOCC) There are several possibilities for defining a complete gearwheel: 1) z, m_n, b, (beta), formcoords 2) z, m_n, b, (beta), d_f, d_a, d_Ff, d_Fa, rho_f 3) z, m_n, b, (beta), alpha_n, alpha_wt, x, a, rho_f 4) z, m_n, b, (beta), alpha_n, z_2, x, a, rho_f Some parameters can be left out, but the result might differ from your real gear. Missing parameters are estimated if possible. The helix angle beta doesn't have to be supplied for a spur gear. The constructor does not check for unit consistency. The user is responsible for supplying all values with consistent units. 
""" super(CylindricalGearWheel, self).__init__(geardata) self.data = deepcopy(geardata) self.modifications = deepcopy(flankmods) # form coordinates: value check (at least two points for defining a # tooth form (straight flanks) and two coordinates per point) if formcoords: self.data.update(self._analyze_formcoords()) # module: value check if 'm_n' in self.data and not self.data.get('m_n') >= 0: raise ValueError('module non-positive') if 'beta' not in self.data: self.data.update({'beta': self._beta_default}) self.data.update({'alpha_t': degrees( atan(tan(radians(self.data.get('alpha_n'))) / cos(radians(self.data.get('beta')))))}) self.data.update({'s_p': (pi * self.data['m_n'] / 2) + 2 * self.data['m_n'] * self.data['x'] * tan( radians(self.data['alpha_n']))}) if 'tau' in self.data and 'z' not in self.data: self.data.update({'z': int(2 * pi / self.data.get('tau'))}) if 'z' in self.data and 'm_n' in self.data: self.data.update( {'d': self.data.get('m_n') * self.data.get('z') / cos(radians(self.data.get('beta')))}) elif 'z' in self.data and 'd' in self.data: self.data.update( {'m_n': self.data.get('d') * cos(radians(self.data.get('beta'))) / self.data.get('z')}) elif 'm_n' in self.data and 'd' in self.data: self.data.update({ 'z': int(self.data.get('d') * cos(radians(self.data.get('beta'))) / self.data.get('m_n'))}) else: raise AttributeError('insufficient data supplied') if 'tau' not in self.data: self.data.update({'tau': degrees(2 * pi / self.data.get('z'))}) isexternal = sign(self.data.get('z')) if not sign(self.data.get('d')) == isexternal: raise ValueError('sign of pitch diameter') self.data.update({'m_t': self.data.get('m_n') / cos(radians(self.data.get('beta')))}) if 'alpha_n' in self.data: if self.data.get('alpha_n') < 0: raise ValueError('pitch angle non-positive') else: self.data.update({'alpha_n': self._alpha_n_default}) if 'x' not in self.data: self.data.update({'x': self._x_default}) if 'A_s' not in self.data: self.data.update({'A_s': self._A_s_default}) 
# tooth thickness allowance: value check else: if not self.data.get('A_s') <= 0: raise ValueError('tooth thickness allowance positive') self.data.update({'x_E': self.data.get('x') + self.data.get('A_s') / 2 / tan( radians(self.data.get('alpha_n'))) / self.data.get('m_n')}) if 'd_w' in self.data and 'alpha_wt' not in self.data: if not sign(self.data.get('d_w')) == isexternal: raise ValueError('sign of service pitch diameter') self.data.update({'alpha_wt': degrees(acos( self.data.get('d') / self.data.get('d_w') * cos(radians(self.data.get('alpha_t')))))}) if 'alpha_wt' in self.data and 'd_w' not in self.data: self.data.update({'d_w': self.data.get('d') * cos(radians(self.data.get('alpha_t'))) / cos( radians(self.data.get('alpha_wt')))}) self.data.update({'d_b': self.data.get('d') * cos(radians(self.data.get('alpha_t')))}) if formcoords: self.data.update(self._analyze_formcoords()) if not formcoords: # tip clearance: value check, set to default if not supplied if 'c' in self.data: if self.data.get('c') < 0.1 * self.data.get('m_n') or self.data.get('c') > 0.3 * self.data.get( 'm_n'): raise ValueError('tip clearance out of bounds') else: self.data.update({'c': self._c_default * self.data.get('m_n')}) # fillet radius: value check, set to default if not supplied if 'rho_f' not in self.data: self.data.update({'rho_f': self._rho_f_default * self.data.get('m_n')}) else: if self.data.get('rho_f') < 0: raise ValueError('fillet radius negative') # CAUTION: THE FOLLOWING SECTION OF CODE WILL BE REMOVED IN FUTURE RELEASES! 
# tool fillet radius: value check if 'rho_fP' in self.data: if self.data.get('rho_fP') < 0: raise ValueError('tool fillet radius negative') if not self.data.get('beta') == 0: raise ValueError('fillet trochoid cannot be generated for helical gears') # END OF CODE SECTION TO BE REMOVED # calculate tip height modification factor if possible (else set to default) # various attempts are made if 'a' in self.data and 'k' not in self.data: self.data.update( {'a_d': self.data.get('m_t') * (self.data.get('z') + self.data.get('z_2')) / 2}) self.data.update({'k': (self.data.get('a') - self.data.get('a_d')) / self.data.get('m_n') - ( self.data.get('x') + self.data.get('x_2'))}) else: self.data.update({'k': self._k_default}) # root circle diameter: value check, calculate if not supplied if 'd_f' in self.data: if 'd_f' in self.data > 'd' in self.data: raise ValueError('root circle diameter greater than pitch diameter') if not sign(self.data.get('d_f')) == isexternal: raise ValueError('sign of root circle diameter') else: self.data.update({ 'd_f': self.data.get('d') + 2 * self.data.get('x_E') * self.data.get('m_n') - 2 * ( self.data.get('m_n') + self.data.get('c'))}) # tip diameter: value check, calculate if not supplied if 'd_a' in self.data: # if self.data.get('d_a')<self.data.get('d'): # raise ValueError, 'tip diameter less than pitch diameter' if not sign(self.data.get('d_a')) == isexternal: raise ValueError('sign of tip diameter') else: self.data.update({ 'd_a': self.data.get('d') + 2 * self.data.get('x') * self.data.get('m_n') + 2 * self.data.get( 'm_n') + 2 * self.data.get('k') * self.data.get('m_n')}) # radial value of tip chamfer: value check, calculate or set to default # if not supplied if 'h_k' in self.data: if self.data.get('h_k') < 0: raise ValueError('value of tip chamfer negative') elif 'd_Fa' in self.data: self.data.update({'h_k': abs(self.data.get('d_a') - self.data.get('d_Fa')) / 2}) else: self.data.update({'h_k': self._h_k_default}) # remaining tooth thickness: 
value check, set to default if not supplied s_a, d_ac = self._tooth_thickness(self.data.get('d_a')) if 's_aK' not in self.data: self.data.update({'s_aK': s_a - 2 * self.data.get('h_k')}) if self.data.get('s_aK') < 0: raise ValueError('remaining tooth thickness at tip negative') if self.data.get('s_aK') > s_a: raise ValueError('remaining tip tooth thickness greater than tooth thickness') if 'd_Ff' in self.data: if self.data.get('d_Ff') > self.data.get('d'): raise ValueError('root form diameter greater than pitch diameter') if self.data.get('d_Ff') < self.data.get('d_f'): raise ValueError('root form diameter less than root circle diameter') if not sign(self.data.get('d_Ff')) == isexternal: raise ValueError('sign of root form diameter') # tip form diameter: value check if 'd_Fa' in self.data: if self.data.get('d_Fa') < self.data.get('d'): raise ValueError('tip form diameter less than pitch diameter') if self.data.get('d_Fa') > self.data.get('d_a'): raise ValueError('tip form diameter greater than tip diameter') if not sign(self.data.get('d_Fa')) == isexternal: raise ValueError('sign of tip form diameter') else: self.data.update({'d_Fa': self.data.get('d_a') - 2 * self.data.get('h_k')}) if 'd_s' not in self.data: self.data.update({'d_s': self._d_s_default}) if abs(self.data.get('d_s')) > self._tol_default: if not sign(self.data.get('d_s')) == isexternal: raise ValueError('sign of shaft diameter') if not self.data.get('d_s') < self.data.get('d_f'): raise ValueError('shaft diameter greater than root circle diameter') if not self.formcoords: self._make_form_coords() else: self.formcoords = self._make_unique(self.formcoords) def _make_form_coords(self): """ Tooth form coordinates in transverse cross-section (half tooth and half gap) points returned in 2D-cartesian coordinates, origin on wheel axis old form coordinates (if existend) will be replaced! This method should be used only if no user-supplied form coordinates are present. 
INPUT parameter: - """ # module imports from scipy.optimize import fsolve from numpy.linalg import norm from numpy import insert # tolerance for comparisons tol = self._tol_default * self.data.get('m_n') # delete old form coordinates if existend if self.formcoords: del self.formcoords if self._formwire: del self._formwire # indicator whether gear is external (number of teeth positive) or internal isexternal = sign(self.data.get('z')) inv_extension = False # indices for adressing parts of tooth form lower_index = 0 start_rootcirc_index = lower_index + 1 # one entry reserved for origin end_rootcirc_index = start_rootcirc_index + self.points_root - 1 start_fillet_index = end_rootcirc_index end_fillet_index = start_fillet_index + self.points_fillet - 1 start_involute_index = end_fillet_index + 1 # can differ from end of fillet end_involute_index = start_involute_index + self.points_flank - 1 start_chamfer_index = end_involute_index + 1 end_chamfer_index = start_chamfer_index + self.points_chamfer - 1 start_tipcirc_index = end_chamfer_index + 1 # differs from end of involute if chamfer present end_tipcirc_index = start_tipcirc_index + self.points_tip - 1 upper_index = end_tipcirc_index # determine boundary of half tooth segment on root circle rootcirc_start_point = self.data.get('d_f') / 2 * np.array( [-sin(radians(self.data.get('tau') / 2)), cos(radians(self.data.get('tau') / 2))]) # determine how the root shape is defined and calculate significant points # root shape is circular in transverse cross-section if isexternal > 0: # for external gears if 'd_Ff' not in self.data: # root circle is tangent to involute if (self.data.get('d_f') ** 2 + 4 * self.data.get('rho_f') * self.data.get( 'd_f') >= self.data.get('d_b') ** 2): self.data.update({'d_Ff': isexternal * sqrt((sqrt( (self.data.get('d_f') + 2 * self.data.get('rho_f')) ** 2 - self.data.get( 'd_b') ** 2) - 2 * self.data.get('rho_f')) ** 2 + self.data.get('d_b') ** 2)}) s_yt, d_yc = 
self._tooth_thickness(self.data.get('d_Ff')) fil_end_point = np.array([-s_yt / 2, d_yc / 2]) # no tangency possible: undercut elif self.data.get('d_f') + 4 * self.data.get('rho_f') >= self.data.get('d_b'): self.data.update({'d_Ff': self.data.get('d_b')}) s_yt, d_yc = self._tooth_thickness(self.data.get('d_b')) fil_end_point = np.array([-s_yt / 2, d_yc / 2]) # end of involute at base circle print( 'Warning: undercutting occurs!') else: self.data.update({'d_Ff': self.data.get('d_b')}) d_tangent = sqrt(self.data.get('d_f') ** 2 + 4 * self.data.get('rho_f') * self.data.get( 'd_f')) # diameter around gear center on that tangency point of fillet curve is located s_yt, d_yc = self._tooth_thickness(self.data.get('d_b')) nu = atan(s_yt / d_yc) fil_end_point = np.array([-d_tangent / 2 * sin(nu), d_tangent / 2 * cos( nu)]) # tangential extension of involute beyond base circle print ('Warning: involute had to be extended below base cicle to achieve root fillet tangency!') inv_extension = True else: # if root form circle diameter is supplied, it is forced strictly if possible if (self.data.get('d_Ff') - self.data.get('d_f')) / 2 > 2 * self.data.get( 'rho_f'): # check if root fillet circle fits beetween root form circle and root circle raise ValueError('root fillet radius too small: root shape cannot be determined') s_yt, d_yc = self._tooth_thickness(self.data.get('d_Ff')) if abs(self.data.get('d_Ff')) >= abs(self.data.get('d_b')): # fillet ends at root form circle fil_end_point = np.array([-s_yt / 2, d_yc / 2]) else: # base circle diameter greater than root form diameter: tangential extension of involute nu = atan(s_yt / d_yc) fil_end_point = np.array( [-self.data.get('d_Ff') * sin(nu), self.data.get('d_Ff') * cos(nu)]) print ('Warning: involute had to be extended below base cicle to enforce root form circle diameter!') inv_extension = True else: # for internal gears if 'd_Ff' not in self.data: # root circle is tangent to involute t_b = sqrt( (self.data.get('d_f') / 2 + 
self.data.get('rho_f')) ** 2 - (self.data.get('d_b') / 2) ** 2) self.data.update( {'d_Ff': -2 * sqrt((t_b + self.data.get('rho_f')) ** 2 + (self.data.get('d_b') / 2) ** 2)}) else: # if root form circle diameter is supplied, it is forced strictly if possible if (self.data.get('d_Ff') - self.data.get('d_f')) / 2 > 2 * self.data.get( 'rho_f'): # check if root fillet circle fits beetween root form circle and root circle raise ValueError('root fillet radius too small: root shape cannot be determined') s_yt, d_yc = self._tooth_thickness(self.data.get('d_Ff')) fil_end_point = np.array([-s_yt / 2, d_yc / 2]) # find center of root fillet circle by cutting circle around fillet end point with radius rho_f # with circle around center of gear wheel with radius d_f/2+rho_f def root_circle_center_func(phi): return fil_end_point + self.data.get('rho_f') * np.array([sin(phi[0]), cos(phi[0])]) - (self.data.get( 'd_f') / 2 + self.data.get('rho_f')) * np.array([sin(phi[1]), cos(phi[1])]) phi_fil_center = fsolve(root_circle_center_func, [-pi / 2, 0.0]) fil_center_point = (self.data.get('d_f') / 2 + self.data.get('rho_f')) * np.array( [sin(phi_fil_center[1]), cos(phi_fil_center[1])]) # boundary point of root fillet and root circle fil_start_point = fil_center_point * self.data.get('d_f') / ( self.data.get('d_f') + 2 * self.data.get('rho_f')) # if boundary point and fillet center are outside half tooth segment the shape of the root fillet # cannot be determined (root fillet curve is not continously differentiable and d_f is not matched) if abs(atan(fil_start_point[0] / fil_start_point[1])) > abs(radians(self.data.get('tau') / 2)): raise ValueError('root fillet radius too large: root shape cannot be determined') # determine boundary points of involute s_yt, d_yc = self._tooth_thickness(self.data.get('d_Ff')) inv_start_point = np.array([-s_yt / 2, d_yc / 2]) # involute starts at root form circle s_yt, d_yc = self._tooth_thickness(self.data.get('d_Fa')) inv_end_point = np.array([-s_yt / 2, 
d_yc / 2]) # involute ends at tip form circle # determine boundary points of tip circle nu = self.data.get('s_aK') / self.data.get('d_a') tipcirc_start_point = np.array([-self.data.get('d_a') / 2 * sin(nu), self.data.get('d_a') / 2 * cos( nu)]) # tip circle starts at end of tip chamfer tipcirc_end_point = np.array([0.0, self.data.get('d_a') / 2]) # tip circle ends at symmetry line # create array for tooth form coordinates formcoord_array = np.zeros([upper_index, 2]) # compute points on root circle phi_start = -asin(2 * rootcirc_start_point[0] / self.data.get('d_f')) # starting angle of root circle if abs(phi_start - acos(2 * rootcirc_start_point[1] / self.data.get( 'd_f'))) > tol: # computation is not unique phi_start = pi - phi_start phi_end = -asin(2 * fil_start_point[0] / self.data.get('d_f')) # end angle of root circle if abs(phi_end - acos(2 * fil_start_point[1] / self.data.get('d_f'))) > tol: # computation is not unique phi_end = pi - phi_end if abs(phi_start - phi_end) > tol: # check if a root circle curve exists delta_phi = (phi_end - phi_start) / (self.points_root - 1) n = 0 for index in range(start_rootcirc_index, end_rootcirc_index): formcoord_array[index] = self.data.get('d_f') / 2 * np.array( [-sin(phi_start + n * delta_phi), isexternal * cos(phi_start + n * delta_phi)]) n += 1 # compute points on root fillet print ('Warning: circular root fillet in transverse cross-section assumed!') phi_start = asin( (fil_start_point[0] - fil_center_point[0]) / self.data.get('rho_f')) # starting angle of root fillet if abs(phi_start - acos(-(fil_start_point[1] - fil_center_point[1]) / self.data.get( 'rho_f'))) > tol: # computation is not unique phi_start = pi - phi_start phi_end = asin( (fil_end_point[0] - fil_center_point[0]) / self.data.get('rho_f')) # end angle of root fillet if abs(phi_end - acos(-(fil_end_point[1] - fil_center_point[1]) / self.data.get( 'rho_f'))) > tol: # computation is not unique phi_end = pi - phi_end if abs(phi_start - phi_end) > tol: # 
check if a root fillet curve exists delta_phi = (phi_end - phi_start) / (self.points_fillet - 1) n = 0 for index in range(start_fillet_index, end_fillet_index + 1): formcoord_array[index] = fil_center_point + self.data.get('rho_f') * np.array( [sin(phi_start + n * delta_phi), -isexternal * cos(phi_start + n * delta_phi)]) n += 1 if (inv_start_point - fil_end_point).any(): # check if a root fillet circle connects directly to flank print ('involute was extended') # placeholder for future # compute points on flank d_start = isexternal * norm(inv_start_point, 2) * 2 # start diameter of involute flank (root form diameter) d_end = isexternal * norm(inv_end_point, 2) * 2 # end diameter of involute flank (tip form diameter) delta_d = (d_end - d_start) / (self.points_flank - 1) n = 0 for index in range(start_involute_index, end_involute_index + 1): s_yt, d_yc = self._tooth_thickness(d_start + n * delta_d) formcoord_array[index] = np.array([-s_yt / 2, d_yc / 2]) n += 1 # compute points on tip chamfer if 'h_k' in self.data and (self.data.get('h_k') > 0): print ('Warning: straight tip chamfer assumed!') delta_k = 1 / (self.points_chamfer - 1) n = 0 for index in range(end_involute_index, end_chamfer_index): formcoord_array[index] = inv_end_point + (tipcirc_start_point - inv_end_point) * n * delta_k n += 1 # compute points on tip circle phi_start = -asin(2 * tipcirc_start_point[0] / self.data.get('d_a')) # starting angle of tip circle if abs(phi_start - acos(2 * tipcirc_start_point[1] / self.data.get( 'd_a'))) > tol: # computation is not unique phi_start = pi - phi_start phi_end = -asin(2 * tipcirc_end_point[0] / self.data.get('d_a')) # end angle of tip circle if abs(phi_end - acos(2 * tipcirc_end_point[1] / self.data.get( 'd_a'))) > tol: # computation is not unique phi_end = pi - phi_end if isexternal < 0: phi_end = phi_end + pi if abs(phi_start - phi_end) > tol: # check if a tip circle curve exists delta_phi = (phi_end - phi_start) / (self.points_tip - 1) n = 1 for index in 
range(end_chamfer_index + 1, end_tipcirc_index): formcoord_array[index] = self.data.get('d_a') / 2 * np.array( [-sin(phi_start + n * delta_phi), isexternal * cos(phi_start + n * delta_phi)]) n += 1 # compute points on tangential extension of involute below base circle if inv_extension: delta_k = 1 / (self.points_ext - 1) for n in range(1, self.points_ext - 1): formcoord_array = insert(formcoord_array, start_involute_index, inv_start_point + (fil_end_point - inv_start_point) * n * delta_k, axis=0) self.formcoords = self._make_unique(formcoord_array) class GearExport(object): def __init__(self, pairdata): """ Initialization of GearPair-object Should be overwritten in derived classes :rtype : object INPUT parameters: pairdata : data of gear wheel pair (dictionary) Pinion : pinion (GearWheel-instance) Gear : gear (GearWheel-instance) """ self.data = deepcopy(pairdata) gear = {'z': self.data['z'], 'x': self.data['x'], 'alpha_n': self.data['alpha_n'], 'beta': self.data['beta'], 'm_n': self.data['m_n'], 'rho_f': self.data['rho_f'], 'd_s': self.data['d_s'], 'c': self.data['c'], 'b': self.data['b']} self.gear = self.__set_gear(gear) @staticmethod def __set_gear(geardata): """ Set pinion attribute :rtype : object INPUT parameter: gear : pinion ((Gear Pair data)) """ gear = CylindricalGearWheel(geardata) pd_s = gear.data['d'] * pi ang = gear.data['s_p'] * 360 / pd_s shaft = [[0, gear.data['d_s'] / 2], rotate([[0, gear.data['d_s'] / 2]], 0.5 * ( (gear.data['d_s'] / gear.data['z']) * 360 / gear.data['d_s']))[0]] gear.shaftcoords = shaft gear.formcoords = list(gear.formcoords.values()) gear.rotate_ang = -ang / 2 return gear return gear
"""Test state helpers."""
from datetime import timedelta
import unittest

try:
    from unittest import mock
except ImportError:
    import mock

import homeassistant.core as ha
import homeassistant.components as core_components
from homeassistant.const import (SERVICE_TURN_ON, SERVICE_TURN_OFF)
from homeassistant.util import dt as dt_util
from homeassistant.helpers import state
from homeassistant.const import (
    STATE_OPEN, STATE_CLOSED,
    STATE_LOCKED, STATE_UNLOCKED,
    STATE_ON, STATE_OFF)
from homeassistant.components.media_player import (
    SERVICE_PLAY_MEDIA, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE)
from homeassistant.components.sun import (STATE_ABOVE_HORIZON,
                                          STATE_BELOW_HORIZON)

from tests.common import get_test_home_assistant, mock_service


class TestStateHelpers(unittest.TestCase):
    """Test the Home Assistant event helpers."""

    def setUp(self):  # pylint: disable=invalid-name
        """Run when tests are started."""
        self.hass = get_test_home_assistant()
        core_components.setup(self.hass, {})

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop when tests are finished."""
        self.hass.stop()

    def test_get_changed_since(self):
        """Test get_changed_since."""
        point1 = dt_util.utcnow()
        point2 = point1 + timedelta(seconds=5)
        point3 = point2 + timedelta(seconds=5)

        # FIX: `patch` was used unqualified but never imported (only `mock` is
        # imported above), which raised NameError at runtime. Use `mock.patch`.
        with mock.patch('homeassistant.core.dt_util.utcnow',
                        return_value=point1):
            self.hass.states.set('light.test', 'on')
            state1 = self.hass.states.get('light.test')

        with mock.patch('homeassistant.core.dt_util.utcnow',
                        return_value=point2):
            self.hass.states.set('light.test2', 'on')
            state2 = self.hass.states.get('light.test2')

        with mock.patch('homeassistant.core.dt_util.utcnow',
                        return_value=point3):
            self.hass.states.set('light.test3', 'on')
            state3 = self.hass.states.get('light.test3')

        self.assertEqual(
            [state2, state3],
            state.get_changed_since([state1, state2, state3], point2))

    def test_track_states(self):
        """Test tracking of states."""
        point1 = dt_util.utcnow()
        point2 = point1 + timedelta(seconds=5)
        point3 = point2 + timedelta(seconds=5)

        # FIX: same missing-import bug as above — use mock.patch.
        with mock.patch('homeassistant.core.dt_util.utcnow') as mock_utcnow:
            mock_utcnow.return_value = point2

            with state.TrackStates(self.hass) as states:
                mock_utcnow.return_value = point1
                self.hass.states.set('light.test', 'on')

                mock_utcnow.return_value = point2
                self.hass.states.set('light.test2', 'on')
                state2 = self.hass.states.get('light.test2')

                mock_utcnow.return_value = point3
                self.hass.states.set('light.test3', 'on')
                state3 = self.hass.states.get('light.test3')

        self.assertEqual(
            sorted([state2, state3], key=lambda state: state.entity_id),
            sorted(states, key=lambda state: state.entity_id))

    def test_reproduce_with_no_entity(self):
        """Test reproduce_state with no entity."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        state.reproduce_state(self.hass, ha.State('light.test', 'on'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) == 0)
        self.assertEqual(None, self.hass.states.get('light.test'))

    def test_reproduce_turn_on(self):
        """Test reproduce_state with SERVICE_TURN_ON."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        self.hass.states.set('light.test', 'off')

        state.reproduce_state(self.hass, ha.State('light.test', 'on'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('light', last_call.domain)
        self.assertEqual(SERVICE_TURN_ON, last_call.service)
        self.assertEqual(['light.test'], last_call.data.get('entity_id'))

    def test_reproduce_turn_off(self):
        """Test reproduce_state with SERVICE_TURN_OFF."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_OFF)

        self.hass.states.set('light.test', 'on')

        state.reproduce_state(self.hass, ha.State('light.test', 'off'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('light', last_call.domain)
        self.assertEqual(SERVICE_TURN_OFF, last_call.service)
        self.assertEqual(['light.test'], last_call.data.get('entity_id'))

    def test_reproduce_complex_data(self):
        """Test reproduce_state with complex service data."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        self.hass.states.set('light.test', 'off')

        complex_data = ['hello', {'11': '22'}]

        state.reproduce_state(self.hass, ha.State('light.test', 'on', {
            'complex': complex_data
        }))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('light', last_call.domain)
        self.assertEqual(SERVICE_TURN_ON, last_call.service)
        self.assertEqual(complex_data, last_call.data.get('complex'))

    def test_reproduce_media_data(self):
        """Test reproduce_state with SERVICE_PLAY_MEDIA."""
        calls = mock_service(self.hass, 'media_player', SERVICE_PLAY_MEDIA)

        self.hass.states.set('media_player.test', 'off')

        media_attributes = {'media_content_type': 'movie',
                            'media_content_id': 'batman'}

        state.reproduce_state(self.hass, ha.State('media_player.test', 'None',
                                                  media_attributes))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('media_player', last_call.domain)
        self.assertEqual(SERVICE_PLAY_MEDIA, last_call.service)
        self.assertEqual('movie', last_call.data.get('media_content_type'))
        self.assertEqual('batman', last_call.data.get('media_content_id'))

    def test_reproduce_media_play(self):
        """Test reproduce_state with SERVICE_MEDIA_PLAY."""
        calls = mock_service(self.hass, 'media_player', SERVICE_MEDIA_PLAY)

        self.hass.states.set('media_player.test', 'off')

        state.reproduce_state(
            self.hass, ha.State('media_player.test', 'playing'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('media_player', last_call.domain)
        self.assertEqual(SERVICE_MEDIA_PLAY, last_call.service)
        self.assertEqual(['media_player.test'],
                         last_call.data.get('entity_id'))

    def test_reproduce_media_pause(self):
        """Test reproduce_state with SERVICE_MEDIA_PAUSE."""
        calls = mock_service(self.hass, 'media_player', SERVICE_MEDIA_PAUSE)

        self.hass.states.set('media_player.test', 'playing')

        state.reproduce_state(
            self.hass, ha.State('media_player.test', 'paused'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) > 0)
        last_call = calls[-1]
        self.assertEqual('media_player', last_call.domain)
        self.assertEqual(SERVICE_MEDIA_PAUSE, last_call.service)
        self.assertEqual(['media_player.test'],
                         last_call.data.get('entity_id'))

    def test_reproduce_bad_state(self):
        """Test reproduce_state with bad state."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        self.hass.states.set('light.test', 'off')

        state.reproduce_state(self.hass, ha.State('light.test', 'bad'))

        self.hass.pool.block_till_done()

        self.assertTrue(len(calls) == 0)
        self.assertEqual('off', self.hass.states.get('light.test').state)

    def test_reproduce_group(self):
        """Test reproduce_state with group."""
        light_calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        self.hass.states.set('group.test', 'off', {
            'entity_id': ['light.test1', 'light.test2']})

        state.reproduce_state(self.hass, ha.State('group.test', 'on'))

        self.hass.pool.block_till_done()

        self.assertEqual(1, len(light_calls))
        last_call = light_calls[-1]
        self.assertEqual('light', last_call.domain)
        self.assertEqual(SERVICE_TURN_ON, last_call.service)
        self.assertEqual(['light.test1', 'light.test2'],
                         last_call.data.get('entity_id'))

    def test_reproduce_group_same_data(self):
        """Test reproduce_state with group with same domain and data."""
        light_calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)

        self.hass.states.set('light.test1', 'off')
        self.hass.states.set('light.test2', 'off')

        state.reproduce_state(self.hass, [
            ha.State('light.test1', 'on', {'brightness': 95}),
            ha.State('light.test2', 'on', {'brightness': 95})])

        self.hass.pool.block_till_done()

        self.assertEqual(1, len(light_calls))
        last_call = light_calls[-1]
        self.assertEqual('light', last_call.domain)
        self.assertEqual(SERVICE_TURN_ON, last_call.service)
        self.assertEqual(['light.test1', 'light.test2'],
                         last_call.data.get('entity_id'))
        self.assertEqual(95, last_call.data.get('brightness'))

    def test_as_number_states(self):
        """Test state_as_number with states."""
        zero_states = (STATE_OFF, STATE_CLOSED, STATE_UNLOCKED,
                       STATE_BELOW_HORIZON)
        one_states = (STATE_ON, STATE_OPEN, STATE_LOCKED,
                      STATE_ABOVE_HORIZON)
        for _state in zero_states:
            self.assertEqual(0, state.state_as_number(
                ha.State('domain.test', _state, {})))
        for _state in one_states:
            self.assertEqual(1, state.state_as_number(
                ha.State('domain.test', _state, {})))

    def test_as_number_coercion(self):
        """Test state_as_number with number."""
        for _state in ('0', '0.0', 0, 0.0):
            self.assertEqual(
                0.0, state.state_as_number(
                    ha.State('domain.test', _state, {})))
        for _state in ('1', '1.0', 1, 1.0):
            self.assertEqual(
                1.0, state.state_as_number(
                    ha.State('domain.test', _state, {})))

    def test_as_number_invalid_cases(self):
        """Test state_as_number with invalid cases."""
        for _state in ('', 'foo', 'foo.bar', None, False, True, object,
                       object()):
            self.assertRaises(ValueError,
                              state.state_as_number,
                              ha.State('domain.test', _state, {}))
"""Views for the osl_comments app.

Wraps and extends Django's contrib.comments views with user-side delete,
in-place editing, AJAX fragment rendering, moderation and IP-address
banning. Python 2 module (urlparse / urllib.urlencode).
"""
from datetime import datetime
import urllib
import urlparse

from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import comments as comment_app
from django.contrib.comments.views import comments
from django.contrib.comments.views.comments import CommentPostBadRequest, comment_done
from django.contrib.comments.views.utils import confirmation_view, next_redirect
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import (HttpResponse, HttpResponseBadRequest,
    HttpResponseForbidden, HttpResponseNotFound)
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext, loader
from django.utils.encoding import smart_unicode
# BUGFIX: escape() is used in edit_comment's error path but was never
# imported, raising NameError instead of the intended 400 response.
from django.utils.html import escape
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from voting.models import Vote

from osl_comments import signals
from osl_comments.forms import OslEditCommentForm
from osl_comments.models import (CommentsBannedFromIpAddress,
    CommentsPerPageForContentType, OslComment)
from osl_comments.templatetags import (EDIT_QUERY_STRING_KEY,
    REPLY_QUERY_STRING_KEY)

# Query-string key that disables pagination on redirect (see redirect_view).
NO_PAGINATION_QUERY_STRING_KEY = 'np'

@login_required
def delete_comment(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST.

    The delete is a soft delete: the comment row is kept and flagged with
    is_deleted_by_user. Only the comment's author may delete it; banned IP
    addresses are refused.

    Templates: `comments/delete_by_user.html`,
        `comments/delete_by_user_forbidden.html`
    Context:
        comment
            the deleted `comments.comment` object
    """
    comment = get_object_or_404(comment_app.get_model(), pk=comment_id,
        site__pk=settings.SITE_ID)

    # Ensure requesting user is same user who posted comment
    if request.user != comment.user:
        t = loader.get_template('comments/delete_by_user_forbidden.html')
        c = RequestContext(request)
        return HttpResponseForbidden(t.render(c))

    # Check that user is not banned from commenting
    user_ip_address = request.META['REMOTE_ADDR']
    user_is_banned = \
        CommentsBannedFromIpAddress.objects.is_banned(user_ip_address)
    if user_is_banned:
        return HttpResponseForbidden()

    # Delete on POST
    if request.method == 'POST':
        comment.is_deleted_by_user = True
        comment.save()
        signals.comment_was_deleted_by_user.send(
            sender = comment.__class__,
            comment = comment,
            request = request
        )
        if request.is_ajax():
            # AJAX callers get the re-rendered comment fragment.
            return redirect('osl_comments.views.get_comment',
                comment_id=comment_id)
        else:
            return next_redirect(request.POST.copy(), next,
                delete_by_user_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/delete_by_user.html',
            {'comment': comment, "next": next},
            RequestContext(request)
        )

delete_by_user_done = confirmation_view(
    template = "comments/deleted_by_user.html",
    doc = 'Displays a "comment was deleted" success page.'
)

@login_required
@require_POST
def edit_comment(request, next=None):
    """
    Edits an existing comment in place (POST only).

    Only the comment's author may edit. Supports cancel and preview,
    fires comment_will_be_edited (veto-able) and comment_was_edited
    signals, and stamps edit_timestamp on success.
    """
    # get data
    data = request.POST.copy()
    next = data.get('next', next)

    # see if user wants to cancel
    cancel = 'cancel' in data
    if cancel:
        return redirect(data['cancel_url'])

    # get comment and raise error if id is wrong
    comment_id = data.get('comment_id')
    if comment_id is None:
        return CommentPostBadRequest("Missing comment id field.")
    try:
        comment = OslComment.objects.get(pk=comment_id)
    except ObjectDoesNotExist:
        return HttpResponseNotFound(
            "No comment matching with PK %r exists." % comment_id
        )
    except (ValueError, ValidationError) as e:
        return CommentPostBadRequest(
            "Attempting to get comment PK %r raised %s" %
            (escape(comment_id), e.__class__.__name__)
        )

    # ensure user editing is same user who posted comment
    if comment.user != request.user:
        return HttpResponseForbidden("Cannot edit another user's comment.")

    # does the user want to preview the comment?
    preview = 'preview' in data

    # get a special comment form for editing
    form = OslEditCommentForm(data=data)

    # If there are errors or if we requested a preview show the comment
    if form.errors or preview:
        template_list = [
            # These first two exist for purely historical reasons.
            # Django v1.0 and v1.1 allowed the underscore format for
            # preview templates, so we have to preserve that format.
            "comments/%s_%s_preview.html" % (comment.content_type.app_label,
                comment.content_type.model),
            "comments/%s_preview.html" % comment.content_type.app_label,
            # Now the usual directory based template heirarchy.
            "comments/%s/%s/preview.html" % (comment.content_type.app_label,
                comment.content_type.model),
            "comments/%s/preview.html" % comment.content_type.app_label,
            "comments/preview.html",
        ]
        return render_to_response(
            template_list, {
                "comment": form.data.get("comment", ""),
                "form": form,
                "next": next,
            },
            RequestContext(request, {})
        )

    # Give listeners a chance to veto the edit.
    responses = signals.comment_will_be_edited.send(
        sender = comment.__class__,
        comment = comment,
        request = request
    )
    for (receiver, response) in responses:
        # BUGFIX: identity comparison — a receiver vetoes by returning
        # exactly False (None means "no opinion").
        if response is False:
            return CommentPostBadRequest(
                "comment_will_be_edited receiver %r killed the comment" %
                receiver.__name__)

    # update comment content
    comment.comment = data.get('comment')
    comment.edit_timestamp = datetime.now()
    comment.save()

    signals.comment_was_edited.send(
        sender = comment.__class__,
        comment = comment,
        request = request
    )

    if request.is_ajax():
        return redirect('osl_comments.views.get_comment',
            comment_id=comment.pk)
    else:
        return next_redirect(data, next, comment_edited)

comment_edited = confirmation_view(
    template = "comments/edit_confirmed.html",
    doc = """Display a "comment was edited" success page."""
)

def get_ajax_edit_form(request, comment_pk):
    """Renders the inline edit form for a comment (AJAX helper)."""
    comment = get_object_or_404(comment_app.get_model(), pk=comment_pk,
        site__pk=settings.SITE_ID)
    return render_to_response(
        "comments/render_edit_form.html",
        {'comment': comment},
        RequestContext(request)
    )

def get_ajax_reply_form(request, obj_ctype_pk, obj_pk, comment_pk):
    """Renders the reply form for a comment on a target object (AJAX)."""
    obj_model = ContentType.objects.get(pk=obj_ctype_pk).model_class()
    obj = obj_model.objects.get(pk=obj_pk)
    comment = get_object_or_404(comment_app.get_model(), pk=comment_pk,
        site__pk=settings.SITE_ID)
    return render_to_response(
        "comments/reply_form_container.html",
        {'object': obj, 'comment': comment},
        RequestContext(request)
    )

def get_comment(request, comment_id):
    """Renders a single comment fragment with its vote score."""
    comment = get_object_or_404(OslComment, pk=comment_id)
    return render_to_response('comments/comment.html',
        {'comment': comment,
         'comments_enabled': True,
         'comment_parent': comment.parent_comment == None,
         'comment_score': Vote.objects.get_score(comment)['score'],
         'vote': Vote.objects.get_for_user(comment, request.user)},
        context_instance=RequestContext(request))

def get_comments(request, obj_ctype_pk, obj_pk, order_method,
        comments_enabled):
    """Renders a list of comments.

    Requires an integer 'offset' in the query string; a "load more" link
    is shown when another page of comments exists past the offset.
    """
    OFFSET_QUERY_STRING_KEY = 'offset'
    if OFFSET_QUERY_STRING_KEY not in request.GET:
        return HttpResponseBadRequest(
            'Need to provide an offset key value in the query string')
    offset = request.GET[OFFSET_QUERY_STRING_KEY]
    offset = int(offset)
    comments_enabled = bool(comments_enabled)
    obj_ctype = ContentType.objects.get(pk=obj_ctype_pk)

    comment_list = list(OslComment.objects.get_comments(
        ctype=obj_ctype,
        object_pk=obj_pk,
        order_method=order_method,
        offset=offset
    ))

    comment_count = OslComment.objects.filter(
        content_type = obj_ctype,
        object_pk = smart_unicode(obj_pk),
        site__pk = settings.SITE_ID,
        is_public = True,
        inline_to_object = False
    ).count()

    num_comments_per_page = \
        CommentsPerPageForContentType.objects.\
        get_comments_per_page_for_content_type(obj_ctype)

    display_load_more = False
    if offset + num_comments_per_page < comment_count:
        display_load_more = True

    return render_to_response(
        'comments/inner_list.html',
        {
            'comment_list': comment_list,
            'comments_enabled': comments_enabled,
            'display_load_more': display_load_more,
            'sorted_by': order_method,
            'object_ctype_pk': obj_ctype_pk,
            'object_pk': obj_pk
        },
        RequestContext(request)
    )

@require_POST
@permission_required('comments.can_moderate')
def moderate(request, comment_id, next=None):
    """Moderator delete: AJAX deletes in place, else defers to Django's
    moderation delete view."""
    # BUGFIX: is_ajax is a method — the original `not request.is_ajax`
    # tested the truthiness of the bound method itself (always True), so
    # the non-AJAX redirect below was unreachable.
    if not request.is_ajax():
        return redirect('django.contrib.comments.views.moderation.delete',
            comment_id)
    from django.contrib.comments.views.moderation import perform_delete
    comment = get_object_or_404(OslComment, pk=comment_id,
        site__pk=settings.SITE_ID)
    perform_delete(request, comment)
    return redirect('osl_comments.views.get_comment', comment_id=comment_id)

@csrf_protect
@require_POST
def post_comment(request, next=None, using=None):
    """Wraps Django's post_comment view to handle the redirect better."""
    data = request.POST.copy()
    if 'cancel' in data:
        return redirect(data['cancel_url'])
    response = comments.post_comment(request, next, using)
    comment_pk = ''
    if response.status_code == 302:
        # Move the comment pk in the query string to the URL fragment
        # (and clear out delete and reply key values pairs as well)
        redirect_location = response['location']
        redirect_url = list(urlparse.urlparse(redirect_location))
        redirect_qs = urlparse.parse_qs(redirect_url[4])
        if 'c' in redirect_qs:
            comment_pk = redirect_qs['c'][0]
            del redirect_qs['c']
        if EDIT_QUERY_STRING_KEY in redirect_qs:
            del redirect_qs[EDIT_QUERY_STRING_KEY]
        if REPLY_QUERY_STRING_KEY in redirect_qs:
            del redirect_qs[REPLY_QUERY_STRING_KEY]
        redirect_url[4] = urllib.urlencode(redirect_qs, True)
        # Fragment "c<pk>" lets the browser scroll to the new comment.
        redirect_url[5] = ''.join(['c', comment_pk])
        response['location'] = urlparse.urlunparse(redirect_url)
    if request.is_ajax():
        return redirect(get_comment, comment_id=comment_pk)
    return response

def redirect_view(request, content_type_id, object_id):
    """
    Used instead of standard comments-url-redirect view to allow for no
    pagination.
    """
    from django.contrib.contenttypes.views import shortcut
    response = shortcut(request, content_type_id, object_id)
    original_url = response['location']

    # Add in no pagination query string key
    url_list = list(urlparse.urlparse(original_url))
    url = url_list[:4]
    query_string = url_list[4]
    fragment = url_list[5]
    query_string_dict = urlparse.parse_qs(query_string)
    query_string_dict.update({NO_PAGINATION_QUERY_STRING_KEY: '1'})
    new_qs = urllib.urlencode(query_string_dict)
    url.extend([new_qs, fragment])
    url_string = urlparse.urlunparse(url)

    response['location'] = url_string
    return response

@login_required
@permission_required('osl_comments.can_ban')
def update_ip_address_ban(request, comment_id, next=None):
    """Shows (GET) or sets (POST) the comment-ban flag for the IP address
    that posted the given comment."""
    comment = get_object_or_404(OslComment, pk=comment_id)

    if request.method == 'GET':
        banned = \
            CommentsBannedFromIpAddress.objects.is_banned(comment.ip_address)
        return render_to_response('comments/update_ip_address_ban.html',
            {'banned': banned},
            RequestContext(request)
        )

    if request.method == 'POST':
        data = request.POST
        banned_str = data.get('ban', None)
        if banned_str == 'True':
            banned = True
        elif banned_str == 'False':
            banned = False
        else:
            return HttpResponseBadRequest()

        # Upsert the ban record for this IP address.
        try:
            ban = CommentsBannedFromIpAddress.objects.get(
                ip_address=comment.ip_address)
        except CommentsBannedFromIpAddress.DoesNotExist:
            ban = CommentsBannedFromIpAddress(ip_address=comment.ip_address)
        ban.comments_banned = banned
        ban.save()

        signals.ip_address_ban_was_updated.send(
            sender = comment.__class__,
            banned = banned,
            request = request
        )

        return next_redirect(request.POST.copy(), next,
            update_ip_address_ban_done)

update_ip_address_ban_done = confirmation_view(
    template = "comments/update_ip_address_ban_done.html",
    doc = """Display a "ip address ban updated" success page."""
)
"""Python representations of kernel signatures."""

import numbers

from elaps import symbolic

# Property names that may appear in an argument's attribute expression;
# init_lambdas quotes them before eval'ing the expression.
named_attributes = ("lower", "upper", "symm", "herm", "spd", "hpd", "work")

# BLAS/LAPACK-style single-letter datatype prefixes.
datatype_prefixes = {
    "i": "integer",
    "s": "single precision",
    "d": "double precision",
    "c": "single precision complex",
    "z": "double precision complex"
}


class Signature(list):

    """Representation of a kernel signature."""

    def __init__(self, *args, **kwargs):
        """Initialize from file or arguments."""
        list.__init__(self, args)
        self.flopsstr = None
        self.flops = None
        if not isinstance(self[0], Name):
            self[0] = Name(self[0])

        # infer and compile flops, min, max, attr
        self.init_lambdas(kwargs)

        # lookup for fast argument selection
        self.argtypelookup = {}

    def init_lambdas(self, kwargs):
        """Initialize lambda expressions."""
        # Each expression is compiled via eval() with the symbolic module's
        # namespace as globals, so expressions may reference symbolic names.
        lambdaargs = ", ".join(arg.name for arg in self)
        if "complexity" in kwargs and "flops" not in kwargs:
            # legacy support
            kwargs["flops"] = kwargs["complexity"]
        if "flops" in kwargs:
            self.flopsstr = kwargs["flops"]
            self.flops = eval("lambda %s: %s" % (lambdaargs, kwargs["flops"]),
                              symbolic.__dict__)
        for arg in self:
            arg.min = None
            arg.max = None
            if isinstance(arg, ArgWithMin):
                if arg.minstr:
                    arg.min = eval("lambda %s: %s" % (lambdaargs, arg.minstr),
                                   symbolic.__dict__)
                if arg.maxstr:
                    arg.max = eval("lambda %s: %s" % (lambdaargs, arg.maxstr),
                                   symbolic.__dict__)
            arg.properties = lambda *args: ()
            if arg.propertiesstr:
                # Quote known attribute names so they become string literals
                # in the compiled lambda.
                lambdarhs = arg.propertiesstr
                for attrname in named_attributes:
                    lambdarhs = lambdarhs.replace(attrname, repr(attrname))
                arg.properties = eval("lambda %s: filter(None, (%s,))" %
                                      (lambdaargs, lambdarhs),
                                      symbolic.__dict__)
        self.check_lambdas()

    def check_lambdas(self):
        """Check lambdas for unknown arguments."""
        # Call each compiled lambda with dummy positional values; a NameError
        # means the expression referenced an unknown argument name.
        args = range(len(self))
        if self.flops:
            try:
                self.flops(*args)
            except NameError as e:
                raise NameError("Unknown argument %r used in flops" %
                                str(e).split("'")[1])
        for arg in self:
            if arg.min:
                try:
                    arg.min(*args)
                except NameError as e:
                    raise NameError("Unknown argument %r used in min for %s" %
                                    (str(e).split("'")[1], arg))
            if arg.max:
                try:
                    arg.max(*args)
                except NameError as e:
                    raise NameError("Unknown argument %r used in max for %s" %
                                    (str(e).split("'")[1], arg))
            if arg.properties:
                try:
                    arg.properties(*args)
                except NameError as e:
                    raise NameError("Unknown argument or property %r "
                                    "used in properties for %s" %
                                    (str(e).split("'")[1], arg))

    def __str__(self):
        """Format as human readable."""
        return "%s(%s)" % (self[0], ", ".join(arg.name for arg in self[1:]))

    def __repr__(self):
        """Format as python parsable string."""
        # Python 2: map() returns a list here.
        args = map(repr, [str(self[0])] + self[1:])
        if self.flops:
            args.append("flops=%r" % self.flopsstr)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __call__(self, *args, **kwargs):
        """Create a call from the signature with given arguments."""
        if len(args) == 0:
            args = tuple(arg.default() for arg in self[1:])
        return Call(self, *args, **kwargs)

    def __getattr__(self, name):
        """Variable names as attributes."""
        try:
            return self[self.argpos(name)]
        except:
            pass
        return list.__getattr__(self, name)

    def argpos(self, name):
        """Search for an argument id by name."""
        for argid, arg in enumerate(self):
            if arg.name == name:
                return argid
        raise IndexError("Unknown argument: %s" % name)

    def argsbytype(self, type_, *types):
        """Return a list of argument positions."""
        if types:
            return sorted(set(self.argsbytype(type_) +
                              self.argsbytype(*types)))
        # Cache per-type results in argtypelookup.
        if type_ not in self.argtypelookup:
            self.argtypelookup[type_] = [i for i, arg in enumerate(self)
                                         if isinstance(arg, type_)]
        return self.argtypelookup[type_]

    def dataargs(self):
        """Return a list of data argument positions."""
        return self.argsbytype(Data)

    def datatype(self):
        """Deduce type of operands (single, double, complex, ...)."""
        # datatype is type of first dataarg
        return self[self.dataargs()[0]].typename


class BasicCall(list):

    """Base class for Calls with and without a Signature."""

    def __init__(self, sig, *args):
        """Initialize from arguments."""
        if not args:
            # Default argument values: "" for strings, 0 otherwise.
            args = tuple("" if arg == "char*" else 0 for arg in sig[1:])
        if len(sig) != 1 + len(args):
            raise TypeError("%s takes %d arguments (%d given)" %
                            (sig[0], len(sig) - 1, len(args)))
        list.__init__(self, (str(sig[0]),) + args)
        # Bypass __setattr__ machinery in subclasses.
        self.__dict__["sig"] = sig

    def __str__(self):
        """Format as human readable."""
        return "%s(%s)" % (self[0], ", ".join(map(str, self[1:])))

    def __repr__(self):
        """Format as python parsable string."""
        args = map(repr, [self.sig] + self[1:])
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __copy__(self):
        """Create a shallow copy."""
        return type(self)(self.sig, *self[1:])

    def copy(self):
        """Create a copy."""
        return self.__copy__()


class Call(BasicCall):

    """A call to a signature."""

    def __init__(self, sig, *args, **kwargs):
        """Initialize from signature and arguments."""
        if not isinstance(sig, Signature):
            raise TypeError("a Signature is required as first argument")
        BasicCall.__init__(self, sig, *args)
        for arg, val in kwargs.iteritems():
            setattr(self, arg, val)

    def __getattr__(self, name):
        """Variable names as attributes."""
        try:
            return self[self.sig.argpos(name)]
        except:
            pass
        return BasicCall.__getattr__(self, name)

    def __setattr__(self, name, value):
        """Variable names as attributes."""
        try:
            self[self.sig.argpos(name)] = value
            return value
        except:
            pass
        list.__setattr__(self, name, value)

    def argdict(self):
        """Create a dictionary of the calls arguments."""
        return dict((arg.name, val) for arg, val in zip(self.sig, self))

    def restrict_once(self):
        """Restrict integer arguments with minimum expressions once."""
        l = list(self)
        for i, arg in enumerate(self.sig):
            if self[i] is not None and arg.min:
                try:
                    self[i] = max(self[i], arg.min(*l))
                except TypeError:
                    pass  # probably a None
            if self[i] is not None and arg.max:
                try:
                    self[i] = min(self[i], arg.max(*l))
                except TypeError:
                    pass  # probably a None

    def restrict(self):
        """Restrict integer arguments with minimum expressions."""
        # Iterate until the argument values reach a fixed point.
        calls = []
        while self[1:] not in calls:
            calls.append(self[1:])
            self.restrict_once()

    def complete_once(self):
        """Attempt to complete arguments with minimum expressions once."""
        l = list(self)
        for i, arg in enumerate(self.sig):
            if self[i] is None:
                if arg.min:
                    try:
                        self[i] = arg.min(*l)
                    except TypeError:
                        pass  # probably a None
                else:
                    self[i] = arg.default()

    def complete(self):
        """Attempt to complete all arguments with minimum expressions."""
        # Iterate until the argument values reach a fixed point.
        calls = []
        while self[1:] not in calls:
            calls.append(self[1:])
            self.complete_once()

    def properties(self, argid=None):
        """Return a list of properties for the arguments."""
        # NOTE(review): `if argid:` treats argid == 0 like None — argument 0
        # is the kernel Name, so this is presumably never hit; confirm.
        if argid:
            return self.sig[argid].properties(*self)
        return tuple(arg.properties(*self) for arg in self.sig)

    def flops(self):
        """Compute the call flops."""
        if self.sig.flops is not None:
            return self.sig.flops(*self)
        return None

    def format_sampler(self):
        """Format for a sampler."""
        return [arg.format_sampler(val) for arg, val in zip(self.sig, self)]


class Arg(object):

    """Base class for signature arguments."""

    class __metaclass__(type):

        """Meta class for Arg."""

        def __repr__(cls):
            """Class name as representation."""
            return cls.__name__

    def __init__(self, name, attr=None):
        """Keep name and attributes."""
        self.name = name
        self.propertiesstr = attr

    def __repr__(self):
        """Format as python parsable string."""
        args = [self.name]
        if self.propertiesstr:
            args.append(self.propertiesstr)
        args = map(repr, args)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __str__(self):
        """Format as human readable."""
        return str(self.name)

    def __cmp__(self, other):
        """Compare with other argument."""
        return cmp(type(self), type(other)) or cmp(
            (self.name, self.propertiesstr),
            (other.name, other.propertiesstr)
        )

    @staticmethod
    def format_sampler(val):
        """Format value for a sampler."""
        return val


class Name(Arg):

    """Name argument."""

    def __cmp__(self, other):
        """Compare with other."""
        # A Name compares equal to its plain string.
        if self.name == other:
            return 0
        return Arg.__cmp__(self, other)

    def default(self):
        """Default: Kernel name."""
        return self.name


class Flag(Arg):

    """Flag argument."""

    def __init__(self, name, flags, attr=None):
        """Initialize with name and list of possible flags."""
        Arg.__init__(self, name, attr)
        self.flags = flags

    def __repr__(self):
        """Format as python parsable string."""
        args = [self.name, self.flags]
        if self.propertiesstr:
            args.append(self.propertiesstr)
        args = map(repr, args)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __cmp__(self, other):
        """Compare with other."""
        return Arg.__cmp__(self, other) or cmp(self.flags, other.flags)

    def default(self):
        """Default: first possible flag."""
        return self.flags[0]


def _create_Flag(classname, defaultname, flags):
    """Class factory for Flag arguments."""

    def __init__(self, name=defaultname, attr=None):
        """Initialize custom Flag."""
        Flag.__init__(self, name, flags, attr)

    def __repr__(self):
        """Format as python parsable string."""
        args = []
        if self.name != defaultname:
            args.append(self.name)
            if self.propertiesstr:
                args.append(self.propertiesstr)
            args = map(repr, args)
        elif self.propertiesstr:
            args.append("attr=%r" % self.propertiesstr)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    # Registers the new class in this module's namespace.
    globals()[classname] = type(classname, (Flag,), {
        "__init__": __init__,
        "__repr__": __repr__
    })

# Standard BLAS/LAPACK flag arguments.
_create_Flag("Side", "side", ("L", "R"))
_create_Flag("Uplo", "uplo", ("L", "U"))
_create_Flag("Trans", "trans", ("N", "T"))
_create_Flag("cTrans", "trans", ("N", "T", "C"))
_create_Flag("Diag", "diag", ("N", "U"))


class ArgWithMin(Arg):

    """Base class for Arguments with a minstr."""

    def __init__(self, name, min=None, attr=None, max=None):
        """Optional minimum expression."""
        Arg.__init__(self, name, attr)
        self.minstr = min
        self.maxstr = max

    def __repr__(self):
        """Format as python parsable string."""
        # None placeholders keep later values at the right positional slot.
        # NOTE(review): when minstr is unset but both propertiesstr and
        # maxstr are set, a None placeholder for min appears to be appended
        # twice (once per branch), yielding 5 constructor args — confirm.
        args = [self.name]
        if self.minstr:
            args.append(self.minstr)
        if self.propertiesstr:
            if not self.minstr:
                args.append(None)
            args.append(self.propertiesstr)
        if self.maxstr:
            if not self.minstr:
                args.append(None)
            if not self.propertiesstr:
                args.append(None)
            args.append(self.maxstr)
        args = map(repr, args)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __cmp__(self, other):
        """Compare with other."""
        return Arg.__cmp__(self, other) or cmp((self.minstr, self.maxstr),
                                               (other.minstr, other.maxstr))

    def default(self):
        """Default: 1."""
        if self.minstr is None:
            return 1
        return None


class Dim(ArgWithMin):

    """Dimension argument."""

    pass


class Scalar(Arg):

    """Scalar argument."""

    typename = None

    def __init__(self, name="alpha", attr=None):
        """Initialize (no special case)."""
        Arg.__init__(self, name, attr)

    def __repr__(self):
        """Format as python parsable string."""
        args = []
        if self.name != "alpha":
            args.append(repr(self.name))
            if self.propertiesstr:
                args.append(repr(self.propertiesstr))
        elif self.propertiesstr:
            args.append("attr=%r" % self.propertiesstr)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    @staticmethod
    def default():
        """Default: 1.0."""
        return 1.0


def _create_Scalar(classname, typename):
    """Class factory Scalar arguments."""
    attributes = {"typename": typename}
    if "complex" in typename:
        def format_sampler(self, val):
            """Format complex number as tuple of two reals."""
            if isinstance(val, numbers.Number):
                val = complex(val)
                return "%s,%s" % (val.real, val.imag)
            return val
        attributes["format_sampler"] = format_sampler
    if typename == "integer":
        @staticmethod
        def default():
            """Default: 1."""
            return 1
        attributes["default"] = default
    # Registers the new class in this module's namespace.
    globals()[classname] = type(classname, (Scalar,), attributes)

_create_Scalar("iScalar", "integer")
_create_Scalar("sScalar", "single precision")
_create_Scalar("dScalar", "double precision")
_create_Scalar("cScalar", "single precision complex")
_create_Scalar("zScalar", "double precision complex")


class Data(ArgWithMin):

    """Data (operand) argument."""

    typename = None

    def format_sampler(self, val):
        """Format surrounded by [] for the sampler."""
        if isinstance(val, int):
            return "[%s]" % val
        return val


def _create_Data(classname, typename):
    """Class factory Data arguments."""
    attributes = {"typename": typename}
    if "complex" in typename:
        def format_sampler(self, val):
            """Format surrounded by [] for the sampler.

            2x space (for real and complex parts).
            """
            if isinstance(val, int):
                return "[%s]" % (2 * val)
            return val
        attributes["format_sampler"] = format_sampler
    # Registers the new class in this module's namespace.
    globals()[classname] = type(classname, (Data,), attributes)

_create_Data("iData", "integer")
_create_Data("sData", "single precision")
_create_Data("dData", "double precision")
_create_Data("cData", "single precision complex")
_create_Data("zData", "double precision complex")


class Ld(ArgWithMin):

    """Leading dimension argument."""

    @staticmethod
    def format_sampler(val):
        """For Sampler: minimum = 1."""
        return max(1, val)


class Inc(Arg):

    """Increment argument."""

    @staticmethod
    def default():
        """Default: 1."""
        return 1


class Work(Data):

    """Work space argument."""

    pass


def _create_Work(classname, dataclass):
    """Class factory Work arguments."""
    globals()[classname] = type(classname, (Work, dataclass), {})

_create_Work("iWork", iData)
_create_Work("sWork", sData)
_create_Work("dWork", dData)
_create_Work("cWork", cData)
_create_Work("zWork", zData)


class Lwork(ArgWithMin):

    """Work size argument."""

    pass


class Info(Arg):

    """Info argument."""

    def __init__(self, name="info", attr=None):
        """Initialize (no special case)."""
        Arg.__init__(self, name, attr)

    @staticmethod
    def default():
        """Default: 0."""
        return 0


class String(Arg):

    """String argument."""

    @staticmethod
    def default():
        """Default: '' (empty string)."""
        return ""
from __future__ import unicode_literals

import hmac
import json
import os
import unittest
import uuid

import mock

import emailer


# Silence log output from the module under test during the run.
@mock.patch('logging.error', new=mock.Mock())
@mock.patch('logging.warn', new=mock.Mock())
@mock.patch('logging.info', new=mock.Mock())
class EmailerTests(unittest.TestCase):

    """Tests for the emailer Flask app (github commit-email webhook)."""

    def setUp(self):
        """Setup flask app for testing."""
        super(EmailerTests, self).setUp()
        emailer.app.config['TESTING'] = True
        self.app = emailer.app.test_client()
        # Default webhook request headers; signature is bogus on purpose.
        self.headers = {
            'x-github-event': 'push',
            'x-hub-signature': 'bogus-sig',
            'content-type': 'application/json',
        }
        # Canned message info as produced from a push payload.
        self.msg_info = {
            'repo': 'TESTING/test',
            'branch': 'the/TEST/master',
            'revision': 'some-TEST-sha1',
            'message': 'Merge pull request A lovely TEST\n\nTEST commit'
                       ' message.',
            'changed_files': ('R a.out\n'
                              'R gen\n'
                              'M README.md\n'
                              'M README\n'
                              'M LICENSE'),
            'pusher': 'TESTING-the-tester',
            'pusher_email': 'TESTING-the-tester <TEST@example.com>',
            'compare_url': 'http://TEST.fake',
            'pr_url': 'http://TEST.fake'
        }
        self.sender = 'noreply@fake.fake'
        self.recipient = 'joseph.tursi@hpe.com'
        self.reply_to = 'reply-to-me@fake.fake'
        self.send_grid_header = json.dumps(
            {'filters': {'clicktrack': {'settings': {'enable': 0}}}})

    @mock.patch('flask.got_request_exception.connect')
    @mock.patch('rollbar.init')
    def test_rollbar_init__testing(self, mock_init, mock_exc):
        """Verify rollbar is not initialized in unittest environment."""
        self.app.get('/')
        self.assertEqual(0, mock_init.call_count)
        self.assertEqual(0, mock_exc.call_count)

    @mock.patch('flask.got_request_exception.connect')
    @mock.patch('rollbar.init')
    def test_rollbar_init(self, mock_init, mock_exc):
        """Verify rollbar init is called."""
        os.environ['ROLLBAR_ACCESS_TOKEN'] = 'fakefakefake'
        emailer.app.config['TESTING'] = False
        # Invoke the before-first-request hook directly.
        emailer.app.before_first_request_funcs[0]()
        mock_init.assert_called_once_with(
            'fakefakefake',
            'github-email-notifications',
            root=os.path.abspath(os.path.dirname(__file__)),
            allow_logging_basic_config=False
        )
        mock_exc.assert_called_once_with(mock.ANY, emailer.app)

    @mock.patch('flask.got_request_exception.connect')
    @mock.patch('rollbar.init')
    def test_rollbar_init__env_name(self, mock_init, mock_exc):
        """Verify rollbar init is called with rollbar env name from env var."""
        os.environ['ROLLBAR_ACCESS_TOKEN'] = 'fakefakefake'
        os.environ['GITHUB_COMMIT_EMAILER_ROLLBAR_ENV'] = 'my-TEST-env'
        emailer.app.config['TESTING'] = False
        emailer.app.before_first_request_funcs[0]()
        mock_init.assert_called_once_with(
            'fakefakefake',
            'my-TEST-env',
            root=os.path.abspath(os.path.dirname(__file__)),
            allow_logging_basic_config=False
        )
        mock_exc.assert_called_once_with(mock.ANY, emailer.app)

    def test_index_redirects(self):
        """Verify index page redirects to chapel-lang.org."""
        r = self.app.get('/')
        self.assertEqual(301, r.status_code)
        self.assertEqual('http://chapel-lang.org/', r.headers['location'])

    @mock.patch('emailer._send_email')
    def test_non_push_event(self, mock_send):
        """Verify non-push event is skipped."""
        r = self.app.post('/commit-email',
                          headers={'x-github-event': 'whatevs'})
        self.assertEqual(200, r.status_code)
        self.assertEqual(0, mock_send.call_count)

    @mock.patch('emailer._get_secret')
    @mock.patch('emailer._send_email')
    def test_push_invalid_signature(self, mock_send, mock_secret):
        """Verify push event with invalid sig is skipped."""
        mock_secret.return_value = 'asdf'
        headers = {'x-github-event': 'push', 'x-hub-signature': 'sha1=bogus'}
        r = self.app.post('/commit-email', headers=headers)
        self.assertEqual(200, r.status_code)
        self.assertEqual(0, mock_send.call_count)

    def test_no_secret_in_env(self):
        """Verify raises error when secret is not in environment."""
        if 'GITHUB_COMMIT_EMAILER_SECRET' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_SECRET']
        self.assertRaises(
            ValueError,
            self.app.post,
            '/commit-email',
            headers=self.headers
        )

    @mock.patch('emailer._valid_signature')
    @mock.patch('emailer._get_secret')
    @mock.patch('emailer._send_email')
    def test_deleted_branch(self, mock_send, mock_sec, mock_sig):
        """Verify deleted branch notification are skipped."""
        mock_sec.return_value = 'asdf'
        mock_sig.return_value = True
        r = self.app.post('/commit-email', headers=self.headers,
                          data=json.dumps({'head_commit': {'message': 'Test'},
                                           'deleted': True}))
        self.assertEqual(200, r.status_code)
        self.assertEqual(0, mock_send.call_count)

    @mock.patch('emailer._valid_signature')
    @mock.patch('emailer._get_secret')
    @mock.patch('emailer._send_email')
    def test_test_send_mail(self, mock_send, mock_sec, mock_sig):
        """Verify correct message info is passed to _send_email."""
        mock_sec.return_value = 'adsf'
        mock_sig.return_value = True
        # Minimal github push payload.
        body = {
            'ref': 'the/master',
            'deleted': False,
            'compare': 'http://the-url.it',
            'repository': {'full_name': 'testing/test'},
            'pusher': {'name': 'the-tester', 'email': 'the@example.com'},
            'after': 'some-sha',
            'head_commit': {
                'id': 'some-sha1',
                'message': 'Merge pull request: A lovely\n\ncommit message.',
                'added': [],
                'removed': ['a.out', 'gen'],
                'modified': ['README.md', 'README', 'LICENSE'],
            },
        }
        expected_msg_info = {
            'repo': 'testing/test',
            'branch': 'the/master',
            'revision': 'some-sha1'[:7],
            'message': 'Merge pull request: A lovely\n\ncommit message.',
            'changed_files': ('R a.out\n'
                              'R gen\n'
                              'M README.md\n'
                              'M README\n'
                              'M LICENSE'),
            'pusher': 'the-tester',
            'pusher_email': 'the-tester <the@example.com>',
            'compare_url': 'http://the-url.it',
            'pr_url': 'Unavailable'
        }
        r = self.app.post('/commit-email', headers=self.headers,
                          data=json.dumps(body))
        self.assertEqual(200, r.status_code)
        mock_send.assert_called_once_with(expected_msg_info)

    def test_send_email__no_sender(self):
        """Verify ValueError when sender is not configured."""
        if 'GITHUB_COMMIT_EMAILER_SENDER' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_SENDER']
        self.assertRaises(ValueError,
                          emailer._send_email,
                          {'pusher_email': 'x'})

    def test_send_email__no_recipient(self):
        """Verify ValueError when recipient is not configured."""
        if 'GITHUB_COMMIT_EMAILER_RECIPIENT' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_RECIPIENT']
        self.assertRaises(ValueError,
                          emailer._send_email,
                          {'pusher_email': 'x'})

    def test_send_email__missing_both(self):
        """Verify ValueError when recipient and sender are not configured."""
        if 'GITHUB_COMMIT_EMAILER_SENDER' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_SENDER']
        if 'GITHUB_COMMIT_EMAILER_RECIPIENT' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_RECIPIENT']
        self.assertRaises(ValueError,
                          emailer._send_email,
                          {'pusher_email': 'x'})

    def prep_env(self):
        """Prepare os.environ for _send_email() tests."""
        os.environ['GITHUB_COMMIT_EMAILER_SENDER'] = self.sender
        os.environ['GITHUB_COMMIT_EMAILER_RECIPIENT'] = self.recipient

    def check_msg(self, actual_msg):
        """Verify recipient and sender on sent message."""
        # actual_msg is the (from, to, body) args given to smtplib sendmail.
        print(actual_msg)
        self.assertEqual([self.recipient], actual_msg[1])
        self.assertEqual(self.sender, actual_msg[0])
        assert '[Chapel Merge] TEST commit message.' in actual_msg[2]

    @mock.patch('smtplib.SMTP')
    def test_send_email__no_reply_to(self, mock_sendmail):
        """Verify email is sent as expected when reply-to is not configured."""
        self.prep_env()
        if 'GITHUB_COMMIT_EMAILER_REPLY_TO' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_REPLY_TO']
        emailer._send_email(self.msg_info)
        mock_sendmail.return_value.sendmail.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY)
        actual_msg = mock_sendmail.return_value.sendmail.call_args[0]
        self.check_msg(actual_msg)
        assert "reply-to" not in actual_msg[2]

    @mock.patch('smtplib.SMTP')
    def test_send_email__reply_to(self, mock_sendmail):
        """Verify email is sent as expected when reply-to is configured."""
        self.prep_env()
        os.environ['GITHUB_COMMIT_EMAILER_REPLY_TO'] = self.reply_to
        emailer._send_email(self.msg_info)
        mock_sendmail.return_value.sendmail.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY)
        actual_msg = mock_sendmail.return_value.sendmail.call_args[0]
        self.check_msg(actual_msg)
        assert "reply-to" in actual_msg[2]

    @mock.patch('smtplib.SMTP')
    def test_send_email__approved(self, mock_sendmail):
        """Verify approved header is added when config is set."""
        self.prep_env()
        os.environ['GITHUB_COMMIT_EMAILER_APPROVED_HEADER'] = \
            'my-super-secret'
        emailer._send_email(self.msg_info)
        mock_sendmail.return_value.sendmail.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY)
        actual_msg = mock_sendmail.return_value.sendmail.call_args[0]
        self.check_msg(actual_msg)
        assert "approved" in actual_msg[2]

    @mock.patch('smtplib.SMTP')
    def test_send_email__no_approved(self, mock_sendmail):
        """Verify approved header is not added when config is not set."""
        self.prep_env()
        if 'GITHUB_COMMIT_EMAILER_APPROVED_HEADER' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_APPROVED_HEADER']
        emailer._send_email(self.msg_info)
        mock_sendmail.return_value.sendmail.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY)
        actual_msg = mock_sendmail.return_value.sendmail.call_args[0]
        self.check_msg(actual_msg)
        assert "approved" not in actual_msg[2]

    @mock.patch('smtplib.SMTP')
    def test_send_email__unicode_body(self, mock_sendmail):
        """Verify unicode characters in msg_info are handled."""
        msg_info = self.msg_info
        msg_info['message'] += '\n\u2026'
        self.prep_env()
        emailer._send_email(msg_info)
        mock_sendmail.return_value.sendmail.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY)
        actual_msg = mock_sendmail.return_value.sendmail.call_args[0]
        self.check_msg(actual_msg)

    def test_get_sender__from_author(self):
        """Verify sent from author when appropriate config var set."""
        os.environ['GITHUB_COMMIT_EMAILER_SEND_FROM_AUTHOR'] = 'whatevs'
        actual = emailer._get_sender('my-address')
        self.assertEqual('my-address', actual)

    def test_get_sender__from_noreply(self):
        """Verify sent from config'd sender when appropriate config var not set.
        """
        if 'GITHUB_COMMIT_EMAILER_SEND_FROM_AUTHOR' in os.environ:
            del os.environ['GITHUB_COMMIT_EMAILER_SEND_FROM_AUTHOR']
        os.environ['GITHUB_COMMIT_EMAILER_SENDER'] = 'noreply-addr'
        actual = emailer._get_sender('my-address')
        self.assertEqual('noreply-addr', actual)

    def test_get_subject(self):
        """Verify get_subject returns first line of commit message and repo
        name.
        """
        expected = '[Chapel Merge] this is a message'
        actual = emailer._get_subject('TEST/it', 'this is a message')
        self.assertEqual(expected, actual)

    def test_get_subject__msg_greater_than_50(self):
        """Verify subject when commit message line has more than 50 chars."""
        repo = 'TEST/realllllllllllllllyyyyyyyyyyy-loooooooooooooong'
        msg = 'this is really long {0}'.format('.' * 100)
        assert len(msg) > 50
        expected = '[Chapel Merge] {0}'.format(msg[:50])
        actual = emailer._get_subject(repo, msg)
        self.assertEqual(expected, actual)

    def test_get_subject__third_line(self):
        """Verify subject when commit message has three lines."""
        # Merge-commit subject comes from the third line of the message.
        msg = ('merge pull request #blah\n\n'
               'my real message\n\n'
               'with lots of info\n')
        expected = '[Chapel Merge] my real message'
        actual = emailer._get_subject('TEST/it', msg)
        self.assertEqual(expected, actual)

    def test_valid_signature__true__str(self):
        """Verify _valid_signature returns true when signature matches."""
        body = '{"rock": "on"}'
        secret = str(uuid.uuid4())
        h = hmac.new(secret.encode('utf8'), body.encode('utf8'),
                     digestmod="sha1")
        sig = 'sha1=' + h.hexdigest()
        gh_sig = sig
        self.assertTrue(emailer._valid_signature(gh_sig, body, secret))

    def test_valid_signature__true__unicode(self):
        """Verify _valid_signature returns true when signature matches, even if github\
        signature is unicode."""
        body = '{"rock": "on"}'
        secret = str(uuid.uuid4())
        h = hmac.new(secret.encode('utf8'), body.encode('utf8'),
                     digestmod="sha1")
        sig = 'sha1=' + h.hexdigest()
        gh_sig = str(sig)
        self.assertTrue(emailer._valid_signature(gh_sig, body, secret))

    # (truncated at chunk boundary — body of this test continues past view)
    def test_valid_signature__false(self):
        """Verify _valid_signature
returns False when signature does not match.""" self.assertFalse( emailer._valid_signature(str('adsf'), 'asdf', 'my-secret') ) if __name__ == '__main__': unittest.main()
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''Use avbin to decode audio and video media. 
''' __docformat__ = 'restructuredtext' __version__ = '$Id$' import ctypes import threading import time import pyglet from pyglet import gl from pyglet.gl import gl_info from pyglet import image import pyglet.lib from pyglet.media import \ MediaFormatException, StreamingSource, VideoFormat, AudioFormat, \ AudioData, MediaEvent, WorkerThread, SourceInfo av = pyglet.lib.load_library('avbin', darwin='/usr/local/lib/libavbin.dylib') AVBIN_RESULT_ERROR = -1 AVBIN_RESULT_OK = 0 AVbinResult = ctypes.c_int AVBIN_STREAM_TYPE_UNKNOWN = 0 AVBIN_STREAM_TYPE_VIDEO = 1 AVBIN_STREAM_TYPE_AUDIO = 2 AVbinStreamType = ctypes.c_int AVBIN_SAMPLE_FORMAT_U8 = 0 AVBIN_SAMPLE_FORMAT_S16 = 1 AVBIN_SAMPLE_FORMAT_S24 = 2 AVBIN_SAMPLE_FORMAT_S32 = 3 AVBIN_SAMPLE_FORMAT_FLOAT = 4 AVbinSampleFormat = ctypes.c_int AVBIN_LOG_QUIET = -8 AVBIN_LOG_PANIC = 0 AVBIN_LOG_FATAL = 8 AVBIN_LOG_ERROR = 16 AVBIN_LOG_WARNING = 24 AVBIN_LOG_INFO = 32 AVBIN_LOG_VERBOSE = 40 AVBIN_LOG_DEBUG = 48 AVbinLogLevel = ctypes.c_int AVbinFileP = ctypes.c_void_p AVbinStreamP = ctypes.c_void_p Timestamp = ctypes.c_int64 class AVbinFileInfo(ctypes.Structure): _fields_ = [ ('structure_size', ctypes.c_size_t), ('n_streams', ctypes.c_int), ('start_time', Timestamp), ('duration', Timestamp), ('title', ctypes.c_char * 512), ('author', ctypes.c_char * 512), ('copyright', ctypes.c_char * 512), ('comment', ctypes.c_char * 512), ('album', ctypes.c_char * 512), ('year', ctypes.c_int), ('track', ctypes.c_int), ('genre', ctypes.c_char * 32), ] class _AVbinStreamInfoVideo8(ctypes.Structure): _fields_ = [ ('width', ctypes.c_uint), ('height', ctypes.c_uint), ('sample_aspect_num', ctypes.c_uint), ('sample_aspect_den', ctypes.c_uint), ('frame_rate_num', ctypes.c_uint), ('frame_rate_den', ctypes.c_uint), ] class _AVbinStreamInfoAudio8(ctypes.Structure): _fields_ = [ ('sample_format', ctypes.c_int), ('sample_rate', ctypes.c_uint), ('sample_bits', ctypes.c_uint), ('channels', ctypes.c_uint), ] class _AVbinStreamInfoUnion8(ctypes.Union): 
_fields_ = [ ('video', _AVbinStreamInfoVideo8), ('audio', _AVbinStreamInfoAudio8), ] class AVbinStreamInfo8(ctypes.Structure): _fields_ = [ ('structure_size', ctypes.c_size_t), ('type', ctypes.c_int), ('u', _AVbinStreamInfoUnion8) ] class AVbinPacket(ctypes.Structure): _fields_ = [ ('structure_size', ctypes.c_size_t), ('timestamp', Timestamp), ('stream_index', ctypes.c_int), ('data', ctypes.POINTER(ctypes.c_uint8)), ('size', ctypes.c_size_t), ] AVbinLogCallback = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p) av.avbin_get_version.restype = ctypes.c_int av.avbin_get_ffmpeg_revision.restype = ctypes.c_int av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t av.avbin_have_feature.restype = ctypes.c_int av.avbin_have_feature.argtypes = [ctypes.c_char_p] av.avbin_init.restype = AVbinResult av.avbin_set_log_level.restype = AVbinResult av.avbin_set_log_level.argtypes = [AVbinLogLevel] av.avbin_set_log_callback.argtypes = [AVbinLogCallback] av.avbin_open_filename.restype = AVbinFileP av.avbin_open_filename.argtypes = [ctypes.c_char_p] av.avbin_close_file.argtypes = [AVbinFileP] av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp] av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)] av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int, ctypes.POINTER(AVbinStreamInfo8)] av.avbin_open_stream.restype = ctypes.c_void_p av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int] av.avbin_close_stream.argtypes = [AVbinStreamP] av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)] av.avbin_read.restype = AVbinResult av.avbin_decode_audio.restype = ctypes.c_int av.avbin_decode_audio.argtypes = [AVbinStreamP, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)] av.avbin_decode_video.restype = ctypes.c_int av.avbin_decode_video.argtypes = [AVbinStreamP, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p] if True: # XXX lock all avbin calls. 
not clear from ffmpeg documentation if this # is necessary. leaving it on while debugging to rule out the possiblity # of a problem. def synchronize(func, lock): def f(*args): lock.acquire() result = func(*args) lock.release() return result return f _avbin_lock = threading.Lock() for name in dir(av): if name.startswith('avbin_'): setattr(av, name, synchronize(getattr(av, name), _avbin_lock)) def get_version(): return av.avbin_get_version() class AVbinException(MediaFormatException): pass def timestamp_from_avbin(timestamp): return float(timestamp) / 1000000 def timestamp_to_avbin(timestamp): return int(timestamp * 1000000) class VideoPacket(object): _next_id = 0 def __init__(self, packet): self.timestamp = timestamp_from_avbin(packet.timestamp) self.data = (ctypes.c_uint8 * packet.size)() self.size = packet.size ctypes.memmove(self.data, packet.data, self.size) # Decoded image. 0 == not decoded yet; None == Error or discarded self.image = 0 self.id = self._next_id self.__class__._next_id += 1 class AVbinSource(StreamingSource): def __init__(self, filename, file=None): if file is not None: raise NotImplementedError('TODO: Load from file stream') self._file = av.avbin_open_filename(filename) if not self._file: raise AVbinException('Could not open "%s"' % filename) self._video_stream = None self._video_stream_index = -1 self._audio_stream = None self._audio_stream_index = -1 file_info = AVbinFileInfo() file_info.structure_size = ctypes.sizeof(file_info) av.avbin_file_info(self._file, ctypes.byref(file_info)) self._duration = timestamp_from_avbin(file_info.duration) self.info = SourceInfo() self.info.title = file_info.title self.info.author = file_info.author self.info.copyright = file_info.copyright self.info.comment = file_info.comment self.info.album = file_info.album self.info.year = file_info.year self.info.track = file_info.track self.info.genre = file_info.genre # Pick the first video and audio streams found, ignore others. 
for i in range(file_info.n_streams): info = AVbinStreamInfo8() info.structure_size = ctypes.sizeof(info) av.avbin_stream_info(self._file, i, info) if (info.type == AVBIN_STREAM_TYPE_VIDEO and not self._video_stream): stream = av.avbin_open_stream(self._file, i) if not stream: continue self.video_format = VideoFormat( width=info.u.video.width, height=info.u.video.height) if info.u.video.sample_aspect_num != 0: self.video_format.sample_aspect = ( float(info.u.video.sample_aspect_num) / info.u.video.sample_aspect_den) if _have_frame_rate: self.video_format.frame_rate = ( float(info.u.video.frame_rate_num) / info.u.video.frame_rate_den) self._video_stream = stream self._video_stream_index = i elif (info.type == AVBIN_STREAM_TYPE_AUDIO and info.u.audio.sample_bits in (8, 16) and info.u.audio.channels in (1, 2) and not self._audio_stream): stream = av.avbin_open_stream(self._file, i) if not stream: continue self.audio_format = AudioFormat( channels=info.u.audio.channels, sample_size=info.u.audio.sample_bits, sample_rate=info.u.audio.sample_rate) self._audio_stream = stream self._audio_stream_index = i self._packet = AVbinPacket() self._packet.structure_size = ctypes.sizeof(self._packet) self._packet.stream_index = -1 self._events = [] # Timestamp of last video packet added to decoder queue. 
self._video_timestamp = 0 self._buffered_audio_data = [] if self.audio_format: self._audio_buffer = \ (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())() if self.video_format: self._video_packets = [] self._decode_thread = WorkerThread() self._decode_thread.start() self._condition = threading.Condition() def __del__(self): if _debug: print 'del avbin source' try: if self._video_stream: av.avbin_close_stream(self._video_stream) if self._audio_stream: av.avbin_close_stream(self._audio_stream) av.avbin_close_file(self._file) except: pass # XXX TODO call this / add to source api def delete(self): if self.video_format: self._decode_thread.stop() def seek(self, timestamp): if _debug: print 'AVbin seek', timestamp av.avbin_seek_file(self._file, timestamp_to_avbin(timestamp)) self._audio_packet_size = 0 del self._events[:] del self._buffered_audio_data[:] if self.video_format: self._video_timestamp = 0 self._condition.acquire() for packet in self._video_packets: packet.image = None self._condition.notify() self._condition.release() del self._video_packets[:] self._decode_thread.clear_jobs() def _get_packet(self): # Read a packet into self._packet. Returns True if OK, False if no # more packets are in stream. return av.avbin_read(self._file, self._packet) == AVBIN_RESULT_OK def _process_packet(self): # Returns (packet_type, packet), where packet_type = 'video' or # 'audio'; and packet is VideoPacket or AudioData. In either case, # packet is buffered or queued for decoding; no further action is # necessary. Returns (None, None) if packet was neither type. if self._packet.stream_index == self._video_stream_index: if self._packet.timestamp < 0: # XXX TODO # AVbin needs hack to decode timestamp for B frames in # some containers (OGG?). See # http://www.dranger.com/ffmpeg/tutorial05.html # For now we just drop these frames. 
return None, None video_packet = VideoPacket(self._packet) if _debug: print 'Created and queued frame %d (%f)' % \ (video_packet.id, video_packet.timestamp) self._video_timestamp = max(self._video_timestamp, video_packet.timestamp) self._video_packets.append(video_packet) self._decode_thread.put_job( lambda: self._decode_video_packet(video_packet)) return 'video', video_packet elif self._packet.stream_index == self._audio_stream_index: audio_data = self._decode_audio_packet() if audio_data: if _debug: print 'Got an audio packet at', audio_data.timestamp self._buffered_audio_data.append(audio_data) return 'audio', audio_data return None, None def get_audio_data(self, bytes): try: audio_data = self._buffered_audio_data.pop(0) audio_data_timeend = audio_data.timestamp + audio_data.duration except IndexError: audio_data = None audio_data_timeend = self._video_timestamp + 1 if _debug: print 'get_audio_data' have_video_work = False # Keep reading packets until we have an audio packet and all the # associated video packets have been enqueued on the decoder thread. while not audio_data or ( self._video_stream and self._video_timestamp < audio_data_timeend): if not self._get_packet(): break packet_type, packet = self._process_packet() if packet_type == 'video': have_video_work = True elif not audio_data and packet_type == 'audio': audio_data = self._buffered_audio_data.pop(0) if _debug: print 'Got requested audio packet at', audio_data.timestamp audio_data_timeend = audio_data.timestamp + audio_data.duration if have_video_work: # Give decoder thread a chance to run before we return this audio # data. 
# get_audio_data (continued): yield the GIL so the decoder worker
            # thread can run on the video packets we just queued before this
            # audio data is returned.
            time.sleep(0)

        if not audio_data:
            if _debug:
                print 'get_audio_data returning None'
            return None

        # Deliver pending MediaEvents that fall within this audio packet's
        # time span, re-based to packet-relative timestamps.
        while self._events and self._events[0].timestamp <= audio_data_timeend:
            event = self._events.pop(0)
            if event.timestamp >= audio_data.timestamp:
                event.timestamp -= audio_data.timestamp
                audio_data.events.append(event)

        if _debug:
            print 'get_audio_data returning ts %f with events' % \
                audio_data.timestamp, audio_data.events
            print 'remaining events are', self._events
        return audio_data

    def _decode_audio_packet(self):
        # Decode the current packet (self._packet) into PCM bytes and
        # return an AudioData, or fall out of the loop (returning None)
        # when the decoder consumes the packet without producing samples.
        packet = self._packet
        size_out = ctypes.c_int(len(self._audio_buffer))

        while True:
            # Re-cast each pass: avbin_decode_audio may consume the packet
            # in several chunks, so we advance a raw pointer through it.
            audio_packet_ptr = ctypes.cast(packet.data, ctypes.c_void_p)
            audio_packet_size = packet.size

            used = av.avbin_decode_audio(self._audio_stream,
                audio_packet_ptr, audio_packet_size,
                self._audio_buffer, size_out)

            if used < 0:
                # Decoder error; mark the packet exhausted and give up.
                self._audio_packet_size = 0
                break

            # Advance past the bytes the decoder consumed.
            audio_packet_ptr.value += used
            audio_packet_size -= used

            if size_out.value <= 0:
                # Consumed input but produced no samples yet; decode more.
                continue

            # XXX how did this ever work? replaced with copy below

            # buffer = ctypes.string_at(self._audio_buffer, size_out)

            # XXX to actually copy the data.. but it never used to crash, so
            # maybe I'm missing something

            buffer = ctypes.create_string_buffer(size_out.value)
            ctypes.memmove(buffer, self._audio_buffer, len(buffer))
            buffer = buffer.raw

            # Duration derived from byte length and the stream's byte rate.
            duration = float(len(buffer)) / self.audio_format.bytes_per_second
            self._audio_packet_timestamp = \
                timestamp = timestamp_from_avbin(packet.timestamp)

            return AudioData(buffer, len(buffer), timestamp, duration, [])

    def _decode_video_packet(self, packet):
        # Decode one queued VideoPacket into an RGB ImageData (None on
        # decoder error).  Runs on the worker thread; stores the result
        # on the packet itself.
        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 3
        buffer = (ctypes.c_uint8 * (pitch * height))()
        result = av.avbin_decode_video(self._video_stream,
                                       packet.data, packet.size,
                                       buffer)
        if result < 0:
            image_data = None
        else:
            image_data = image.ImageData(width, height, 'RGB', buffer, pitch)
        packet.image = image_data

        # Notify get_next_video_frame() that another one is ready.
self._condition.acquire() self._condition.notify() self._condition.release() def _ensure_video_packets(self): '''Process packets until a video packet has been queued (and begun decoding). Return False if EOS. ''' if not self._video_packets: if _debug: print 'No video packets...' # Read ahead until we have another video packet self._get_packet() packet_type, _ = self._process_packet() while packet_type and packet_type != 'video': self._get_packet() packet_type, _ = self._process_packet() if not packet_type: return False if _debug: print 'Queued packet', _ return True old_stamp = None def get_next_video_timestamp(self): if not self.video_format: return if self._ensure_video_packets(): next_stamp = self._video_packets[0].timestamp if _debug: print 'Next video timestamp is', next_stamp if self.old_stamp == next_stamp: return self.old_stamp = next_stamp return next_stamp def get_next_video_frame(self): if not self.video_format: return if self._ensure_video_packets(): packet = self._video_packets.pop(0) if _debug: print 'Waiting for', packet # Block until decoding is complete self._condition.acquire() while packet.image == 0: self._condition.wait() self._condition.release() if _debug: print 'Returning', packet return packet.image av.avbin_init() if pyglet.options['debug_media']: _debug = True av.avbin_set_log_level(AVBIN_LOG_DEBUG) else: _debug = False av.avbin_set_log_level(AVBIN_LOG_QUIET) _have_frame_rate = av.avbin_have_feature('frame_rate')
# -*- coding: utf-8 -*- """One of the oft-cited tenets of Python is that it is better to ask forgiveness than permission. That is, there are many cases where it is more inclusive and correct to handle exceptions than spend extra lines and execution time checking for conditions. This philosophy makes good exception handling features all the more important. Unfortunately Python's :mod:`traceback` module is woefully behind the times. The ``tbutils`` module provides two disparate but complementary featuresets: 1. With :class:`ExceptionInfo` and :class:`TracebackInfo`, the ability to extract, construct, manipulate, format, and serialize exceptions, tracebacks, and callstacks. 2. With :class:`ParsedException`, the ability to find and parse tracebacks from captured output such as logs and stdout. There is also the :class:`ContextualTracebackInfo` variant of :class:`TracebackInfo`, which includes much more information from each frame of the callstack, including values of locals and neighboring lines of code. """ from __future__ import print_function import re import sys import linecache # TODO: chaining primitives? what are real use cases where these help? # TODO: print_* for backwards compatability # __all__ = ['extract_stack', 'extract_tb', 'format_exception', # 'format_exception_only', 'format_list', 'format_stack', # 'format_tb', 'print_exc', 'format_exc', 'print_exception', # 'print_last', 'print_stack', 'print_tb'] __all__ = ['ExceptionInfo', 'TracebackInfo', 'Callpoint', 'ContextualExceptionInfo', 'ContextualTracebackInfo', 'ContextualCallpoint', 'print_exception', 'ParsedException'] class Callpoint(object): """The Callpoint is a lightweight object used to represent a single entry in the code of a call stack. It stores the code-related metadata of a given frame. Available attributes are the same as the parameters below. 
Args: func_name (str): the function name lineno (int): the line number module_name (str): the module name module_path (str): the filesystem path of the module lasti (int): the index of bytecode execution line (str): the single-line code content (if available) """ __slots__ = ('func_name', 'lineno', 'module_name', 'module_path', 'lasti', 'line') def __init__(self, module_name, module_path, func_name, lineno, lasti, line=None): self.func_name = func_name self.lineno = lineno self.module_name = module_name self.module_path = module_path self.lasti = lasti self.line = line def to_dict(self): "Get a :class:`dict` copy of the Callpoint. Useful for serialization." ret = {} for slot in self.__slots__: try: ret[slot] = getattr(self, slot) except AttributeError: pass return ret @classmethod def from_current(cls, level=1): "Creates a Callpoint from the location of the calling function." frame = sys._getframe(level) return cls.from_frame(frame) @classmethod def from_frame(cls, frame): "Create a Callpoint object from data extracted from the given frame." func_name = frame.f_code.co_name lineno = frame.f_lineno module_name = frame.f_globals.get('__name__', '') module_path = frame.f_code.co_filename lasti = frame.f_lasti line = _DeferredLine(module_path, lineno, frame.f_globals) return cls(module_name, module_path, func_name, lineno, lasti, line=line) @classmethod def from_tb(cls, tb): """Create a Callpoint from the traceback of the current exception. Main difference with :meth:`from_frame` is that ``lineno`` and ``lasti`` come from the traceback, which is to say the line that failed in the try block, not the line currently being executed (in the except block). 
""" func_name = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno lasti = tb.tb_lasti module_name = tb.tb_frame.f_globals.get('__name__', '') module_path = tb.tb_frame.f_code.co_filename line = _DeferredLine(module_path, lineno, tb.tb_frame.f_globals) return cls(module_name, module_path, func_name, lineno, lasti, line=line) def __repr__(self): cn = self.__class__.__name__ args = [getattr(self, s, None) for s in self.__slots__] if not any(args): return super(Callpoint, self).__repr__() else: return '%s(%s)' % (cn, ', '.join([repr(a) for a in args])) def tb_frame_str(self): """Render the Callpoint as it would appear in a standard printed Python traceback. Returns a string with filename, line number, function name, and the actual code line of the error on up to two lines. """ ret = ' File "%s", line %s, in %s\n' % (self.module_path, self.lineno, self.func_name) if self.line: ret += ' %s\n' % (str(self.line).strip(),) return ret class _DeferredLine(object): """The _DeferredLine type allows Callpoints and TracebackInfos to be constructed without potentially hitting the filesystem, as is the normal behavior of the standard Python :mod:`traceback` and :mod:`linecache` modules. Calling :func:`str` fetches and caches the line. Args: filename (str): the path of the file containing the line lineno (int): the number of the line in question module_globals (dict): an optional dict of module globals, used to handle advanced use cases using custom module loaders. 
""" def __init__(self, filename, lineno, module_globals=None): self.filename = filename self.lineno = lineno # TODO: this is going away when we fix linecache # TODO: (mark) read about loader self.module_globals = {} if module_globals is not None: for k in ('__name__', '__loader__'): v = module_globals.get(k) if v is None: self.module_globals[k] = v def __eq__(self, other): return (self.lineno, self.filename) == (other.lineno, other.filename) def __ne__(self, other): return not self == other def __str__(self): if hasattr(self, '_line'): return self._line try: linecache.checkcache(self.filename) line = linecache.getline(self.filename, self.lineno, self.module_globals) line = line.rstrip() except KeyError: line = '' self._line = line return line def __repr__(self): return repr(str(self)) def __len__(self): return len(str(self)) # TODO: dedup frames, look at __eq__ on _DeferredLine class TracebackInfo(object): """The TracebackInfo class provides a basic representation of a stack trace, be it from an exception being handled or just part of normal execution. It is basically a wrapper around a list of :class:`Callpoint` objects representing frames. Args: frames (list): A list of frame objects in the stack. .. note :: ``TracebackInfo`` can represent both exception tracebacks and non-exception tracebacks (aka stack traces). As a result, there is no ``TracebackInfo.from_current()``, as that would be ambiguous. Instead, call :meth:`TracebackInfo.from_frame` without the *frame* argument for a stack trace, or :meth:`TracebackInfo.from_traceback` without the *tb* argument for an exception traceback. """ callpoint_type = Callpoint def __init__(self, frames): self.frames = frames @classmethod def from_frame(cls, frame=None, level=1, limit=None): """Create a new TracebackInfo *frame* by recurring up in the stack a max of *limit* times. If *frame* is unset, get the frame from :func:`sys._getframe` using *level*. 
Args: frame (types.FrameType): frame object from :func:`sys._getframe` or elsewhere. Defaults to result of :func:`sys.get_frame`. level (int): If *frame* is unset, the desired frame is this many levels up the stack from the invocation of this method. Default ``1`` (i.e., caller of this method). limit (int): max number of parent frames to extract (defaults to :data:`sys.tracebacklimit`) """ ret = [] if frame is None: frame = sys._getframe(1) if limit is None: limit = getattr(sys, 'tracebacklimit', 1000) n = 0 while frame is not None and n < limit: item = cls.callpoint_type.from_frame(frame) ret.append(item) frame = frame.f_back n += 1 ret.reverse() return cls(ret) @classmethod def from_traceback(cls, tb=None, limit=None): """Create a new TracebackInfo from the traceback *tb* by recurring up in the stack a max of *limit* times. If *tb* is unset, get the traceback from the currently handled exception. If no exception is being handled, raise a :exc:`ValueError`. Args: frame (types.TracebackType): traceback object from :func:`sys.exc_info` or elsewhere. If absent or set to ``None``, defaults to ``sys.exc_info()[2]``, and raises a :exc:`ValueError` if no exception is currently being handled. limit (int): max number of parent frames to extract (defaults to :data:`sys.tracebacklimit`) """ ret = [] if tb is None: tb = sys.exc_info()[2] if tb is None: raise ValueError('no tb set and no exception being handled') if limit is None: limit = getattr(sys, 'tracebacklimit', 1000) n = 0 while tb is not None and n < limit: item = cls.callpoint_type.from_tb(tb) ret.append(item) tb = tb.tb_next n += 1 return cls(ret) @classmethod def from_dict(cls, d): "Complements :meth:`TracebackInfo.to_dict`." # TODO: check this. return cls(d['frames']) def to_dict(self): """Returns a dict with a list of :class:`Callpoint` frames converted to dicts. 
""" return {'frames': [f.to_dict() for f in self.frames]} def __len__(self): return len(self.frames) def __iter__(self): return iter(self.frames) def __repr__(self): cn = self.__class__.__name__ if self.frames: frame_part = ' last=%r' % (self.frames[-1],) else: frame_part = '' return '<%s frames=%s%s>' % (cn, len(self.frames), frame_part) def __str__(self): return self.get_formatted() def get_formatted(self): """Returns a string as formatted in the traditional Python built-in style observable when an exception is not caught. In other words, mimics :func:`traceback.format_tb` and :func:`traceback.format_stack`. """ ret = 'Traceback (most recent call last):\n' ret += ''.join([f.tb_frame_str() for f in self.frames]) return ret class ExceptionInfo(object): """An ExceptionInfo object ties together three main fields suitable for representing an instance of an exception: The exception type name, a string representation of the exception itself (the exception message), and information about the traceback (stored as a :class:`TracebackInfo` object). These fields line up with :func:`sys.exc_info`, but unlike the values returned by that function, ExceptionInfo does not hold any references to the real exception or traceback. This property makes it suitable for serialization or long-term retention, without worrying about formatting pitfalls, circular references, or leaking memory. Args: exc_type (str): The exception type name. exc_msg (str): String representation of the exception value. tb_info (TracebackInfo): Information about the stack trace of the exception. Like the :class:`TracebackInfo`, ExceptionInfo is most commonly instantiated from one of its classmethods: :meth:`from_exc_info` or :meth:`from_current`. 
""" #: Override this in inherited types to control the TracebackInfo type used tb_info_type = TracebackInfo def __init__(self, exc_type, exc_msg, tb_info): # TODO: additional fields for SyntaxErrors self.exc_type = exc_type self.exc_msg = exc_msg self.tb_info = tb_info @classmethod def from_exc_info(cls, exc_type, exc_value, traceback): """Create an :class:`ExceptionInfo` object from the exception's type, value, and traceback, as returned by :func:`sys.exc_info`. See also :meth:`from_current`. """ type_str = exc_type.__name__ type_mod = exc_type.__module__ if type_mod not in ("__main__", "__builtin__", "exceptions"): type_str = '%s.%s' % (type_mod, type_str) val_str = _some_str(exc_value) tb_info = cls.tb_info_type.from_traceback(traceback) return cls(type_str, val_str, tb_info) @classmethod def from_current(cls): """Create an :class:`ExceptionInfo` object from the current exception being handled, by way of :func:`sys.exc_info`. Will raise an exception if no exception is currently being handled. """ return cls.from_exc_info(*sys.exc_info()) def to_dict(self): """Get a :class:`dict` representation of the ExceptionInfo, suitable for JSON serialization. """ return {'exc_type': self.exc_type, 'exc_msg': self.exc_msg, 'exc_tb': self.tb_info.to_dict()} def __repr__(self): cn = self.__class__.__name__ try: len_frames = len(self.tb_info.frames) last_frame = ', last=%r' % (self.tb_info.frames[-1],) except: len_frames = 0 last_frame = '' args = (cn, self.exc_type, self.exc_msg, len_frames, last_frame) return '<%s [%s: %s] (%s frames%s)>' % args def get_formatted(self): """Returns a string formatted in the traditional Python built-in style observable when an exception is not caught. In other words, mimics :func:`traceback.format_exception`. 
""" # TODO: add SyntaxError formatting tb_str = self.tb_info.get_formatted() return ''.join([tb_str, '%s: %s' % (self.exc_type, self.exc_msg)]) class ContextualCallpoint(Callpoint): """The ContextualCallpoint is a :class:`Callpoint` subtype with the exact same API and storing two additional values: 1. :func:`repr` outputs for local variables from the Callpoint's scope 2. A number of lines before and after the Callpoint's line of code The ContextualCallpoint is used by the :class:`ContextualTracebackInfo`. """ def __init__(self, *a, **kw): self.local_reprs = kw.pop('local_reprs', {}) self.pre_lines = kw.pop('pre_lines', []) self.post_lines = kw.pop('post_lines', []) super(ContextualCallpoint, self).__init__(*a, **kw) @classmethod def from_frame(cls, frame): "Identical to :meth:`Callpoint.from_frame`" ret = super(ContextualCallpoint, cls).from_frame(frame) ret._populate_local_reprs(frame.f_locals) ret._populate_context_lines() return ret @classmethod def from_tb(cls, tb): "Identical to :meth:`Callpoint.from_tb`" ret = super(ContextualCallpoint, cls).from_tb(tb) ret._populate_local_reprs(tb.tb_frame.f_locals) ret._populate_context_lines() return ret def _populate_context_lines(self, pivot=8): DL, lineno = _DeferredLine, self.lineno try: module_globals = self.line.module_globals except: module_globals = None start_line = max(0, lineno - pivot) pre_lines = [DL(self.module_path, ln, module_globals) for ln in range(start_line, lineno)] self.pre_lines[:] = pre_lines post_lines = [DL(self.module_path, ln, module_globals) for ln in range(lineno + 1, lineno + 1 + pivot)] self.post_lines[:] = post_lines return def _populate_local_reprs(self, f_locals): local_reprs = self.local_reprs for k, v in f_locals.items(): try: local_reprs[k] = repr(v) except: surrogate = '<unprintable %s object>' % type(v).__name__ local_reprs[k] = surrogate return def to_dict(self): """ Same principle as :meth:`Callpoint.to_dict`, but with the added contextual values. 
With ``ContextualCallpoint.to_dict()``, each frame will now be represented like:: {'func_name': 'print_example', 'lineno': 0, 'module_name': 'example_module', 'module_path': '/home/example/example_module.pyc', 'lasti': 0, 'line': 'print "example"', 'locals': {'variable': '"value"'}, 'pre_lines': ['variable = "value"'], 'post_lines': []} The locals dictionary and line lists are copies and can be mutated freely. """ ret = super(ContextualCallpoint, self).to_dict() ret['locals'] = dict(self.local_reprs) # get the line numbers and textual lines # without assuming DeferredLines start_line = self.lineno - len(self.pre_lines) pre_lines = [{'lineno': start_line + i, 'line': str(l)} for i, l in enumerate(self.pre_lines)] # trim off leading empty lines for i, item in enumerate(pre_lines): if item['line']: break if i: pre_lines = pre_lines[i:] ret['pre_lines'] = pre_lines # now post_lines post_lines = [{'lineno': self.lineno + i, 'line': str(l)} for i, l in enumerate(self.post_lines)] _last = 0 for i, item in enumerate(post_lines): if item['line']: _last = i post_lines = post_lines[:_last + 1] ret['post_lines'] = post_lines return ret class ContextualTracebackInfo(TracebackInfo): """The ContextualTracebackInfo type is a :class:`TracebackInfo` subtype that is used by :class:`ContextualExceptionInfo` and uses the :class:`ContextualCallpoint` as its frame-representing primitive. """ callpoint_type = ContextualCallpoint class ContextualExceptionInfo(ExceptionInfo): """The ContextualTracebackInfo type is a :class:`TracebackInfo` subtype that uses the :class:`ContextualCallpoint` as its frame-representing primitive. It carries with it most of the exception information required to recreate the widely recognizable "500" page for debugging Django applications. """ tb_info_type = ContextualTracebackInfo # TODO: clean up & reimplement -- specifically for syntax errors def format_exception_only(etype, value): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by sys.last_type and sys.last_value. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is always the last string in the list. """ # Gracefully handle (the way Python 2.4 and earlier did) the case of # being called with (None, None). if etype is None: return [_format_final_exc_line(etype, value)] stype = etype.__name__ smod = etype.__module__ if smod not in ("__main__", "builtins", "exceptions"): stype = smod + '.' + stype if not issubclass(etype, SyntaxError): return [_format_final_exc_line(stype, value)] # It was a syntax error; show exactly where the problem was found. lines = [] filename = value.filename or "<string>" lineno = str(value.lineno) or '?' lines.append(' File "%s", line %s\n' % (filename, lineno)) badline = value.text offset = value.offset if badline is not None: lines.append(' %s\n' % badline.strip()) if offset is not None: caretspace = badline.rstrip('\n')[:offset].lstrip() # non-space whitespace (likes tabs) must be kept for alignment caretspace = ((c.isspace() and c or ' ') for c in caretspace) # only three spaces to account for offset1 == pos 0 lines.append(' %s^\n' % ''.join(caretspace)) msg = value.msg or "<no detail available>" lines.append("%s: %s\n" % (stype, msg)) return lines # TODO: use asciify, improved if necessary def _some_str(value): try: return str(value) except Exception: pass try: value = unicode(value) return value.encode("ascii", "backslashreplace") except Exception: pass return '<unprintable %s object>' % type(value).__name__ def _format_final_exc_line(etype, value): valuestr = _some_str(value) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line def 
print_exception(etype, value, tb, limit=None, file=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error. """ if file is None: file = sys.stderr if tb: tbi = TracebackInfo.from_traceback(tb, limit) print(str(tbi), end='', file=file) for line in format_exception_only(etype, value): print(line, end='', file=file) def fix_print_exception(): """ Sets the default exception hook :func:`sys.excepthook` to the :func:`tbutils.print_exception` that uses all the ``tbutils`` facilities to provide slightly more correct output behavior. """ sys.excepthook = print_exception _frame_re = re.compile(r'^File "(?P<filepath>.+)", line (?P<lineno>\d+)' r', in (?P<funcname>.+)$') _se_frame_re = re.compile(r'^File "(?P<filepath>.+)", line (?P<lineno>\d+)') # TODO: ParsedException generator over large bodies of text class ParsedException(object): """Stores a parsed traceback and exception as would be typically output by :func:`sys.excepthook` or :func:`traceback.print_exception`. .. note: Does not currently store SyntaxError details such as column. """ def __init__(self, exc_type_name, exc_msg, frames=None): self.exc_type = exc_type_name self.exc_msg = exc_msg self.frames = list(frames or []) @property def source_file(self): """ The file path of module containing the function that raised the exception, or None if not available. """ try: return self.frames[-1]['filepath'] except IndexError: return None def to_dict(self): "Get a copy as a JSON-serializable :class:`dict`." 
return {'exc_type': self.exc_type, 'exc_msg': self.exc_msg, 'frames': list(self.frames)} def __repr__(self): cn = self.__class__.__name__ return ('%s(%r, %r, frames=%r)' % (cn, self.exc_type, self.exc_msg, self.frames)) @classmethod def from_string(cls, tb_str): """Parse a traceback and exception from the text *tb_str*. This text is expected to have been decoded, otherwise it will be interpreted as UTF-8. This method does not search a larger body of text for tracebacks. If the first line of the text passed does not match one of the known patterns, a :exc:`ValueError` will be raised. This method will ignore trailing text after the end of the first traceback. Args: tb_str (str): The traceback text (:class:`unicode` or UTF-8 bytes) """ if not isinstance(tb_str, unicode): tb_str = tb_str.decode('utf-8') tb_lines = tb_str.lstrip().splitlines() # First off, handle some ignored exceptions. These can be the # result of exceptions raised by __del__ during garbage # collection while tb_lines: cl = tb_lines[-1] if cl.startswith('Exception ') and cl.endswith('ignored'): tb_lines.pop() else: break if tb_lines and tb_lines[0].strip() == 'Traceback (most recent call last):': start_line = 1 frame_re = _frame_re elif len(tb_lines) > 1 and tb_lines[-2].lstrip().startswith('^'): # This is to handle the slight formatting difference # associated with SyntaxErrors, which also don't really # have tracebacks start_line = 0 frame_re = _se_frame_re else: raise ValueError('unrecognized traceback string format') frames = [] for pair_idx in range(start_line, len(tb_lines), 2): frame_line = tb_lines[pair_idx].strip() frame_match = frame_re.match(frame_line) if frame_match: frame_dict = frame_match.groupdict() else: break frame_dict['source_line'] = tb_lines[pair_idx + 1].strip() frames.append(frame_dict) exc_line_offset = start_line + len(frames) * 2 try: exc_line = tb_lines[exc_line_offset] exc_type, _, exc_msg = exc_line.partition(':') except: exc_type, exc_msg = '', '' return cls(exc_type, 
exc_msg, frames) ParsedTB = ParsedException # legacy alias if __name__ == '__main__': import cStringIO builtin_exc_hook = sys.excepthook fix_print_exception() tbi_str = '' def test(): raise ValueError('yay fun') fake_stderr1 = cStringIO.StringIO() fake_stderr2 = cStringIO.StringIO() sys.stderr = fake_stderr1 try: test() except: _, _, exc_traceback = sys.exc_info() tbi = TracebackInfo.from_traceback(exc_traceback) exc_info = ExceptionInfo.from_exc_info(*sys.exc_info()) exc_info2 = ExceptionInfo.from_current() tbi_str = str(tbi) print_exception(*sys.exc_info(), file=fake_stderr2) new_exc_hook_res = fake_stderr2.getvalue() builtin_exc_hook(*sys.exc_info()) builtin_exc_hook_res = fake_stderr1.getvalue() finally: sys.stderr = sys.__stderr__ print() print('# Single frame:\n') print(tbi.frames[-1].tb_frame_str()) print('# Traceback info:\n') print(tbi_str) print('# Full except hook output:\n') print(new_exc_hook_res) assert new_exc_hook_res == builtin_exc_hook_res FAKE_TB_STR = u""" Traceback (most recent call last): File "example.py", line 2, in <module> plarp NameError: name 'plarp' is not defined """ parsed_tb = ParsedTB.from_string(FAKE_TB_STR) print(parsed_tb) def func1(): return func2() def func2(): x = 5 return func3() def func3(): return ContextualCallpoint.from_current(level=2) callpoint = func1() print(repr(callpoint)) assert 'func2' in repr(callpoint) def func_a(): a = 1 raise Exception('func_a exception') def func_b(): b = 2 return func_a() def func_c(): c = 3 return func_b() try: func_c() except: ctx_ei = ContextualExceptionInfo.from_current() print(ctx_ei.get_formatted()) import pdb;pdb.set_trace()
# STA experiments
#
# Copyright (C) 2010-2012 Huang Xin
#
# See LICENSE.TXT that came with this file.
import os
import sys
import time
import Pyro
import subprocess
from Experiment import ExperimentConfig,Experiment


class STAExperiment(Experiment):
    """Base class for spike-triggered-average (STA) experiments.

    Launches (and reuses) a Pyro-based STA analysis server in a child
    process, then drives it through a stimulate / collect / analyze
    cycle via :meth:`sta_analysis`.
    """

    # Child process running the Pyro STA server.  Stored on the class so
    # a single server instance is reused across consecutive experiments.
    STA_SERVER_PROCESS = None
    STA_SERVER_PORT = 6878

    def __init__(self, *args, **kwargs):
        super(STAExperiment, self).__init__(*args, **kwargs)
        self.pyro_source = ''   # file name of the Pyro server app script
        self.exp_param = ''     # short tag identifying the experiment type

    def sta_analysis(self, sta_type=None):
        """Run one full STA cycle and return the extracted results.

        The remote Pyro calls are asynchronous -- it takes several
        hundred milliseconds for the app to complete an action, so we
        sleep before operations that depend on earlier ones.  Every step
        is wrapped in its own try/except so a failure is logged but the
        remaining cleanup steps still run (deliberate best-effort
        semantics).  *sta_type* is currently unused and kept for
        interface compatibility.
        """
        try:
            self.sta_server = self.get_sta_server()
        except Exception as e:
            self.logger.error('Failed to get sta app. ' + str(e))
        try:
            self.logger.info('Starting sta data.')
            self.sta_server.start_data()
        except Exception as e:
            self.logger.error('Failed to start sta app. ' + str(e))
        try:
            self.logger.info('Setting up sta app before stimulation.')
            self.pre_stim_setup()
        except Exception as e:
            self.logger.error('Failed to setup sta app. ' + str(e))
        try:
            self.wait_for_stim()
        except Exception as e:
            self.logger.error('Failed to wait for stimulation. ' + str(e))
        try:
            self.logger.info('Setting up sta app after stimulation.')
            self.post_stim_setup()
        except Exception as e:
            self.logger.error('Failed to setup sta app. ' + str(e))
        try:
            data = self.sta_server.get_data()
        except Exception as e:
            self.logger.error('Failed to get data from sta. ' + str(e))
        try:
            self.log_sta_data(data)
        except Exception as e:
            self.logger.error('Failed to log sta data. ' + str(e))
        try:
            results = self.extract_results(data)
        except Exception as e:
            self.logger.error('Failed to extract sta data. ' + str(e))
        try:
            # wait for completion of preceding pyro operations
            time.sleep(3.0)
            self.logger.info('Stopping sta data.')
            self.sta_server.stop_data()
        except Exception as e:
            self.logger.error('Failed to stop sta app. ' + str(e))
        try:
            # wait for completion of preceding pyro operations
            time.sleep(3.0)
            self.logger.info('Closing sta server.')
            self.sta_server.close()
        except Exception as e:
            self.logger.error('Failed to close sta server. ' + str(e))
        try:
            # NOTE: `results` is unbound if extraction failed above; the
            # guard preserves the best-effort behavior of returning None
            # (implicitly) after logging instead of propagating.
            return results
        except Exception as e:
            self.logger.error('Failed to return sta result. ' + str(e))

    def log_sta_data(self, data):
        """Hook: persist raw analysis data.  Overridden by subclasses."""
        pass

    def get_sta_server(self):
        """Return a Pyro proxy for the STA server, (re)launching the
        server subprocess if none is running."""
        self.logger.info('Fetching sta server.')
        try:
            if STAExperiment.STA_SERVER_PROCESS.poll() is not None:
                self.logger.info('STA server is dead.')
                # bare raise jumps into the except branch below to relaunch
                raise
        except:
            # Either no server was ever launched (AttributeError on None)
            # or the previous one died -- start a fresh app process.
            self.logger.info('Creating new sta app.')
            sta_app_path = os.path.dirname(__file__) + os.path.sep + 'app' + os.path.sep + self.pyro_source
            args = [sys.executable, sta_app_path, str(STAExperiment.STA_SERVER_PORT)]
            STAExperiment.STA_SERVER_PROCESS = subprocess.Popen(args)
            # give the subprocess time to register its Pyro daemon
            time.sleep(3.0)
        else:
            # Fixed copy/paste defect: this module drives the STA app,
            # not the PSTH app.
            self.logger.info('STA app has been launched.')
        assert STAExperiment.STA_SERVER_PROCESS.poll() is None
        URI = "PYROLOC://localhost:%d/%s" % (STAExperiment.STA_SERVER_PORT, 'sta_server')
        Pyro.core.initClient()
        return Pyro.core.getProxyForURI(URI)

    def pre_stim_setup(self):
        """Hook: configure the server before stimulation starts."""
        self.sta_server.set_title(self.exp_name)

    def post_stim_setup(self):
        """Hook: configure the server after stimulation ends."""
        pass

    def extract_results(self, _data):
        raise RuntimeError("Must override extract_results method with exp implementation!")


class RFCMappingExp(STAExperiment):
    """Receptive-field center mapping using a sparse-noise stimulus."""

    def __init__(self, eye, params, postfix, *args, **kwargs):
        super(RFCMappingExp, self).__init__(*args, **kwargs)
        self.pyro_source = 'pyro_sta.py'
        self.stim_source = 'sparsenoise.py'
        self.exp_name = ExperimentConfig.CELLPREFIX + '-sparsenoise-' + postfix + '-' + eye
        self.exp_param = 'sn'
        self.eye = eye
        self.params = params
        self.assignments = ["eye = '%s'" % eye]

    def run(self):
        """Present sparse noise to one eye and return the fitted RF center."""
        super(RFCMappingExp, self).run()
        if self.eye == 'left':
            self.run_stimulus(left_params=self.params, assignments=self.assignments)
        elif self.eye == 'right':
            self.run_stimulus(right_params=self.params, assignments=self.assignments)
        position = self.sta_analysis()
        return position

    def pre_stim_setup(self):
        super(RFCMappingExp, self).pre_stim_setup()
        self.logger.info('Choose sparse noise data source.')
        self.sta_server.choose_source('sparse_noise')
        self.logger.info('Choose no image fitting.')
        self.sta_server.check_fitting('none')

    def post_stim_setup(self):
        """Export the raw STA chart, switch to Gabor fitting, export the
        fitted chart."""
        super(RFCMappingExp, self).post_stim_setup()
        try:
            chart_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '-raw.png'
            self.logger.info('Exporting raw chart to: ' + chart_file)
            self.sta_server.export_chart(chart_file)
            # wait for asynchronized pyro operation to complete
            time.sleep(0.5)
        except Exception as e:
            self.logger.error('Failed to export sta chart. ' + str(e))
        self.logger.info('Choose Gabor fitting.')
        self.sta_server.check_fitting('gabor')
        # wait for asynchronized pyro operation to complete
        time.sleep(2.0)
        try:
            chart_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '-fitted.png'
            self.logger.info('Exporting fitted chart to: ' + chart_file)
            self.sta_server.export_chart(chart_file)
            # wait for asynchronized pyro operation to complete
            time.sleep(0.5)
        except Exception as e:
            self.logger.error('Failed to export sta chart. ' + str(e))

    def extract_results(self, data):
        """Convert the server's RF-center grid index into degrees of
        visual angle; returns ``(x_deg, y_deg)`` or None when the data
        is incomplete."""
        if 'peak_time' not in data:
            self.logger.error('Failed to get peak time data from %s experiment.' % self.exp_name)
        else:
            self.logger.info('Get peak response at %.1fms after stimulus onset.' % data['peak_time'])
        if 'rf_center' not in data:
            self.logger.error('Failed to get RF center from %s experiment.' % self.exp_name)
        else:
            # Presumably a 32x32 grid with index 16 at the stimulus
            # center (inferred from the /32.0 and -16 constants; y axis
            # inverted) -- TODO confirm against the server app.
            orig_pos = self.params['xorigDeg'], self.params['yorigDeg']
            cell_width = self.params['widthDeg'] * 2.0 / 32.0
            rf_x_pos = orig_pos[0] + cell_width * (data['rf_center'][0] - 16)
            rf_y_pos = orig_pos[1] + cell_width * (16 - data['rf_center'][1])
            rf_pos = (float(rf_x_pos), float(rf_y_pos))
            self.logger.info('Original RF center: %.2f,%.2f' % orig_pos)
            self.logger.info('Get RF center from %s experiment: %.2f,%.2f' % (self.exp_name, rf_pos[0], rf_pos[1]))
            return rf_pos

    def log_sta_data(self, data):
        """Write peak time and RF-center index to a per-experiment CSV."""
        data_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '.csv'
        with open(data_file, 'w') as data_output:
            if 'peak_time' in data:
                data_output.writelines('peak time,%.1f\n' % data['peak_time'])
            if 'rf_center' in data:
                data_output.writelines('rf position index,%.2f,%.2f\n' % (data['rf_center'][0], data['rf_center'][1]))


class ParamMappingExp(STAExperiment):
    """Orientation / spatial-frequency parameter mapping experiment."""

    def __init__(self, eye, params, postfix, *args, **kwargs):
        super(ParamMappingExp, self).__init__(*args, **kwargs)
        self.pyro_source = 'pyro_sta.py'
        self.stim_source = 'param_mapping.py'
        self.exp_name = ExperimentConfig.CELLPREFIX + '-param-mapping-' + postfix + '-' + eye
        self.exp_param = 'pm'
        self.eye = eye
        self.params = params
        self.assignments = ["eye = '%s'" % eye]

    def run(self):
        """Run the parameter-mapping stimulus for one eye and return the
        optimal (ori, spf) pair from the analysis."""
        super(ParamMappingExp, self).run()
        if self.eye == 'left':
            self.run_stimulus(left_params=self.params, assignments=self.assignments)
        elif self.eye == 'right':
            self.run_stimulus(right_params=self.params, assignments=self.assignments)
        position = self.sta_analysis()
        return position

    def pre_stim_setup(self):
        super(ParamMappingExp, self).pre_stim_setup()
        self.logger.info('Choose param mapping data source.')
        self.sta_server.choose_source('param_mapping')
        self.logger.info('Choose no image fitting.')
        self.sta_server.check_fitting('none')

    def post_stim_setup(self):
        """Export the raw chart, switch to Gaussian fitting, export the
        fitted chart."""
        super(ParamMappingExp, self).post_stim_setup()
        try:
            chart_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '-raw.png'
            self.logger.info('Exporting raw chart to: ' + chart_file)
            self.sta_server.export_chart(chart_file)
            # wait for asynchronized pyro operation to complete
            time.sleep(0.5)
        except Exception as e:
            self.logger.error('Failed to export sta chart. ' + str(e))
        self.logger.info('Choose Gauss fitting.')
        self.sta_server.check_fitting('gauss')
        # wait for asynchronized pyro operation to complete
        time.sleep(2.0)
        try:
            chart_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '-fitted.png'
            self.logger.info('Exporting fitted chart to: ' + chart_file)
            self.sta_server.export_chart(chart_file)
            # wait for asynchronized pyro operation to complete
            time.sleep(0.5)
        except Exception as e:
            self.logger.error('Failed to export sta chart. ' + str(e))

    def extract_results(self, data):
        """Return ``(optimal_ori, optimal_spf)`` or None when the data
        is incomplete."""
        if 'peak_time' not in data:
            self.logger.error('Failed to get peak time data from %s experiment.' % self.exp_name)
        else:
            self.logger.info('Get peak response at %.1fms after stimulus onset.' % data['peak_time'])
        if 'optimal_ori' not in data or 'optimal_spf' not in data:
            self.logger.error('Failed to get optimal parameter from %s experiment.' % self.exp_name)
        else:
            self.logger.info('Get optimal ori: %.2f and optimal spf: %.2f from %s experiment.'
                             % (data['optimal_ori'], data['optimal_spf'], self.exp_name))
            return data['optimal_ori'], data['optimal_spf']

    def log_sta_data(self, data):
        """Write peak time and optimal parameters to a per-experiment CSV."""
        data_file = ExperimentConfig.CELLDIR + os.path.sep + self.exp_name + '.csv'
        with open(data_file, 'w') as data_output:
            if 'peak_time' in data:
                data_output.writelines('peak time,%.1f\n' % data['peak_time'])
            # Require BOTH keys before writing either row (the original
            # 'or' could raise KeyError when only one was present), and
            # write the spf value in the spf row -- the original had a
            # copy/paste bug that wrote optimal_ori twice.
            if 'optimal_ori' in data and 'optimal_spf' in data:
                data_output.writelines('optimal ori,%.2f\n' % data['optimal_ori'])
                data_output.writelines('optimal spf,%.2f\n' % data['optimal_spf'])
"""Support for tasks, coroutines and the scheduler.""" __all__ = ( 'Task', 'create_task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', 'current_task', 'all_tasks', '_register_task', '_unregister_task', '_enter_task', '_leave_task', ) import concurrent.futures import contextvars import functools import inspect import itertools import types import warnings import weakref from . import base_tasks from . import coroutines from . import events from . import exceptions from . import futures from .coroutines import _is_coroutine # Helper to generate new task names # This uses itertools.count() instead of a "+= 1" operation because the latter # is not thread safe. See bpo-11866 for a longer explanation. _task_name_counter = itertools.count(1).__next__ def current_task(loop=None): """Return a currently executed task.""" if loop is None: loop = events.get_running_loop() return _current_tasks.get(loop) def all_tasks(loop=None): """Return a set of all tasks for the loop.""" if loop is None: loop = events.get_running_loop() # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another # thread while we do so. Therefore we cast it to list prior to filtering. The list # cast itself requires iteration, so we repeat it several times ignoring # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for # details. i = 0 while True: try: tasks = list(_all_tasks) except RuntimeError: i += 1 if i >= 1000: raise else: break return {t for t in tasks if futures._get_loop(t) is loop and not t.done()} def _all_tasks_compat(loop=None): # Different from "all_task()" by returning *all* Tasks, including # the completed ones. Used to implement deprecated "Tasks.all_task()" # method. 
if loop is None: loop = events.get_event_loop() # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another # thread while we do so. Therefore we cast it to list prior to filtering. The list # cast itself requires iteration, so we repeat it several times ignoring # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for # details. i = 0 while True: try: tasks = list(_all_tasks) except RuntimeError: i += 1 if i >= 1000: raise else: break return {t for t in tasks if futures._get_loop(t) is loop} def _set_task_name(task, name): if name is not None: try: set_name = task.set_name except AttributeError: pass else: set_name(name) class Task(futures._PyFuture): # Inherit Python Task implementation # from a Python Future implementation. """A coroutine wrapped in a Future.""" # An important invariant maintained while a Task not done: # # - Either _fut_waiter is None, and _step() is scheduled; # - or _fut_waiter is some Future, and _step() is *not* scheduled. # # The only transition from the latter to the former is through # _wakeup(). When _fut_waiter is not None, one of its callbacks # must be _wakeup(). # If False, don't log a message if the task is destroyed whereas its # status is still pending _log_destroy_pending = True @classmethod def current_task(cls, loop=None): """Return the currently running task in an event loop or None. By default the current task for the current event loop is returned. None is returned when called not in the context of a Task. """ warnings.warn("Task.current_task() is deprecated since Python 3.7, " "use asyncio.current_task() instead", DeprecationWarning, stacklevel=2) if loop is None: loop = events.get_event_loop() return current_task(loop) @classmethod def all_tasks(cls, loop=None): """Return a set of all tasks for an event loop. By default all tasks for the current event loop are returned. 
""" warnings.warn("Task.all_tasks() is deprecated since Python 3.7, " "use asyncio.all_tasks() instead", DeprecationWarning, stacklevel=2) return _all_tasks_compat(loop) def __init__(self, coro, *, loop=None, name=None): super().__init__(loop=loop) if self._source_traceback: del self._source_traceback[-1] if not coroutines.iscoroutine(coro): # raise after Future.__init__(), attrs are required for __del__ # prevent logging for pending task in __del__ self._log_destroy_pending = False raise TypeError(f"a coroutine was expected, got {coro!r}") if name is None: self._name = f'Task-{_task_name_counter()}' else: self._name = str(name) self._must_cancel = False self._fut_waiter = None self._coro = coro self._context = contextvars.copy_context() self._loop.call_soon(self.__step, context=self._context) _register_task(self) def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: context = { 'task': self, 'message': 'Task was destroyed but it is pending!', } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) super().__del__() def _repr_info(self): return base_tasks._task_repr_info(self) def get_coro(self): return self._coro def get_name(self): return self._name def set_name(self, value): self._name = str(value) def set_result(self, result): raise RuntimeError('Task does not support set_result operation') def set_exception(self, exception): raise RuntimeError('Task does not support set_exception operation') def get_stack(self, *, limit=None): """Return the list of stack frames for this task's coroutine. If the coroutine is not done, this returns the stack where it is suspended. If the coroutine has completed successfully or was cancelled, this returns an empty list. If the coroutine was terminated by an exception, this returns the list of traceback frames. The frames are always ordered from oldest to newest. 
The optional limit gives the maximum number of frames to return; by default all available frames are returned. Its meaning differs depending on whether a stack or a traceback is returned: the newest frames of a stack are returned, but the oldest frames of a traceback are returned. (This matches the behavior of the traceback module.) For reasons beyond our control, only one stack frame is returned for a suspended coroutine. """ return base_tasks._task_get_stack(self, limit) def print_stack(self, *, limit=None, file=None): """Print the stack or traceback for this task's coroutine. This produces output similar to that of the traceback module, for the frames retrieved by get_stack(). The limit argument is passed to get_stack(). The file argument is an I/O stream to which the output is written; by default output is written to sys.stderr. """ return base_tasks._task_print_stack(self, limit, file) def cancel(self): """Request that this task cancel itself. This arranges for a CancelledError to be thrown into the wrapped coroutine on the next cycle through the event loop. The coroutine then has a chance to clean up or even deny the request using try/except/finally. Unlike Future.cancel, this does not guarantee that the task will be cancelled: the exception might be caught and acted upon, delaying cancellation of the task or preventing cancellation completely. The task may also return a value or raise a different exception. Immediately after this method is called, Task.cancelled() will not return True (unless the task was already cancelled). A task will be marked as cancelled when the wrapped coroutine terminates with a CancelledError exception (even if cancel() was not called). """ self._log_traceback = False if self.done(): return False if self._fut_waiter is not None: if self._fut_waiter.cancel(): # Leave self._fut_waiter; it may be a Task that # catches and ignores the cancellation so we may have # to cancel it again later. 
return True # It must be the case that self.__step is already scheduled. self._must_cancel = True return True def __step(self, exc=None): if self.done(): raise exceptions.InvalidStateError( f'_step(): already done: {self!r}, {exc!r}') if self._must_cancel: if not isinstance(exc, exceptions.CancelledError): exc = exceptions.CancelledError() self._must_cancel = False coro = self._coro self._fut_waiter = None _enter_task(self._loop, self) # Call either coro.throw(exc) or coro.send(None). try: if exc is None: # We use the `send` method directly, because coroutines # don't have `__iter__` and `__next__` methods. result = coro.send(None) else: result = coro.throw(exc) except StopIteration as exc: if self._must_cancel: # Task is cancelled right before coro stops. self._must_cancel = False super().cancel() else: super().set_result(exc.value) except exceptions.CancelledError: super().cancel() # I.e., Future.cancel(self). except (KeyboardInterrupt, SystemExit) as exc: super().set_exception(exc) raise except BaseException as exc: super().set_exception(exc) else: blocking = getattr(result, '_asyncio_future_blocking', None) if blocking is not None: # Yielded Future must come from Future.__iter__(). 
if futures._get_loop(result) is not self._loop: new_exc = RuntimeError( f'Task {self!r} got Future ' f'{result!r} attached to a different loop') self._loop.call_soon( self.__step, new_exc, context=self._context) elif blocking: if result is self: new_exc = RuntimeError( f'Task cannot await on itself: {self!r}') self._loop.call_soon( self.__step, new_exc, context=self._context) else: result._asyncio_future_blocking = False result.add_done_callback( self.__wakeup, context=self._context) self._fut_waiter = result if self._must_cancel: if self._fut_waiter.cancel(): self._must_cancel = False else: new_exc = RuntimeError( f'yield was used instead of yield from ' f'in task {self!r} with {result!r}') self._loop.call_soon( self.__step, new_exc, context=self._context) elif result is None: # Bare yield relinquishes control for one event loop iteration. self._loop.call_soon(self.__step, context=self._context) elif inspect.isgenerator(result): # Yielding a generator is just wrong. new_exc = RuntimeError( f'yield was used instead of yield from for ' f'generator in task {self!r} with {result!r}') self._loop.call_soon( self.__step, new_exc, context=self._context) else: # Yielding something else is an error. new_exc = RuntimeError(f'Task got bad yield: {result!r}') self._loop.call_soon( self.__step, new_exc, context=self._context) finally: _leave_task(self._loop, self) self = None # Needed to break cycles when an exception occurs. def __wakeup(self, future): try: future.result() except BaseException as exc: # This may also be a cancellation. self.__step(exc) else: # Don't pass the value of `future.result()` explicitly, # as `Future.__iter__` and `Future.__await__` don't need it. # If we call `_step(value, None)` instead of `_step()`, # Python eval loop would use `.send(value)` method call, # instead of `__next__()`, which is slower for futures # that return non-generator iterators from their `__iter__`. self.__step() self = None # Needed to break cycles when an exception occurs. 
_PyTask = Task

try:
    import _asyncio
except ImportError:
    pass
else:
    # _CTask is needed for tests.
    Task = _CTask = _asyncio.Task


def create_task(coro, *, name=None):
    """Schedule the execution of a coroutine object in a spawn task.

    Return a Task object.
    """
    loop = events.get_running_loop()
    task = loop.create_task(coro)
    _set_task_name(task, name)
    return task


# wait() and as_completed() similar to those in PEP 3148.

FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED


async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the Futures and coroutines given by fs to complete.

    The sequence futures must not be empty.

    Coroutines will be wrapped in Tasks.

    Returns two sets of Future: (done, pending).

    Usage:

        done, pending = await asyncio.wait(fs)

    Note: This does not raise TimeoutError! Futures that aren't done
    when the timeout occurs are returned in the second set.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
    if not fs:
        raise ValueError('Set of coroutines/Futures is empty.')
    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
        raise ValueError(f'Invalid return_when value: {return_when}')

    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2)

    # Deduplicate first (set(fs)) so the same awaitable passed twice only
    # yields one Task/Future in the result sets.
    fs = {ensure_future(f, loop=loop) for f in set(fs)}

    return await _wait(fs, timeout, return_when, loop)


def _release_waiter(waiter, *args):
    """Wake up whoever awaits *waiter*.

    Used both as a timeout callback and as a future done-callback; the
    extra positional args (e.g. the finished future) are ignored.
    """
    if not waiter.done():
        waiter.set_result(None)


async def wait_for(fut, timeout, *, loop=None):
    """Wait for the single Future or coroutine to complete, with timeout.

    Coroutine will be wrapped in Task.

    Returns result of the Future or coroutine.  When a timeout occurs,
    it cancels the task and raises TimeoutError.  To avoid the task
    cancellation, wrap it in shield().

    If the wait is cancelled, the task is also cancelled.

    This function is a coroutine.
    """
    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2)

    if timeout is None:
        return await fut

    if timeout <= 0:
        # Non-positive timeout: give the future one chance to already be
        # done, otherwise cancel it immediately and time out.
        fut = ensure_future(fut, loop=loop)

        if fut.done():
            return fut.result()

        fut.cancel()
        raise exceptions.TimeoutError()

    waiter = loop.create_future()
    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    cb = functools.partial(_release_waiter, waiter)

    fut = ensure_future(fut, loop=loop)
    fut.add_done_callback(cb)

    try:
        # wait until the future completes or the timeout
        try:
            await waiter
        except exceptions.CancelledError:
            # The outer wait was cancelled: propagate cancellation to the
            # inner future as well.
            fut.remove_done_callback(cb)
            fut.cancel()
            raise

        if fut.done():
            return fut.result()
        else:
            fut.remove_done_callback(cb)
            # We must ensure that the task is not running
            # after wait_for() returns.
            # See https://bugs.python.org/issue32751
            await _cancel_and_wait(fut, loop=loop)
            raise exceptions.TimeoutError()
    finally:
        timeout_handle.cancel()


async def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait().

    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)

    def _on_completion(f):
        # Wake the waiter as soon as the return_when condition is met.
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
                return_when == FIRST_COMPLETED or
                return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                    f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)

    for f in fs:
        f.add_done_callback(_on_completion)

    try:
        await waiter
    finally:
        if timeout_handle is not None:
            timeout_handle.cancel()
        # Always detach our callbacks, even if the wait itself was
        # cancelled, so the futures don't keep references to this frame.
        for f in fs:
            f.remove_done_callback(_on_completion)

    done, pending = set(), set()
    for f in fs:
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending


async def _cancel_and_wait(fut, loop):
    """Cancel the *fut* future or task and wait until it completes."""
    waiter = loop.create_future()
    cb = functools.partial(_release_waiter, waiter)
    fut.add_done_callback(cb)

    try:
        fut.cancel()
        # We cannot wait on *fut* directly to make
        # sure _cancel_and_wait itself is reliably cancellable.
        await waiter
    finally:
        fut.remove_done_callback(cb)


# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
    """Return an iterator whose values are coroutines.

    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.

    This differs from PEP 3148; the proper way to use this is:

        for f in as_completed(fs):
            result = await f  # The 'await' may raise.
            # Use result.

    If a timeout is specified, the 'await' will raise
    TimeoutError when the timeout occurs before all Futures are done.

    Note: The futures 'f' are not necessarily members of fs.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")

    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue(loop=loop)

    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2)
    todo = {ensure_future(f, loop=loop) for f in set(fs)}
    timeout_handle = None

    def _on_timeout():
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.

    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            timeout_handle.cancel()

    async def _wait_for_one():
        f = await done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise exceptions.TimeoutError
        return f.result()  # May raise f.exception().

    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()


@types.coroutine
def __sleep0():
    """Skip one event loop run cycle.

    This is a private helper for 'asyncio.sleep()', used
    when the 'delay' is set to 0.  It uses a bare 'yield'
    expression (which Task.__step knows how to handle)
    instead of creating a Future object.
    """
    yield


async def sleep(delay, result=None, *, loop=None):
    """Coroutine that completes after a given time (in seconds)."""
    if delay <= 0:
        await __sleep0()
        return result

    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2)

    future = loop.create_future()
    h = loop.call_later(delay,
                        futures._set_result_unless_cancelled,
                        future, result)
    try:
        return await future
    finally:
        # Cancel the timer if the sleep itself was cancelled early.
        h.cancel()


def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    If the argument is a Future, it is returned directly.
    """
    if coroutines.iscoroutine(coro_or_future):
        if loop is None:
            loop = events.get_event_loop()
        task = loop.create_task(coro_or_future)
        if task._source_traceback:
            # Drop the frame for ensure_future() itself from the
            # captured creation traceback.
            del task._source_traceback[-1]
        return task
    elif futures.isfuture(coro_or_future):
        if loop is not None and loop is not futures._get_loop(coro_or_future):
            raise ValueError('The future belongs to a different loop than '
                             'the one specified as the loop argument')
        return coro_or_future
    elif inspect.isawaitable(coro_or_future):
        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
    else:
        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
                        'required')


@types.coroutine
def _wrap_awaitable(awaitable):
    """Helper for asyncio.ensure_future().

    Wraps awaitable (an object with __await__) into a coroutine
    that will later be wrapped in a Task by ensure_future().
    """
    return (yield from awaitable.__await__())

_wrap_awaitable._is_coroutine = _is_coroutine


class _GatheringFuture(futures.Future):
    """Helper for gather().

    This overrides cancel() to cancel all the children and act more
    like Task.cancel(), which doesn't immediately mark itself as
    cancelled.
    """

    def __init__(self, children, *, loop=None):
        super().__init__(loop=loop)
        self._children = children
        self._cancel_requested = False

    def cancel(self):
        if self.done():
            return False
        ret = False
        for child in self._children:
            if child.cancel():
                ret = True
        if ret:
            # If any child tasks were actually cancelled, we should
            # propagate the cancellation request regardless of
            # *return_exceptions* argument.  See issue 32684.
            self._cancel_requested = True
        return ret


def gather(*coros_or_futures, loop=None, return_exceptions=False):
    """Return a future aggregating results from the given coroutines/futures.

    Coroutines will be wrapped in a future and scheduled in the event
    loop. They will not necessarily be scheduled in the same order as
    passed in.

    All futures must share the same event loop.  If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival).  If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.

    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled.  If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case.  (This is to
    prevent the cancellation of one child to cause other children to
    be cancelled.)
    """
    if not coros_or_futures:
        # Nothing to gather: return an already-completed future holding
        # an empty result list.
        if loop is None:
            loop = events.get_event_loop()
        else:
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning,
                          stacklevel=2)
        outer = loop.create_future()
        outer.set_result([])
        return outer

    def _done_callback(fut):
        nonlocal nfinished
        nfinished += 1

        if outer.done():
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return

        if not return_exceptions:
            if fut.cancelled():
                # Check if 'fut' is cancelled first, as
                # 'fut.exception()' will *raise* a CancelledError
                # instead of returning it.
                exc = exceptions.CancelledError()
                outer.set_exception(exc)
                return
            else:
                exc = fut.exception()
                if exc is not None:
                    outer.set_exception(exc)
                    return

        if nfinished == nfuts:
            # All futures are done; create a list of results
            # and set it to the 'outer' future.
            results = []

            for fut in children:
                if fut.cancelled():
                    # Check if 'fut' is cancelled first, as
                    # 'fut.exception()' will *raise* a CancelledError
                    # instead of returning it.
                    res = exceptions.CancelledError()
                else:
                    res = fut.exception()
                    if res is None:
                        res = fut.result()
                results.append(res)

            if outer._cancel_requested:
                # If gather is being cancelled we must propagate the
                # cancellation regardless of *return_exceptions* argument.
                # See issue 32684.
                outer.set_exception(exceptions.CancelledError())
            else:
                outer.set_result(results)

    arg_to_fut = {}
    children = []
    nfuts = 0
    nfinished = 0
    for arg in coros_or_futures:
        if arg not in arg_to_fut:
            fut = ensure_future(arg, loop=loop)
            if loop is None:
                loop = futures._get_loop(fut)
            if fut is not arg:
                # 'arg' was not a Future, therefore, 'fut' is a new
                # Future created specifically for 'arg'.  Since the caller
                # can't control it, disable the "destroy pending task"
                # warning.
                fut._log_destroy_pending = False

            nfuts += 1
            arg_to_fut[arg] = fut
            fut.add_done_callback(_done_callback)

        else:
            # There's a duplicate Future object in coros_or_futures.
            fut = arg_to_fut[arg]

        children.append(fut)

    outer = _GatheringFuture(children, loop=loop)
    return outer


def shield(arg, *, loop=None):
    """Wait for a future, shielding it from cancellation.

    The statement

        res = await shield(something())

    is exactly equivalent to the statement

        res = await something()

    *except* that if the coroutine containing it is cancelled, the
    task running in something() is not cancelled.  From the POV of
    something(), the cancellation did not happen.  But its caller is
    still cancelled, so the yield-from expression still raises
    CancelledError.  Note: If something() is cancelled by other means
    this will still cancel shield().

    If you want to completely ignore cancellation (not recommended)
    you can combine shield() with a try/except clause, as follows:

        try:
            res = await shield(something())
        except CancelledError:
            res = None
    """
    if loop is not None:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2)
    inner = ensure_future(arg, loop=loop)
    if inner.done():
        # Shortcut.
        return inner
    loop = futures._get_loop(inner)
    outer = loop.create_future()

    def _inner_done_callback(inner):
        if outer.cancelled():
            if not inner.cancelled():
                # Mark inner's result as retrieved.
                inner.exception()
            return

        if inner.cancelled():
            outer.cancel()
        else:
            exc = inner.exception()
            if exc is not None:
                outer.set_exception(exc)
            else:
                outer.set_result(inner.result())

    def _outer_done_callback(outer):
        # If the outer future is done (cancelled) before the inner one,
        # stop forwarding the inner result.
        if not inner.done():
            inner.remove_done_callback(_inner_done_callback)

    inner.add_done_callback(_inner_done_callback)
    outer.add_done_callback(_outer_done_callback)
    return outer


def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    Return a concurrent.futures.Future to access the result.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()

    def callback():
        try:
            futures._chain_future(ensure_future(coro, loop=loop), future)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback)
    return future


# WeakSet containing all alive tasks.
_all_tasks = weakref.WeakSet()

# Dictionary containing tasks that are currently active in
# all running event loops.  `{EventLoop: Task}`
_current_tasks = {}


def _register_task(task):
    """Register a new task in asyncio as executed by loop."""
    _all_tasks.add(task)


def _enter_task(loop, task):
    """Mark *task* as the task currently executing on *loop*."""
    current_task = _current_tasks.get(loop)
    if current_task is not None:
        raise RuntimeError(f"Cannot enter into task {task!r} while another "
                           f"task {current_task!r} is being executed.")
    _current_tasks[loop] = task


def _leave_task(loop, task):
    """Clear *task* as the task currently executing on *loop*."""
    current_task = _current_tasks.get(loop)
    if current_task is not task:
        raise RuntimeError(f"Leaving task {task!r} does not match "
                           f"the current task {current_task!r}.")
    del _current_tasks[loop]


def _unregister_task(task):
    """Unregister a task."""
    _all_tasks.discard(task)


_py_register_task = _register_task
_py_unregister_task = _unregister_task
_py_enter_task = _enter_task
_py_leave_task = _leave_task


try:
    from _asyncio import (_register_task, _unregister_task,
                          _enter_task, _leave_task,
                          _all_tasks, _current_tasks)
except ImportError:
    pass
else:
    # Prefer the C-accelerated task registry when available; keep the
    # C versions addressable for tests.
    _c_register_task = _register_task
    _c_unregister_task = _unregister_task
    _c_enter_task = _enter_task
    _c_leave_task = _leave_task
#!/usr/bin/env python # Generate Patch Order Sequence # # python PatchOrderGenerator.py <path_to_patches_dir> # # This script reads all the Patch files(*.KID/*.KIDs) # and info file (*.TXT(s)/*,txt) under the input directory recursively # and generate the Patch order via patch dependency # #--------------------------------------------------------------------------- # Copyright 2012-2019 The Open Source Electronic Health Record Alliance # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #--------------------------------------------------------------------------- from __future__ import print_function from past.builtins import cmp from builtins import str from builtins import range from builtins import object import os import sys import re import glob import csv from datetime import datetime # append this module in the sys.path at run time curDir = os.path.dirname(os.path.abspath(__file__)) if curDir not in sys.path: sys.path.append(curDir) PATCH_IGNORED_DIRS = ('Packages', 'Uncategorized', 'MultiBuilds') VALID_CSV_ORDER_FILE_FIELDS = [ 'INSTALLED', 'VERIFY_DT', 'STATUS', 'SEQ#', 'LABELED_AS', 'CATEGORY', 'PRODUCT_NAME' ] """ enums """ KIDS_BUILD_FILE_TYPE_KIDS = 0 KIDS_BUILD_FILE_TYPE_HEADER = 1 KIDS_BUILD_FILE_TYPE_SHA1 = 2 from LoggerManager import logger, initConsoleLogging from KIDSBuildParser import KIDSBuildParser from PatchInfoParser import PatchInfoParser from PatchInfoParser import convertToInstallName from PatchInfoParser import dirNameToInstallName, PatchInfo from 
PatchInfoParser import setPatchInfoFromInstallName from ConvertToExternalData import readSha1SumFromSha1File from ConvertToExternalData import isValidKIDSBuildSuffix from ConvertToExternalData import isValidKIDSBuildHeaderSuffix from ConvertToExternalData import isValidKIDSBuildSha1Suffix from ConvertToExternalData import isValidPatchInfoSuffix from ConvertToExternalData import isValidPatchInfoSha1Suffix from ConvertToExternalData import isValidCSVSuffix from ConvertToExternalData import isValidPatchRelatedFiles from ConvertToExternalData import isValidGlobalFileSuffix from ConvertToExternalData import isValidGlobalSha1Suffix from ConvertToExternalData import isValidPythonSuffix from KIDSAssociatedFilesMapping import getAssociatedInstallName class CycleException(Exception): pass """ This class will generate a Patch order based on input patch directory """ class PatchOrderGenerator(object): def __init__(self): self._kidsInstallNameDict = dict() # the install name -> kids files self._kidsDepBuildDict = dict() # install name -> [dependency build] self._multiBuildDict = dict() # kids file -> [install names] self._kidsBuildFileDict = dict() # all the kids files name->[path,sha1path] self._kidsInstallNameSha1Dict = dict() # install name -> sha1 self._kidsInfoFileList = [] # all kids info file under vista patches dir self._csvOrderFileList = [] # all csv order file under vista patches dir self._globalFilesSet = set() # all global file under vista patches dir self._patchInfoDict = dict() #install name -> patchInfo self._missKidsBuildDict = dict() # install name -> patchInfo without Kids self._missKidsInfoSet = set() # kids build without info file self._patchOrderCSVDict = dict() # csv File -> list of patches in order self._patchOrder = [] # list of install name in order self._informationalKidsSet = set() # a list of kids that are informational self._notInstalledKidsSet = set() # a list of kids that are not installed self._patchDependencyDict = dict() # the dependency dict 
of all patches self._invalidInfoFileSet = set() # invalid txt file self._csvDepDict = dict() # installName => installName based on csvFile self._installNameSeqMap = dict() # installName => seqNo, patch in order self._pythonScriptList = [] # all the python script files def generatePatchOrder(self, patchReposDir, installName=None): return self.generatePatchOrderTopologic(patchReposDir, installName) """ generate a patch seqence order by topologic sort """ def generatePatchOrderTopologic(self, patchDir, installName=None): self.analyzeVistAPatchDir(patchDir) if installName: if installName not in self._patchInfoDict: raise Exception("Could not find patch for %s" % installName) self.__updatePatchDependency__((installName is None)) self.__generatePatchDependencyGraph__() try: self.__topologicSort__(installName) except CycleException as e: errorMessage = "Failed to sort patches: %s" % e logger.error(errorMessage) return [] logger.info("After topologic sort %d" % len(self._patchOrder)) return self._patchOrder """ analyze VistA patch Dir generate data structure """ def analyzeVistAPatchDir(self, patchDir): assert os.path.exists(patchDir) self.__getAllKIDSBuildInfoAndOtherFileList__(patchDir) self.__parseAllKIDSBuildFilesList__() self.__parseAllKIDSInfoFilesList__() self.__generateMissKIDSInfoSet__() self.__addMissKIDSInfoPatch__() self.__handlePatchAssociatedFiles__() self.__updateCustomInstaller__() self.__updateMultiBuildPatchInfo__() self.__getPatchOrderDependencyByCSVFiles__() """ Some getter function to return result """ """ @ return all patchInfoDict install => patchInfo""" def getPatchInfoDict(self): return self._patchInfoDict """ @return all invalid KIDS info files set""" def getInvalidInfoFiles(self): return self._invalidInfoFileSet """ @return Info file without a KIDS build as install => patchInfo""" def getNoKidsBuildInfoDict(self): return self._missKidsBuildDict """ print the final order list """ def printPatchOrderList(self): printPatchOrderList(self._patchOrder) 
def __addKidsBuildFileToDict__(self, fileName, absPath, fileType): if fileName not in self._kidsBuildFileDict: self._kidsBuildFileDict[fileName] = [None, None] if ( fileType == KIDS_BUILD_FILE_TYPE_KIDS or fileType == KIDS_BUILD_FILE_TYPE_HEADER ): filePath = self._kidsBuildFileDict[fileName][0] if self._kidsBuildFileDict[fileName][0] != None: logger.error("Duplicated KIDS file path %s : %s" % (filePath, absPath)) else: self._kidsBuildFileDict[fileName][0] = absPath return if fileType == KIDS_BUILD_FILE_TYPE_SHA1: sha1File = self._kidsBuildFileDict[fileName][1] if self._kidsBuildFileDict[fileName][1] != None: logger.error("Duplicated KIDS Sha1 File %s : %s" % (sha1File, absPath)) else: self._kidsBuildFileDict[fileName][1] = absPath """ walk through the dir to find all KIDS build and info file and others """ def __getAllKIDSBuildInfoAndOtherFileList__(self, patchDir): assert os.path.exists(patchDir) absPatchDir = os.path.abspath(patchDir) for (root, dirs, files) in os.walk(absPatchDir): lastDir = os.path.split(root)[-1] for fileName in files: absFilename = os.path.join(root, fileName) if not isValidPatchRelatedFiles(absFilename, True): continue """ Handle KIDS build files """ if isValidKIDSBuildSuffix(fileName): logger.debug("Adding %s KIDS file to dict" % absFilename) self.__addKidsBuildFileToDict__(fileName, absFilename, KIDS_BUILD_FILE_TYPE_KIDS) continue """ Handle KIDS build HEADER files """ if isValidKIDSBuildHeaderSuffix(fileName): logger.debug("Adding %s KIDS header to dict" % absFilename) kidsFileName = fileName[0:fileName.rfind('.')] self.__addKidsBuildFileToDict__(kidsFileName, absFilename, KIDS_BUILD_FILE_TYPE_HEADER) continue """ Handle KIDS build Sha1 files """ if isValidKIDSBuildSha1Suffix(fileName): logger.debug("Adding %s KIDS info to dict" % absFilename) kidsFileName = fileName[0:fileName.rfind('.')] self.__addKidsBuildFileToDict__(kidsFileName, absFilename, KIDS_BUILD_FILE_TYPE_SHA1) continue """ Handle KIDS Info/Sha1 files """ if ( 
isValidPatchInfoSuffix(fileName) or isValidPatchInfoSha1Suffix(fileName) ): self._kidsInfoFileList.append(absFilename) continue """ Handle Global/Sha1 Files """ if ( isValidGlobalFileSuffix(fileName) or isValidGlobalSha1Suffix(fileName) ): logger.debug("Adding %s Global files to list" % absFilename) self._globalFilesSet.add(absFilename) continue """ handle all csv files """ if isValidCSVSuffix(fileName): if isValidOrderCSVFile(absFilename): self._csvOrderFileList.append(absFilename) continue """ Handle .py files """ if isValidPythonSuffix(fileName): logger.debug("Adding %s python script to list" % absFilename) self._pythonScriptList.append(absFilename) continue logger.info("Total # of KIDS Builds are %d" % len(self._kidsBuildFileDict)) logger.info("Total # of KIDS Info are %d" % len(self._kidsInfoFileList)) logger.info("Total # of Global files are %d" % len(self._globalFilesSet)) logger.info("Total # of Python files are %d" % len(self._pythonScriptList)) logger.info("Total # of CSV files are %d" % len(self._csvOrderFileList)) """ parse all the KIDS files, update kidsInstallNameDict, multibuildDict """ def __parseAllKIDSBuildFilesList__(self): for basename in self._kidsBuildFileDict: kidsFile, sha1Path = self._kidsBuildFileDict[basename] if kidsFile == None: logger.error("No KIDS file available for name %s" % basename) continue installNameList, seqNo, kidsBuilds = None, None, None if isValidKIDSBuildHeaderSuffix(kidsFile): from KIDSBuildParser import loadMetaDataFromJSON #continue installNameList, seqNo, kidsBuilds = loadMetaDataFromJSON(kidsFile) else: kidsParser = KIDSBuildParser(None) kidsParser.unregisterSectionHandler(KIDSBuildParser.ROUTINE_SECTION) kidsParser.parseKIDSBuild(kidsFile) installNameList = kidsParser.installNameList logger.debug("install name list is %s" % installNameList) seqNo = kidsParser.seqNo kidsBuilds = kidsParser.kidsBuilds if len(installNameList) > 1: if not self._multiBuildDict.get(kidsFile): self._multiBuildDict[kidsFile] = 
installNameList else: assert self._multiBuildDict[kidsFile] == installNameList elif seqNo: if installNameList[0] not in self._installNameSeqMap: self._installNameSeqMap[installNameList[0]] = seqNo else: logger.error("Duplicated KIDS build file %s" % kidsFile) for installName in installNameList: if installName in self._kidsInstallNameDict: logger.warn("%s is already in the dict %s" % (installName, kidsFile)) logger.debug("Added installName %s, file %s" % (installName, kidsFile)) self._kidsInstallNameDict[installName] = os.path.normpath(kidsFile) """ handle KIDS sha1 file Path """ if sha1Path: if installName in self._kidsInstallNameSha1Dict: logger.warn("%s is already in the dict %s" % (installName, sha1Path)) self._kidsInstallNameSha1Dict[installName] = sha1Path """ update kids dependency """ if installName in self._kidsDepBuildDict: logger.warn("%s already has the dep map %s" % (installName, self._kidsDepBuildDict[installName])) if kidsBuilds: for kidsBuild in kidsBuilds: if kidsBuild.installName == installName: depList = kidsBuild.dependencyList if depList: self._kidsDepBuildDict[installName] = set([x[0] for x in depList]) logger.info("%s: %s" % (installName, self._kidsDepBuildDict[installName])) logger.debug("%s" % sorted(self._kidsInstallNameDict.keys())) logger.info("Total # of install name %d" % len(self._kidsInstallNameDict)) """ parse all the KIDS info files, update patchInfoDict, missKidsBuildDict""" def __parseAllKIDSInfoFilesList__(self): kidsParser = PatchInfoParser() for kidsInfoFile in self._kidsInfoFileList: patchInfo = kidsParser.parseKIDSInfoFile(kidsInfoFile) if not patchInfo: logger.debug("invalid kids info file %s" % kidsInfoFile) self._invalidInfoFileSet.add(kidsInfoFile) continue """ only add to list for info that is related to a Patch""" installName = patchInfo.installName if installName not in self._kidsInstallNameDict: logger.warn("no KIDS file related to %s (%s)" % (installName, kidsInfoFile)) if installName in self._missKidsBuildDict: 
logger.warn("duplicated kids install name") if kidsInfoFile != self._missKidsBuildDict[installName].kidsInfoPath: logger.warn("duplicated kids info file name %s" % kidsInfoFile) continue self._missKidsBuildDict[installName] = patchInfo continue patchInfo.kidsFilePath = self._kidsInstallNameDict[installName] assert patchInfo.kidsFilePath """ update PatchInfo kidsSha1 and kidsSha1Path """ if installName in self._kidsInstallNameSha1Dict: sha1Path = self._kidsInstallNameSha1Dict[installName] patchInfo.kidsSha1Path = sha1Path patchInfo.kidsSha1 = readSha1SumFromSha1File(sha1Path) if installName in self._patchInfoDict: logger.warn("duplicated installName %s, %s, %s" % (installName, self._patchInfoDict[installName], kidsInfoFile)) """ merge the dependency if needed, also put extra dependency into optional set """ if installName in self._kidsDepBuildDict: infoDepSet = set() kidsDepSet = set() if patchInfo.depKIDSBuild: infoDepSet = patchInfo.depKIDSBuild if self._kidsDepBuildDict[installName]: kidsDepSet = self._kidsDepBuildDict[installName] diffSet = kidsDepSet ^ infoDepSet if len(diffSet): logger.info("Merging kids dependencies %s" % installName) logger.debug("kids build set is %s" % kidsDepSet) logger.debug("info build set is %s" % infoDepSet) logger.warning("difference set: %s" % diffSet) patchInfo.depKIDSBuild = infoDepSet | kidsDepSet patchInfo.optionalDepSet = infoDepSet - kidsDepSet else: patchInfo.depKIDSBuild = infoDepSet self._patchInfoDict[installName] = patchInfo """ update multiBuild KIDS patch info""" def __updateMultiBuildPatchInfo__(self): patchList = self._patchInfoDict for installList in self._multiBuildDict.values(): for installName in installList: patchInfo = patchList[installName] patchInfo.isMultiBuilds = True patchInfo.multiBuildsList = installList """ update multiBuild KIDS files dependencies """ def __updateMultiBuildDependencies__(self): patchList = self._patchInfoDict for installList in self._multiBuildDict.values(): logger.info("Multi-Buids 
KIDS install List: %s" % (installList)) firstPatch = patchList[installList[0]] firstPatch.otherKidsInfoList = [] if firstPatch.csvDepPatch is None: """ If primary build install name is not specified in the csv file will fall back to use dependency specified in the first secondary build """ secondPatch = patchList[installList[1]] if secondPatch.csvDepPatch != firstPatch.installName: logger.info("Assign first patch CSV Dep %s" % firstPatch.installName) firstPatch.csvDepPatch = secondPatch.csvDepPatch for index in range(1,len(installList)): nextPatchInfo = patchList[installList[index]] """ just to make sure the first one has all the dependencies """ firstPatch.depKIDSBuild.update(nextPatchInfo.depKIDSBuild) firstPatch.optionalDepSet.update(nextPatchInfo.optionalDepSet) firstPatch.otherKidsInfoList.append([nextPatchInfo.kidsInfoPath, nextPatchInfo.kidsInfoSha1]) prevInstallName = installList[index - 1] # Removing May 2019: Causes cyclical dependency - J.Snyder #if prevInstallName not in nextPatchInfo.depKIDSBuild: # nextPatchInfo.depKIDSBuild.add(prevInstallName) #del patchList[installList[index]] #remove the other patch from the list logger.debug("%s:%s" % (nextPatchInfo.installName, nextPatchInfo.depKIDSBuild)) """ remove the self dependencies of the first patch """ firstPatch.depKIDSBuild.difference_update(installList) logger.debug("%s:%s" % (firstPatch.installName, firstPatch.depKIDSBuild)) """ update the csvDepPatch based on csv file based dependencies """ def __updateCSVDependencies__(self): for patchInfo in self._patchInfoDict.values(): installName = patchInfo.installName if installName in self._csvDepDict: patchInfo.csvDepPatch = self._csvDepDict[installName] def __updatePatchDependency__(self, updCSVDep=True): if updCSVDep: """ update the dependencies based on csv files """ self.__updateCSVDependencies__() """ update the dependencies based on patch Sequenece # """ self.__updateSeqNoDependencies__() """ update the dependencies for multi-build KIDS files """ 
self.__updateMultiBuildDependencies__() def __updateSeqNoDependencies__(self): namespaceVerSeq = dict() patchInfoDict = self._patchInfoDict for patchInfo in patchInfoDict.values(): """ generate dependencies map based on seq # """ namespace = patchInfo.namespace version = patchInfo.version seqNo = patchInfo.seqNo installName = patchInfo.installName if namespace and version: if not seqNo: continue if namespace not in namespaceVerSeq: namespaceVerSeq[namespace] = dict() if version not in namespaceVerSeq[namespace]: namespaceVerSeq[namespace][version] = [] namespaceVerSeq[namespace][version].append((int(seqNo), installName)) """ add dependencies based on SEQ # """ for versionDict in namespaceVerSeq.values(): for seqList in versionDict.values(): if len(seqList) < 2: continue else: # sorted list by sequence # seqOrder = sorted(seqList, key=lambda item: item[0]) for idx in range(len(seqOrder)-1,0,-1): installName = seqOrder[idx][1] patchInfoDict[installName].depKIDSBuild.add(seqOrder[idx-1][1]) """ now generate the dependency graph """ def __generatePatchDependencyGraph__(self): depDict = self._patchDependencyDict namespaceVerSeq = dict() for patchInfo in self._patchInfoDict.values(): installName = patchInfo.installName if installName not in depDict: depDict[installName] = set() if patchInfo.depKIDSBuild: depDict[installName].update(patchInfo.depKIDSBuild) """ combine csv dependencies """ if patchInfo.csvDepPatch: if installName not in depDict: depDict[installName] = set() if patchInfo.csvDepPatch in self._patchInfoDict: depDict[installName].add(patchInfo.csvDepPatch) """ generate self._missKidsInfoSet """ def __generateMissKIDSInfoSet__(self): patchInstallNameSet = set(x for x in self._patchInfoDict) kidsInstallNameSet = set(self._kidsInstallNameDict.keys()) self._missKidsInfoSet = kidsInstallNameSet.difference(patchInstallNameSet) logger.info("Missing KIDS Info set %s" % self._missKidsInfoSet) """ add missing info Patch """ def __addMissKIDSInfoPatch__(self): for 
kidsInstallName in self._missKidsInfoSet: logger.debug("Installation Name: %s, does not have info file, %s" % (kidsInstallName, self._kidsInstallNameDict[kidsInstallName])) patchInfo = PatchInfo() patchInfo.installName = kidsInstallName setPatchInfoFromInstallName(kidsInstallName, patchInfo) if kidsInstallName in self._kidsInstallNameSha1Dict: sha1Path = self._kidsInstallNameSha1Dict[kidsInstallName] patchInfo.kidsSha1Path = sha1Path patchInfo.kidsSha1 = readSha1SumFromSha1File(sha1Path) if kidsInstallName in self._installNameSeqMap: patchInfo.seqNo = self._installNameSeqMap[kidsInstallName] patchInfo.kidsFilePath = self._kidsInstallNameDict[kidsInstallName] if kidsInstallName in self._kidsDepBuildDict: logger.info("update the Missing Info KIDS depencency %s" % kidsInstallName) patchInfo.depKIDSBuild = self._kidsDepBuildDict[kidsInstallName] self._patchInfoDict[kidsInstallName] = patchInfo """ update the associated files for patchInfo """ def __handlePatchAssociatedFiles__(self): """ handle the info files first """ """ first by name assiciation """ patchInfoList = list(self._patchInfoDict.values()) #handle the associated files for missingKIDSBuild info patchInfoList.extend(list(self._missKidsBuildDict.values())) for patchInfo in patchInfoList: infoPath = patchInfo.kidsInfoPath if infoPath: infoName = os.path.basename(infoPath) associateSet = set() for infoFile in self._invalidInfoFileSet: infoFileName = os.path.basename(infoFile) if infoFileName.startswith(infoName[:infoName.rfind('.')]): patchInfo.addToAssociatedInfoList(infoFile) associateSet.add(infoFile) continue self._invalidInfoFileSet.difference_update(associateSet) """ second by mapping association """ associateSet = set() for infoFile in self._invalidInfoFileSet: installName = getAssociatedInstallName(infoFile) if installName: if installName in self._patchInfoDict: patchInfo = self._patchInfoDict[installName] #handle the associated files for missingKIDSBuild info elif installName in 
self._missKidsBuildDict: patchInfo = self._missKidsBuildDict[installName] else: continue patchInfo.addToAssociatedInfoList(infoFile) associateSet.add(infoFile) self._invalidInfoFileSet.difference_update(associateSet) """ handle global files """ associateSet = set() for globalFile in self._globalFilesSet: installName = getAssociatedInstallName(globalFile) if installName and installName in self._patchInfoDict: patchInfo = self._patchInfoDict[installName] patchInfo.addToAssociatedGlobalList(globalFile) associateSet.add(globalFile) self._globalFilesSet.difference_update(associateSet) logger.info("Total # of leftover info files: %s" % len(self._invalidInfoFileSet)) logger.debug(self._invalidInfoFileSet) logger.info("Total # of leftover global files: %s" % len(self._globalFilesSet)) logger.debug(self._globalFilesSet) """ update PatchInfo custom installer """ def __updateCustomInstaller__(self): for pythonScript in self._pythonScriptList: installName = os.path.basename(pythonScript) installName = dirNameToInstallName(installName[:installName.rfind('.')]) if installName in self._patchInfoDict: patchInfo = self._patchInfoDict[installName] patchInfo.hasCustomInstaller = True if patchInfo.customInstallerPath: logger.warning("Duplicated installer for %s: [%s:%s]" % ( installName, patchInfo.customInstallerPath, pythonScript)) logger.info("%s: custom installer %s" % (pythonScript, installName)) self._patchInfoDict[installName].customInstallerPath = pythonScript """ get all the patch order dependency by csv files """ def __getPatchOrderDependencyByCSVFiles__(self): for csvOrderFile in self._csvOrderFileList: self.__getPatchOrderListByCSV__(csvOrderFile) self.__buildPatchOrderDependencyByCSV__(csvOrderFile) sortedPatchList = self.__sortCSVDependencyList__() """ build csvDepDict based on csv File """ def __buildPatchOrderDependencyByCSV__(self, orderCSV): patchOrderList = self._patchOrderCSVDict[orderCSV] if patchOrderList is None: return """ some sanity check """ outPatchList = [] 
multiBuildSet = set() for patchOrder in patchOrderList: installName = patchOrder[0] if installName not in self._patchInfoDict: if (installName not in self._informationalKidsSet and installName not in self._kidsInstallNameDict): logger.warn("No KIDS file found for %s" % str(patchOrder)) continue patchInfo = self._patchInfoDict[installName] patchInfo.verifiedDate = patchOrder[2] """ check the seq no """ seqNo = patchOrder[1] if len(seqNo) > 0: """ check the seq no match the parsing result """ if patchInfo.seqNo is not None: if int(seqNo) != int(patchInfo.seqNo): logger.error("SeqNo mismatch for %s, from csv: %s, info %s" % (installName, seqNo, patchInfo.seqNo)) else: logger.info("Add seqNo %s for %s" % (seqNo, installName)) patchInfo.seqNo = seqNo """ handle the multi-build patch """ if patchInfo.installName in multiBuildSet: logger.info("%s is already part of the multiBuild" % installName) continue if patchInfo.isMultiBuilds: patchList = [self._patchInfoDict[x] for x in patchInfo.multiBuildsList] for patchInfo in patchList: patchInfo.verifiedDate = patchOrder[2] outPatchList.extend(patchList) multiBuildSet.update(patchInfo.multiBuildsList) else: outPatchList.append(patchInfo) """ update the order list to include only patch info """ self._patchOrderCSVDict[orderCSV] = outPatchList def __sortCSVDependencyList__(self): """ Utility methods to sort the CSV file based dependency """ outOrderList = [] """ sort the csv file by the first entry's verification date """ csvFileOrder = sorted(list(self._patchOrderCSVDict.keys()), key=lambda item: self._patchOrderCSVDict[item][0].verifiedDate) for csvFile in csvFileOrder: outOrderList.extend(self._patchOrderCSVDict[csvFile]) # Removing May 2019: Causes cyclical dependency - J.Snyder #for idx in range(len(outOrderList)-1, 0, -1): # installName = outOrderList[idx].installName # prevInstallName = outOrderList[idx-1].installName # #if not outOrderList[idx].kidsFilePath == outOrderList[idx-1].kidsFilePath: # 
self._csvDepDict[installName] = prevInstallName def _removeNotInstalledKIDSBuild(self, installName): patchInfo = self._patchInfoDict.get(installName) if not patchInfo: return listToRemove = [installName] if patchInfo.isMultiBuilds: listToRemove = patchInfo.multiBuildsList self._multiBuildDict.pop(patchInfo.kidsFilePath, None) for install in listToRemove: logger.info("Removing %s" % install) self._kidsInstallNameDict.pop(install, None) self._patchInfoDict.pop(install, None) """ parse the order csv file and generate an ordered list of install name """ def __getPatchOrderListByCSV__(self, orderCSV): """INSTALLED,VERIFY_DT,STATUS,SEQ#,LABELED_AS,CATEGORY,PRODUCT_NAME""" assert os.path.exists(orderCSV) logger.info("Parsing file: %s" % orderCSV) if orderCSV not in self._patchOrderCSVDict: self._patchOrderCSVDict[orderCSV] = [] patchOrderList = self._patchOrderCSVDict[orderCSV] result = csv.DictReader(open(orderCSV, 'r')) installNameSet = set() # to check possible duplicates entry for row in result: installName = convertToInstallName(row['LABELED_AS'].strip()) if installName in installNameSet: logger.error("Ignore duplicate installName %s" % installName) continue installNameSet.add(installName) if row['INSTALLED'].strip() != "TRUE": self._notInstalledKidsSet.add(installName) if installName in self._kidsInstallNameDict: logger.error("Uninstalled patch %s found in %s: %s" % (installName, self._kidsInstallNameDict[installName], row)) self._removeNotInstalledKIDSBuild(installName) logger.debug("Ignore uninstalled patch %s" % row) continue try: verifiedTime = datetime.strptime(row['VERIFY_DT'], "%d-%b-%y") except ValueError as ex: verifiedTime = datetime.strptime(row['VERIFY_DT'], "%Y-%m-%d") """ check the seq # field """ seqNo = row['SEQ#'].strip() if len(seqNo) > 0: try: int(seqNo) except: seqNo = "" patchOrderList.append((installName, seqNo, verifiedTime)) if re.match("^Informational$", row['CATEGORY'].strip(), re.IGNORECASE): logger.debug("patch is informational %s " % 
row) self._informationalKidsSet.add(installName) """ generate a sequence of patches that need to be applied by using topologic sort algorithm. If installName is provided, will only generated the order WRT. """ def __topologicSort__(self, installName=None): patchDict = self._patchInfoDict depDict = self._patchDependencyDict result = topologicSort(depDict, installName) self._patchOrder = [self._patchInfoDict[x] for x in result if x in patchDict] self._checkMultiBuildsOrder() def _checkMultiBuildsOrder(self): """ make sure that all the multi-build are grouped together """ multiDict = dict() for index in range(len(self._patchOrder)): patchInfo = self._patchOrder[index] if patchInfo.isMultiBuilds: if patchInfo.kidsFilePath not in multiDict: multiDict[patchInfo.kidsFilePath] = index if ( multiDict[patchInfo.kidsFilePath] != index and multiDict[patchInfo.kidsFilePath] != index - 1 ): logger.error("Patch out of order %s" % patchInfo) multiDict[patchInfo.kidsFilePath] = index """ compare function for PatchInfo objects """ def comparePatchInfo(one, two): assert isinstance(one, PatchInfo) assert isinstance(two, PatchInfo) if (one.package == two.package and (one.version != None and two.version != None) and float(one.version) == float(two.version)): if one.seqNo and two.seqNo: return cmp(int(one.seqNo), int(two.seqNo)) if one.seqNo: return 1 if two.seqNo: return -1 return 0 if one.rundate and two.rundate: return cmp(one.rundate, two.rundate) if one.rundate: return -1 return 1 """ topologic sort the DAG graph """ def topologicSort(depDict, item=None): initSet = set() if item: initSet.add(item) else: initSet = set(depDict.keys()) visitSet = set() # store all node that are already visited tempStack = [] # mark the temp list result = [] while len(initSet) > 0: item = initSet.pop() visitNode(item, depDict, visitSet, tempStack, result) initSet.difference_update(visitSet) return result def visitNode(nodeName, depDict, visitSet, tempStack, result): if nodeName in visitSet: # already 
visited, just return return if nodeName in tempStack: # there is a cycle in DAG index = tempStack.index(nodeName) logger.error("This is a cycle among these items:\n" + '\n'.join(repr(x) for x in tempStack[index:])) raise CycleException("DAG is NOT acyclic") tempStack.append(nodeName) for item in depDict.get(nodeName,[]): visitNode(item, depDict, visitSet, tempStack, result) """ remove from tempStach """ item = tempStack.pop() assert item == nodeName visitSet.add(nodeName) result.append(nodeName) """ Utility function to print result of a ordered patch list """ def printPatchOrderList(patchOrderList): for x in patchOrderList: print(({"Name" : x.installName}, {"Seq#" : x.seqNo}, {"KIDS" : os.path.basename(x.kidsFilePath)}, {"CSVDep" : x.csvDepPatch}, )) """ Utility function to check if the csv file is indeed in valid format """ def isValidOrderCSVFile(patchesCSV): assert os.path.exists(patchesCSV) validFields = VALID_CSV_ORDER_FILE_FIELDS patches_csv = csv.DictReader(open(patchesCSV, 'r')) patchCSVHeader = patches_csv.fieldnames if (patchCSVHeader is None or len(patchCSVHeader) < len(validFields)): return False fieldSet = set(patchCSVHeader) if fieldSet.issuperset(validFields): return True return False """ generate an output file that can be plotted by graphviz """ def generateDependencyGraph(depDict, outputFile): with open(outputFile, 'w') as output: output.write("digraph dependency_graph {\n") # set graph prop output.write("\tgraph [nodesep=\"0.35\",\n\t\transsep=\"0.55\"\n\t];\n") # set the node shape to be box output.write("\tnode [fontsize=14,\n\t\tshape=box\n\t];\n") # set the edge label and size props output.write("\tedge [fontsize=12];\n") for depNode in depDict: for item in depDict[depNode]: output.write("\t\"%s\" -> \"%s\";\n" % (depNode, item)) output.write("}\n") ########################################################################### ####### """ Testing code section """ ########################################################################### def 
testGeneratePatchOrder(): import logging initConsoleLogging(logging.INFO) patchOrderGen = PatchOrderGenerator() if len(sys.argv) <= 1: sys.stderr.write("Specify patch directory") sys.exit(-1) result = [] if len(sys.argv) == 2: result = patchOrderGen.generatePatchOrder(sys.argv[1]) else: result = patchOrderGen.generatePatchOrder(sys.argv[1], sys.argv[2]) printPatchOrderList(result) if __name__ == '__main__': testGeneratePatchOrder()
##########
# Contribution by the Center on Long-Term Risk:
# https://github.com/longtermrisk/marltoolbox
##########
import random

import numpy as np
from ray.rllib.examples.env.coin_game_non_vectorized_env import CoinGame, AsymCoinGame

# TODO add tests for grid_size != 3


def test_reset():
    max_steps, grid_size = 20, 3
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)


def init_several_env(max_steps, grid_size, players_can_pick_same_coin=True):
    """Build one CoinGame and one AsymCoinGame with identical settings.

    BUG FIX: grid_size was previously passed positionally into init_env's
    third parameter, which is `seed`, so the requested grid size was
    silently ignored (it only worked because grid_size == default == 3).
    """
    coin_game = init_env(
        max_steps,
        CoinGame,
        grid_size=grid_size,
        players_can_pick_same_coin=players_can_pick_same_coin,
    )
    asymm_coin_game = init_env(
        max_steps,
        AsymCoinGame,
        grid_size=grid_size,
        players_can_pick_same_coin=players_can_pick_same_coin,
    )
    return [coin_game, asymm_coin_game]


def init_env(
    max_steps, env_class, seed=None, grid_size=3, players_can_pick_same_coin=True
):
    """Instantiate and seed one environment of the given class."""
    config = {
        "max_steps": max_steps,
        "grid_size": grid_size,
        "both_players_can_pick_the_same_coin": players_can_pick_same_coin,
    }
    env = env_class(config)
    env.seed(seed)
    return env


def check_obs(obs, grid_size):
    """Each player's obs must show exactly one red, one blue and one coin."""
    assert len(obs) == 2, "two players"
    for key, player_obs in obs.items():
        assert player_obs.shape == (grid_size, grid_size, 4)
        assert (
            player_obs[..., 0].sum() == 1.0
        ), f"observe 1 player red in grid: {player_obs[..., 0]}"
        assert (
            player_obs[..., 1].sum() == 1.0
        ), f"observe 1 player blue in grid: {player_obs[..., 1]}"
        assert (
            player_obs[..., 2:].sum() == 1.0
        ), f"observe 1 coin in grid: {player_obs[..., 0]}"


def assert_logger_buffer_size(env, n_steps):
    """The env's pick-statistics buffers must hold exactly n_steps entries."""
    assert len(env.red_pick) == n_steps
    assert len(env.red_pick_own) == n_steps
    assert len(env.blue_pick) == n_steps
    assert len(env.blue_pick_own) == n_steps


def test_step():
    max_steps, grid_size = 20, 3
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)

        actions = {
            policy_id: random.randint(0, env.NUM_ACTIONS - 1)
            for policy_id in env.players_ids
        }
        obs, reward, done, info = env.step(actions)
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=1)
        assert not done["__all__"]


def test_multiple_steps():
    max_steps, grid_size = 20, 3
    n_steps = int(max_steps * 0.75)
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)

        for step_i in range(1, n_steps, 1):
            actions = {
                policy_id: random.randint(0, env.NUM_ACTIONS - 1)
                for policy_id in env.players_ids
            }
            obs, reward, done, info = env.step(actions)
            check_obs(obs, grid_size)
            assert_logger_buffer_size(env, n_steps=step_i)
            assert not done["__all__"]


def test_multiple_episodes():
    max_steps, grid_size = 20, 3
    n_steps = int(max_steps * 8.25)
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)

        step_i = 0
        for _ in range(n_steps):
            step_i += 1
            actions = {
                policy_id: random.randint(0, env.NUM_ACTIONS - 1)
                for policy_id in env.players_ids
            }
            obs, reward, done, info = env.step(actions)
            check_obs(obs, grid_size)
            assert_logger_buffer_size(env, n_steps=step_i)
            assert not done["__all__"] or (step_i == max_steps and done["__all__"])
            if done["__all__"]:
                obs = env.reset()
                check_obs(obs, grid_size)
                assert_logger_buffer_size(env, n_steps=0)
                step_i = 0


def overwrite_pos(env, p_red_pos, p_blue_pos, c_red_pos, c_blue_pos):
    """Force player/coin positions; exactly one coin color may be present."""
    assert c_red_pos is None or c_blue_pos is None
    if c_red_pos is None:
        env.red_coin = 0
        coin_pos = c_blue_pos
    if c_blue_pos is None:
        env.red_coin = 1
        coin_pos = c_red_pos

    env.red_pos = p_red_pos
    env.blue_pos = p_blue_pos
    env.coin_pos = coin_pos

    env.red_pos = np.array(env.red_pos)
    env.blue_pos = np.array(env.blue_pos)
    env.coin_pos = np.array(env.coin_pos)
    env.red_coin = np.array(env.red_coin)


def assert_info(
    n_steps,
    p_red_act,
    p_blue_act,
    env,
    grid_size,
    max_steps,
    p_red_pos,
    p_blue_pos,
    c_red_pos,
    c_blue_pos,
    red_speed,
    blue_speed,
    red_own,
    blue_own,
):
    """Run a scripted episode and check the pick-speed/ownership metrics."""
    step_i = 0
    for _ in range(n_steps):
        step_i += 1
        actions = {
            "player_red": p_red_act[step_i - 1],
            "player_blue": p_blue_act[step_i - 1],
        }
        obs, reward, done, info = env.step(actions)
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=step_i)
        assert not done["__all__"] or (step_i == max_steps and done["__all__"])

        if done["__all__"]:
            assert info["player_red"]["pick_speed"] == red_speed
            assert info["player_blue"]["pick_speed"] == blue_speed

            if red_own is None:
                assert "pick_own_color" not in info["player_red"]
            else:
                assert info["player_red"]["pick_own_color"] == red_own
            if blue_own is None:
                assert "pick_own_color" not in info["player_blue"]
            else:
                assert info["player_blue"]["pick_own_color"] == blue_own

            obs = env.reset()
            check_obs(obs, grid_size)
            assert_logger_buffer_size(env, n_steps=0)
            step_i = 0

        overwrite_pos(
            env,
            p_red_pos[step_i],
            p_blue_pos[step_i],
            c_red_pos[step_i],
            c_blue_pos[step_i],
        )


def test_logged_info_no_picking():
    p_red_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_blue_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    c_blue_pos = [None, None, None, None]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=0.0,
            red_own=None,
            blue_own=None,
        )

    envs = init_several_env(max_steps, grid_size, players_can_pick_same_coin=False)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=0.0,
            red_own=None,
            blue_own=None,
        )


def test_logged_info__red_pick_red_all_the_time():
    p_red_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_blue_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    c_blue_pos = [None, None, None, None]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=0.0,
            red_own=1.0,
            blue_own=None,
        )

    envs = init_several_env(max_steps, grid_size, players_can_pick_same_coin=False)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=0.0,
            red_own=1.0,
            blue_own=None,
        )


def test_logged_info__blue_pick_red_all_the_time():
    p_red_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_blue_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    c_blue_pos = [None, None, None, None]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=1.0,
            red_own=None,
            blue_own=0.0,
        )

    envs = init_several_env(max_steps, grid_size, players_can_pick_same_coin=False)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=1.0,
            red_own=None,
            blue_own=0.0,
        )


def test_logged_info__blue_pick_blue_all_the_time():
    p_red_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_blue_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [None, None, None, None]
    c_blue_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=1.0,
            red_own=None,
            blue_own=1.0,
        )

    envs = init_several_env(max_steps, grid_size, players_can_pick_same_coin=False)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.0,
            blue_speed=1.0,
            red_own=None,
            blue_own=1.0,
        )


def test_logged_info__red_pick_blue_all_the_time():
    p_red_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_blue_pos = [[0, 0], [0, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [None, None, None, None]
    c_blue_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=0.0,
            red_own=0.0,
            blue_own=None,
        )

    envs = init_several_env(max_steps, grid_size, players_can_pick_same_coin=False)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=0.0,
            red_own=0.0,
            blue_own=None,
        )


def test_logged_info__both_pick_blue_all_the_time():
    p_red_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [None, None, None, None]
    c_blue_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=1.0,
            red_own=0.0,
            blue_own=1.0,
        )


def test_logged_info__both_pick_red_all_the_time():
    p_red_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [1, 0], [1, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    c_blue_pos = [None, None, None, None]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        # leftover debug print of all assert_info arguments removed
        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=1.0,
            blue_speed=1.0,
            red_own=1.0,
            blue_own=0.0,
        )


def test_logged_info__both_pick_red_half_the_time():
    p_red_pos = [[0, 0], [0, 0], [1, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    c_blue_pos = [None, None, None, None]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.5,
            blue_speed=0.5,
            red_own=1.0,
            blue_own=0.0,
        )


def test_logged_info__both_pick_blue_half_the_time():
    p_red_pos = [[0, 0], [0, 0], [1, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [None, None, None, None]
    c_blue_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.5,
            blue_speed=0.5,
            red_own=0.0,
            blue_own=1.0,
        )


def test_logged_info__both_pick_blue():
    p_red_pos = [[0, 0], [0, 0], [0, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [None, None, None, None]
    c_blue_pos = [[1, 1], [1, 1], [1, 1], [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.25,
            blue_speed=0.5,
            red_own=0.0,
            blue_own=1.0,
        )


def test_logged_info__pick_half_the_time_half_blue_half_red():
    p_red_pos = [[0, 0], [0, 0], [1, 0], [1, 0]]
    p_blue_pos = [[1, 0], [1, 0], [0, 0], [0, 0]]
    p_red_act = [0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0]
    c_red_pos = [[1, 1], None, [1, 1], None]
    c_blue_pos = [None, [1, 1], None, [1, 1]]
    max_steps, grid_size = 4, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        obs = env.reset()
        check_obs(obs, grid_size)
        assert_logger_buffer_size(env, n_steps=0)
        overwrite_pos(env, p_red_pos[0], p_blue_pos[0], c_red_pos[0], c_blue_pos[0])

        assert_info(
            n_steps,
            p_red_act,
            p_blue_act,
            env,
            grid_size,
            max_steps,
            p_red_pos,
            p_blue_pos,
            c_red_pos,
            c_blue_pos,
            red_speed=0.5,
            blue_speed=0.5,
            red_own=0.5,
            blue_own=0.5,
        )


def test_observations_are_invariant_to_the_player_trained_in_reset():
    p_red_pos = [
        [0, 0],
        [0, 0],
        [1, 1],
        [1, 1],
        [0, 0],
        [1, 1],
        [2, 0],
        [0, 1],
        [2, 2],
        [1, 2],
    ]
    p_blue_pos = [
        [0, 0],
        [0, 0],
        [1, 1],
        [1, 1],
        [1, 1],
        [0, 0],
        [0, 1],
        [2, 0],
        [1, 2],
        [2, 2],
    ]
    p_red_act = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    c_red_pos = [[1, 1], None, [0, 1], None, None, [2, 2], [0, 0], None, None, [2, 1]]
    c_blue_pos = [None, [1, 1], None, [0, 1], [2, 2], None, None, [0, 0], [2, 1], None]
    max_steps, grid_size = 10, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        _ = env.reset()
        step_i = 0
        overwrite_pos(
            env,
            p_red_pos[step_i],
            p_blue_pos[step_i],
            c_red_pos[step_i],
            c_blue_pos[step_i],
        )

        for _ in range(n_steps):
            step_i += 1
            actions = {
                "player_red": p_red_act[step_i - 1],
                "player_blue": p_blue_act[step_i - 1],
            }
            _, _, _, _ = env.step(actions)

            if step_i == max_steps:
                break
            overwrite_pos(
                env,
                p_red_pos[step_i],
                p_blue_pos[step_i],
                c_red_pos[step_i],
                c_blue_pos[step_i],
            )


def assert_obs_is_symmetrical(obs, env):
    """Red's planes must equal Blue's with player/coin channels swapped."""
    assert np.all(obs[env.players_ids[0]][..., 0] == obs[env.players_ids[1]][..., 1])
    assert np.all(obs[env.players_ids[1]][..., 0] == obs[env.players_ids[0]][..., 1])
    assert np.all(obs[env.players_ids[0]][..., 2] == obs[env.players_ids[1]][..., 3])
    assert np.all(obs[env.players_ids[1]][..., 2] == obs[env.players_ids[0]][..., 3])


def test_observations_are_invariant_to_the_player_trained_in_step():
    p_red_pos = [
        [0, 0],
        [0, 0],
        [1, 1],
        [1, 1],
        [0, 0],
        [1, 1],
        [2, 0],
        [0, 1],
        [2, 2],
        [1, 2],
    ]
    p_blue_pos = [
        [0, 0],
        [0, 0],
        [1, 1],
        [1, 1],
        [1, 1],
        [0, 0],
        [0, 1],
        [2, 0],
        [1, 2],
        [2, 2],
    ]
    p_red_act = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    p_blue_act = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    c_red_pos = [[1, 1], None, [0, 1], None, None, [2, 2], [0, 0], None, None, [2, 1]]
    c_blue_pos = [None, [1, 1], None, [0, 1], [2, 2], None, None, [0, 0], [2, 1], None]
    max_steps, grid_size = 10, 3
    n_steps = max_steps
    envs = init_several_env(max_steps, grid_size)

    for env in envs:
        _ = env.reset()
        step_i = 0
        overwrite_pos(
            env,
            p_red_pos[step_i],
            p_blue_pos[step_i],
            c_red_pos[step_i],
            c_blue_pos[step_i],
        )

        for _ in range(n_steps):
            step_i += 1
            actions = {
                "player_red": p_red_act[step_i - 1],
                "player_blue": p_blue_act[step_i - 1],
            }
            obs, reward, done, info = env.step(actions)

            # assert observations are symmetrical respective to the actions
            if step_i % 2 == 1:
                obs_step_odd = obs
            elif step_i % 2 == 0:
                assert np.all(
                    obs[env.players_ids[0]] == obs_step_odd[env.players_ids[1]]
                )
                assert np.all(
                    obs[env.players_ids[1]] == obs_step_odd[env.players_ids[0]]
                )

            if step_i == max_steps:
                break
            overwrite_pos(
                env,
                p_red_pos[step_i],
                p_blue_pos[step_i],
                c_red_pos[step_i],
                c_blue_pos[step_i],
            )
import sys sys.path.append('/home/jwalker/dynamics/python/atmos-tools') sys.path.append('/home/jwalker/dynamics/python/atmos-read') import xarray as xray import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import collections import pandas as pd import atmos as atm import indices import utils # Format for article publication or presentation slides pres = True if pres: figwidth = 12 style = atm.homedir() + 'dynamics/python/mpl-styles/presentation.mplstyle' else: figwidth = 7.48 style = atm.homedir() + 'dynamics/python/mpl-styles/grl_article.mplstyle' plt.style.use(style) fontsize = mpl.rcParams['font.size'] labelsize = fontsize + 3 dashes = [6, 2] # ---------------------------------------------------------------------- version = 'merra2' years = np.arange(1980, 2016) datadir = atm.homedir() + 'datastore/%s/analysis/' % version onset_nm = 'CHP_MFC' onset_nms = ['CHP_MFC', 'MOK', 'HOWI', 'OCI'] #pts_nm = 'CHP_PCP' pts_nm = 'CHP_GPCP' #pcp_nm = 'PRECTOT' pcp_nm = 'GPCP' varnms = ['PRECTOT', 'U200', 'V200', 'U850', 'V850'] lat_extract = {'U200' : 0, 'V200' : 15, 'U850' : 15, 'V850' : 15} lon1, lon2 = 60, 100 lat1, lat2 = 10, 30 nroll = 5 # n-day rolling averages for smoothing daily timeseries ind_nm, npre, npost = 'onset', 120, 200 #ind_nm, npre, npost = 'retreat', 270, 89 fracmin = 0.5 # Precip JJAS frac of total for gridpoint masking yearstr = '%d-%d.nc' % (min(years), max(years)) filestr = datadir + version + '_index_%s_' + yearstr indfiles = collections.OrderedDict() for nm in ['CHP_MFC', 'HOWI', 'OCI']: indfiles[nm] = filestr % nm indfiles['MOK'] = atm.homedir() + 'dynamics/python/monsoon-onset/data/MOK.dat' filestr2 = datadir + version + '_%s_dailyrel_' + onset_nm + '_' + yearstr datafiles = {nm : filestr2 % nm for nm in varnms} datafiles['CMAP'] = datadir + 'cmap_dailyrel_' + onset_nm + '_1980-2014.nc' datafiles['GPCP'] = datadir + 'gpcp_dailyrel_' + onset_nm + '_1997-2015.nc' ptsfile = datadir + version + '_index_pts_%s_' % pts_nm 
# Gridpoint-index file and smoothing options depend on which dataset the
# gridpoint onset index was computed from.
ptsmaskfile = None
if pts_nm == 'CHP_CMAP':
    ptsfile = ptsfile + '1980-2014.nc'
    pts_xroll, pts_yroll = None, None
elif pts_nm == 'CHP_GPCP':
    ptsfile = ptsfile + '1997-2015.nc'
    ptsmaskfile = atm.homedir() + 'datastore/gpcp/gpcp_daily_1997-2014.nc'
    pts_xroll, pts_yroll = None, None
else:
    ptsfile = ptsfile + yearstr
    pts_xroll, pts_yroll = 3, 3
mfcbudget_file = datadir + version + '_mfc_budget_' + yearstr

if ind_nm == 'retreat':
    for nm in datafiles:
        datafiles[nm] = datafiles[nm].replace('dailyrel', 'dailyrel_retreat')

enso_nm = 'NINO3'
#enso_nm = 'NINO3.4'
ensodir = atm.homedir() + 'dynamics/python/data/ENSO/'
ensofile = ensodir + ('enso_sst_monthly_%s.csv'
                      % enso_nm.lower().replace('.', '').replace('+', ''))
enso_keys = ['MAM', 'JJA']

# ----------------------------------------------------------------------
# Read data

# Large-scale onset/retreat indices
index_all = collections.OrderedDict()
for nm in indfiles:
    print('Loading ' + indfiles[nm])
    if nm == 'MOK':
        # MOK is a plain-text data file, not netCDF.
        mok = indices.onset_MOK(indfiles['MOK'], yearsub=years)
        index_all['MOK'] = xray.Dataset({'onset' : mok})
    else:
        with xray.open_dataset(indfiles[nm]) as ds:
            index_all[nm] = ds.load()

index = index_all[onset_nm]
index['length'] = index['retreat'] - index['onset']
onset_all = pd.DataFrame()
for nm in index_all:
    onset_all[nm] = index_all[nm]['onset'].to_series()

# Onset/retreat at grid points
print('Loading ' + ptsfile)
with xray.open_dataset(ptsfile) as index_pts:
    index_pts.load()
# Optional spatial smoothing of the gridpoint indices.
for nm in index_pts.data_vars:
    if pts_xroll is not None:
        index_pts[nm] = atm.rolling_mean(index_pts[nm], pts_xroll, axis=-1,
                                         center=True)
    if pts_yroll is not None:
        index_pts[nm] = atm.rolling_mean(index_pts[nm], pts_yroll, axis=-2,
                                         center=True)

# Regression of gridpoint indices onto large-scale index
print('Regression of gridpoint indices onto large-scale index')
pts_reg, pts_mask = {}, {}
for nm in index_pts.data_vars:
    ind = index[nm].sel(year=index_pts['year'])
    pts_reg[nm] = atm.regress_field(index_pts[nm], ind, axis=0)
    # pts_mask is True where the regression is NOT significant at the 5%
    # level; used later for stippling.
    pts_reg[nm]['pts_mask'] = (pts_reg[nm]['p'] >= 0.05)


# Mask out grid points where CHP index is ill-defined
def applymask(ds, mask_in):
    # Apply mask_in (broadcast/tiled to each variable's shape) in-place,
    # filling masked values with NaN; returns the same dataset.
    for nm in ds.data_vars:
        mask = atm.biggify(mask_in, ds[nm], tile=True)
        vals = np.ma.masked_array(ds[nm], mask=mask).filled(np.nan)
        ds[nm].values = vals
    return ds


if ptsmaskfile is not None:
    # Mask gridpoints where JJAS precip is less than fracmin of annual total.
    day1 = atm.mmdd_to_jday(6, 1)
    day2 = atm.mmdd_to_jday(9, 30)
    with xray.open_dataset(ptsmaskfile) as ds:
        pcp = ds['PREC'].sel(lat=index_pts.lat).sel(lon=index_pts.lon).load()
    pcp_ssn = atm.subset(pcp, {'day' : (day1, day2)})
    pcp_frac = pcp_ssn.sum(dim='day') / pcp.sum(dim='day')
    mask = pcp_frac < fracmin
    index_pts = applymask(index_pts, mask)
    for key in pts_reg:
        pts_reg[key] = applymask(pts_reg[key], mask)

# MFC budget
with xray.open_dataset(mfcbudget_file) as mfc_budget:
    mfc_budget.load()
mfc_budget = mfc_budget.rename({'DWDT' : 'dw/dt'})
mfc_budget['P-E'] = mfc_budget['PRECTOT'] - mfc_budget['EVAP']
if nroll is not None:
    for nm in mfc_budget.data_vars:
        mfc_budget[nm] = atm.rolling_mean(mfc_budget[nm], nroll, center=True)

# Dailyrel climatology
# Maps file key -> variable name inside each netCDF file.
keys_dict = {'PRECTOT' : 'PRECTOT', 'CMAP' : 'precip', 'GPCP' : 'PREC',
             'U200' : 'U', 'U850' : 'U', 'V200' : 'V', 'V850' : 'V'}
data = {}
for nm in datafiles:
    print('Loading ' + datafiles[nm])
    with xray.open_dataset(datafiles[nm]) as ds:
        if 'year' in ds.dims:
            ds = ds.mean(dim='year')
        data[nm] = ds[keys_dict[nm]].load()

# ENSO indices
enso = pd.read_csv(ensofile, index_col=0)
enso = enso.loc[years]
for key in enso_keys:
    if key not in enso.columns:
        # Build seasonal means from monthly columns when absent.
        months = atm.season_months(key)
        month_names = [(atm.month_str(m)).capitalize() for m in months]
        enso[key] = enso[month_names].mean(axis=1)
enso = enso[enso_keys]
col_names = [enso_nm + ' ' + nm for nm in enso.columns]
enso.columns = col_names

# ----------------------------------------------------------------------
# Daily timeseries
ts = xray.Dataset()
for nm in ['GPCP', 'PRECTOT']:
    ts[nm] = atm.mean_over_geobox(data[nm], lat1, lat2, lon1, lon2)
ts['MFC'] = utils.daily_rel2onset(index_all['CHP_MFC']['daily_ts'],
                                  index[ind_nm], npre, npost)
ts['CMFC'] = utils.daily_rel2onset(index_all['CHP_MFC']['tseries'],
                                   index[ind_nm], npre, npost)

# Extract variables at specified latitudes
for nm, lat0 in lat_extract.iteritems():
    var = atm.dim_mean(data[nm], 'lon', lon1, lon2)
    lat = atm.get_coord(var, 'lat')
    lat0_str = atm.latlon_labels(lat0, 'lat', deg_symbol=False)
    # key = nm + '_' + lat0_str
    key = nm
    # Use the gridpoint latitude closest to the requested lat0.
    lat_closest, _ = atm.find_closest(lat, lat0)
    print '%s %.2f %.2f' % (nm, lat0, lat_closest)
    ts[key] = atm.subset(var, {'lat' : (lat_closest, None)}, squeeze=True)

# Compute climatology and smooth with rolling mean
if 'year' in ts.dims:
    ts = ts.mean(dim='year')
if nroll is not None:
    for nm in ts.data_vars:
        ts[nm] = atm.rolling_mean(ts[nm], nroll, center=True)
tseries = atm.subset(ts, {'dayrel' : (-npre, npost)})

# Smooth latitude-dayrel data with rolling mean
for nm in data:
    daydim = atm.get_coord(data[nm], 'dayrel', 'dim')
    data[nm] = atm.rolling_mean(data[nm], nroll, axis=daydim, center=True)

# ----------------------------------------------------------------------
# Plotting functions


def fix_axes(axlims):
    # Re-apply (lat1, lat2, lon1, lon2) axis limits and redraw.
    plt.gca().set_ylim(axlims[:2])
    plt.gca().set_xlim(axlims[2:])
    plt.draw()


def add_labels(grp, labels, pos, fontsize, fontweight='bold'):
    """Annotate each subplot of a FigGroup with a panel label (a, b, ...)."""
    # Expand pos to list for each subplot, if needed
    try:
        n = len(pos[0])
    except TypeError:
        pos = [pos] * (grp.nrow * grp.ncol)
    i = 0
    for row in range(grp.nrow):
        for col in range(grp.ncol):
            grp.subplot(row, col)
            atm.text(labels[i], pos[i], fontsize=fontsize,
                     fontweight=fontweight)
            i += 1


def skip_ticklabel(xticks):
    """Return tick labels with every other (even-indexed) label blanked."""
    xtick_labels = []
    for i, n in enumerate(xticks):
        if i % 2 == 0:
            xtick_labels = xtick_labels + ['']
        else:
            xtick_labels = xtick_labels + [n]
    return xtick_labels


def plot_mfc_budget(mfc_budget, index, year, legend=True,
                    legend_kw={'fontsize' : 9, 'loc' : 'upper left',
                               'handlelength' : 2.5},
                    dashes=[6, 2], netprecip=False, labelpad=1.5):
    """Plot daily MFC budget terms for one year, plus cumulative MFC.

    Returns (ax1, ax2): the budget-rate axis (mm/day) and the twin axis
    carrying the cumulative MFC curve (mm).
    """
    ts = mfc_budget.sel(year=year)
    ind = index.sel(year=year)
    days = ts['day'].values
    styles = {'PRECTOT' : {'color' : 'k', 'linestyle' : '--',
                           'dashes' : dashes},
              'EVAP' : {'color' : 'k'},
              'MFC' : {'color' : 'k', 'linewidth' : 2},
              'dw/dt' : {'color' : '0.7', 'linewidth' : 2}}
    if netprecip:
        styles['P-E'] = {'color' : 'b', 'linewidth' : 2}
    for nm in styles:
        plt.plot(days, ts[nm], label=nm, **styles[nm])
    # Vertical lines at onset and retreat days.
    plt.axvline(ind['onset'], color='k')
    plt.axvline(ind['retreat'], color='k')
    plt.xlabel('Day of Year')
    plt.ylabel('mm day$^{-1}$', labelpad=labelpad)
    ax1 = plt.gca()
    ax2 = plt.twinx()
    plt.sca(ax2)
    plt.plot(days, ind['tseries'], 'r', alpha=0.6, linewidth=2, label='CMFC')
    atm.fmt_axlabels('y', 'mm', color='r', alpha=0.6)
    plt.gca().set_ylabel('mm', labelpad=labelpad)
    if legend:
        atm.legend_2ax(ax1, ax2, **legend_kw)
    return ax1, ax2


def yrly_index(onset_all, grid=False, legend=True,
               legend_kw={'loc' : 'upper left', 'ncol' : 2}):
    """Plot onset day vs. year for different onset definitions."""
    #corr = onset_all.corr()[onset_nm]
    labels = {nm : nm for nm in onset_all.columns}
    labels['CHP_MFC'] = 'CHP'
    styles = {'CHP_MFC' : {'color' : 'k', 'linewidth' : 2},
              'OCI' : {'color' : 'r'},
              'HOWI' : {'color' : 'g'},
              'MOK' : {'color' : 'b'}}
    styles['retreat'] = styles['CHP_MFC']
    styles['length'] = styles['CHP_MFC']
    xticks = np.arange(1980, 2016, 5)
    xticklabels = [1980, '', 1990, '', 2000, '', 2010, '']
    for nm in onset_all.columns:
        plt.plot(years, onset_all[nm], label=labels[nm], **styles[nm])
    if legend:
        plt.legend(**legend_kw)
    plt.grid(grid)
    plt.xlim(min(years) - 1, max(years) + 1)
    plt.xticks(xticks, xticklabels)
    plt.xlabel('Year')
    plt.ylabel('Day of Year')


def daily_tseries(tseries, index, pcp_nm, npre, npost, legend, grp,
                  ind_nm='onset', grid=False, dashes=[6, 2], dlist=[15],
                  labelpad=1.5):
    """Plot dailyrel timeseries climatology"""
    xlims = (-npre, npost)
    xticks = range(-npre, npost + 10, 30)
    xlabel = 'Days Since ' + ind_nm.capitalize()
    # Vertical reference lines at onset and mean retreat (or vice versa).
    if ind_nm == 'onset':
        x0 = [0, index['length'].mean(dim='year')]
        xtick_labels = xticks
    else:
        x0 = [-index['length'].mean(dim='year'), 0]
        xtick_labels = skip_ticklabel(xticks)

    # Each pair: (left-axis variables, right-axis variables).
    keypairs = [(['MFC', pcp_nm], ['CMFC']), (['U850'], ['V850'])]
    opts = [('upper left', 'mm day$^{-1}$', 'mm'),
            ('upper left', ' m s$^{-1}$', ' m s$^{-1}$')]
    ylim_list = [(-3.5, 9), (-7, 15)]
    y2_opts = {'color' : 'r', 'alpha' : 0.6}
    dashed = {'color' : 'k', 'linestyle' : '--', 'dashes' : dashes}
    styles = ['k', dashed, 'g', 'm']
    legend_kw = {}
    for pair, opt, ylims in zip(keypairs, opts, ylim_list):
        grp.next()
        keys1, keys2 = pair
        legend_kw['loc'] = opt[0]
        y1_label = opt[1]
        y2_label = opt[2]
        data1 = tseries[keys1]
        if keys2 is not None:
            data2 = tseries[keys2]
        else:
            data2 = None
        data1_styles = {nm : style for (nm, style) in zip(keys1, styles)}
        axs = utils.plotyy(data1, data2, xname='dayrel',
                           data1_styles=data1_styles, y2_opts=y2_opts,
                           xlims=xlims, xticks=xticks, ylims=ylims,
                           xlabel=xlabel, y1_label=y1_label,
                           y2_label=y2_label, legend=legend,
                           legend_kw=legend_kw, x0_axvlines=x0, grid=grid)
        for ax, label in zip(axs, [y1_label, y2_label]):
            ax.set_ylabel(label, labelpad=labelpad)
        plt.gca().set_xticklabels(xtick_labels)
        if dlist is not None:
            for d0 in dlist:
                plt.axvline(d0, color='k', linestyle='--', dashes=dashes)


def contourf_latday(var, clev=None, title='', nc_pref=40, grp=None,
                    xlims=(-120, 200), xticks=np.arange(-120, 201, 30),
                    ylims=(-60, 60), yticks=np.arange(-60, 61, 20),
                    dlist=None, grid=False, ind_nm='onset'):
    """Filled-contour plot of a variable on (dayrel, latitude) axes."""
    vals = var.values.T
    lat = atm.get_coord(var, 'lat')
    days = atm.get_coord(var, 'dayrel')
    # One-signed fields get a sequential colormap; signed fields diverging.
    if var.min() >= 0:
        cmap, extend, symmetric = 'PuBuGn', 'max', False
    else:
        cmap, extend, symmetric = 'RdBu_r', 'both', True
    # NOTE(review): `clev == None` relies on scalar comparison; `is None`
    # would be the safe idiom here.
    if clev == None:
        cint = atm.cinterval(vals, n_pref=nc_pref, symmetric=symmetric)
        clev = atm.clevels(vals, cint, symmetric=symmetric)
    elif len(atm.makelist(clev)) == 1:
        # Scalar clev is treated as a contour interval.
        if var.name == 'PREC':
            clev = np.arange(0, 10 + clev/2.0, clev)
        else:
            clev = atm.clevels(vals, clev, symmetric=symmetric)
    cticks_dict = {'PRECTOT' : np.arange(0, 13, 2),
                   'PREC' : np.arange(0, 11, 2),
                   'T200' : np.arange(-208, 227, 2),
                   'U200' : np.arange(-60, 61, 10),
                   'PSI500' : np.arange(-800, 801, 200)}
    cticks = cticks_dict.get(var.name)
    plt.contourf(days, lat, vals, clev, cmap=cmap, extend=extend)
    plt.colorbar(ticks=cticks)
    atm.ax_lims_ticks(xlims, xticks, ylims, yticks)
    plt.grid(grid)
    plt.title(title)
    if dlist is not None:
        for d0 in dlist:
            plt.axvline(d0, color='k')
    # NOTE(review): bottom-row test compares grp.row to grp.ncol - 1 (not
    # grp.nrow - 1); works when nrow == ncol but looks like a typo — confirm.
    if grp is not None and grp.row == grp.ncol - 1:
        plt.xlabel('Days Since ' + ind_nm.capitalize())
    if grp is not None and grp.col == 0:
        plt.ylabel('Latitude')


def plot_maps(var, days, grp, cmin=0, cmax=20, cint=1,
              axlims=(5, 35, 60, 100), cmap='PuBuGn', res='c', extend='max',
              cticks=None, daypos=(0.05, 0.85)):
    """Lat-lon maps of precip on selected days."""
    clev = np.arange(cmin, cmax + cint/2.0, cint)
    if cticks is None:
        cticks = np.arange(cmin, clev.max() + 1, 2)
    lat1, lat2, lon1, lon2 = axlims
    for day in days:
        grp.next()
        pcp = var.sel(dayrel=day)
        m = atm.init_latlon(lat1, lat2, lon1, lon2, resolution=res)
        m = atm.contourf_latlon(pcp, m=m, clev=clev, axlims=axlims, cmap=cmap,
                                colorbar=False, extend=extend)
        atm.text(day, daypos, fontsize=12, fontweight='bold')
    # plt.colorbar(ax=grp.axes.ravel().tolist(), orientation='vertical',
    #              shrink=0.8, ticks=cticks)
    atm.colorbar_multiplot(orientation='vertical', shrink=0.8, ticks=cticks)
    fix_axes(axlims)
    xticks = [60, 70, 80, 90, 100]
    xtick_labels = atm.latlon_labels(xticks, 'lon')
    xtick_labels[1] = ''
    xtick_labels[3] = ''
    plt.xticks(xticks, xtick_labels)


def plot_kerala(color='b', linewidth=1):
    """Plot the boundaries of the Kerala region"""
    datadir = atm.homedir() + 'dynamics/python/monsoon-onset/data/'
    filenm = datadir + 'india_state.geojson'
    x, y = utils.kerala_boundaries(filenm)
    plt.plot(x, y, color, linewidth=linewidth)


def pts_clim(index_pts, nm, clev_bar=10, clev_std=np.arange(0, 21, 1),
             axlims=(5, 32, 60, 100), cmap='spectral', res='l',
             label_locs=None, inline_spacing=2):
    """Plot climatological mean and standard deviation of grid point indices."""
    varbar = index_pts[nm].mean(dim='year')
    varstd = index_pts[nm].std(dim='year')
    lat1, lat2, lon1, lon2 = axlims
    m = atm.init_latlon(lat1, lat2, lon1, lon2, resolution=res)
    # Shaded field: interannual standard deviation.
    m = atm.contourf_latlon(varstd, m=m, clev=clev_std, axlims=axlims,
                            cmap=cmap, symmetric=False, colorbar=False,
                            extend='max')
    m.colorbar(ticks=np.arange(0, 21, 2))
    # Line contours: climatological mean.
    _, cs = atm.contour_latlon(varbar, clev=clev_bar, axlims=axlims,
                               colors='k', linewidths=2)
    cs_opts = {'fmt' : '%.0f', 'fontsize' : 9,
               'inline_spacing' : inline_spacing}
    if label_locs is not None:
        cs_opts['manual'] = label_locs
    plt.clabel(cs, **cs_opts)
    plot_kerala()
    fix_axes(axlims)


# Plot regression
def plot_reg(pts_reg, nm, clev=0.2, xsample=1, ysample=1,
             axlims=(5, 32, 60, 100), cline=None, color='0.3', alpha=1.0,
             markersize=2, res='l'):
    """Plot regression of grid point indices onto large-scale index."""
    var = pts_reg[nm]['m']
    # pts_mask is True where the regression is not significant; stippled.
    mask = pts_reg[nm]['pts_mask']
    xname = atm.get_coord(mask, 'lon', 'name')
    yname = atm.get_coord(mask, 'lat', 'name')
    lat1, lat2, lon1, lon2 = axlims
    m = atm.init_latlon(lat1, lat2, lon1, lon2, resolution=res)
    atm.contourf_latlon(var, m=m, clev=clev, axlims=axlims, extend='both')
    atm.stipple_pts(mask, xname=xname, yname=yname, xsample=xsample,
                    ysample=ysample, color=color, alpha=alpha,
                    markersize=markersize)
    if cline is not None:
        atm.contour_latlon(var, clev=[cline], axlims=axlims, colors='b',
                           linewidths=2)
    plot_kerala()
    fix_axes(axlims)


# ----------------------------------------------------------------------
# FIGURES

# Timeseries plots - setup figure for subplots
nrow, ncol = 2, 2
fig_kw = {'figsize' : (figwidth, 0.7 * figwidth)}
gridspec_kw = {'left' : 0.07, 'right' : 0.9, 'bottom' : 0.07, 'top' : 0.9,
               'wspace' : 0.5, 'hspace' : 0.35}
legend = True
legend_kw = {'loc' : 'upper left', 'framealpha' : 0.0}
labelpos = (-0.2, 1.05)
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)

# Daily MFC budget and CHP tseries fit in a single year
plotyear = 2000
if ind_nm == 'onset':
    grp.next()
    plot_mfc_budget(mfc_budget, index, plotyear, dashes=dashes, legend=legend,
                    legend_kw=legend_kw)
    # Plot yearly tseries
    grp.next()
    yrly_index(onset_all, legend=True)
else:
    # Retreat-relative version: blank the first two panels and show a
    # boxplot summary instead.
    for i in [0, 1]:
        grp.next()
        plt.axis('off')
    ax = plt.subplot(2, 1, 1)
    df = index[['length', 'retreat', 'onset']].to_dataframe()
    plt.boxplot(df.values, vert=False, labels=['Length', 'Retreat', 'Onset'],
                whis='range')
    plt.xlabel('Day of Year | Number of Days')
    plt.xlim(120, 320)
    plt.xticks(np.arange(120, 321, 20))
    pos = ax.get_position()
    pos2 = [pos.x0, pos.y0 + 0.05, pos.width, pos.height]
    ax.set_position(pos2)
    atm.text('a', (-0.16, 1.01), fontsize=labelsize, fontweight='bold')

# Plot daily tseries
legend = True
if ind_nm == 'onset':
    dlist = [15]
else:
    dlist = None
daily_tseries(tseries, index, pcp_nm, npre, npost, legend, grp,
              ind_nm=ind_nm, dlist=dlist)

# Add a-d labels
if ind_nm == 'onset':
    labels = ['a', 'b', 'c', 'd']
    add_labels(grp, labels, labelpos, labelsize)
else:
    labels = ['b', 'c']
    for i in [0, 1]:
        grp.subplot(1, i)
        atm.text(labels[i], labelpos, fontsize=labelsize, fontweight='bold')

# Lat-day contour plots
xticks = range(-npre, npost + 10, 30)
if ind_nm == 'onset':
    dlist = [0, index['length'].mean(dim='year')]
    d0 = 15
    xtick_labels = xticks
else:
    dlist = [-index['length'].mean(dim='year'), 0]
    d0 = None
    xtick_labels = skip_ticklabel(xticks)
keys = [pcp_nm, 'V200', 'U200', 'U850']
clevs = {pcp_nm : 1, 'U200' : 5, 'V200' : 1, 'U850' : 2}
nrow, ncol = 2, 2
fig_kw = {'figsize' : (figwidth, 0.64 * figwidth), 'sharex' : True,
          'sharey' : True}
gridspec_kw = {'left' : 0.07, 'right' : 0.99, 'bottom' : 0.07, 'top' : 0.94,
               'wspace' : 0.05}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for key in keys:
    grp.next()
    var = atm.dim_mean(data[key], 'lon', lon1, lon2)
    contourf_latday(var, clev=clevs[key], title=key.upper(), grp=grp,
                    dlist=dlist, ind_nm=ind_nm)
    if d0 is not None:
        plt.axvline(d0, color='k', linestyle='--', dashes=dashes)
    plt.xticks(xticks, xtick_labels)
    plt.xlim(-npre, npost)
labels = ['a', 'b', 'c', 'd']
x1, x2, y0 = -0.15, -0.05, 1.05
pos = [(x1, y0), (x2, y0), (x1, y0), (x2, y0)]
add_labels(grp, labels, pos, labelsize)

# Precip maps
axlims = (5, 35, 57, 103)
#days = [-30, -15, 0, 15, 30, 45, 60, 75, 90]
if ind_nm == 'onset':
    days = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
else:
    days = [-45, -40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10]
nrow, ncol = 4, 3
cmax, cint = 12, 1
fig_kw = {'figsize' : (figwidth, 0.8 * figwidth), 'sharex' : True,
          'sharey' : True}
gridspec_kw = {'left' : 0.07, 'right' : 0.99, 'wspace' : 0.15,
               'hspace' : 0.05}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
plot_maps(data[pcp_nm], days, grp, cmax=cmax, cint=cint, axlims=axlims)

# -- Add MFC box to one subplot
if ind_nm == 'onset':
    x = [lon1, lon1, lon2, lon2, lon1]
    y = [lat1, lat2, lat2, lat1, lat1]
    grp.subplot(0, 0)
    plt.plot(x, y, color='m', linewidth=2)

# Grid point indices
cmap = 'spectral'
stipple_clr = '0.3'
# Manual contour-label locations for clabel.
if ind_nm == 'onset':
    label_locs = [(75, 10), (71, 10), (88, 15), (67, 17), (77, 21), (75, 24),
                  (95, 12)]
else:
    label_locs = [(95, 24), (85, 22), (75, 25), (76, 21), (88, 15)]
clev_bar = 10
clev_std = np.arange(0, 21, 1)
clev_reg = np.arange(-1.2, 1.25, 0.2)
if pts_nm == 'CHP_PCP':
    xsample, ysample = 2, 2
else:
    xsample, ysample = 1, 1
nrow, ncol = 1, 2
fig_kw = {'figsize' : (figwidth, 0.4 * figwidth)}
gridspec_kw = {'left' : 0.1, 'wspace' : 0.3}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
grp.next()
pts_clim(index_pts, ind_nm, clev_bar=clev_bar, clev_std=clev_std, cmap=cmap,
         label_locs=label_locs)
grp.next()
plot_reg(pts_reg, ind_nm, clev=clev_reg, xsample=xsample, ysample=ysample,
         color=stipple_clr)
add_labels(grp, ['a', 'b'], (-0.15, 1.05), labelsize)

# ----------------------------------------------------------------------
# Extra plots - maps of U850, V850 on various days
nms = ['U850', 'V850']
#nms = ['U850', 'V850', 'U200', 'V200']
suptitle_on = False
axlims = (-30, 30, 40, 120)
xticks = [40, 60, 80, 100, 120]
xtick_labels = atm.latlon_labels(xticks, 'lon')
yticks = [-30, -15, 0, 15, 30]
ytick_labels = atm.latlon_labels(yticks, 'lat')
daypos = 0.05, 1.02
opts = {'U850' : {'cmax' : 10, 'cint' : 1, 'ctick_int' : 2},
        'V850' : {'cmax' : 10, 'cint' : 1, 'ctick_int' : 2},
        'U200' : {'cmax' : 40, 'cint' : 5, 'ctick_int' : 10},
        'V200' : {'cmax' : 10, 'cint' : 1, 'ctick_int' : 2}}
plotdays = [-15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40]
nrow, ncol = 4, 3
fig_kw = {'figsize' : (figwidth, 0.85 * figwidth), 'sharex' : True,
          'sharey' : True}
gridspec_kw = {'top' : 0.95, 'bottom' : 0.05, 'left' : 0.07, 'right' : 1.06,
               'wspace' : 0.3, 'hspace' : 0.25}
for nm in nms:
    var = data[nm]
    cmax = opts[nm]['cmax']
    cmin = -cmax
    cint = opts[nm]['cint']
    ctick_int = opts[nm]['ctick_int']
    cticks = np.arange(cmin, cmax + ctick_int/2.0, ctick_int)
    if suptitle_on:
        suptitle = nm
    else:
        suptitle = ''
    grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw,
                       suptitle=suptitle)
    plot_maps(var, plotdays, grp, cmin=cmin, cmax=cmax, cint=cint,
              axlims=axlims, cmap='RdBu_r', res='c', extend='both',
              cticks=cticks, daypos=daypos)
    plt.xticks(xticks, xtick_labels)
    plt.yticks(yticks, ytick_labels)

# ----------------------------------------------------------------------
# Table of summary stats on onset/retreat/length
nms = ['Mean', 'Std', 'Max', 'Min']
for nm in ['onset', 'retreat', 'length']:
    ind = index[nm].values
    series = pd.Series([ind.mean(), ind.std(), ind.max(), ind.min()],
                       index=nms)
    if nm == 'onset':
        stats = series.to_frame(name=nm.capitalize())
    else:
        stats[nm.capitalize()] = series
stats = stats.T


def daystr(day):
    # Format a day-of-year as e.g. "152 (Jun-1)".
    day = round(day)
    mm, dd = atm.jday_to_mmdd(day)
    mon = atm.month_str(mm)
    return '%.0f (%s-%.0f)' % (day, mon.capitalize(), dd)


# Day-valued stats get calendar-date formatting; Std and Length stay numeric.
for nm1 in stats.columns:
    for nm2 in stats.index:
        if nm1 != 'Std' and nm2 != 'Length':
            stats[nm1].loc[nm2] = daystr(stats[nm1].loc[nm2])
        else:
            stats[nm1].loc[nm2] = '%.0f' % stats[nm1].loc[nm2]
print(stats.to_latex())
# ----------------------------------------------------------------------
# Table of correlations between detrended indices
detrend = True
df_onset = onset_all
df_ind = index[['onset', 'retreat', 'length']].to_dataframe()
df_enso = enso
if detrend:
    df_onset = atm.detrend(df_onset)
    df_ind = atm.detrend(df_ind)
    df_enso = atm.detrend(df_enso)
corr_onset = df_onset.corr()
print(corr_onset.round(2))

df1 = pd.concat([df_ind, df_enso], axis=1)
df2 = df_ind
corr = {}
# One DataFrame per statistic: correlation r, slope m, p-value p.
for key in ['r', 'm', 'p']:
    corr[key] = pd.DataFrame(np.ones((len(df1.columns), len(df2.columns))),
                             index=df1.columns, columns=df2.columns)
for key1 in df1.columns:
    for key2 in df2.columns:
        reg = atm.Linreg(df1[key1], df2[key2])
        corr['r'].loc[key1][key2] = reg.r
        corr['m'].loc[key1][key2] = reg.slope
        corr['p'].loc[key1][key2] = reg.p

# Minimum absolute value of r to be significant
rcrit = (abs(corr['r'][corr['p'] <= 0.05])).min().min()


def format_r(r):
    # Float formatter for the LaTeX correlation table.
    rstr = '%.2f' % r
    # if abs(r) >= rcrit:
    #     rstr = 'textbf ' + rstr
    return rstr


print('\n\n*** Correlation coefficients ***')
print(corr['r'].to_latex(float_format=format_r))
print('Lowest significant value of abs(r) %.2f' % rcrit)
print('\n\n*** Regression coefficients ***')
print(corr['m'].round(2))

# ----------------------------------------------------------------------
# Duration of transition
d0, peak1, peak2 = 0, 20, 100
#d1_list = [7, 14, 21, 28, 5, 10, 15, 20, 25]
d1_list = [5, 10, 15, 20, 25]
ts_peak = atm.dim_mean(tseries, 'dayrel', peak1, peak2)
ts0 = atm.subset(tseries, {'dayrel' : (d0, d0)}, squeeze=True)
df = pd.DataFrame()
for d1 in d1_list:
    ts1 = atm.subset(tseries, {'dayrel' : (d1, d1)}, squeeze=True)
    # Fraction of the onset-to-peak change achieved by day d1.
    delta = (ts1 - ts0) / (ts_peak - ts0)
    delta = delta.round(2).to_array().to_series()
    df['D1=%d' % d1] = delta
print('Ratio of onset transition (day D1 minus day %d) to peak difference\n'
      '(peak days %d to %d minus day %d)' % (d0, peak1, peak2, d0))
print(df)

# ----------------------------------------------------------------------
# MFC budget timeseries in each year
nrow, ncol = 3, 4
fig_kw = {'figsize' : (11, 7)}
gridspec_kw = {'left' : 0.05, 'right' : 0.9, 'wspace' : 0.05,
               'hspace' : 0.1}
legend_kw = {'fontsize' : 9, 'loc' : 'upper left', 'handlelength' : 2.5,
             'frameon' : False, 'framealpha' : 0.0}
xlims = (0, 400)
y1_lims = (-5, 15)
y2_lims = (-400, 400)
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for year in years:
    grp.next()
    # Legend only on the first (top-left) panel.
    if grp.row == 0 and grp.col == 0:
        legend = True
    else:
        legend = False
    ax1, ax2 = plot_mfc_budget(mfc_budget, index, year, legend=legend,
                               legend_kw=legend_kw, netprecip=True)
    ax1.set_ylim(y1_lims)
    ax2.set_ylim(y2_lims)
    # Strip duplicate axis labels on interior panels.
    if grp.col > 0:
        ax1.set_ylabel('')
        ax1.set_yticklabels([])
    if grp.col < grp.ncol - 1:
        ax2.set_ylabel('')
        ax2.set_yticklabels([])
    if grp.row < grp.nrow - 1:
        for ax in [ax1, ax2]:
            ax.set_xlabel('')
            ax.set_xticklabels([])
    atm.text(year, (0.05, 0.9), fontsize=9)

# ----------------------------------------------------------------------
# Correlation between CHP_MFC and CHP_PCP
filestr = (atm.homedir() +
           'datastore/merra2/analysis/merra2_index_%s_' + yearstr)
files = {nm : filestr % nm for nm in ['CHP_MFC', 'CHP_PCP']}
index1 = pd.DataFrame()
for nm in files:
    with xray.open_dataset(files[nm]) as ds:
        for nm2 in ['onset', 'retreat']:
            index1[nm2 + '_' + nm] = ds[nm2].load().to_series()
    index1['length_' + nm] = index1['retreat_' + nm] - index1['onset_' + nm]
# NOTE(review): this rebinds the global `years` used earlier in the script.
years = index1.index
xticks = np.arange(1980, 2015, 10)
opts = {'CHP_MFC' : {}, 'CHP_PCP' : {'dashes' : [6, 2]}}
plt.figure()
for i, nm in enumerate(['onset', 'retreat', 'length']):
    plt.subplot(2, 2, i + 1)
    for nm2 in ['CHP_MFC', 'CHP_PCP']:
        plt.plot(years, index1[nm + '_' + nm2], 'k', label=nm2, **opts[nm2])
    plt.legend(fontsize=8, handlelength=3)
    plt.title(nm.capitalize())
    plt.xticks(xticks)
print(index1.corr())

# Scatter + regression for each index pair.
keys = ['onset', 'retreat', 'length']
plt.figure()
plt.subplots_adjust(wspace=0.25, hspace=0.3, left=0.1, right=0.97, top=0.95)
for i, key in enumerate(keys):
    plt.subplot(2, 2, i + 1)
    key1 = key + '_CHP_MFC'
    key2 = key + '_CHP_PCP'
    reg = atm.Linreg(index1[key1], index1[key2])
    reg.plot(scatter_clr='k', scatter_sym='+', line_clr='k')
    plt.xlabel(key1)
    plt.ylabel(key2)

# ----------------------------------------------------------------------
# Extreme onset years
index1 = {}
index1['MOK'] = indices.onset_MOK(indfiles['MOK'])
index1['MOK_SUB'] = index1['MOK'].loc[years]
with xray.open_dataset(indfiles['CHP_MFC']) as ds:
    index1[onset_nm] = ds['onset'].load().to_series()


def extreme_years(ind, nstd=1):
    """Return years more than nstd away from the mean"""
    early = ind[ind - ind.mean() < -nstd * ind.std()]
    late = ind[ind - ind.mean() > nstd * ind.std()]
    return early, late


early, late = {}, {}
for nm in index1:
    early[nm], late[nm] = extreme_years(index1[nm])

# ----------------------------------------------------------------------
# Fourier harmonics
kmax_list = np.arange(2, 21, 1)
nms = [pcp_nm, 'U850', 'V850']
days = np.arange(-138, 227)
ts1 = ts.sel(dayrel=days)
ts_sm = {kmax : xray.Dataset() for kmax in kmax_list}
Rsq = {kmax : {} for kmax in kmax_list}
# Truncated Fourier fit of each timeseries at each kmax; Rsq is the
# fraction of variance captured.
for kmax in kmax_list:
    for nm in nms:
        vals, Rsq[kmax][nm] = atm.fourier_smooth(ts1[nm], kmax)
        print kmax, nm, Rsq[kmax][nm]
        ts_sm[kmax][nm] = xray.DataArray(vals, coords=ts1[nm].coords)


# Find days where smoothed values are closest to actual timeseries
# values at days 0, 15
def closest_day(nm, ts1, ts_sm, d0, buf=20):
    # Search within +/- buf days of d0 for the smoothed value closest to
    # the unsmoothed value at d0.
    val0 = ts1[nm].sel(dayrel=d0).values
    sm = atm.subset(ts_sm[nm], {'dayrel' : (d0 - buf, d0 + buf)})
    i0 = int(np.argmin(abs(sm - val0)))
    day0 = int(sm['dayrel'][i0])
    return day0


# Annual + semi-annual harmonics
xticks = np.arange(-120, 230, 30)
sz = 10
dlist = [0, 15]
for kmax in [2, 4, 6, 7, 8, 9, 10]:
    dclose = {nm : [] for nm in nms}
    for nm in nms:
        for d0 in dlist:
            day0 = closest_day(nm, ts1, ts_sm[kmax], d0)
            dclose[nm] = dclose[nm] + [day0]
    plt.figure()
    plt.suptitle('Fourier fit kmax = %d. Delta = day %d to %d'
                 % (kmax, dlist[0], dlist[1]))
    for i, nm in enumerate(nms):
        dlist2 = dclose[nm]
        plt.subplot(2, 2, i + 1)
        plt.plot(days, ts1[nm], 'b')
        plt.plot(dlist, ts1[nm].sel(dayrel=dlist), 'b.', markersize=sz)
        plt.plot(days, ts_sm[kmax][nm], 'r')
        plt.plot(dlist2, ts_sm[kmax][nm].sel(dayrel=dlist2), 'r.',
                 markersize=sz)
        plt.title(nm)
        plt.xticks(xticks)
        plt.grid()
        s = ('Rsq = %.2f\nNum days = %d'
             % (Rsq[kmax][nm], dlist2[1] - dlist2[0]))
        atm.text(s, (0.05, 0.85))

# See which kmax is needed to minimize Rsq between tseries and Fourier
# fit over days 0-15
d1, d2 = 0, 15
ts_sub = atm.subset(ts1, {'dayrel' : (d1, d2)})
ts_sm_sub = {}
for kmax in kmax_list:
    ts_sm_sub[kmax] = atm.subset(ts_sm[kmax], {'dayrel' : (d1, d2)})


def get_rss_sub(kmax, nm, ts_sub, ts_sm_sub):
    # Residual sum over the sub-period between fit and data.
    # NOTE(review): sqrt of a squared difference is just abs(); presumably
    # sum-of-squares was intended — confirm before changing.
    var0 = ts_sub[nm].values
    var1 = ts_sm_sub[kmax][nm].values
    return np.sum(np.sqrt((var1 - var0)**2))


rss_sub = {}
for nm in nms:
    rss_sub[nm] = [get_rss_sub(kmax, nm, ts_sub, ts_sm_sub)
                   for kmax in kmax_list]

plt.figure()
plt.subplots_adjust(hspace=0.3)
plt.suptitle('RSS over days %d-%d for truncated Fourier fits' % (d1, d2))
for i, nm in enumerate(nms):
    plt.subplot(2, 2, i + 1)
    plt.plot(kmax_list, rss_sub[nm], 'k')
    plt.xlabel('kmax')
    plt.ylabel('RSS')
    plt.title(nm)
    plt.grid()

# ----------------------------------------------------------------------
# Calculate seasonal precip - totals and average daily rate
pcpfile = (atm.homedir() + 'datastore/merra2/analysis/' +
           'merra2_gpcp_mfc_box_daily.nc')
with xray.open_dataset(pcpfile) as pcpts:
    pcpts.load()
ssn = utils.get_strength_indices(years, pcpts, index['onset'],
                                 index['retreat'])


def detrend(df):
    # Remove each column's linear trend (fit against the index values).
    # NOTE(review): shadows the earlier boolean flag named `detrend`.
    df_detrend = df.copy()
    x = df.index.values
    for col in df.columns:
        y = df[col].values
        reg = atm.Linreg(x, y)
        df_detrend[col] = df[col] - reg.predict(x)
    return df_detrend


# Cumulative and average rainfall over monsoon season
i_detrend = True
df1 = ssn[['onset', 'retreat', 'length']]
for nm in df1.columns:
    df1 = df1.rename(columns={nm : nm.upper()})
if i_detrend:
    df1 = detrend(df1)
nms = ['MFC', 'PCP', 'GPCP', 'EVAP']
figsize = (7, 7)
fmts = {'line_width': 1, 'annotation_pos': (0.05, 0.7), 'pmax_bold': 0.05,
        'scatter_size': 3, 'scatter_clr': 'k', 'scatter_sym': '+',
        'line_clr': 'k'}
subplot_fmts = {'right': 0.98, 'bottom': 0.05, 'top': 0.95, 'wspace': 0.1,
                'hspace': 0.15, 'left': 0.12}
# Scatter-matrix of index stats vs. seasonal totals/averages.
for nm1 in ['_LRS']:
    for key in ['_TOT', '_AVG']:
        keys = [nm + nm1 + key for nm in nms]
        df2 = ssn[keys]
        newcols = {nm : nm.replace('_LRS', '') for nm in df2.columns}
        df2 = df2.rename(columns=newcols)
        if i_detrend:
            df2 = detrend(df2)
        atm.scatter_matrix_pairs(df1, df2, figsize=figsize, fmts=fmts,
                                 subplot_fmts=subplot_fmts)
        for i in range(9):
            plt.subplot(4, 3, i + 1)
            ax = plt.gca()
            ax.set_xticklabels([])

# # Daily timeseries of GPCP years
# yrs_gpcp = range(1997, 2015)
# days = pcpts['day']
# nroll = 5
# fig_kw = {'figsize' : (8, 11), 'sharex' : True, 'sharey' : True}
# gs_kw = {'left' : 0.05, 'right' : 0.95, 'hspace' : 0.05, 'wspace' : 0.05}
# nrow, ncol = 3, 3
# grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, gridspec_kw=gs_kw)
# for yr in yrs_gpcp:
#     grp.next()
#     ts = atm.rolling_mean(pcpts['GPCP'].sel(year=yr), nroll)
#     plt.plot(days, ts, 'k')
#     plt.axvline(ssn['onset'].loc[yr])
#     plt.axvline(ssn['retreat'].loc[yr])
#     plt.title(yr)
#     plt.xlim(0, 366)
# ts = pcpts['GPCP'].sel(year=yrs_gpcp)
# d_onset = ssn['onset'].loc[yrs_gpcp].values
# ssn_length = ssn['length'].loc[yrs_gpcp].values
# tsrel = utils.daily_rel2onset(ts, d_onset, npre=120, npost=165)
# ts_acc = np.cumsum(tsrel.sel(dayrel=range(0,166)), axis=1)

# ----------------------------------------------------------------------
# Masking on lat-lon maps
plt.figure(figsize=(5, 4))
m = atm.init_latlon(0, 35, 58, 102, resolution='l', coastlines=False,
                    fillcontinents=True)
m.drawcoastlines(linewidth=0.5, color='0.5')
plot_kerala(linewidth=1)
# MFC averaging box.
x = [lon1, lon1, lon2, lon2, lon1]
y = [lat1, lat2, lat2, lat1, lat1]
plt.plot(x, y, color='m', linewidth=2)
_, cs = atm.contour_latlon(pcp_frac, m=m, clev=np.arange(0, 1, 0.1),
                           linewidths=1.5, axlims=(0, 35, 58, 102),
                           colors='k')
label_locs = [(80, 5), (75, 6), (72, 8), (72, 10), (70, 15), (70, 18),
              (72, 25), (84, 5), (60, 5), (65, 3), (95, 18)]
cs_opts = {'fmt' : '%.1f', 'fontsize' : 9, 'manual' : label_locs,
           'inline_spacing' : 2}
plt.clabel(cs, **cs_opts)

# ------------------------------------------------------------------------
# Figure for AGU presentation
plt.figure(figsize=(8, 6))
m = atm.init_latlon(0, 35, 58, 102, resolution='l')
atm.contourf_latlon(pcp_frac, clev=np.arange(0, 1.1, 0.1), m=m,
                    axlims=(0, 35, 58, 102), cmap='PuBuGn')
# Highlight the fracmin = 0.5 masking threshold.
_, cs = atm.contour_latlon(pcp_frac, m=m, clev=[0.5], linewidths=2,
                           colors='k', axlims=(0, 35, 58, 102))
label_locs = [(72, 8)]
cs_opts = {'fmt' : '%.1f', 'fontsize' : fontsize, 'manual' : label_locs,
           'inline_spacing' : 2}
plt.clabel(cs, **cs_opts)
plot_kerala(linewidth=1)
x = [lon1, lon1, lon2, lon2, lon1]
y = [lat1, lat2, lat2, lat1, lat1]
plt.plot(x, y, color='m', linewidth=2)
"""Mock Zookeeper TestCase.

Usage::

    class MyTest(MockZookeeperTestCase):

        @mock.patch('zookeeper.get', mock.Mock())
        @mock.patch('zookeeper.get_children', mock.Mock())
        def test_some_zk_ops(self):
            zkdata = {
                'foo': {
                    'bar': '123'
                }
            }
            self.make_mock_zk(zkdata)
            # call funcs that will call zookeeper.get / get_children
            zkdata['foo']['bla'] = '456'
            # The watcher will be invoked, and get_children will return
            # ['bla', 'bar']
            self.notify(zookeeper.CHILD_EVENT, '/foo')
"""

import copy
import Queue  # Python 2 stdlib module; named 'queue' on Python 3.
import threading
import time
import unittest

from collections import namedtuple

import kazoo
from kazoo.protocol import states

import yaml


class MockZookeeperMetadata(namedtuple('MockZookeeperMetadata',
                                       ['czxid', 'ctime',
                                        'mzxid', 'mtime',
                                        'ephemeralOwner'])):
    """Subset of the Zookeeper metadata we are using."""
    # namedtuple classes dont have an __init__, that's ok
    # Use ephemeralOwner as that is the name in the real Zookeeper metadata
    # object
    # pylint: disable=W0232,C0103

    # Base transaction id: seconds-since-epoch at import time; from_dict adds
    # the current time so generated zxids increase monotonically.
    _BASE_ZXID = int(time.time())

    @property
    def creation_transaction_id(self):
        """creation_transaction_id getter."""
        return self.czxid

    @property
    def last_modified_transaction_id(self):
        """last_modified_transaction_id getter."""
        return self.mzxid

    @property
    def created(self):
        """created getter."""
        # NOTE(review): stored timestamps use a x100 factor (centiseconds),
        # not x1000, despite the 'timestamp_ms' name in from_dict.  The two
        # directions are consistent with each other (x100 in, /100.0 out),
        # so round-tripping works — confirm the unit before reusing values
        # outside this mock.
        return self.ctime / 100.0

    @property
    def last_modified(self):
        """last_modified getter."""
        return self.mtime / 100.0

    @classmethod
    def from_dict(cls, value_dict):
        """Create a Metadata instance from dict values.

        Raw keys ('ctime', 'czxid', 'mtime', 'mzxid', 'ephemeralOwner') are
        read first with generated defaults; the friendly aliases
        ('created', 'creation_transaction_id', 'last_modified',
        'last_modified_transaction_id') then override them when present.
        """
        curr_time = time.time()
        zxid = int(cls._BASE_ZXID + curr_time)
        # See NOTE(review) in `created`: factor is 100, not 1000.
        timestamp_ms = int(curr_time * 100)

        ctime = value_dict.get('ctime', timestamp_ms)
        czxid = value_dict.get('czxid', zxid)
        mtime = value_dict.get('mtime', timestamp_ms)
        mzxid = value_dict.get('mzxid', zxid)
        ephemeralOwner = value_dict.get('ephemeralOwner', 0)

        if 'creation_transaction_id' in value_dict:
            czxid = value_dict['creation_transaction_id']
        if 'created' in value_dict:
            ctime = int(value_dict['created'] * 100)
        if 'last_modified_transaction_id' in value_dict:
            mzxid = value_dict['last_modified_transaction_id']
        if 'last_modified' in value_dict:
            mtime = int(value_dict['last_modified'] * 100)

        return cls(ctime=ctime, czxid=czxid, mtime=mtime, mzxid=mzxid,
                   ephemeralOwner=ephemeralOwner)


class MockZookeeperTestCase(unittest.TestCase):
    """Helper class to mock Zk get[children] events."""

    # Disable too many branches warning.
    #
    # pylint: disable=R0912

    def setUp(self):
        super(MockZookeeperTestCase, self).setUp()
        # Queue used to feed the background watch-dispatch thread; created
        # lazily in make_mock_zk(events=True).
        self.watch_events = None

    def tearDown(self):
        """Send terminate signal to mock Zk events thread."""
        if self.watch_events:
            self.watch_events.put('exit')

    def make_mock_zk(self, zk_content, events=False):
        """Constructs zk mock implementation of get based on dictionary.

        Treats dictionary as tree structure, mapping it into mock Zk
        instance.  Each mock handler walks `zk_content` component by
        component, so later mutations of the dict are visible to subsequent
        calls.  When `events` is True, a daemon-less thread replays watch
        callbacks queued via notify().
        """
        # Maps (path, event_type) -> last watch callback registered for it.
        watches = {}

        def mock_exists(zkpath, watch=None):
            """Mocks node exists."""
            del watch  # TODO: support watch.
            path = zkpath.split('/')
            path.pop(0)
            content = zk_content
            while path:
                path_component = path.pop(0)
                if path_component not in content:
                    return False
                content = content[path_component]
            return True

        def mock_delete(zkpath, recursive=False):
            """Mocks node deletion."""
            del recursive
            path = zkpath.split('/')
            path.pop(0)
            last = path.pop(-1)
            content = zk_content
            while path:
                path_component = path.pop(0)
                if path_component not in content:
                    # NOTE(review): real kazoo defines NoNodeError in
                    # kazoo.exceptions; confirm kazoo.client re-exports it,
                    # otherwise this raise is itself an AttributeError.
                    raise kazoo.client.NoNodeError()
                content = content[path_component]
            # verified that parent exists. now delete the node.
            if last not in content:
                raise kazoo.client.NoNodeError()
            else:
                del content[last]

        def mock_get(zkpath, watch=None):
            """Traverse data recursively, return the node content."""
            path = zkpath.split('/')
            path.pop(0)
            content = zk_content
            while path:
                path_component = path.pop(0)
                if path_component not in content:
                    raise kazoo.client.NoNodeError()
                content = content[path_component]

            # Content is a copy of the zk data (shallow: nested values are
            # still shared with zk_content).
            content = copy.copy(content)

            # Setup a fake metadata values
            meta_dict = {}
            if isinstance(content, dict):
                # '.metadata' and '.data' are control keys, popped from the
                # copy so they don't appear in the serialized payload.
                meta_values = content.pop('.metadata', {})
                meta_dict.update(meta_values)
                data = content.pop('.data', yaml.dump(content))
            else:
                data = content

            # Generate the final readonly metadata
            metadata = MockZookeeperMetadata.from_dict(meta_dict)

            # Setup the watch (only the most recent watcher per path is kept).
            watches[(zkpath, states.EventType.CHANGED)] = watch
            return (data, metadata)

        def mock_get_children(zkpath, watch=None):
            """Traverse data recursively, returns element keys."""
            path = zkpath.split('/')
            path.pop(0)
            content = zk_content
            while path:
                path_component = path.pop(0)
                content = content[path_component]

            watches[(zkpath, states.EventType.CHILD)] = watch
            if isinstance(content, dict):
                return sorted(content.keys())
            else:
                return []

        if events:
            self.watch_events = Queue.Queue()

            def run_events():
                """Invoke watcher callback for each event."""
                while True:
                    event = self.watch_events.get()
                    if event == 'exit':
                        break
                    delay, event_type, state, path = event
                    if delay:
                        time.sleep(delay)
                    watch = watches.get((path, event_type), None)
                    if watch:
                        watch(states.WatchedEvent(type=event_type,
                                                  state=state, path=path))

            threading.Thread(target=run_events).start()

        # Attach side effects to the (presumably already mock.patch'ed)
        # KazooClient methods; unpatched methods are skipped silently.
        side_effects = [
            (kazoo.client.KazooClient.exists, mock_exists),
            (kazoo.client.KazooClient.get, mock_get),
            (kazoo.client.KazooClient.delete, mock_delete),
            (kazoo.client.KazooClient.get_children, mock_get_children)]

        for mthd, side_effect in side_effects:
            try:
                mthd.side_effect = side_effect
            except AttributeError:
                # not mocked.
                pass

    def notify(self, event_type, path, state=states.KazooState.CONNECTED,
               delay=None):
        """Notify watchers of the event."""
        self.watch_events.put((delay, event_type, state, path))
# Test suite for django-cache-machine: caching managers/querysets,
# invalidation, jinja2 cache tags, and multi-database behavior.
from __future__ import unicode_literals

import logging
import pickle
import sys

if sys.version_info < (2, 7):
    import unittest2 as unittest
else:
    import unittest

import django
from django.conf import settings
from django.test import TestCase, TransactionTestCase
from django.utils import translation, encoding

if sys.version_info >= (3, ):
    from unittest import mock
else:
    import mock

import jinja2

from caching import base, invalidation, config, compat

from .testapp.models import Addon, User

cache = invalidation.cache
log = logging.getLogger(__name__)

if django.get_version().startswith('1.3'):
    # Django 1.3 has no `self.settings()` context manager; provide a
    # minimal backport that saves/restores the patched settings.
    class settings_patch(object):
        def __init__(self, **kwargs):
            self.options = kwargs

        def __enter__(self):
            self._old_settings = dict((k, getattr(settings, k, None))
                                      for k in self.options)
            for k, v in list(self.options.items()):
                setattr(settings, k, v)

        def __exit__(self, *args):
            for k in self.options:
                setattr(settings, k, self._old_settings[k])

    TestCase.settings = settings_patch


class CachingTestCase(TestCase):
    """Single-database tests for cache-machine behavior."""
    fixtures = ['tests/testapp/fixtures/testapp/test_cache.json']
    extra_apps = ['tests.testapp']

    def setUp(self):
        cache.clear()
        # Save the timeout so tests that mutate config.TIMEOUT can restore it.
        self.old_timeout = config.TIMEOUT
        if getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
            invalidation.redis.flushall()

    def tearDown(self):
        config.TIMEOUT = self.old_timeout

    def test_flush_key(self):
        """flush_key should work for objects or strings."""
        a = Addon.objects.get(id=1)
        self.assertEqual(base.flush_key(a.get_cache_key(incl_db=False)),
                         base.flush_key(a))

    def test_cache_key(self):
        a = Addon.objects.get(id=1)
        self.assertEqual(a.cache_key, 'o:testapp.addon:1:default')

        keys = set((a.cache_key, a.author1.cache_key, a.author2.cache_key))
        self.assertEqual(set(a._cache_keys()), keys)

    def test_cache(self):
        """Basic cache test: second get comes from cache."""
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        self.assertIs(Addon.objects.get(id=1).from_cache, True)

    def test_filter_cache(self):
        self.assertIs(Addon.objects.filter(id=1)[0].from_cache, False)
        self.assertIs(Addon.objects.filter(id=1)[0].from_cache, True)

    def test_slice_cache(self):
        self.assertIs(Addon.objects.filter(id=1)[:1][0].from_cache, False)
        self.assertIs(Addon.objects.filter(id=1)[:1][0].from_cache, True)

    def test_invalidation(self):
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, False)

        self.assertIs(Addon.objects.get(id=1).from_cache, True)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, True)

        # Saving should drop both the get() and the all() cache entries.
        a.save()
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, False)

        self.assertIs(Addon.objects.get(id=1).from_cache, True)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, True)

    def test_invalidation_cross_locale(self):
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, False)

        self.assertIs(Addon.objects.get(id=1).from_cache, True)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, True)

        # Do query & invalidation in a different locale.
        old_locale = translation.get_language()
        translation.activate('fr')
        self.assertIs(Addon.objects.get(id=1).from_cache, True)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, True)

        # Saving in 'fr' must invalidate the entries cached in the original
        # locale too.
        a.save()
        translation.activate(old_locale)
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        a = [x for x in Addon.objects.all() if x.id == 1][0]
        self.assertIs(a.from_cache, False)

    def test_fk_invalidation(self):
        """When an object is invalidated, its foreign keys get invalidated."""
        a = Addon.objects.get(id=1)
        self.assertIs(User.objects.get(name='clouseroo').from_cache, False)
        a.save()

        self.assertIs(User.objects.get(name='clouseroo').from_cache, False)

    def test_fk_parent_invalidation(self):
        """When a foreign key changes, any parent objects get invalidated."""
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        a = Addon.objects.get(id=1)
        self.assertIs(a.from_cache, True)

        u = User.objects.get(id=a.author1.id)
        self.assertIs(u.from_cache, True)
        u.name = 'fffuuu'
        u.save()

        self.assertIs(User.objects.get(id=a.author1.id).from_cache, False)
        a = Addon.objects.get(id=1)
        self.assertIs(a.from_cache, False)
        self.assertEqual(a.author1.name, 'fffuuu')

    def test_raw_cache(self):
        sql = 'SELECT * FROM %s WHERE id = 1' % Addon._meta.db_table
        raw = list(Addon.objects.raw(sql))
        self.assertEqual(len(raw), 1)
        raw_addon = raw[0]
        a = Addon.objects.get(id=1)
        for field in Addon._meta.fields:
            self.assertEqual(getattr(a, field.name),
                             getattr(raw_addon, field.name))
        self.assertIs(raw_addon.from_cache, False)

        cached = list(Addon.objects.raw(sql))
        self.assertEqual(len(cached), 1)
        cached_addon = cached[0]
        a = Addon.objects.get(id=1)
        for field in Addon._meta.fields:
            self.assertEqual(getattr(a, field.name),
                             getattr(cached_addon, field.name))
        self.assertIs(cached_addon.from_cache, True)

    def test_raw_cache_params(self):
        """Make sure the query params are included in the cache key."""
        sql = 'SELECT * from %s WHERE id = %%s' % Addon._meta.db_table
        raw = list(Addon.objects.raw(sql, [1]))[0]
        self.assertEqual(raw.id, 1)

        raw2 = list(Addon.objects.raw(sql, [2]))[0]
        self.assertEqual(raw2.id, 2)

    @mock.patch('caching.base.CacheMachine')
    def test_raw_nocache(self, CacheMachine):
        # NOTE(review): other tests set config.TIMEOUT; this one sets
        # base.TIMEOUT — confirm which attribute caching.base actually
        # reads, this may be a stale assignment.
        base.TIMEOUT = 60
        sql = 'SELECT * FROM %s WHERE id = 1' % Addon._meta.db_table
        raw = list(Addon.objects.raw(sql, timeout=config.NO_CACHE))
        self.assertEqual(len(raw), 1)
        raw_addon = raw[0]
        self.assertFalse(hasattr(raw_addon, 'from_cache'))
        self.assertFalse(CacheMachine.called)

    @mock.patch('caching.base.cache')
    def test_count_cache(self, cache_mock):
        config.TIMEOUT = 60
        cache_mock.scheme = 'memcached'
        cache_mock.get.return_value = None

        q = Addon.objects.all()
        q.count()

        self.assertTrue(cache_mock.set.call_args, 'set not called')
        args, kwargs = cache_mock.set.call_args
        key, value, timeout = args
        self.assertEqual(value, 2)
        self.assertEqual(timeout, 60)

    @mock.patch('caching.base.cached')
    def test_count_none_timeout(self, cached_mock):
        config.TIMEOUT = config.NO_CACHE
        Addon.objects.count()
        self.assertEqual(cached_mock.call_count, 0)

    @mock.patch('caching.base.cached')
    def test_count_nocache(self, cached_mock):
        # NOTE(review): see test_raw_nocache about base.TIMEOUT.
        base.TIMEOUT = 60
        Addon.objects.no_cache().count()
        self.assertEqual(cached_mock.call_count, 0)

    def test_queryset_flush_list(self):
        """Check that we're making a flush list for the queryset."""
        q = Addon.objects.all()
        objects = list(q)  # Evaluate the queryset so it gets cached.
        base.invalidator.add_to_flush_list({q.flush_key(): ['remove-me']})
        cache.set('remove-me', 15)

        Addon.objects.invalidate(objects[0])
        self.assertIs(cache.get(q.flush_key()), None)
        self.assertIs(cache.get('remove-me'), None)

    def test_jinja_cache_tag_queryset(self):
        env = jinja2.Environment(extensions=['caching.ext.cache'])

        def check(q, expected):
            t = env.from_string(
                "{% cache q %}{% for x in q %}{{ x.id }}:{{ x.val }};"
                "{% endfor %}{% endcache %}")
            self.assertEqual(t.render(q=q), expected)

        # Get the template in cache, then hijack iterator to make sure we're
        # hitting the cached fragment.
        check(Addon.objects.all(), '1:42;2:42;')
        qs = Addon.objects.all()
        qs.iterator = mock.Mock()
        check(qs, '1:42;2:42;')
        self.assertFalse(qs.iterator.called)

        # Make changes, make sure we dropped the cached fragment.
        a = Addon.objects.get(id=1)
        a.val = 17
        a.save()

        q = Addon.objects.all()
        cache.get(q.flush_key())
        self.assertIs(cache.get(q.flush_key()), None)

        check(Addon.objects.all(), '1:17;2:42;')
        qs = Addon.objects.all()
        qs.iterator = mock.Mock()
        check(qs, '1:17;2:42;')

    def test_jinja_cache_tag_object(self):
        env = jinja2.Environment(extensions=['caching.ext.cache'])
        addon = Addon.objects.get(id=1)

        def check(obj, expected):
            t = env.from_string(
                '{% cache obj, 30 %}{{ obj.id }}:{{ obj.val }}{% endcache %}')
            self.assertEqual(t.render(obj=obj), expected)

        check(addon, '1:42')
        addon.val = 17
        addon.save()
        check(addon, '1:17')

    def test_jinja_multiple_tags(self):
        env = jinja2.Environment(extensions=['caching.ext.cache'])
        addon = Addon.objects.get(id=1)
        template = ("{% cache obj %}{{ obj.id }}{% endcache %}\n"
                    "{% cache obj %}{{ obj.val }}{% endcache %}")

        def check(obj, expected):
            t = env.from_string(template)
            self.assertEqual(t.render(obj=obj), expected)

        check(addon, '1\n42')
        addon.val = 17
        addon.save()
        check(addon, '1\n17')

    def test_jinja_cache_tag_extra(self):
        env = jinja2.Environment(extensions=['caching.ext.cache'])
        addon = Addon.objects.get(id=1)

        template = ('{% cache obj, extra=[obj.key] %}{{ obj.id }}:'
                    '{{ obj.key }}{% endcache %}')

        def check(obj, expected):
            t = env.from_string(template)
            self.assertEqual(t.render(obj=obj), expected)

        addon.key = 1
        check(addon, '1:1')
        addon.key = 2
        check(addon, '1:2')

        template = ('{% cache obj, 10, extra=[obj.key] %}{{ obj.id }}:'
                    '{{ obj.key }}{% endcache %}')
        addon.key = 1
        check(addon, '1:1')
        addon.key = 2
        check(addon, '1:2')

    def test_cached_with(self):
        counter = mock.Mock()

        def expensive():
            counter()
            return counter.call_count

        a = Addon.objects.get(id=1)
        f = lambda: base.cached_with(a, expensive, 'key')

        # Only gets called once.
        self.assertEqual(f(), 1)
        self.assertEqual(f(), 1)

        # Switching locales does not reuse the cache.
        old_locale = translation.get_language()
        translation.activate('fr')
        self.assertEqual(f(), 2)

        # Called again after flush.
        a.save()
        self.assertEqual(f(), 3)

        translation.activate(old_locale)
        self.assertEqual(f(), 4)

        counter.reset_mock()
        q = Addon.objects.filter(id=1)
        f = lambda: base.cached_with(q, expensive, 'key')

        # Only gets called once.
        self.assertEqual(f(), 1)
        self.assertEqual(f(), 1)

        # Called again after flush.
        list(q)[0].save()
        self.assertEqual(f(), 2)
        self.assertEqual(f(), 2)

    def test_cached_with_bad_object(self):
        """cached_with shouldn't fail if the object is missing a cache key."""
        counter = mock.Mock()

        def f():
            counter()
            return counter.call_count

        self.assertEqual(base.cached_with([], f, 'key'), 1)

    def test_cached_with_unicode(self):
        u = encoding.smart_bytes('\\u05ea\\u05d9\\u05d0\\u05d5\\u05e8 '
                                 '\\u05d0\\u05d5\\u05e1\\u05e3')
        obj = mock.Mock()
        obj.query_key.return_value = 'xxx'
        obj.flush_key.return_value = 'key'
        f = lambda: 1
        self.assertEqual(base.cached_with(obj, f, 'adf:%s' % u), 1)

    def test_cached_method(self):
        a = Addon.objects.get(id=1)
        self.assertEqual(a.calls(), (1, 1))
        self.assertEqual(a.calls(), (1, 1))

        a.save()
        # Still returns 1 since the object has it's own local cache.
        self.assertEqual(a.calls(), (1, 1))
        self.assertEqual(a.calls(3), (3, 2))

        a = Addon.objects.get(id=1)
        self.assertEqual(a.calls(), (1, 3))
        self.assertEqual(a.calls(4), (4, 4))
        self.assertEqual(a.calls(3), (3, 2))

        b = Addon.objects.create(id=5, val=32, author1_id=1, author2_id=2)
        self.assertEqual(b.calls(), (1, 5))

        # Make sure we're updating the wrapper's docstring.
        self.assertEqual(b.calls.__doc__, Addon.calls.__doc__)

    @mock.patch('caching.base.CacheMachine')
    def test_no_cache_from_manager(self, CacheMachine):
        a = Addon.objects.no_cache().get(id=1)
        self.assertEqual(a.id, 1)
        self.assertFalse(hasattr(a, 'from_cache'))
        self.assertFalse(CacheMachine.called)

    @mock.patch('caching.base.CacheMachine')
    def test_no_cache_from_queryset(self, CacheMachine):
        a = Addon.objects.all().no_cache().get(id=1)
        self.assertEqual(a.id, 1)
        self.assertFalse(hasattr(a, 'from_cache'))
        self.assertFalse(CacheMachine.called)

    def test_timeout_from_manager(self):
        q = Addon.objects.cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)
        a = q.get()
        self.assertTrue(hasattr(a, 'from_cache'))
        self.assertEqual(a.id, 1)

    def test_timeout_from_queryset(self):
        q = Addon.objects.all().cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)
        a = q.get()
        self.assertTrue(hasattr(a, 'from_cache'))
        self.assertEqual(a.id, 1)

    @unittest.skipUnless(
        any(['memcache' in c['BACKEND'] for c in settings.CACHES.values()]),
        'This test requires that Django use memcache')
    @mock.patch('memcache.Client.set')
    def test_infinite_timeout(self, mock_set):
        """
        Test that memcached infinite timeouts work with all Django versions.
        """
        cache.set('foo', 'bar', timeout=compat.FOREVER)
        # for memcached, 0 timeout means store forever
        mock_set.assert_called_with(':1:foo', 'bar', 0)

    def test_cache_and_no_cache(self):
        """Whatever happens last sticks."""
        q = Addon.objects.no_cache().cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)

        no_cache = q.no_cache()

        # The querysets don't share anything.
        self.assertEqual(q.timeout, 12)
        self.assertNotEqual(no_cache.timeout, 12)

        self.assertFalse(hasattr(no_cache.get(), 'from_cache'))

        self.assertEqual(q.get().id, 1)
        self.assertTrue(hasattr(q.get(), 'from_cache'))

    @mock.patch('caching.base.cache')
    def test_cache_machine_timeout(self, cache):
        cache.scheme = 'memcached'
        cache.get.return_value = None
        cache.get_many.return_value = {}

        a = Addon.objects.cache(12).get(id=1)
        self.assertEqual(a.id, 1)

        self.assertTrue(cache.add.called)
        args, kwargs = cache.add.call_args
        self.assertEqual(kwargs, {'timeout': 12})

    def test_unicode_key(self):
        list(User.objects.filter(name='\\xfcmla\\xfct'))

    def test_empty_in(self):
        # Raised an exception before fixing #2.
        self.assertEqual([], list(User.objects.filter(pk__in=[])))

    def test_empty_in_count(self):
        # Regression test for #14.
        self.assertEqual(0, User.objects.filter(pk__in=[]).count())

    def test_empty_queryset(self):
        for k in (1, 1):
            with self.assertNumQueries(k):
                self.assertEqual(len(Addon.objects.filter(pk=42)), 0)

    @mock.patch('caching.config.CACHE_EMPTY_QUERYSETS', True)
    def test_cache_empty_queryset(self):
        for k in (1, 0):
            with self.assertNumQueries(k):
                self.assertEqual(len(Addon.objects.filter(pk=42)), 0)

    def test_invalidate_empty_queryset(self):
        u = User.objects.create()
        self.assertEqual(list(u.addon_set.all()), [])
        Addon.objects.create(val=42, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42])

    def test_invalidate_new_related_object(self):
        u = User.objects.create()
        Addon.objects.create(val=42, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42])
        Addon.objects.create(val=17, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42, 17])

    def test_make_key_unicode(self):
        translation.activate('en-US')
        f = 'fragment\xe9\x9b\xbb\xe8\x85\xa6\xe7\x8e'
        # This would crash with a unicode error.
        base.make_key(f, with_locale=True)
        translation.deactivate()

    @mock.patch('caching.invalidation.cache.get_many')
    def test_get_flush_lists_none(self, cache_mock):
        if not getattr(settings, 'CACHE_MACHINE_USE_REDIS', False):
            cache_mock.return_value.values.return_value = [None, [1]]
        self.assertEqual(base.invalidator.get_flush_lists(None), set([1]))

    def test_parse_backend_uri(self):
        """ Test that parse_backend_uri works as intended. Regression for #92. """
        from caching.invalidation import parse_backend_uri
        uri = 'redis://127.0.0.1:6379?socket_timeout=5'
        host, params = parse_backend_uri(uri)
        self.assertEqual(host, '127.0.0.1:6379')
        self.assertEqual(params, {'socket_timeout': '5'})

    @mock.patch('caching.config.CACHE_INVALIDATE_ON_CREATE', 'whole-model')
    def test_invalidate_on_create_enabled(self):
        """ Test that creating new objects invalidates cached queries for that model. """
        self.assertEqual([a.name for a in User.objects.all()],
                         ['fliggy', 'clouseroo'])
        User.objects.create(name='spam')
        users = User.objects.all()
        # our new user should show up and the query should not have come
        # from the cache
        self.assertEqual([a.name for a in users],
                         ['fliggy', 'clouseroo', 'spam'])
        self.assertFalse(any([u.from_cache for u in users]))

        # if we run it again, it should be cached this time
        users = User.objects.all()
        self.assertEqual([a.name for a in users],
                         ['fliggy', 'clouseroo', 'spam'])
        self.assertTrue(all([u.from_cache for u in User.objects.all()]))

    @mock.patch('caching.config.CACHE_INVALIDATE_ON_CREATE', None)
    def test_invalidate_on_create_disabled(self):
        """
        Test that creating new objects does NOT invalidate cached queries when
        whole-model invalidation on create is disabled.
        """
        users = User.objects.all()
        self.assertTrue(users, "Can't run this test without some users")
        self.assertFalse(any([u.from_cache for u in users]))
        User.objects.create(name='spam')
        self.assertTrue(all([u.from_cache for u in User.objects.all()]))

    def test_pickle_queryset(self):
        """
        Test for CacheingQuerySet.__getstate__ and CachingQuerySet.__setstate__.
        """
        # Make sure CachingQuerySet.timeout, when set to DEFAULT_TIMEOUT, can
        # be safely pickled/unpickled on/from different Python processes which
        # may have different underlying values for DEFAULT_TIMEOUT:
        q1 = Addon.objects.all()
        self.assertEqual(q1.timeout, compat.DEFAULT_TIMEOUT)
        pickled = pickle.dumps(q1)
        new_timeout = object()
        with mock.patch('caching.base.DEFAULT_TIMEOUT', new_timeout):
            q2 = pickle.loads(pickled)
            self.assertEqual(q2.timeout, new_timeout)
        # Make sure values other than DEFAULT_TIMEOUT remain unaffected:
        q1 = Addon.objects.cache(10).all()
        self.assertEqual(q1.timeout, 10)
        pickled = pickle.dumps(q1)
        with mock.patch('caching.base.DEFAULT_TIMEOUT', new_timeout):
            q2 = pickle.loads(pickled)
            self.assertEqual(q2.timeout, 10)


# use TransactionTestCase so that ['TEST']['MIRROR'] setting works
# see https://code.djangoproject.com/ticket/23718
class MultiDbTestCase(TransactionTestCase):
    """Master/slave and multi-master cache-key isolation tests."""
    multi_db = True
    fixtures = ['tests/testapp/fixtures/testapp/test_cache.json']
    extra_apps = ['tests.testapp']

    def test_multidb_cache(self):
        """ Test where master and slave DB result in two different cache keys """
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        self.assertIs(Addon.objects.get(id=1).from_cache, True)

        from_slave = Addon.objects.using('slave').get(id=1)
        self.assertIs(from_slave.from_cache, False)
        self.assertEqual(from_slave._state.db, 'slave')

    def test_multidb_fetch_by_id(self):
        """ Test where master and slave DB result in two different cache keys with FETCH_BY_ID"""
        with self.settings(FETCH_BY_ID=True):
            self.assertIs(Addon.objects.get(id=1).from_cache, False)
            self.assertIs(Addon.objects.get(id=1).from_cache, True)

            from_slave = Addon.objects.using('slave').get(id=1)
            self.assertIs(from_slave.from_cache, False)
            self.assertEqual(from_slave._state.db, 'slave')

    def test_multidb_master_slave_invalidation(self):
        """ Test saving an object on one DB invalidates it for all DBs """
        log.debug('priming the DB & cache')
        master_obj = User.objects.using('default').create(name='new-test-user')
        slave_obj = User.objects.using('slave').get(name='new-test-user')
        self.assertIs(slave_obj.from_cache, False)

        log.debug('deleting the original object')
        User.objects.using('default').filter(pk=slave_obj.pk).delete()

        log.debug('re-creating record with a new primary key')
        master_obj = User.objects.using('default').create(name='new-test-user')

        log.debug('attempting to force re-fetch from DB (should not use cache)')
        slave_obj = User.objects.using('slave').get(name='new-test-user')
        self.assertIs(slave_obj.from_cache, False)
        self.assertEqual(slave_obj.pk, master_obj.pk)

    def test_multidb_no_db_crossover(self):
        """ Test no crossover of objects with identical PKs """
        master_obj = User.objects.using('default').create(name='new-test-user')
        master_obj2 = User.objects.using('master2').create(
            pk=master_obj.pk, name='other-test-user')

        # prime the cache for the default DB
        master_obj = User.objects.using('default').get(name='new-test-user')
        self.assertIs(master_obj.from_cache, False)
        master_obj = User.objects.using('default').get(name='new-test-user')
        self.assertIs(master_obj.from_cache, True)

        # prime the cache for the 2nd master DB
        master_obj2 = User.objects.using('master2').get(name='other-test-user')
        self.assertIs(master_obj2.from_cache, False)
        master_obj2 = User.objects.using('master2').get(name='other-test-user')
        self.assertIs(master_obj2.from_cache, True)

        # ensure no crossover between databases
        self.assertNotEqual(master_obj.name, master_obj2.name)
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of website configuration command for buckets."""

from __future__ import absolute_import

import sys

from apitools.base.py import encoding

from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.util import NO_MAX

# NOTE(review): the triple-quoted help strings below lost their internal
# newlines in this view; line breaks were reconstructed — verify against the
# original file before shipping.

_SET_SYNOPSIS = """
  gsutil web set [-m main_page_suffix] [-e error_page] bucket_url...
"""

_GET_SYNOPSIS = """
  gsutil web get bucket_url
"""

_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')

_SET_DESCRIPTION = """
<B>SET</B>
  The "gsutil web set" command will allow you to configure or disable
  Website Configuration on your bucket(s). The "set" sub-command has the
  following options (leave both options blank to disable):

<B>SET OPTIONS</B>
  -m <index.html>      Specifies the object name to serve when a bucket
                       listing is requested via the CNAME alias to
                       c.storage.googleapis.com.

  -e <404.html>        Specifies the error page to serve when a request is
                       made for a non-existent object via the CNAME alias to
                       c.storage.googleapis.com.

"""

# NOTE(review): "will gets" is ungrammatical in the user-visible help text —
# left unchanged here; fix upstream.
_GET_DESCRIPTION = """
<B>GET</B>
  The "gsutil web get" command will gets the web semantics configuration for
  a bucket and displays a JSON representation of the configuration.

  In Google Cloud Storage, this would look like:

    {
      "notFoundPage": "404.html",
      "mainPageSuffix": "index.html"
    }

"""

_DESCRIPTION = """
  The Website Configuration feature enables you to configure a Google Cloud
  Storage bucket to behave like a static website. This means requests made via
  a domain-named bucket aliased using a Domain Name System "CNAME" to
  c.storage.googleapis.com will work like any other website, i.e., a GET to the
  bucket will serve the configured "main" page instead of the usual bucket
  listing and a GET for a non-existent object will serve the configured error
  page.

  For example, suppose your company's Domain name is example.com. You could set
  up a website bucket as follows:

  1. Create a bucket called example.com (see the "DOMAIN NAMED BUCKETS"
     section of "gsutil help naming" for details about creating such buckets).

  2. Create index.html and 404.html files and upload them to the bucket.

  3. Configure the bucket to have website behavior using the command:

       gsutil web set -m index.html -e 404.html gs://www.example.com

  4. Add a DNS CNAME record for example.com pointing to
     c.storage.googleapis.com (ask your DNS administrator for help with this).

  Now if you open a browser and navigate to http://www.example.com, it will
  display the main page instead of the default bucket listing. Note: It can
  take time for DNS updates to propagate because of caching used by the DNS,
  so it may take up to a day for the domain-named bucket website to work after
  you create the CNAME DNS record.

  Additional notes:

  1. Because the main page is only served when a bucket listing request is made
     via the CNAME alias, you can continue to use "gsutil ls" to list the bucket
     and get the normal bucket listing (rather than the main page).

  2. The main_page_suffix applies to each subdirectory of the bucket. For
     example, with the main_page_suffix configured to be index.html, a GET
     request for http://www.example.com would retrieve
     http://www.example.com/index.html, and a GET request for
     http://www.example.com/photos would retrieve
     http://www.example.com/photos/index.html.

  3. There is just one 404.html page: For example, a GET request for
     http://www.example.com/photos/missing would retrieve
     http://www.example.com/404.html, not
     http://www.example.com/photos/404.html.

  4. For additional details see
     https://developers.google.com/storage/docs/website-configuration.

  The web command has two sub-commands:
""" + _SET_DESCRIPTION + _GET_DESCRIPTION

_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)

_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)


class WebCommand(Command):
  """Implementation of gsutil web command."""

  # Command specification. See base class for documentation.
  command_spec = Command.CreateCommandSpec(
      'web',
      command_name_aliases=['setwebcfg', 'getwebcfg'],
      usage_synopsis=_SYNOPSIS,
      min_args=2,
      max_args=NO_MAX,
      supported_sub_args='m:e:',
      file_url_ok=False,
      provider_url_ok=False,
      urls_start_arg=1,
      gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
      gs_default_api=ApiSelector.JSON,
      argparse_arguments={
          'set': [
              CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
          ],
          'get': [
              CommandArgument.MakeNCloudBucketURLsArgument(1)
          ]
      }
  )
  # Help specification. See help_provider.py for documentation.
  help_spec = Command.HelpSpec(
      help_name='web',
      help_name_aliases=['getwebcfg', 'setwebcfg'],
      help_type='command_help',
      help_one_line_summary=(
          'Set a main page and/or error page for one or more buckets'),
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={'get': _get_help_text, 'set': _set_help_text},
  )

  def _GetWeb(self):
    """Gets website configuration for a bucket.

    Writes the configuration (JSON for GCS; raw XML pass-through for S3) to
    stdout and returns 0.
    """
    bucket_url, bucket_metadata = self.GetSingleBucketUrlFromArg(
        self.args[0], bucket_fields=['website'])

    if bucket_url.scheme == 's3':
      # S3 has no JSON representation; forward the raw XML response.
      sys.stdout.write(self.gsutil_api.XmlPassThroughGetWebsite(
          bucket_url, provider=bucket_url.scheme))
    else:
      if bucket_metadata.website and (bucket_metadata.website.mainPageSuffix or
                                      bucket_metadata.website.notFoundPage):
        sys.stdout.write(str(encoding.MessageToJson(
            bucket_metadata.website)) + '\n')
      else:
        sys.stdout.write('%s has no website configuration.\n' % bucket_url)

    return 0

  def _SetWeb(self):
    """Sets website configuration for a bucket.

    Applies -m/-e values (None when omitted, which disables the
    corresponding behavior) to every bucket matching the URL arguments.
    Raises CommandException if no bucket matched.
    """
    main_page_suffix = None
    error_page = None
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-m':
          main_page_suffix = a
        elif o == '-e':
          error_page = a

    url_args = self.args

    website = apitools_messages.Bucket.WebsiteValue(
        mainPageSuffix=main_page_suffix, notFoundPage=error_page)

    # Iterate over URLs, expanding wildcards and setting the website
    # configuration on each.
    some_matched = False
    for url_str in url_args:
      bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
      for blr in bucket_iter:
        url = blr.storage_url
        some_matched = True
        self.logger.info('Setting website configuration on %s...', blr)
        bucket_metadata = apitools_messages.Bucket(website=website)
        self.gsutil_api.PatchBucket(url.bucket_name, bucket_metadata,
                                    provider=url.scheme, fields=['id'])
    if not some_matched:
      raise CommandException('No URLs matched')
    return 0

  def RunCommand(self):
    """Command entry point for the web command.

    Dispatches to _GetWeb/_SetWeb based on the first positional argument.
    """
    action_subcommand = self.args.pop(0)
    self.ParseSubOpts(check_args=True)
    if action_subcommand == 'get':
      func = self._GetWeb
    elif action_subcommand == 'set':
      func = self._SetWeb
    else:
      raise CommandException(('Invalid subcommand "%s" for the %s command.\n'
                              'See "gsutil help web".') %
                             (action_subcommand, self.command_name))
    return func()
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (C) 2012 Midokura Japan K.K. # Copyright (C) 2013 Midokura PTE LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Takaaki Suzuki, Midokura Japan KK # @author: Tomoe Sugihara, Midokura Japan KK # @author: Ryu Ishimoto, Midokura Japan KK # @author: Rossella Sblendido, Midokura Japan KK from midonetclient import api from oslo.config import cfg from sqlalchemy.orm import exc as sa_exc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import api as db from neutron.db import db_base_plugin_v2 from neutron.db import dhcp_rpc_base from neutron.db import external_net_db from neutron.db import l3_db from neutron.db import models_v2 from neutron.db import securitygroups_db from neutron.extensions import l3 from neutron.extensions import securitygroup as ext_sg from neutron.openstack.common import excutils from neutron.openstack.common import log as logging from neutron.openstack.common import rpc from neutron.plugins.midonet.common import config # noqa from neutron.plugins.midonet.common import net_util from neutron.plugins.midonet import midonet_lib LOG = logging.getLogger(__name__) EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO METADATA_DEFAULT_IP = "169.254.169.254/32" 
OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP' OS_SG_RULE_KEY = 'OS_SG_RULE_ID' OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE' PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s" PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND" PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND" POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s" SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS" SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS" SG_PORT_GROUP_NAME = "OS_PG_%s" SNAT_RULE = 'SNAT' def _get_nat_ips(type, fip): """Get NAT IP address information. From the route type given, determine the source and target IP addresses from the provided floating IP DB object. """ if type == 'pre-routing': return fip["floating_ip_address"], fip["fixed_ip_address"] elif type == 'post-routing': return fip["fixed_ip_address"], fip["floating_ip_address"] else: raise ValueError(_("Invalid nat_type %s") % type) def _nat_chain_names(router_id): """Get the chain names for NAT. These names are used to associate MidoNet chains to the NAT rules applied to the router. For each of these, there are two NAT types, 'dnat' and 'snat' that are returned as keys, and the corresponding chain names as their values. """ pre_routing_name = PRE_ROUTING_CHAIN_NAME % router_id post_routing_name = POST_ROUTING_CHAIN_NAME % router_id return {'pre-routing': pre_routing_name, 'post-routing': post_routing_name} def _sg_chain_names(sg_id): """Get the chain names for security group. These names are used to associate a security group to MidoNet chains. There are two names for ingress and egress security group directions. """ ingress = SG_INGRESS_CHAIN_NAME % sg_id egress = SG_EGRESS_CHAIN_NAME % sg_id return {'ingress': ingress, 'egress': egress} def _port_chain_names(port_id): """Get the chain names for a port. These are chains to hold security group chains. 
""" inbound = PORT_INBOUND_CHAIN_NAME % port_id outbound = PORT_OUTBOUND_CHAIN_NAME % port_id return {'inbound': inbound, 'outbound': outbound} def _sg_port_group_name(sg_id): """Get the port group name for security group.. This name is used to associate a security group to MidoNet port groups. """ return SG_PORT_GROUP_NAME % sg_id def _rule_direction(sg_direction): """Convert the SG direction to MidoNet direction MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and 'egress'. Also, the direction is reversed since MidoNet sees it from the network port's point of view, not the VM's. """ if sg_direction == 'ingress': return 'outbound' elif sg_direction == 'egress': return 'inbound' else: raise ValueError(_("Unrecognized direction %s") % sg_direction) def _is_router_interface_port(port): """Check whether the given port is a router interface port.""" device_owner = port['device_owner'] return (device_owner in l3_db.DEVICE_OWNER_ROUTER_INTF) def _is_router_gw_port(port): """Check whether the given port is a router gateway port.""" device_owner = port['device_owner'] return (device_owner in l3_db.DEVICE_OWNER_ROUTER_GW) def _is_vif_port(port): """Check whether the given port is a standard VIF port.""" device_owner = port['device_owner'] return (not _is_dhcp_port(port) and device_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW, l3_db.DEVICE_OWNER_ROUTER_INTF)) def _is_dhcp_port(port): """Check whether the given port is a DHCP port.""" device_owner = port['device_owner'] return device_owner.startswith('network:dhcp') def _check_resource_exists(func, id, name, raise_exc=False): """Check whether the given resource exists in MidoNet data store.""" try: func(id) except midonet_lib.MidonetResourceNotFound as exc: LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."), {"name": name, "id": id}) if raise_exc: raise MidonetPluginException(msg=exc) class MidoRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin): RPC_API_VERSION = '1.1' def create_rpc_dispatcher(self): 
"""Get the rpc dispatcher for this manager. This a basic implementation that will call the plugin like get_ports and handle basic events If a manager would like to set an rpc API version, or support more than one class as the target of rpc messages, override this method. """ return n_rpc.PluginRpcDispatcher([self, agents_db.AgentExtRpcCallback()]) class MidonetPluginException(n_exc.NeutronException): message = _("%(msg)s") class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2, external_net_db.External_net_db_mixin, l3_db.L3_NAT_db_mixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ['external-net', 'router', 'security-group', 'agent' 'dhcp_agent_scheduler'] __native_bulk_support = False def __init__(self): # Read config values midonet_conf = cfg.CONF.MIDONET midonet_uri = midonet_conf.midonet_uri admin_user = midonet_conf.username admin_pass = midonet_conf.password admin_project_id = midonet_conf.project_id self.provider_router_id = midonet_conf.provider_router_id self.provider_router = None self.mido_api = api.MidonetApi(midonet_uri, admin_user, admin_pass, project_id=admin_project_id) self.client = midonet_lib.MidoClient(self.mido_api) # self.provider_router_id should have been set. 
if self.provider_router_id is None: msg = _('provider_router_id should be configured in the plugin ' 'config file') LOG.exception(msg) raise MidonetPluginException(msg=msg) self.setup_rpc() db.configure_db() def _get_provider_router(self): if self.provider_router is None: self.provider_router = self.client.get_router( self.provider_router_id) return self.provider_router def _dhcp_mappings(self, context, fixed_ips, mac): for fixed_ip in fixed_ips: subnet = self._get_subnet(context, fixed_ip["subnet_id"]) if subnet["ip_version"] == 6: # TODO(ryu) handle IPv6 continue yield subnet['cidr'], fixed_ip["ip_address"], mac def _metadata_subnets(self, context, fixed_ips): for fixed_ip in fixed_ips: subnet = self._get_subnet(context, fixed_ip["subnet_id"]) if subnet["ip_version"] == 6: continue yield subnet['cidr'], fixed_ip["ip_address"] def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids): tenant_id = port["tenant_id"] position = 1 # mac spoofing protection self._add_chain_rule(in_chain, action='drop', dl_src=port["mac_address"], inv_dl_src=True, position=position) # ip spoofing protection for fixed_ip in port["fixed_ips"]: position += 1 self._add_chain_rule(in_chain, action="drop", src_addr=fixed_ip["ip_address"] + "/32", inv_nw_src=True, dl_type=0x0800, # IPv4 position=position) # conntrack position += 1 self._add_chain_rule(in_chain, action='accept', match_forward_flow=True, position=position) # Reset the position to process egress position = 1 # Add rule for SGs if sg_ids: for sg_id in sg_ids: chain_name = _sg_chain_names(sg_id)["ingress"] chain = self.client.get_chain_by_name(tenant_id, chain_name) self._add_chain_rule(out_chain, action='jump', jump_chain_id=chain.get_id(), jump_chain_name=chain_name, position=position) position += 1 # add reverse flow matching at the end self._add_chain_rule(out_chain, action='accept', match_return_flow=True, position=position) position += 1 # fall back DROP rule at the end except for ARP 
self._add_chain_rule(out_chain, action='drop', dl_type=0x0806, # ARP inv_dl_type=True, position=position) def _bind_port_to_sgs(self, context, port, sg_ids): self._process_port_create_security_group(context, port, sg_ids) if sg_ids is not None: for sg_id in sg_ids: pg_name = _sg_port_group_name(sg_id) self.client.add_port_to_port_group_by_name( port["tenant_id"], pg_name, port["id"]) def _unbind_port_from_sgs(self, context, port_id): self._delete_port_security_group_bindings(context, port_id) self.client.remove_port_from_port_groups(port_id) def _create_accept_chain_rule(self, context, sg_rule, chain=None): direction = sg_rule["direction"] tenant_id = sg_rule["tenant_id"] sg_id = sg_rule["security_group_id"] chain_name = _sg_chain_names(sg_id)[direction] if chain is None: chain = self.client.get_chain_by_name(tenant_id, chain_name) pg_id = None if sg_rule["remote_group_id"] is not None: pg_name = _sg_port_group_name(sg_id) pg = self.client.get_port_group_by_name(tenant_id, pg_name) pg_id = pg.get_id() props = {OS_SG_RULE_KEY: str(sg_rule["id"])} # Determine source or destination address by looking at direction src_pg_id = dst_pg_id = None src_addr = dst_addr = None src_port_to = dst_port_to = None src_port_from = dst_port_from = None if direction == "egress": dst_pg_id = pg_id dst_addr = sg_rule["remote_ip_prefix"] dst_port_from = sg_rule["port_range_min"] dst_port_to = sg_rule["port_range_max"] else: src_pg_id = pg_id src_addr = sg_rule["remote_ip_prefix"] src_port_from = sg_rule["port_range_min"] src_port_to = sg_rule["port_range_max"] return self._add_chain_rule( chain, action='accept', port_group_src=src_pg_id, port_group_dst=dst_pg_id, src_addr=src_addr, src_port_from=src_port_from, src_port_to=src_port_to, dst_addr=dst_addr, dst_port_from=dst_port_from, dst_port_to=dst_port_to, nw_proto=net_util.get_protocol_value(sg_rule["protocol"]), dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]), properties=props) def _remove_nat_rules(self, context, fip): 
router = self.client.get_router(fip["router_id"]) self.client.remove_static_route(self._get_provider_router(), fip["floating_ip_address"]) chain_names = _nat_chain_names(router.get_id()) for _type, name in chain_names.iteritems(): self.client.remove_rules_by_property( router.get_tenant_id(), name, OS_FLOATING_IP_RULE_KEY, fip["id"]) def setup_rpc(self): # RPC support self.topic = topics.PLUGIN self.conn = rpc.create_connection(new=True) self.callbacks = MidoRpcCallbacks() self.dispatcher = self.callbacks.create_rpc_dispatcher() self.conn.create_consumer(self.topic, self.dispatcher, fanout=False) # Consume from all consumers in a thread self.conn.consume_in_thread() def create_subnet(self, context, subnet): """Create Neutron subnet. Creates a Neutron subnet and a DHCP entry in MidoNet bridge. """ LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet) s = subnet["subnet"] net = super(MidonetPluginV2, self).get_network( context, subnet['subnet']['network_id'], fields=None) session = context.session with session.begin(subtransactions=True): sn_entry = super(MidonetPluginV2, self).create_subnet(context, subnet) bridge = self.client.get_bridge(sn_entry['network_id']) gateway_ip = s['gateway_ip'] cidr = s['cidr'] dns_nameservers = None host_routes = None if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: dns_nameservers = s['dns_nameservers'] if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: host_routes = s['host_routes'] self.client.create_dhcp(bridge, gateway_ip, cidr, host_rts=host_routes, dns_servers=dns_nameservers) # For external network, link the bridge to the provider router. if net['router:external']: self._link_bridge_to_gw_router( bridge, self._get_provider_router(), gateway_ip, cidr) LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"), sn_entry) return sn_entry def delete_subnet(self, context, id): """Delete Neutron subnet. Delete neutron network and its corresponding MidoNet bridge. 
""" LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id) subnet = super(MidonetPluginV2, self).get_subnet(context, id, fields=None) net = super(MidonetPluginV2, self).get_network(context, subnet['network_id'], fields=None) bridge = self.client.get_bridge(subnet['network_id']) self.client.delete_dhcp(bridge) # If the network is external, clean up routes, links, ports. if net['router:external']: self._unlink_bridge_from_gw_router(bridge, self._get_provider_router()) super(MidonetPluginV2, self).delete_subnet(context, id) LOG.debug(_("MidonetPluginV2.delete_subnet exiting")) def create_network(self, context, network): """Create Neutron network. Create a new Neutron network and its corresponding MidoNet bridge. """ LOG.debug(_('MidonetPluginV2.create_network called: network=%r'), network) tenant_id = self._get_tenant_id_for_create(context, network['network']) self._ensure_default_security_group(context, tenant_id) bridge = self.client.create_bridge(tenant_id, network['network']['name']) network['network']['id'] = bridge.get_id() session = context.session with session.begin(subtransactions=True): net = super(MidonetPluginV2, self).create_network(context, network) self._process_l3_create(context, net, network['network']) LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net) return net def update_network(self, context, id, network): """Update Neutron network. Update an existing Neutron network and its corresponding MidoNet bridge. """ LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, " "network=%(network)r"), {'id': id, 'network': network}) session = context.session with session.begin(subtransactions=True): net = super(MidonetPluginV2, self).update_network( context, id, network) self._process_l3_update(context, net, network['network']) self.client.update_bridge(id, net['name']) LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net) return net def get_network(self, context, id, fields=None): """Get Neutron network. 
Retrieves a Neutron network and its corresponding MidoNet bridge. """ LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, " "fields=%(fields)r"), {'id': id, 'fields': fields}) qnet = super(MidonetPluginV2, self).get_network(context, id, fields) self.client.get_bridge(id) LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet) return qnet def delete_network(self, context, id): """Delete a network and its corresponding MidoNet bridge.""" LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id) self.client.delete_bridge(id) try: super(MidonetPluginV2, self).delete_network(context, id) except Exception: LOG.error(_('Failed to delete neutron db, while Midonet bridge=%r' 'had been deleted'), id) raise def create_port(self, context, port): """Create a L2 port in Neutron/MidoNet.""" LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port) port_data = port['port'] # Create a bridge port in MidoNet and set the bridge port ID as the # port ID in Neutron. bridge = self.client.get_bridge(port_data["network_id"]) tenant_id = bridge.get_tenant_id() bridge_port = self.client.add_bridge_port(bridge) port_data["id"] = bridge_port.get_id() try: session = context.session with session.begin(subtransactions=True): # Create a Neutron port new_port = super(MidonetPluginV2, self).create_port(context, port) port_data.update(new_port) self._ensure_default_security_group_on_port(context, port) if _is_vif_port(port_data): # Bind security groups to the port sg_ids = self._get_security_groups_on_port(context, port) self._bind_port_to_sgs(context, port_data, sg_ids) # Create port chains port_chains = {} for d, name in _port_chain_names( new_port["id"]).iteritems(): port_chains[d] = self.client.create_chain(tenant_id, name) self._initialize_port_chains(port_data, port_chains['inbound'], port_chains['outbound'], sg_ids) # Update the port with the chain self.client.update_port_chains( bridge_port, port_chains["inbound"].get_id(), port_chains["outbound"].get_id()) 
# DHCP mapping is only for VIF ports for cidr, ip, mac in self._dhcp_mappings( context, port_data["fixed_ips"], port_data["mac_address"]): self.client.add_dhcp_host(bridge, cidr, ip, mac) elif _is_dhcp_port(port_data): # For DHCP port, add a metadata route for cidr, ip in self._metadata_subnets( context, port_data["fixed_ips"]): self.client.add_dhcp_route_option(bridge, cidr, ip, METADATA_DEFAULT_IP) except Exception as ex: # Try removing the MidoNet port before raising an exception. with excutils.save_and_reraise_exception(): LOG.error(_("Failed to create a port on network %(net_id)s: " "%(err)s"), {"net_id": port_data["network_id"], "err": ex}) self.client.delete_port(bridge_port.get_id()) LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), port_data) return port_data def get_port(self, context, id, fields=None): """Retrieve port.""" LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s " "fields=%(fields)r"), {'id': id, 'fields': fields}) port = super(MidonetPluginV2, self).get_port(context, id, fields) "Check if the port exists in MidoNet DB""" try: self.client.get_port(id) except midonet_lib.MidonetResourceNotFound as exc: LOG.error(_("There is no port with ID %(id)s in MidoNet."), {"id": id}) port['status'] = constants.PORT_STATUS_ERROR raise exc LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port) return port def get_ports(self, context, filters=None, fields=None): """List neutron ports and verify that they exist in MidoNet.""" LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s " "fields=%(fields)r"), {'filters': filters, 'fields': fields}) ports = super(MidonetPluginV2, self).get_ports(context, filters, fields) return ports def delete_port(self, context, id, l3_port_check=True): """Delete a neutron port and corresponding MidoNet bridge port.""" LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s " "l3_port_check=%(l3_port_check)r"), {'id': id, 'l3_port_check': l3_port_check}) # if needed, check to see if this is 
a port owned by # and l3-router. If so, we should prevent deletion. if l3_port_check: self.prevent_l3_port_deletion(context, id) self.disassociate_floatingips(context, id) port = self.get_port(context, id) device_id = port['device_id'] # If this port is for router interface/gw, unlink and delete. if _is_router_interface_port(port): self._unlink_bridge_from_router(device_id, id) elif _is_router_gw_port(port): # Gateway removed # Remove all the SNAT rules that are tagged. router = self._get_router(context, device_id) tenant_id = router["tenant_id"] chain_names = _nat_chain_names(device_id) for _type, name in chain_names.iteritems(): self.client.remove_rules_by_property( tenant_id, name, OS_TENANT_ROUTER_RULE_KEY, SNAT_RULE) # Remove the default routes and unlink self._remove_router_gateway(port['device_id']) self.client.delete_port(id, delete_chains=True) try: for cidr, ip, mac in self._dhcp_mappings( context, port["fixed_ips"], port["mac_address"]): self.client.delete_dhcp_host(port["network_id"], cidr, ip, mac) except Exception: LOG.error(_("Failed to delete DHCP mapping for port %(id)s"), {"id": id}) super(MidonetPluginV2, self).delete_port(context, id) def update_port(self, context, id, port): """Handle port update, including security groups and fixed IPs.""" with context.session.begin(subtransactions=True): # Get the port and save the fixed IPs old_port = self._get_port(context, id) net_id = old_port["network_id"] mac = old_port["mac_address"] old_ips = old_port["fixed_ips"] # update the port DB p = super(MidonetPluginV2, self).update_port(context, id, port) new_ips = p["fixed_ips"] if new_ips: bridge = self.client.get_bridge(net_id) # If it's a DHCP port, add a route to reach the MD server if _is_dhcp_port(p): for cidr, ip in self._metadata_subnets( context, new_ips): self.client.add_dhcp_route_option( bridge, cidr, ip, METADATA_DEFAULT_IP) else: # IPs have changed. 
Re-map the DHCP entries for cidr, ip, mac in self._dhcp_mappings( context, old_ips, mac): self.client.remove_dhcp_host( bridge, cidr, ip, mac) for cidr, ip, mac in self._dhcp_mappings( context, new_ips, mac): self.client.add_dhcp_host( bridge, cidr, ip, mac) if (self._check_update_deletes_security_groups(port) or self._check_update_has_security_groups(port)): self._unbind_port_from_sgs(context, p["id"]) sg_ids = self._get_security_groups_on_port(context, port) self._bind_port_to_sgs(context, p, sg_ids) return p def create_router(self, context, router): """Handle router creation. When a new Neutron router is created, its corresponding MidoNet router is also created. In MidoNet, this router is initialized with chains for inbuond and outbound traffic, which will be used to hold other chains that include various rules, such as NAT. :param router: Router information provided to create a new router. """ # NOTE(dcahill): Similar to the Nicira plugin, we completely override # this method in order to be able to use the MidoNet ID as Neutron ID # TODO(dcahill): Propose upstream patch for allowing # 3rd parties to specify IDs as we do with l2 plugin LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"), {"router": router}) tenant_id = self._get_tenant_id_for_create(context, router['router']) mido_router = self.client.create_router(tenant_id, router['router']['name']) mido_router_id = mido_router.get_id() try: r = router['router'] has_gw_info = False if EXTERNAL_GW_INFO in r: has_gw_info = True gw_info = r[EXTERNAL_GW_INFO] del r[EXTERNAL_GW_INFO] tenant_id = self._get_tenant_id_for_create(context, r) with context.session.begin(subtransactions=True): # pre-generate id so it will be available when # configuring external gw port router_db = l3_db.Router(id=mido_router_id, tenant_id=tenant_id, name=r['name'], admin_state_up=r['admin_state_up'], status="ACTIVE") context.session.add(router_db) if has_gw_info: self._update_router_gw_info(context, router_db['id'], 
gw_info) router_data = self._make_router_dict(router_db, process_extensions=False) except Exception: # Try removing the midonet router with excutils.save_and_reraise_exception(): self.client.delete_router(mido_router_id) # Create router chains chain_names = _nat_chain_names(mido_router_id) try: self.client.add_router_chains(mido_router, chain_names["pre-routing"], chain_names["post-routing"]) except Exception: # Set the router status to Error with context.session.begin(subtransactions=True): r = self._get_router(context, router_data["id"]) router_data['status'] = constants.NET_STATUS_ERROR r['status'] = router_data['status'] context.session.add(r) LOG.debug(_("MidonetPluginV2.create_router exiting: " "router_data=%(router_data)s."), {"router_data": router_data}) return router_data def _set_router_gateway(self, id, gw_router, gw_ip): """Set router uplink gateway :param ID: ID of the router :param gw_router: gateway router to link to :param gw_ip: gateway IP address """ LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, " "gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"), {'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip}), router = self.client.get_router(id) # Create a port in the gw router gw_port = self.client.add_router_port(gw_router, port_address='169.254.255.1', network_address='169.254.255.0', network_length=30) # Create a port in the router port = self.client.add_router_port(router, port_address='169.254.255.2', network_address='169.254.255.0', network_length=30) # Link them self.client.link(gw_port, port.get_id()) # Add a route for gw_ip to bring it down to the router self.client.add_router_route(gw_router, type='Normal', src_network_addr='0.0.0.0', src_network_length=0, dst_network_addr=gw_ip, dst_network_length=32, next_hop_port=gw_port.get_id(), weight=100) # Add default route to uplink in the router self.client.add_router_route(router, type='Normal', src_network_addr='0.0.0.0', src_network_length=0, dst_network_addr='0.0.0.0', 
dst_network_length=0, next_hop_port=port.get_id(), weight=100) def _remove_router_gateway(self, id): """Clear router gateway :param ID: ID of the router """ LOG.debug(_("MidonetPluginV2.remove_router_gateway called: " "id=%(id)s"), {'id': id}) router = self.client.get_router(id) # delete the port that is connected to the gateway router for p in router.get_ports(): if p.get_port_address() == '169.254.255.2': peer_port_id = p.get_peer_id() if peer_port_id is not None: self.client.unlink(p) self.client.delete_port(peer_port_id) # delete default route for r in router.get_routes(): if (r.get_dst_network_addr() == '0.0.0.0' and r.get_dst_network_length() == 0): self.client.delete_route(r.get_id()) def update_router(self, context, id, router): """Handle router updates.""" LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s " "router=%(router)r"), {"id": id, "router": router}) router_data = router["router"] # Check if the update included changes to the gateway. gw_updated = l3_db.EXTERNAL_GW_INFO in router_data with context.session.begin(subtransactions=True): # Update the Neutron DB r = super(MidonetPluginV2, self).update_router(context, id, router) tenant_id = r["tenant_id"] if gw_updated: if (l3_db.EXTERNAL_GW_INFO in r and r[l3_db.EXTERNAL_GW_INFO] is not None): # Gateway created gw_port = self._get_port(context.elevated(), r["gw_port_id"]) gw_ip = gw_port['fixed_ips'][0]['ip_address'] # First link routers and set up the routes self._set_router_gateway(r["id"], self._get_provider_router(), gw_ip) # Get the NAT chains and add dynamic SNAT rules. 
chain_names = _nat_chain_names(r["id"]) props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE} self.client.add_dynamic_snat(tenant_id, chain_names['pre-routing'], chain_names['post-routing'], gw_ip, gw_port["id"], **props) # Update the name if changed changed_name = router_data.get('name') if changed_name: self.client.update_router(id, changed_name) LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r) return r def delete_router(self, context, id): """Handler for router deletion. Deleting a router on Neutron simply means deleting its corresponding router in MidoNet. :param id: router ID to remove """ LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id) self.client.delete_router_chains(id) self.client.delete_router(id) super(MidonetPluginV2, self).delete_router(context, id) def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr): """Link a bridge to the gateway router :param bridge: bridge :param gw_router: gateway router to link to :param gw_ip: IP address of gateway :param cidr: network CIDR """ net_addr, net_len = net_util.net_addr(cidr) # create a port on the gateway router gw_port = self.client.add_router_port(gw_router, port_address=gw_ip, network_address=net_addr, network_length=net_len) # create a bridge port, then link it to the router. port = self.client.add_bridge_port(bridge) self.client.link(gw_port, port.get_id()) # add a route for the subnet in the gateway router self.client.add_router_route(gw_router, type='Normal', src_network_addr='0.0.0.0', src_network_length=0, dst_network_addr=net_addr, dst_network_length=net_len, next_hop_port=gw_port.get_id(), weight=100) def _unlink_bridge_from_gw_router(self, bridge, gw_router): """Unlink a bridge from the gateway router :param bridge: bridge to unlink :param gw_router: gateway router to unlink from """ # Delete routes and unlink the router and the bridge. 
routes = self.client.get_router_routes(gw_router.get_id()) bridge_ports_to_delete = [ p for p in gw_router.get_peer_ports() if p.get_device_id() == bridge.get_id()] for p in bridge.get_peer_ports(): if p.get_device_id() == gw_router.get_id(): # delete the routes going to the bridge for r in routes: if r.get_next_hop_port() == p.get_id(): self.client.delete_route(r.get_id()) self.client.unlink(p) self.client.delete_port(p.get_id()) # delete bridge port for port in bridge_ports_to_delete: self.client.delete_port(port.get_id()) def _link_bridge_to_router(self, router, bridge_port_id, net_addr, net_len, gw_ip, metadata_gw_ip): router_port = self.client.add_router_port( router, port_address=gw_ip, network_address=net_addr, network_length=net_len) self.client.link(router_port, bridge_port_id) self.client.add_router_route(router, type='Normal', src_network_addr='0.0.0.0', src_network_length=0, dst_network_addr=net_addr, dst_network_length=net_len, next_hop_port=router_port.get_id(), weight=100) if metadata_gw_ip: # Add a route for the metadata server. # Not all VM images supports DHCP option 121. Add a route for the # Metadata server in the router to forward the packet to the bridge # that will send them to the Metadata Proxy. 
md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP) self.client.add_router_route( router, type='Normal', src_network_addr=net_addr, src_network_length=net_len, dst_network_addr=md_net_addr, dst_network_length=md_net_len, next_hop_port=router_port.get_id(), next_hop_gateway=metadata_gw_ip) def _unlink_bridge_from_router(self, router_id, bridge_port_id): """Unlink a bridge from a router.""" # Remove the routes to the port and unlink the port bridge_port = self.client.get_port(bridge_port_id) routes = self.client.get_router_routes(router_id) self.client.delete_port_routes(routes, bridge_port.get_peer_id()) self.client.unlink(bridge_port) def add_router_interface(self, context, router_id, interface_info): """Handle router linking with network.""" LOG.debug(_("MidonetPluginV2.add_router_interface called: " "router_id=%(router_id)s " "interface_info=%(interface_info)r"), {'router_id': router_id, 'interface_info': interface_info}) with context.session.begin(subtransactions=True): info = super(MidonetPluginV2, self).add_router_interface( context, router_id, interface_info) try: subnet = self._get_subnet(context, info["subnet_id"]) cidr = subnet["cidr"] net_addr, net_len = net_util.net_addr(cidr) router = self.client.get_router(router_id) # Get the metadatat GW IP metadata_gw_ip = None rport_qry = context.session.query(models_v2.Port) dhcp_ports = rport_qry.filter_by( network_id=subnet["network_id"], device_owner='network:dhcp').all() if dhcp_ports and dhcp_ports[0].fixed_ips: metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address else: LOG.warn(_("DHCP agent is not working correctly. No port " "to reach the Metadata server on this network")) # Link the router and the bridge self._link_bridge_to_router(router, info["port_id"], net_addr, net_len, subnet["gateway_ip"], metadata_gw_ip) except Exception: LOG.error(_("Failed to create MidoNet resources to add router " "interface. 
info=%(info)s, router_id=%(router_id)s"), {"info": info, "router_id": router_id}) with excutils.save_and_reraise_exception(): with context.session.begin(subtransactions=True): self.remove_router_interface(context, router_id, info) LOG.debug(_("MidonetPluginV2.add_router_interface exiting: " "info=%r"), info) return info def update_floatingip(self, context, id, floatingip): """Handle floating IP assocation and disassociation.""" LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s " "floatingip=%(floatingip)s "), {'id': id, 'floatingip': floatingip}) session = context.session with session.begin(subtransactions=True): if floatingip['floatingip']['port_id']: fip = super(MidonetPluginV2, self).update_floatingip( context, id, floatingip) # Add a route for the floating IP on the provider router. router = self.client.get_router(fip["router_id"]) link_port = self.client.get_link_port( self._get_provider_router(), router.get_id()) self.client.add_router_route( self._get_provider_router(), src_network_addr='0.0.0.0', src_network_length=0, dst_network_addr=fip["floating_ip_address"], dst_network_length=32, next_hop_port=link_port.get_peer_id()) # Add static SNAT and DNAT rules on the tenant router. 
                # Tag the NAT rules with the floating IP id so they can be
                # found and removed later by this key — presumably by
                # _remove_nat_rules; confirm against that helper.
                props = {OS_FLOATING_IP_RULE_KEY: id}
                tenant_id = router.get_tenant_id()
                chain_names = _nat_chain_names(router.get_id())
                for chain_type, name in chain_names.iteritems():
                    src_ip, target_ip = _get_nat_ips(chain_type, fip)
                    # DNAT on the pre-routing chain, SNAT on any other chain.
                    if chain_type == 'pre-routing':
                        nat_type = 'dnat'
                    else:
                        nat_type = 'snat'
                    self.client.add_static_nat(tenant_id, name, src_ip,
                                               target_ip,
                                               link_port.get_id(),
                                               nat_type, **props)

            # disassociate floating IP
            elif floatingip['floatingip']['port_id'] is None:
                fip = super(MidonetPluginV2, self).get_floatingip(context, id)
                self._remove_nat_rules(context, fip)
                super(MidonetPluginV2, self).update_floatingip(context, id,
                                                               floatingip)

        LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"),
                  fip)
        return fip

    def disassociate_floatingips(self, context, port_id):
        """Disassociate floating IPs (if any) from this port.

        Removes the MidoNet NAT rules for the floating IP bound to
        ``port_id`` (if one exists), then delegates to the base class.

        :param context: Neutron request context (provides the DB session).
        :param port_id: id of the fixed port being disassociated.
        """
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
            self._remove_nat_rules(context, fip_db)
        except sa_exc.NoResultFound:
            # No floating IP is associated with this port; nothing to clean
            # up on the MidoNet side — fall through to the base handling.
            pass

        super(MidonetPluginV2, self).disassociate_floatingips(context, port_id)

    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        Create a new security group, including the default security group.
        In MidoNet, this means creating a pair of chains, inbound and outbound,
        as well as a new port group.
""" LOG.debug(_("MidonetPluginV2.create_security_group called: " "security_group=%(security_group)s " "default_sg=%(default_sg)s "), {'security_group': security_group, 'default_sg': default_sg}) sg = security_group.get('security_group') tenant_id = self._get_tenant_id_for_create(context, sg) if not default_sg: self._ensure_default_security_group(context, tenant_id) # Create the Neutron sg first sg = super(MidonetPluginV2, self).create_security_group( context, security_group, default_sg) try: # Process the MidoNet side self.client.create_port_group(tenant_id, _sg_port_group_name(sg["id"])) chain_names = _sg_chain_names(sg["id"]) chains = {} for direction, chain_name in chain_names.iteritems(): c = self.client.create_chain(tenant_id, chain_name) chains[direction] = c # Create all the rules for this SG. Only accept rules are created for r in sg['security_group_rules']: self._create_accept_chain_rule(context, r, chain=chains[r['direction']]) except Exception: LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"), {"sg": sg}) with excutils.save_and_reraise_exception(): with context.session.begin(subtransactions=True): sg = self._get_security_group(context, sg["id"]) context.session.delete(sg) LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"), sg) return sg def delete_security_group(self, context, id): """Delete chains for Neutron security group.""" LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id) with context.session.begin(subtransactions=True): sg = super(MidonetPluginV2, self).get_security_group(context, id) if not sg: raise ext_sg.SecurityGroupNotFound(id=id) if sg["name"] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() sg_id = sg['id'] filters = {'security_group_id': [sg_id]} if super(MidonetPluginV2, self)._get_port_security_group_bindings( context, filters): raise ext_sg.SecurityGroupInUse(id=sg_id) # Delete MidoNet Chains and portgroup for the SG tenant_id = sg['tenant_id'] 
            self.client.delete_chains_by_names(
                tenant_id, _sg_chain_names(sg["id"]).values())
            self.client.delete_port_group_by_name(
                tenant_id, _sg_port_group_name(sg["id"]))
            super(MidonetPluginV2, self).delete_security_group(context, id)

    def create_security_group_rule(self, context, security_group_rule):
        """Create a security group rule

        Create a security group rule in the Neutron DB and corresponding
        MidoNet resources in its data store.
        """
        LOG.debug(_("MidonetPluginV2.create_security_group_rule called: "
                    "security_group_rule=%(security_group_rule)r"),
                  {'security_group_rule': security_group_rule})
        with context.session.begin(subtransactions=True):
            rule = super(MidonetPluginV2, self).create_security_group_rule(
                context, security_group_rule)
            # Mirror the new rule as an accept rule on the MidoNet chain.
            self._create_accept_chain_rule(context, rule)
            LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: "
                        "rule=%r"), rule)
            return rule

    def delete_security_group_rule(self, context, sg_rule_id):
        """Delete a security group rule

        Delete a security group rule from the Neutron DB and corresponding
        MidoNet resources from its data store.
        """
        LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: "
                    "sg_rule_id=%s"), sg_rule_id)
        with context.session.begin(subtransactions=True):
            rule = super(MidonetPluginV2, self).get_security_group_rule(
                context, sg_rule_id)
            if not rule:
                raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id)

            sg = self._get_security_group(context,
                                          rule["security_group_id"])
            # MidoNet rules were tagged with the Neutron rule id on creation;
            # remove them from the direction-specific chain by that property.
            chain_name = _sg_chain_names(sg["id"])[rule["direction"]]
            self.client.remove_rules_by_property(rule["tenant_id"], chain_name,
                                                 OS_SG_RULE_KEY,
                                                 str(rule["id"]))
            super(MidonetPluginV2, self).delete_security_group_rule(
                context, sg_rule_id)

    def _add_chain_rule(self, chain, action, **kwargs):
        """Translate Neutron-style rule kwargs into a MidoNet chain rule.

        ``src_addr``/``dst_addr`` CIDRs become ``nw_*_addr``/``nw_*_length``
        pairs and the ``*_port_from``/``*_port_to`` bounds become
        ``tp_src``/``tp_dst`` range dicts before delegating to the MidoNet
        client's ``add_chain_rule``.
        """
        nw_proto = kwargs.get("nw_proto")
        src_addr = kwargs.pop("src_addr", None)
        dst_addr = kwargs.pop("dst_addr", None)
        src_port_from = kwargs.pop("src_port_from", None)
        src_port_to = kwargs.pop("src_port_to", None)
        dst_port_from = kwargs.pop("dst_port_from", None)
        dst_port_to = kwargs.pop("dst_port_to", None)

        # Convert to the keys and values that midonet client understands
        if src_addr:
            kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr(
                src_addr)

        if dst_addr:
            kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr(
                dst_addr)

        kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to}

        kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to}

        if nw_proto == 1:  # ICMP
            # Overwrite port fields regardless of the direction
            # NOTE(review): the asymmetry here (tp_src uses src_port_from for
            # both bounds, tp_dst uses dst_port_to for both) looks deliberate
            # — ICMP "ports" presumably carry type/code — but is worth
            # confirming against the MidoNet client API.
            kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from}
            kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to}

        return self.client.add_chain_rule(chain, action=action, **kwargs)
# -*- coding: utf-8 -*- from __future__ import with_statement import json import datetime from djangocms_text_ckeditor.cms_plugins import TextPlugin from djangocms_text_ckeditor.models import Text from django.contrib import admin from django.contrib.admin.models import LogEntry from django.contrib.admin.sites import site from django.contrib.auth.models import Permission, AnonymousUser from django.contrib.sites.models import Site from django.core.urlresolvers import reverse from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse) from django.utils.datastructures import MultiValueDictKeyError from django.utils.encoding import smart_str from django.utils import timezone from cms.test_utils.util.fuzzy_int import FuzzyInt from cms.admin.change_list import CMSChangeList from cms.admin.forms import PageForm, AdvancedSettingsForm from cms.admin.pageadmin import PageAdmin from cms.admin.permissionadmin import PagePermissionInlineAdmin from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page from cms.compat import get_user_model from cms.constants import PLUGIN_MOVE_ACTION from cms.models import UserSettings, StaticPlaceholder from cms.models.pagemodel import Page from cms.models.permissionmodels import GlobalPagePermission, PagePermission from cms.models.placeholdermodel import Placeholder from cms.models.pluginmodel import CMSPlugin from cms.models.titlemodels import Title from cms.test_utils import testcases as base from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE from cms.test_utils.util.context_managers import SettingsOverride from cms.utils import get_cms_setting from cms.utils.compat import DJANGO_1_4 from cms.utils.compat.dj import force_unicode class AdminTestsBase(CMSTestCase): @property def admin_class(self): return site._registry[Page] def _get_guys(self, admin_only=False, use_global_permissions=True): admiN_user = 
self.get_superuser() if admin_only: return admiN_user USERNAME = 'test' if get_user_model().USERNAME_FIELD == 'email': normal_guy = get_user_model().objects.create_user(USERNAME, 'test@test.com', 'test@test.com') else: normal_guy = get_user_model().objects.create_user(USERNAME, 'test@test.com', USERNAME) normal_guy.is_staff = True normal_guy.is_active = True normal_guy.save() normal_guy.user_permissions = Permission.objects.filter( codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title'] ) if use_global_permissions: gpp = GlobalPagePermission.objects.create( user=normal_guy, can_change=True, can_delete=True, can_change_advanced_settings=False, can_publish=True, can_change_permissions=False, can_move_page=True, ) gpp.sites = Site.objects.all() return admiN_user, normal_guy class AdminTestCase(AdminTestsBase): def test_permissioned_page_list(self): """ Makes sure that a user with restricted page permissions can view the page list. """ admin_user, normal_guy = self._get_guys(use_global_permissions=False) current_site = Site.objects.get(pk=1) page = create_page("Test page", "nav_playground.html", "en", site=current_site, created_by=admin_user) PagePermission.objects.create(page=page, user=normal_guy) with self.login_user_context(normal_guy): resp = self.client.get(URL_CMS_PAGE) self.assertEqual(resp.status_code, 200) def test_edit_does_not_reset_page_adv_fields(self): """ Makes sure that if a non-superuser with no rights to edit advanced page fields edits a page, those advanced fields are not touched. 
""" OLD_PAGE_NAME = 'Test Page' NEW_PAGE_NAME = 'Test page 2' REVERSE_ID = 'Test' OVERRIDE_URL = 'my/override/url' admin_user, normal_guy = self._get_guys() current_site = Site.objects.get(pk=1) # The admin creates the page page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user) page.reverse_id = REVERSE_ID page.save() title = page.get_title_obj() title.has_url_overwrite = True title.path = OVERRIDE_URL title.save() self.assertEqual(page.get_title(), OLD_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) self.assertEqual(title.overwrite_url, OVERRIDE_URL) # The user edits the page (change the page name for ex.) page_data = { 'title': NEW_PAGE_NAME, 'slug': page.get_slug(), 'language': title.language, 'site': page.site.pk, 'template': page.template, 'pagepermission_set-TOTAL_FORMS': 0, 'pagepermission_set-INITIAL_FORMS': 0, 'pagepermission_set-MAX_NUM_FORMS': 0, 'pagepermission_set-2-TOTAL_FORMS': 0, 'pagepermission_set-2-INITIAL_FORMS': 0, 'pagepermission_set-2-MAX_NUM_FORMS': 0 } # required only if user haves can_change_permission with self.login_user_context(normal_guy): resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data, follow=True) self.assertEqual(resp.status_code, 200) self.assertTemplateNotUsed(resp, 'admin/login.html') page = Page.objects.get(pk=page.pk) self.assertEqual(page.get_title(), NEW_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) title = page.get_title_obj() self.assertEqual(title.overwrite_url, OVERRIDE_URL) # The admin edits the page (change the page name for ex.) 
page_data = { 'title': OLD_PAGE_NAME, 'slug': page.get_slug(), 'language': title.language, 'site': page.site.pk, 'template': page.template, 'reverse_id': page.reverse_id, 'pagepermission_set-TOTAL_FORMS': 0, # required only if user haves can_change_permission 'pagepermission_set-INITIAL_FORMS': 0, 'pagepermission_set-MAX_NUM_FORMS': 0, 'pagepermission_set-2-TOTAL_FORMS': 0, 'pagepermission_set-2-INITIAL_FORMS': 0, 'pagepermission_set-2-MAX_NUM_FORMS': 0 } with self.login_user_context(admin_user): resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data, follow=True) self.assertEqual(resp.status_code, 200) self.assertTemplateNotUsed(resp, 'admin/login.html') page = Page.objects.get(pk=page.pk) self.assertEqual(page.get_title(), OLD_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) title = page.get_title_obj() self.assertEqual(title.overwrite_url, None) def test_edit_does_not_reset_apphook(self): """ Makes sure that if a non-superuser with no rights to edit advanced page fields edits a page, those advanced fields are not touched. """ OLD_PAGE_NAME = 'Test Page' NEW_PAGE_NAME = 'Test page 2' REVERSE_ID = 'Test' APPLICATION_URLS = 'project.sampleapp.urls' admin_user, normal_guy = self._get_guys() current_site = Site.objects.get(pk=1) # The admin creates the page page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user) page.reverse_id = REVERSE_ID page.save() title = page.get_title_obj() title.has_url_overwrite = True title.save() page.application_urls = APPLICATION_URLS page.save() self.assertEqual(page.get_title(), OLD_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) self.assertEqual(page.application_urls, APPLICATION_URLS) # The user edits the page (change the page name for ex.) 
page_data = { 'title': NEW_PAGE_NAME, 'slug': page.get_slug(), 'language': title.language, 'site': page.site.pk, 'template': page.template, 'pagepermission_set-TOTAL_FORMS': 0, 'pagepermission_set-INITIAL_FORMS': 0, 'pagepermission_set-MAX_NUM_FORMS': 0, 'pagepermission_set-2-TOTAL_FORMS': 0, 'pagepermission_set-2-INITIAL_FORMS': 0, 'pagepermission_set-2-MAX_NUM_FORMS': 0, } with self.login_user_context(normal_guy): resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data, follow=True) self.assertEqual(resp.status_code, 200) self.assertTemplateNotUsed(resp, 'admin/login.html') page = Page.objects.get(pk=page.pk) self.assertEqual(page.get_title(), NEW_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) self.assertEqual(page.application_urls, APPLICATION_URLS) title = page.get_title_obj() # The admin edits the page (change the page name for ex.) page_data = { 'title': OLD_PAGE_NAME, 'slug': page.get_slug(), 'language': title.language, 'site': page.site.pk, 'template': page.template, 'reverse_id': page.reverse_id, } with self.login_user_context(admin_user): resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data, follow=True) self.assertEqual(resp.status_code, 200) self.assertTemplateNotUsed(resp, 'admin/login.html') resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data, follow=True) self.assertEqual(resp.status_code, 200) self.assertTemplateNotUsed(resp, 'admin/login.html') page = Page.objects.get(pk=page.pk) self.assertEqual(page.get_title(), OLD_PAGE_NAME) self.assertEqual(page.reverse_id, REVERSE_ID) self.assertEqual(page.application_urls, '') def test_2apphooks_with_same_namespace(self): PAGE1 = 'Test Page' PAGE2 = 'Test page 2' APPLICATION_URLS = 'project.sampleapp.urls' admin_user, normal_guy = self._get_guys() current_site = Site.objects.get(pk=1) # The admin creates the page page = create_page(PAGE1, "nav_playground.html", "en", site=current_site, created_by=admin_user) page2 = create_page(PAGE2, 
"nav_playground.html", "en", site=current_site, created_by=admin_user) page.application_urls = APPLICATION_URLS page.application_namespace = "space1" page.save() page2.application_urls = APPLICATION_URLS page2.save() # The admin edits the page (change the page name for ex.) page_data = { 'title': PAGE2, 'slug': page2.get_slug(), 'language': 'en', 'site': page.site.pk, 'template': page2.template, 'application_urls': 'SampleApp', 'application_namespace': 'space1', } with self.login_user_context(admin_user): resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data) self.assertEqual(resp.status_code, 302) self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1) resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data) self.assertEqual(resp.status_code, 200) page_data['application_namespace'] = 'space2' resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data) self.assertEqual(resp.status_code, 302) def test_delete(self): admin_user = self.get_superuser() create_page("home", "nav_playground.html", "en", created_by=admin_user, published=True) page = create_page("delete-page", "nav_playground.html", "en", created_by=admin_user, published=True) create_page('child-page', "nav_playground.html", "en", created_by=admin_user, published=True, parent=page) body = page.placeholders.get(slot='body') add_plugin(body, 'TextPlugin', 'en', body='text') page.publish('en') with self.login_user_context(admin_user): data = {'post': 'yes'} with self.assertNumQueries(FuzzyInt(300, 382)): response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data) self.assertRedirects(response, URL_CMS_PAGE) def test_delete_diff_language(self): admin_user = self.get_superuser() create_page("home", "nav_playground.html", "en", created_by=admin_user, published=True) page = create_page("delete-page", "nav_playground.html", "en", created_by=admin_user, published=True) create_page('child-page', "nav_playground.html", 
"de", created_by=admin_user, published=True, parent=page) body = page.placeholders.get(slot='body') add_plugin(body, 'TextPlugin', 'en', body='text') page.publish('en') with self.login_user_context(admin_user): data = {'post': 'yes'} with self.assertNumQueries(FuzzyInt(300, 382)): response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data) self.assertRedirects(response, URL_CMS_PAGE) def test_search_fields(self): superuser = self.get_superuser() from django.contrib.admin import site with self.login_user_context(superuser): for model, admin_instance in site._registry.items(): if model._meta.app_label != 'cms': continue if not admin_instance.search_fields: continue url = reverse('admin:cms_%s_changelist' % model._meta.module_name) response = self.client.get('%s?q=1' % url) errmsg = response.content self.assertEqual(response.status_code, 200, errmsg) def test_delete_translation(self): admin_user = self.get_superuser() page = create_page("delete-page-translation", "nav_playground.html", "en", created_by=admin_user, published=True) create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2") create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es") with self.login_user_context(admin_user): response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'}) self.assertEqual(response.status_code, 200) response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'}) self.assertRedirects(response, URL_CMS_PAGE) response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'}) self.assertEqual(response.status_code, 200) response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'}) self.assertRedirects(response, URL_CMS_PAGE) def test_change_dates(self): admin_user, staff = self._get_guys() page = create_page('test-page', 'nav_playground.html', 'en') page.publish('en') draft = page.get_draft_object() with 
self.settings(USE_TZ=False): original_date = draft.publication_date original_end_date = draft.publication_end_date new_date = timezone.now() - datetime.timedelta(days=1) new_end_date = timezone.now() + datetime.timedelta(days=1) url = reverse('admin:cms_page_dates', args=(draft.pk,)) with self.login_user_context(admin_user): response = self.client.post(url, { 'language': 'en', 'site': draft.site.pk, 'publication_date_0': new_date.date(), 'publication_date_1': new_date.strftime("%H:%M:%S"), 'publication_end_date_0': new_end_date.date(), 'publication_end_date_1': new_end_date.strftime("%H:%M:%S"), }) self.assertEqual(response.status_code, 302) draft = Page.objects.get(pk=draft.pk) self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple()) self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple()) self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple()) if original_end_date: self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple()) with self.settings(USE_TZ=True): original_date = draft.publication_date original_end_date = draft.publication_end_date new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1) new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1) url = reverse('admin:cms_page_dates', args=(draft.pk,)) with self.login_user_context(admin_user): response = self.client.post(url, { 'language': 'en', 'site': draft.site.pk, 'publication_date_0': new_date.date(), 'publication_date_1': new_date.strftime("%H:%M:%S"), 'publication_end_date_0': new_end_date.date(), 'publication_end_date_1': new_end_date.strftime("%H:%M:%S"), }) self.assertEqual(response.status_code, 302) draft = Page.objects.get(pk=draft.pk) self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple()) self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple()) 
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple()) if original_end_date: self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple()) def test_change_template(self): admin_user, staff = self._get_guys() request = self.get_request('/admin/cms/page/1/', 'en') request.method = "POST" pageadmin = site._registry[Page] with self.login_user_context(staff): self.assertRaises(Http404, pageadmin.change_template, request, 1) page = create_page('test-page', 'nav_playground.html', 'en') response = pageadmin.change_template(request, page.pk) self.assertEqual(response.status_code, 403) url = reverse('admin:cms_page_change_template', args=(page.pk,)) with self.login_user_context(admin_user): response = self.client.post(url, {'template': 'doesntexist'}) self.assertEqual(response.status_code, 400) response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]}) self.assertEqual(response.status_code, 200) def test_get_permissions(self): page = create_page('test-page', 'nav_playground.html', 'en') url = reverse('admin:cms_page_get_permissions', args=(page.pk,)) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'admin/login.html') admin_user = self.get_superuser() with self.login_user_context(admin_user): response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateNotUsed(response, 'admin/login.html') def test_changelist_items(self): admin_user = self.get_superuser() first_level_page = create_page('level1', 'nav_playground.html', 'en') second_level_page_top = create_page('level21', "nav_playground.html", "en", created_by=admin_user, published=True, parent=first_level_page) second_level_page_bottom = create_page('level22', "nav_playground.html", "en", created_by=admin_user, published=True, parent=self.reload(first_level_page)) third_level_page = create_page('level3', "nav_playground.html", "en", 
created_by=admin_user, published=True, parent=second_level_page_top) self.assertEqual(Page.objects.all().count(), 4) url = reverse('admin:cms_%s_changelist' % Page._meta.module_name) request = self.get_request(url) request.session = {} request.user = admin_user page_admin = site._registry[Page] cl_params = [request, page_admin.model, page_admin.list_display, page_admin.list_display_links, page_admin.list_filter, page_admin.date_hierarchy, page_admin.search_fields, page_admin.list_select_related, page_admin.list_per_page] if hasattr(page_admin, 'list_max_show_all'): # django 1.4 cl_params.append(page_admin.list_max_show_all) cl_params.extend([page_admin.list_editable, page_admin]) cl = CMSChangeList(*tuple(cl_params)) cl.set_items(request) root_page = cl.get_items()[0] self.assertEqual(root_page, first_level_page) self.assertEqual(root_page.get_children()[0], second_level_page_top) self.assertEqual(root_page.get_children()[1], second_level_page_bottom) self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page) def test_changelist_tree(self): """ This test checks for proper jstree cookie unquoting. It should be converted to a selenium test to actually test the jstree behaviour. 
Cookie set below is just a forged example (from live session) """ admin_user = self.get_superuser() first_level_page = create_page('level1', 'nav_playground.html', 'en') second_level_page_top = create_page('level21', "nav_playground.html", "en", created_by=admin_user, published=True, parent=first_level_page) second_level_page_bottom = create_page('level22', "nav_playground.html", "en", created_by=admin_user, published=True, parent=self.reload(first_level_page)) third_level_page = create_page('level3', "nav_playground.html", "en", created_by=admin_user, published=True, parent=second_level_page_top) url = reverse('admin:cms_%s_changelist' % Page._meta.module_name) if get_user_model().USERNAME_FIELD == 'email': self.client.login(username='admin@django-cms.org', password='admin@django-cms.org') else: self.client.login(username='admin', password='admin') self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2' response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context["open_menu_trees"], [1, 2]) # tests descendants method for the lazy load ajax call url = "%s%d/en/descendants/" % (url, first_level_page.pk) response = self.client.get(url) self.assertEqual(response.status_code, 200) # should include both direct descendant pages self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk) self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk) # but not any further down the tree self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk) self.assertNotContains(response, 'None') def test_unihandecode_doesnt_break_404_in_admin(self): self.get_superuser() if get_user_model().USERNAME_FIELD == 'email': self.client.login(username='admin@django-cms.org', password='admin@django-cms.org') else: self.client.login(username='admin', password='admin') response = self.client.get('/en/admin/cms/page/1/?language=en') self.assertEqual(response.status_code, 404) def 
test_tree_displays_in_correct_language(self): ''' Test to prove and protect that the page titles in the tree are displayed in the currently set language. ''' admin_guy, normal_guy = self._get_guys(use_global_permissions=False) site = Site.objects.get(pk=1) en_title = "EN Page" es_title = "ES Pagina" # Create a page in en page = create_page(en_title, "nav_playground.html", "en", site=site, created_by=admin) # Add a es-mx translation for this page create_title("es-mx", es_title, page, slug="es_pagina") url = reverse('admin:cms_%s_changelist' % Page._meta.module_name) url_pat = '<a href="{0}/{1}/preview/"[^>]*>{2}</a>' with self.login_user_context(admin_guy): # Check the EN version of the tree... response = self.client.get(url, {'language': 'en'}) self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'en', en_title, )) # Check the ES version of the tree... response = self.client.get(url, {'language': 'es-mx'}) self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'es-mx', es_title, )) class AdminTests(AdminTestsBase): # TODO: needs tests for actual permissions, not only superuser/normaluser def setUp(self): self.page = create_page("testpage", "nav_playground.html", "en") def get_admin(self): User = get_user_model() fields = dict(email="admin@django-cms.org", is_staff=True, is_superuser=True) if (User.USERNAME_FIELD != 'email'): fields[User.USERNAME_FIELD] = "admin" usr = User(**fields) usr.set_password(getattr(usr, User.USERNAME_FIELD)) usr.save() return usr def get_permless(self): User = get_user_model() fields = dict(email="permless@django-cms.org", is_staff=True) if (User.USERNAME_FIELD != 'email'): fields[User.USERNAME_FIELD] = "permless" usr = User(**fields) usr.set_password(getattr(usr, User.USERNAME_FIELD)) usr.save() return usr def get_page(self): return self.page def test_change_publish_unpublish(self): page = self.get_page() permless = self.get_permless() with self.login_user_context(permless): request = self.get_request() 
            # Permissionless POST: publish must be refused and the page stay unpublished.
            response = self.admin_class.publish_page(request, page.pk, "en")
            self.assertEqual(response.status_code, 403)
            page = self.reload(page)
            self.assertFalse(page.is_published('en'))
            request = self.get_request(post_data={'no': 'data'})
            response = self.admin_class.publish_page(request, page.pk, "en")
            # Forbidden
            self.assertEqual(response.status_code, 403)
            self.assertFalse(page.is_published('en'))
        admin_user = self.get_admin()
        with self.login_user_context(admin_user):
            # Admin user can publish and then unpublish; both redirect (302).
            request = self.get_request(post_data={'no': 'data'})
            response = self.admin_class.publish_page(request, page.pk, "en")
            self.assertEqual(response.status_code, 302)
            page = self.reload(page)
            self.assertTrue(page.is_published('en'))
            response = self.admin_class.unpublish(request, page.pk, "en")
            self.assertEqual(response.status_code, 302)
            page = self.reload(page)
            self.assertFalse(page.is_published('en'))

    def test_change_status_adds_log_entry(self):
        """Publishing a page must create exactly one admin LogEntry for it."""
        page = self.get_page()
        admin_user = self.get_admin()
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'no': 'data'})
            self.assertFalse(LogEntry.objects.count())
            response = self.admin_class.publish_page(request, page.pk, "en")
            self.assertEqual(response.status_code, 302)
            self.assertEqual(1, LogEntry.objects.count())
            self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))

    def test_change_innavigation(self):
        """change_innavigation: 403 without perms, 404 for unknown pk, toggles flag for admin."""
        page = self.get_page()
        permless = self.get_permless()
        admin_user = self.get_admin()
        with self.login_user_context(permless):
            request = self.get_request()
            response = self.admin_class.change_innavigation(request, page.pk)
            self.assertEqual(response.status_code, 403)
        with self.login_user_context(permless):
            request = self.get_request(post_data={'no': 'data'})
            # pk + 100 is assumed not to exist.
            self.assertRaises(Http404, self.admin_class.change_innavigation,
                              request, page.pk + 100)
        with self.login_user_context(permless):
            request = self.get_request(post_data={'no': 'data'})
            response = self.admin_class.change_innavigation(request, page.pk)
            self.assertEqual(response.status_code, 403)
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'no': 'data'})
            old = page.in_navigation
            response = self.admin_class.change_innavigation(request, page.pk)
            self.assertEqual(response.status_code, 200)
            page = self.reload(page)
            # The flag must have been toggled.
            self.assertEqual(old, not page.in_navigation)

    def test_publish_page_requires_perms(self):
        """POSTing publish_page without permissions must return 403."""
        permless = self.get_permless()
        with self.login_user_context(permless):
            request = self.get_request()
            request.method = "POST"
            response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
            self.assertEqual(response.status_code, 403)

    def test_revert_page(self):
        """revert_page restores the draft title from the published copy."""
        self.page.publish('en')
        title = self.page.title_set.get(language='en')
        title.title = 'new'
        title.save()
        # publish() created a public copy, hence two of each.
        self.assertEqual(Title.objects.all().count(), 2)
        self.assertEqual(Page.objects.all().count(), 2)
        with self.login_user_context(self.get_superuser()):
            request = self.get_request()
            request.method = "POST"
            response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
            self.assertEqual(response.status_code, 302)
        self.assertEqual(Title.objects.all().count(), 2)
        self.assertEqual(Page.objects.all().count(), 2)
        new_title = Title.objects.get(pk=title.pk)
        # The draft title was reverted, so it no longer equals the edited value.
        self.assertNotEqual(title.title, new_title.title)
        self.assertTrue(title.publisher_is_draft)
        self.assertTrue(new_title.publisher_is_draft)

    def test_revert_page_requires_perms(self):
        """POSTing revert_page without permissions must return 403."""
        permless = self.get_permless()
        with self.login_user_context(permless):
            request = self.get_request()
            request.method = "POST"
            response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
            self.assertEqual(response.status_code, 403)

    def test_revert_page_redirects(self):
        """After reverting, the redirect URL carries the edit-off toolbar parameter."""
        admin_user = self.get_admin()
        self.page.publish("en")  # Ensure public copy exists before reverting
        with self.login_user_context(admin_user):
            response = self.client.get(reverse('admin:cms_page_revert_page',
                                               args=(self.page.pk, 'en')))
            self.assertEqual(response.status_code, 302)
            url = response['Location']
            self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))

    def test_remove_plugin_requires_post(self):
        # NOTE(review): name suggests GET should be rejected, but the assertion
        # accepts 200 for a GET — confirm intended behavior against the view.
        ph = Placeholder.objects.create(slot='test')
        plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
        admin_user = self.get_admin()
        with self.login_user_context(admin_user):
            request = self.get_request()
            response = self.admin_class.delete_plugin(request, plugin.pk)
            self.assertEqual(response.status_code, 200)

    def test_move_plugin(self):
        """move_plugin: method/param validation, permission checks, and JSON reload flag."""
        ph = Placeholder.objects.create(slot='test')
        plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
        page = self.get_page()
        source, target = list(page.placeholders.all())[:2]
        pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
        plugin_class = pageplugin.get_plugin_class_instance()
        expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}
        placeholder = Placeholder.objects.all()[0]
        permless = self.get_permless()
        admin_user = self.get_admin()
        with self.login_user_context(permless):
            request = self.get_request()
            response = self.admin_class.move_plugin(request)
            # GET is not allowed on this endpoint.
            self.assertEqual(response.status_code, 405)
            request = self.get_request(post_data={'not_usable': '1'})
            # Missing required POST keys raise before any permission check.
            self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'ids': plugin.pk})
            self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'plugin_id': pageplugin.pk,
                                                  'placeholder_id': 'invalid-placeholder',
                                                  'plugin_language': 'en'})
            self.assertRaises(ValueError, self.admin_class.move_plugin, request)
        with self.login_user_context(permless):
            request = self.get_request(post_data={'plugin_id': pageplugin.pk,
                                                  'placeholder_id': placeholder.pk,
                                                  'plugin_parent': '',
                                                  'plugin_language': 'en'})
            self.assertEqual(self.admin_class.move_plugin(request).status_code,
                             HttpResponseForbidden.status_code)
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'plugin_id': pageplugin.pk,
                                                  'placeholder_id': placeholder.pk,
                                                  'plugin_parent': '',
                                                  'plugin_language': 'en'})
            response = self.admin_class.move_plugin(request)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(json.loads(response.content.decode('utf8')), expected)
        with self.login_user_context(permless):
            request = self.get_request(post_data={'plugin_id': pageplugin.pk,
                                                  'placeholder_id': placeholder.id,
                                                  'plugin_parent': '',
                                                  'plugin_language': 'en'})
            self.assertEqual(self.admin_class.move_plugin(request).status_code,
                             HttpResponseForbidden.status_code)
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'plugin_id': pageplugin.pk,
                                                  'placeholder_id': placeholder.id,
                                                  'plugin_parent': '',
                                                  'plugin_language': 'en'})
            response = self.admin_class.move_plugin(request)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(json.loads(response.content.decode('utf8')), expected)

    def test_move_language(self):
        """Moving a plugin under a parent of another language re-tags its language."""
        page = self.get_page()
        source, target = list(page.placeholders.all())[:2]
        col = add_plugin(source, 'MultiColumnPlugin', 'en')
        sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
        col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
        admin_user = self.get_admin()
        with self.login_user_context(admin_user):
            request = self.get_request(post_data={'plugin_id': sub_col.pk,
                                                  'placeholder_id': source.id,
                                                  'plugin_parent': col2.pk,
                                                  'plugin_language': 'de'})
            response = self.admin_class.move_plugin(request)
            self.assertEqual(response.status_code, 200)
        sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
        self.assertEqual(sub_col.language, "de")
        self.assertEqual(sub_col.parent_id, col2.pk)

    def test_preview_page(self):
        """preview_page: 404 for unknown page, otherwise redirect to edit-mode URL (site-aware)."""
        permless = self.get_permless()
        with self.login_user_context(permless):
            request = self.get_request()
            self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
        page = self.get_page()
        page.publish("en")
        base_url = page.get_absolute_url()
        with self.login_user_context(permless):
            request = self.get_request('/?public=true')
            response = self.admin_class.preview_page(request, page.pk, 'en')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'],
                             '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
            request = self.get_request()
            response = self.admin_class.preview_page(request, page.pk, 'en')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'],
                             '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
            # When the page belongs to a different site, the redirect becomes absolute.
            current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
            page.site = current_site
            page.save()
            page.publish("en")
            self.assertTrue(page.is_home)
            response = self.admin_class.preview_page(request, page.pk, 'en')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'],
                             'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))

    def test_too_many_plugins_global(self):
        """A global placeholder plugin limit of 1 must reject a second add with 400."""
        conf = {
            'body': {
                'limits': {
                    'global': 1,
                },
            },
        }
        admin_user = self.get_admin()
        url = reverse('admin:cms_page_add_plugin')
        with SettingsOverride(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
            page = create_page('somepage', 'nav_playground.html', 'en')
            body = page.placeholders.get(slot='body')
            add_plugin(body, 'TextPlugin', 'en', body='text')
            with self.login_user_context(admin_user):
                data = {
                    'plugin_type': 'TextPlugin',
                    'placeholder_id': body.pk,
                    'plugin_language': 'en',
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)

    def test_too_many_plugins_type(self):
        """A per-type plugin limit of 1 must reject a second TextPlugin with 400."""
        conf = {
            'body': {
                'limits': {
                    'TextPlugin': 1,
                },
            },
        }
        admin_user = self.get_admin()
        url = reverse('admin:cms_page_add_plugin')
        with SettingsOverride(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
            page = create_page('somepage', 'nav_playground.html', 'en')
            body = page.placeholders.get(slot='body')
            add_plugin(body, 'TextPlugin', 'en', body='text')
            with self.login_user_context(admin_user):
                data = {
                    'plugin_type': 'TextPlugin',
                    'placeholder_id': body.pk,
                    'plugin_language': 'en',
                    'plugin_parent': '',
                }
                response = self.client.post(url, data)
                self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)

    def test_edit_title_dirty_bit(self):
        """Editing a title through the admin must mark the draft page dirty."""
        language = "en"
        admin_user = self.get_admin()
        page = create_page('A', 'nav_playground.html', language)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        page.publish("en")
        draft_page = page.get_draft_object()
        admin_url = reverse("admin:cms_page_edit_title_fields", args=(
            draft_page.pk, language
        ))
        post_data = {
            'title': "A Title"
        }
        with self.login_user_context(admin_user):
            self.client.post(admin_url, post_data)
            draft_page = Page.objects.get(pk=page.pk).get_draft_object()
            self.assertTrue(draft_page.is_dirty('en'))

    def test_edit_title_languages(self):
        """Same flow as test_edit_title_dirty_bit; kept for the language code path."""
        language = "en"
        admin_user = self.get_admin()
        page = create_page('A', 'nav_playground.html', language)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        page.publish("en")
        draft_page = page.get_draft_object()
        admin_url = reverse("admin:cms_page_edit_title_fields", args=(
            draft_page.pk, language
        ))
        post_data = {
            'title': "A Title"
        }
        with self.login_user_context(admin_user):
            self.client.post(admin_url, post_data)
            draft_page = Page.objects.get(pk=page.pk).get_draft_object()
            self.assertTrue(draft_page.is_dirty('en'))


class NoDBAdminTests(CMSTestCase):
    """Admin lookup checks that do not need database fixtures."""

    @property
    def admin_class(self):
        # The PageAdmin instance registered with the default admin site.
        return site._registry[Page]

    def test_lookup_allowed_site__exact(self):
        self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))

    def test_lookup_allowed_published(self):
        self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))


class PluginPermissionTests(AdminTestsBase):
    """Permission checks for plugin add/edit/move/copy/delete admin endpoints."""

    def setUp(self):
        self._page = create_page('test page', 'nav_playground.html', 'en')
        self._placeholder = self._page.placeholders.all()[0]

    def _get_admin(self):
        """Create and return a saved staff user (password 'admin')."""
        User = get_user_model()
        fields = \
            dict(email="admin@django-cms.org", is_staff=True, is_active=True)
        if (User.USERNAME_FIELD != 'email'):
            # Custom user models may key on a different username field.
            fields[User.USERNAME_FIELD] = "admin"
        admin_user = User(**fields)
        admin_user.set_password('admin')
        admin_user.save()
        return admin_user

    def _get_page_admin(self):
        """Return the PageAdmin registered with the default admin site."""
        return admin.site._registry[Page]

    def _give_permission(self, user, model, permission_type, save=True):
        """Grant ``user`` the ``permission_type`` (add/change/delete) for ``model``."""
        codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
        user.user_permissions.add(Permission.objects.get(codename=codename))

    def _give_page_permission_rights(self, user):
        """Grant add/change/delete rights on the PagePermission model."""
        self._give_permission(user, PagePermission, 'add')
        self._give_permission(user, PagePermission, 'change')
        self._give_permission(user, PagePermission, 'delete')

    def _get_change_page_request(self, user, page):
        """Build a minimal fake request object for the page change view."""
        return type('Request', (object,), {
            'user': user,
            'path': base.URL_CMS_PAGE_CHANGE % page.pk
        })

    def _give_cms_permissions(self, user, save=True):
        """Grant full Page/Title model perms plus a GlobalPagePermission on all sites."""
        for perm_type in ['add', 'change', 'delete']:
            for model in [Page, Title]:
                self._give_permission(user, model, perm_type, False)
        gpp = GlobalPagePermission.objects.create(
            user=user,
            can_change=True,
            can_delete=True,
            can_change_advanced_settings=False,
            can_publish=True,
            can_change_permissions=False,
            can_move_page=True,
        )
        gpp.sites = Site.objects.all()
        if save:
            user.save()

    def _create_plugin(self):
        """Add a TextPlugin to the test placeholder and return it."""
        plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
        return plugin

    def test_plugin_add_requires_permissions(self):
        """User tries to add a plugin but has no permissions.
        He can add the plugin after he got the permissions"""
        admin = self._get_admin()
        self._give_cms_permissions(admin)
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='admin@django-cms.org', password='admin')
        else:
            self.client.login(username='admin', password='admin')
        url = reverse('admin:cms_page_add_plugin')
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': self._placeholder.pk,
            'plugin_language': 'en',
            'plugin_parent': '',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Granting the Text model 'add' permission unlocks the endpoint.
        self._give_permission(admin, Text, 'add')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_plugin_edit_requires_permissions(self):
        """User tries to edit a plugin but has no permissions.
        He can edit the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='test@test.com', password='test@test.com')
        else:
            self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_edit_plugin', args=[plugin.id])
        response = self.client.post(url, dict())
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'change')
        response = self.client.post(url, dict())
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_plugin_remove_requires_permissions(self):
        """User tries to remove a plugin but has no permissions.
        He can remove the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='test@test.com', password='test@test.com')
        else:
            self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_delete_plugin', args=[plugin.pk])
        data = dict(plugin_id=plugin.id)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'delete')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)

    def test_plugin_move_requires_permissions(self):
        """User tries to move a plugin but has no permissions.
        He can move the plugin after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='test@test.com', password='test@test.com')
        else:
            self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_move_plugin')
        data = dict(plugin_id=plugin.id,
                    placeholder_id=self._placeholder.pk,
                    plugin_parent='',
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'change')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_plugins_copy_requires_permissions(self):
        """User tries to copy plugin but has no permissions.
        He can copy plugins after he got the permissions"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='test@test.com', password='test@test.com')
        else:
            self.client.login(username='test', password='test')
        url = reverse('admin:cms_page_copy_plugins')
        data = dict(source_plugin_id=plugin.id,
                    source_placeholder_id=self._placeholder.pk,
                    source_language='en',
                    target_language='fr',
                    target_placeholder_id=self._placeholder.pk,
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'add')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_plugins_copy_placeholder_ref(self):
        """User copies a placeholder into a clipboard. A PlaceholderReferencePlugin
        is created. Afterwards he copies this into a placeholder and the
        PlaceholderReferencePlugin unpacks its content. After that he clear the clipboard"""
        self.assertEqual(Placeholder.objects.count(), 2)
        self._create_plugin()
        self._create_plugin()
        admin_user = self.get_superuser()
        clipboard = Placeholder()
        clipboard.save()
        self.assertEqual(CMSPlugin.objects.count(), 2)
        settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
        settings.save()
        self.assertEqual(Placeholder.objects.count(), 3)
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='admin@django-cms.org', password='admin@django-cms.org')
        else:
            self.client.login(username='admin', password='admin')
        url = reverse('admin:cms_page_copy_plugins')
        # Empty source_plugin_id means "copy the whole placeholder".
        data = dict(source_plugin_id='',
                    source_placeholder_id=self._placeholder.pk,
                    source_language='en',
                    target_language='en',
                    target_placeholder_id=clipboard.pk,
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        clipboard_plugins = clipboard.get_plugins()
        self.assertEqual(CMSPlugin.objects.count(), 5)
        self.assertEqual(clipboard_plugins.count(), 1)
        self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
        placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
        ref_placeholder = placeholder_plugin.placeholder_ref
        copied_plugins = ref_placeholder.get_plugins()
        self.assertEqual(copied_plugins.count(), 2)
        # Copy the reference plugin back out of the clipboard; it unpacks.
        data = dict(source_plugin_id=placeholder_plugin.pk,
                    source_placeholder_id=clipboard.pk,
                    source_language='en',
                    target_language='fr',
                    target_placeholder_id=self._placeholder.pk,
                    )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        plugins = self._placeholder.get_plugins()
        self.assertEqual(plugins.count(), 4)
        self.assertEqual(CMSPlugin.objects.count(), 7)
        self.assertEqual(Placeholder.objects.count(), 4)
        # Clearing the clipboard drops the reference plugin and its copies.
        url = reverse('admin:cms_page_clear_placeholder', args=[clipboard.pk])
        response = self.client.post(url, {'test': 0})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(CMSPlugin.objects.count(), 4)
        self.assertEqual(Placeholder.objects.count(), 3)

    def test_plugins_copy_language(self):
        """User tries to copy plugin but has no permissions.
        He can copy plugins after he got the permissions"""
        self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD != 'email':
            self.client.login(username='test', password='test')
        else:
            self.client.login(username='test@test.com', password='test@test.com')
        self.assertEqual(1, CMSPlugin.objects.all().count())
        url = reverse('admin:cms_page_copy_language', args=[self._page.pk])
        data = dict(
            source_language='en',
            target_language='fr',
        )
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # After he got the permissions, he can edit the plugin
        self._give_permission(normal_guy, Text, 'add')
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertEqual(2, CMSPlugin.objects.all().count())

    def test_page_permission_inline_visibility(self):
        """PagePermissionInlineAdmin is shown only while can_change_permissions holds."""
        User = get_user_model()
        fields = dict(email='user@domain.com', password='user', is_staff=True)
        if get_user_model().USERNAME_FIELD != 'email':
            fields[get_user_model().USERNAME_FIELD] = 'user'
        user = User(**fields)
        user.save()
        self._give_page_permission_rights(user)
        page = create_page('A', 'nav_playground.html', 'en')
        page_permission = PagePermission.objects.create(
            can_change_permissions=True, user=user, page=page)
        request = self._get_change_page_request(user, page)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        # user has can_change_permission
        # => must see the PagePermissionInline
        self.assertTrue(
            any(type(inline) is PagePermissionInlineAdmin
                for inline in page_admin.get_inline_instances(request,
                                                              page if not DJANGO_1_4 else None)))
        page = Page.objects.get(pk=page.pk)
        # remove can_change_permission
        page_permission.can_change_permissions = False
        page_permission.save()
        request = self._get_change_page_request(user, page)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        # => PagePermissionInline is no longer visible
        self.assertFalse(
            any(type(inline) is PagePermissionInlineAdmin
                for inline in page_admin.get_inline_instances(request,
                                                              page if not DJANGO_1_4 else None)))

    def test_edit_title_is_allowed_for_staff_user(self):
        """
        We check here both the permission on a single page, and the global permissions
        """
        user = self._create_user('user', is_staff=True)
        another_user = self._create_user('another_user', is_staff=True)
        page = create_page('A', 'nav_playground.html', 'en')
        admin_url = reverse("admin:cms_page_edit_title_fields", args=(
            page.pk, 'en'
        ))
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        username = getattr(user, get_user_model().USERNAME_FIELD)
        self.client.login(username=username, password=username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Per-page grant unlocks the view for this user.
        assign_user_to_page(page, user, grant_all=True)
        username = getattr(user, get_user_model().USERNAME_FIELD)
        self.client.login(username=username, password=username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        # Global permissions unlock it for a different user too.
        self._give_cms_permissions(another_user)
        username = getattr(another_user, get_user_model().USERNAME_FIELD)
        self.client.login(username=username, password=username)
        response = self.client.get(admin_url)
        self.assertEqual(response.status_code, HttpResponse.status_code)

    def test_plugin_add_returns_valid_pk_for_plugin(self):
        """The add-plugin response embeds an edit URL whose pk resolves to a real plugin."""
        admin_user = self._get_admin()
        self._give_cms_permissions(admin_user)
        self._give_permission(admin_user, Text, 'add')
        username = getattr(admin_user, get_user_model().USERNAME_FIELD)
        self.client.login(username=username, password='admin')
        url = reverse('admin:cms_page_add_plugin')
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': self._placeholder.pk,
            'plugin_language': 'en',
            'plugin_parent': '',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, HttpResponse.status_code)
        self.assertEqual(response['content-type'], 'application/json')
        # The response body contains ".../edit-plugin/<pk>/"; that pk must exist.
        pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
        self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())


class AdminFormsTests(AdminTestsBase):
    """Validation behavior of PageForm / AdvancedSettingsForm and related admin views."""

    def test_clean_overwrite_url(self):
        """A PageForm with overwrite_url must validate both on create and re-edit."""
        user = AnonymousUser()
        user.is_superuser = True
        user.pk = 1
        request = type('Request', (object,), {'user': user})
        with SettingsOverride():
            data = {
                'title': 'TestPage',
                'slug': 'test-page',
                'language': 'en',
                'overwrite_url': '/overwrite/url/',
                'site': Site.objects.get_current().pk,
                'template': get_cms_setting('TEMPLATES')[0][0],
                'published': True
            }
            form = PageForm(data)
            self.assertTrue(form.is_valid(), form.errors.as_text())
            # WTF? WHY DOES form.save() not handle this stuff???
            instance = form.save()
            instance.permission_user_cache = user
            instance.permission_advanced_settings_cache = True
            Title.objects.set_or_create(request, instance, form, 'en')
            form = PageForm(data, instance=instance)
            self.assertTrue(form.is_valid(), form.errors.as_text())

    def test_missmatching_site_parent_dotsite(self):
        """A child page whose site differs from its parent's must fail validation."""
        site0 = Site.objects.create(domain='foo.com', name='foo.com')
        site1 = Site.objects.create(domain='foo.com', name='foo.com')
        parent_page = Page.objects.create(
            template='nav_playground.html',
            site=site0)
        new_page_data = {
            'title': 'Title',
            'slug': 'slug',
            'language': 'en',
            'site': site1.pk,
            'template': get_cms_setting('TEMPLATES')[0][0],
            'reverse_id': '',
            'parent': parent_page.pk,
        }
        form = PageForm(data=new_page_data, files=None)
        self.assertFalse(form.is_valid())
        self.assertIn(u"Site doesn't match the parent's page site",
                      form.errors['__all__'])

    def test_reverse_id_error_location(self):
        ''' Test moving the reverse_id validation error to a field specific one '''
        # this is the Reverse ID we'll re-use to break things.
        dupe_id = 'p1'
        curren_site = Site.objects.get_current()
        create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
        page2 = create_page('Page 2', 'nav_playground.html', 'en')
        # Assemble a bunch of data to test the page form
        page2_data = {
            'language': 'en',
            'site': curren_site.pk,
            'reverse_id': dupe_id,
            'template': 'col_two.html',
        }
        form = AdvancedSettingsForm(data=page2_data, files=None)
        self.assertFalse(form.is_valid())
        # reverse_id is the only item that is in __all__ as every other field
        # has it's own clean method. Moving it to be a field error means
        # __all__ is now not available.
        self.assertNotIn('__all__', form.errors)
        # In moving it to it's own field, it should be in form.errors, and
        # the values contained therein should match these.
        self.assertIn('reverse_id', form.errors)
        self.assertEqual(1, len(form.errors['reverse_id']))
        self.assertEqual([u'A page with this reverse URL id exists already.'],
                         form.errors['reverse_id'])
        page2_data['reverse_id'] = ""
        form = AdvancedSettingsForm(data=page2_data, files=None)
        self.assertTrue(form.is_valid())
        admin_user = self._get_guys(admin_only=True)
        # reset some of page2_data so we can use cms.api.create_page
        page2 = page2.reload()
        page2.site = curren_site
        page2.save()
        with self.login_user_context(admin_user):
            # re-reset the page2_data for the admin form instance.
            page2_data['reverse_id'] = dupe_id
            page2_data['site'] = curren_site.pk
            # post to the admin change form for page 2, and test that the
            # reverse_id form row has an errors class. Django's admin avoids
            # collapsing these, so that the error is visible.
            resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk,
                                    page2_data)
            self.assertContains(resp, '<div class="form-row errors reverse_id">')

    def test_create_page_type(self):
        """The add-page-type flow creates the page_types node and copies plugins."""
        page = create_page('Test', 'static.html', 'en', published=True,
                           reverse_id="home")
        for placeholder in Placeholder.objects.all():
            add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        page.publish('en')
        self.assertEqual(Page.objects.count(), 2)
        self.assertEqual(CMSPlugin.objects.count(), 4)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            response = self.client.get(
                "%s?copy_target=%s&language=%s" % (reverse("admin:cms_page_add_page_type"), page.pk, 'en'))
            self.assertEqual(response.status_code, 302)
            self.assertEqual(Page.objects.count(), 3)
            self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
            page_types = Page.objects.get(reverse_id='page_types')
            self.assertRedirects(response,
                                 "/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1&copy_target=%s&language=en" % (
                                     page_types.pk, page.pk))
            # test no page types if no page types there
            response = self.client.get(reverse('admin:cms_page_add'))
            self.assertNotContains(response, "page_type")
            # create out first page type
            page_data = {
                'title': 'type1', 'slug': 'type1', '_save': 1,
                'template': 'static.html', 'site': 1,
                'language': 'en'
            }
            response = self.client.post(
                "/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1&copy_target=%s&language=en" % (
                    page_types.pk, page.pk), data=page_data)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(Page.objects.count(), 4)
            self.assertEqual(CMSPlugin.objects.count(), 6)
            response = self.client.get(reverse('admin:cms_page_add'))
            self.assertContains(response, "page_type")
            # no page types available if you use the copy_target
            response = self.client.get("%s?copy_target=%s&language=en" % (reverse('admin:cms_page_add'), page.pk))
            self.assertNotContains(response, "page_type")

    def test_render_edit_mode(self):
        """Edit-mode rendering stays inside the expected query-count envelopes."""
        from django.core.cache import cache

        cache.clear()
        create_page('Test', 'static.html', 'en', published=True)
        for placeholder in Placeholder.objects.all():
            add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        user = self.get_superuser()
        self.assertEqual(Placeholder.objects.all().count(), 4)
        with self.login_user_context(user):
            with self.assertNumQueries(FuzzyInt(40, 66)):
                output = force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
            self.assertIn('<b>Test</b>', output)
            # Rendering in edit mode creates the missing placeholders.
            self.assertEqual(Placeholder.objects.all().count(), 9)
            self.assertEqual(StaticPlaceholder.objects.count(), 2)
            for placeholder in Placeholder.objects.all():
                add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
            with self.assertNumQueries(FuzzyInt(40, 60)):
                output = force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
            self.assertIn('<b>Test</b>', output)
            with self.assertNumQueries(FuzzyInt(18, 34)):
                force_unicode(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
            with self.assertNumQueries(FuzzyInt(12, 14)):
                force_unicode(self.client.get('/en/').content)

    def test_tree_view_queries(self):
        """The admin page tree view stays inside the expected query-count envelope."""
        from django.core.cache import cache

        cache.clear()
        for i in range(10):
            create_page('Test%s' % i, 'col_two.html', 'en', published=True)
        for placeholder in Placeholder.objects.all():
            add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        user = self.get_superuser()
        with self.login_user_context(user):
            with self.assertNumQueries(FuzzyInt(18, 33)):
                force_unicode(self.client.get('/en/admin/cms/page/'))

    def test_smart_link_published_pages(self):
        """The published-pages endpoint is AJAX-only and searchable by every title field."""
        admin, staff_guy = self._get_guys()
        page_url = '/en/admin/cms/page/published-pages/'  # Not sure how to achieve this with reverse...
        with self.login_user_context(staff_guy):
            multi_title_page = create_page('main_title', 'col_two.html', 'en',
                                           published=True,
                                           overwrite_url='overwritten_url',
                                           menu_title='menu_title')
            title = multi_title_page.get_title_obj()
            title.page_title = 'page_title'
            title.save()
            multi_title_page.save()
            publish_page(multi_title_page, admin, 'en')
            # Non ajax call should return a 403 as this page shouldn't be accessed by anything else but ajax queries
            self.assertEqual(403, self.client.get(page_url).status_code)
            self.assertEqual(200,
                             self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
            )
            # Test that the query param is working as expected.
            self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
                                                               HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
            self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
                                                               HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
            self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
                                                               HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
            self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
                                                               HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))


class AdminPageEditContentSizeTests(AdminTestsBase):
    """
    System user count influences the size of the page edit page,
    but the users are only 2 times present on the page

    The test relates to extra=0
    at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin
    """

    def test_editpage_contentsize(self):
        """
        Expected a username only 2 times in the content, but a relationship
        between usercount and pagesize
        """
        with SettingsOverride(CMS_PERMISSION=True):
            admin_user = self.get_superuser()
            PAGE_NAME = 'TestPage'
            USER_NAME = 'test_size_user_0'
            current_site = Site.objects.get(pk=1)
            page = create_page(PAGE_NAME, "nav_playground.html", "en",
                               site=current_site, created_by=admin_user)
            page.save()
            self._page = page
            with self.login_user_context(admin_user):
                url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
                old_response_size = len(response.content)
                old_user_count = get_user_model().objects.count()
                # create additionals user and reload the page
                get_user_model().objects.create_user(username=USER_NAME,
                                                     email=USER_NAME + '@django-cms.org',
                                                     password=USER_NAME)
                user_count = get_user_model().objects.count()
                more_users_in_db = old_user_count < user_count
                # we have more users
                self.assertTrue(more_users_in_db, "New users got NOT created")
                response = self.client.get(url)
                new_response_size = len(response.content)
                page_size_grown = old_response_size < new_response_size
                # expect that the pagesize gets influenced by the useramount of the system
                self.assertTrue(page_size_grown, "Page size has not grown after user creation")
                # usernames are only 2 times in content
                text = smart_str(response.content, response._charset)
                foundcount = text.count(USER_NAME)
                # 2 forms contain usernames as options
                self.assertEqual(foundcount, 2,
                                 "Username %s appeared %s times in response.content, expected 2 times" % (
                                     USER_NAME, foundcount))
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs perf tests. Our buildbot infrastructure requires each slave to run steps serially. This is sub-optimal for android, where these steps can run independently on multiple connected devices. The buildbots will run this script multiple times per cycle: - First: all steps listed in --steps in will be executed in parallel using all connected devices. Step results will be pickled to disk. Each step has a unique name. The result code will be ignored if the step name is listed in --flaky-steps. The buildbot will treat this step as a regular step, and will not process any graph data. - Then, with -print-step STEP_NAME: at this stage, we'll simply print the file with the step results previously saved. The buildbot will then process the graph data accordingly. The JSON steps file contains a dictionary in the format: { "version": int, "steps": { "foo": { "device_affinity": int, "cmd": "script_to_execute foo" }, "bar": { "device_affinity": int, "cmd": "script_to_execute bar" } } } The JSON flaky steps file contains a list with step names which results should be ignored: [ "step_name_foo", "step_name_bar" ] Note that script_to_execute necessarily have to take at least the following option: --device: the serial number to be passed to all adb commands. """ import collections import io import json import logging import os import pickle import re import shutil import sys import tempfile import threading import time import zipfile from devil.android import battery_utils from devil.android import device_errors from devil.android import forwarder from devil.constants import exit_codes from devil.utils import cmd_helper from pylib import constants from pylib.base import base_test_result from pylib.base import base_test_runner from pylib.constants import host_paths # Regex for the master branch commit position. 
_GIT_CR_POS_RE = re.compile(r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$') def _GetChromiumRevision(): # pylint: disable=line-too-long """Get the git hash and commit position of the chromium master branch. See: https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/runtest.py#212 Returns: A dictionary with 'revision' and 'commit_pos' keys. """ # pylint: enable=line-too-long status, output = cmd_helper.GetCmdStatusAndOutput( ['git', 'log', '-n', '1', '--pretty=format:%H%n%B', 'HEAD'], host_paths.DIR_SOURCE_ROOT) revision = None commit_pos = None if not status: lines = output.splitlines() revision = lines[0] for line in reversed(lines): m = _GIT_CR_POS_RE.match(line.strip()) if m: commit_pos = int(m.group(1)) break return {'revision': revision, 'commit_pos': commit_pos} def GetPersistedResult(test_name): file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.error('File not found %s', file_name) return None with file(file_name, 'r') as f: return pickle.loads(f.read()) def OutputJsonList(json_input, json_output): with file(json_input, 'r') as i: all_steps = json.load(i) step_values = [] for k, v in all_steps['steps'].iteritems(): data = {'test': k, 'device_affinity': v['device_affinity']} persisted_result = GetPersistedResult(k) if persisted_result: data['start_time'] = persisted_result['start_time'] data['end_time'] = persisted_result['end_time'] data['total_time'] = persisted_result['total_time'] data['has_archive'] = persisted_result['archive_bytes'] is not None step_values.append(data) with file(json_output, 'w') as o: o.write(json.dumps(step_values)) return 0 def PrintTestOutput(test_name, json_file_name=None, archive_file_name=None): """Helper method to print the output of previously executed test_name. Args: test_name: name of the test that has been previously executed. json_file_name: name of the file to output chartjson data to. 
archive_file_name: name of the file to write the compressed ZIP archive. Returns: exit code generated by the test step. """ persisted_result = GetPersistedResult(test_name) if not persisted_result: return exit_codes.INFRA logging.info('*' * 80) logging.info('Output from:') logging.info(persisted_result['cmd']) logging.info('*' * 80) output_formatted = '' persisted_outputs = persisted_result['output'] for i in xrange(len(persisted_outputs)): output_formatted += '\n\nOutput from run #%d:\n\n%s' % ( i, persisted_outputs[i]) print output_formatted if json_file_name: with file(json_file_name, 'w') as f: f.write(persisted_result['chartjson']) if archive_file_name: if persisted_result['archive_bytes'] is not None: with file(archive_file_name, 'wb') as f: f.write(persisted_result['archive_bytes']) else: logging.error('The output dir was not archived.') return persisted_result['exit_code'] def PrintSummary(test_names): logging.info('*' * 80) logging.info('Sharding summary') device_total_time = collections.defaultdict(int) for test_name in test_names: file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.info('%s : No status file found', test_name) continue with file(file_name, 'r') as f: result = pickle.loads(f.read()) logging.info('%s : exit_code=%d in %d secs at %s', result['name'], result['exit_code'], result['total_time'], result['device']) device_total_time[result['device']] += result['total_time'] for device, device_time in device_total_time.iteritems(): logging.info('Total for device %s : %d secs', device, device_time) logging.info('Total steps time: %d secs', sum(device_total_time.values())) class _HeartBeatLogger(object): # How often to print the heartbeat on flush(). 
_PRINT_INTERVAL = 30.0 def __init__(self): """A file-like class for keeping the buildbot alive.""" self._len = 0 self._tick = time.time() self._stopped = threading.Event() self._timer = threading.Thread(target=self._runner) self._timer.start() def _runner(self): while not self._stopped.is_set(): self.flush() self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL) def write(self, data): self._len += len(data) def flush(self): now = time.time() if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: self._tick = now print '--single-step output length %d' % self._len sys.stdout.flush() def stop(self): self._stopped.set() class TestRunner(base_test_runner.BaseTestRunner): def __init__(self, test_options, device, shard_index, max_shard, tests, flaky_tests): """A TestRunner instance runs a perf test on a single device. Args: test_options: A PerfOptions object. device: Device to run the tests. shard_index: the index of this device. max_shards: the maximum shard index. tests: a dict mapping test_name to command. flaky_tests: a list of flaky test_name. 
""" super(TestRunner, self).__init__(device, None) self._options = test_options self._shard_index = shard_index self._max_shard = max_shard self._tests = tests self._flaky_tests = flaky_tests self._output_dir = None self._device_battery = battery_utils.BatteryUtils(self.device) @staticmethod def _SaveResult(result): pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name']) if os.path.exists(pickled): with file(pickled, 'r') as f: previous = pickle.loads(f.read()) result['output'] = previous['output'] + result['output'] with file(pickled, 'w') as f: f.write(pickle.dumps(result)) def _CheckDeviceAffinity(self, test_name): """Returns True if test_name has affinity for this shard.""" affinity = (self._tests['steps'][test_name]['device_affinity'] % self._max_shard) if self._shard_index == affinity: return True logging.info('Skipping %s on %s (affinity is %s, device is %s)', test_name, self.device_serial, affinity, self._shard_index) return False def _CleanupOutputDirectory(self): if self._output_dir: shutil.rmtree(self._output_dir, ignore_errors=True) self._output_dir = None def _ReadChartjsonOutput(self): if not self._output_dir: return '' json_output_path = os.path.join(self._output_dir, 'results-chart.json') try: with open(json_output_path) as f: return f.read() except IOError: logging.exception('Exception when reading chartjson.') logging.error('This usually means that telemetry did not run, so it could' ' not generate the file. 
Please check the device running' ' the test.') return '' def _WriteBuildBotJson(self): """Write metadata about the buildbot environment to the output dir.""" data = { 'chromium': _GetChromiumRevision(), 'environment': dict(os.environ)} logging.info('BuildBot environment: %s', data) with open(os.path.join(self._output_dir, 'buildbot.json'), 'w') as f: json.dump(data, f, sort_keys=True, indent=2, separators=(',', ': ')) def _ArchiveOutputDir(self): """Archive all files in the output dir, and return as compressed bytes.""" with io.BytesIO() as archive: with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents: num_files = 0 for absdir, _, files in os.walk(self._output_dir): reldir = os.path.relpath(absdir, self._output_dir) for filename in files: src_path = os.path.join(absdir, filename) # We use normpath to turn './file.txt' into just 'file.txt'. dst_path = os.path.normpath(os.path.join(reldir, filename)) contents.write(src_path, dst_path) num_files += 1 if num_files: logging.info('%d files in the output dir were archived.', num_files) else: logging.warning('No files in the output dir. Archive is empty.') return archive.getvalue() def _LaunchPerfTest(self, test_name): """Runs a perf test. Args: test_name: the name of the test to be executed. 
Returns: A tuple containing (Output, base_test_result.ResultType) """ if not self._CheckDeviceAffinity(test_name): return '', base_test_result.ResultType.PASS try: logging.warning('Unmapping device ports') forwarder.Forwarder.UnmapAllDevicePorts(self.device) self.device.RestartAdbd() except Exception as e: # pylint: disable=broad-except logging.error('Exception when tearing down device %s', e) test_config = self._tests['steps'][test_name] cmd = ('%s --device %s' % (test_config['cmd'], self.device_serial)) if (self._options.collect_chartjson_data or test_config.get('archive_output_dir')): self._output_dir = tempfile.mkdtemp() self._WriteBuildBotJson() cmd = cmd + ' --output-dir=%s' % self._output_dir logging.info( 'temperature: %s (0.1 C)', str(self._device_battery.GetBatteryInfo().get('temperature'))) if self._options.max_battery_temp: self._device_battery.LetBatteryCoolToTemperature( self._options.max_battery_temp) logging.info('Charge level: %s%%', str(self._device_battery.GetBatteryInfo().get('level'))) if self._options.min_battery_level: self._device_battery.ChargeDeviceToLevel( self._options.min_battery_level) self.device.SetScreen(True) logging.info('%s : %s', test_name, cmd) start_time = time.time() timeout = test_config.get('timeout', 3600) if self._options.no_timeout: timeout = None logging.info('Timeout for %s test: %s', test_name, timeout) full_cmd = cmd if self._options.dry_run: full_cmd = 'echo %s' % cmd logfile = sys.stdout archive_bytes = None if self._options.single_step: # Just print a heart-beat so that the outer buildbot scripts won't timeout # without response. 
logfile = _HeartBeatLogger() cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT) if full_cmd.startswith('src/'): cwd = os.path.abspath(os.path.join(host_paths.DIR_SOURCE_ROOT, os.pardir)) try: exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile) json_output = self._ReadChartjsonOutput() if test_config.get('archive_output_dir'): archive_bytes = self._ArchiveOutputDir() except cmd_helper.TimeoutError as e: exit_code = -1 output = e.output json_output = '' finally: self._CleanupOutputDirectory() if self._options.single_step: logfile.stop() end_time = time.time() if exit_code is None: exit_code = -1 logging.info('%s : exit_code=%d in %d secs at %s', test_name, exit_code, end_time - start_time, self.device_serial) if exit_code == 0: result_type = base_test_result.ResultType.PASS else: result_type = base_test_result.ResultType.FAIL # Since perf tests use device affinity, give the device a chance to # recover if it is offline after a failure. Otherwise, the master sharder # will remove it from the pool and future tests on this device will fail. try: self.device.WaitUntilFullyBooted(timeout=120) except device_errors.CommandTimeoutError as e: logging.error('Device failed to return after %s: %s', test_name, e) actual_exit_code = exit_code if test_name in self._flaky_tests: # The exit_code is used at the second stage when printing the # test output. If the test is flaky, force to "0" to get that step green # whilst still gathering data to the perf dashboards. # The result_type is used by the test_dispatcher to retry the test. 
exit_code = 0 persisted_result = { 'name': test_name, 'output': [output], 'chartjson': json_output, 'archive_bytes': archive_bytes, 'exit_code': exit_code, 'actual_exit_code': actual_exit_code, 'result_type': result_type, 'start_time': start_time, 'end_time': end_time, 'total_time': end_time - start_time, 'device': self.device_serial, 'cmd': cmd, } self._SaveResult(persisted_result) return (output, result_type) def RunTest(self, test_name): """Run a perf test on the device. Args: test_name: String to use for logging the test result. Returns: A tuple of (TestRunResults, retry). """ _, result_type = self._LaunchPerfTest(test_name) results = base_test_result.TestRunResults() results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) retry = None if not results.DidRunPass(): retry = test_name return results, retry
""" Author: Dr. John T. Hwang <hwangjt@umich.edu> This package is distributed under New BSD license. """ import numpy as np import scipy.sparse from numbers import Integral from smt.utils.linear_solvers import get_solver, LinearSolver, VALID_SOLVERS from smt.utils.line_search import get_line_search_class, LineSearch, VALID_LINE_SEARCHES from smt.utils.caching import cached_operation from smt.surrogate_models.surrogate_model import SurrogateModel class RMTS(SurrogateModel): """ Regularized Minimal-energy Tensor-product Spline interpolant base class for RMTC and RMTB. """ def _initialize(self): super(RMTS, self)._initialize() declare = self.options.declare supports = self.supports declare( "xlimits", types=np.ndarray, desc="Lower/upper bounds in each dimension - ndarray [nx, 2]", ) declare( "smoothness", 1.0, types=(Integral, float, tuple, list, np.ndarray), desc="Smoothness parameter in each dimension - length nx. None implies uniform", ) declare( "regularization_weight", 1e-14, types=(Integral, float), desc="Weight of the term penalizing the norm of the spline coefficients." 
+ " This is useful as an alternative to energy minimization " + " when energy minimization makes the training time too long.", ) declare( "energy_weight", 1e-4, types=(Integral, float), desc="The weight of the energy minimization terms", ) declare( "extrapolate", False, types=bool, desc="Whether to perform linear extrapolation for external evaluation points", ) declare( "min_energy", True, types=bool, desc="Whether to perform energy minimization", ) declare( "approx_order", 4, types=Integral, desc="Exponent in the approximation term" ) declare( "solver", "krylov", values=VALID_SOLVERS, types=LinearSolver, desc="Linear solver", ) declare( "derivative_solver", "krylov", values=VALID_SOLVERS, types=LinearSolver, desc="Linear solver used for computing output derivatives (dy_dyt)", ) declare( "grad_weight", 0.5, types=(Integral, float), desc="Weight on gradient training data", ) declare( "solver_tolerance", 1e-12, types=(Integral, float), desc="Convergence tolerance for the nonlinear solver", ) declare( "nonlinear_maxiter", 10, types=Integral, desc="Maximum number of nonlinear solver iterations", ) declare( "line_search", "backtracking", values=VALID_LINE_SEARCHES, types=LineSearch, desc="Line search algorithm", ) declare( "save_energy_terms", False, types=bool, desc="Whether to cache energy terms in the data_dir directory", ) declare( "data_dir", None, values=(None,), types=str, desc="Directory for loading / saving cached data; None means do not save or load", ) declare( "max_print_depth", 5, types=Integral, desc="Maximum depth (level of nesting) to print operation descriptions and times", ) supports["training_derivatives"] = True supports["derivatives"] = True supports["output_derivatives"] = True def _setup_hessian(self): diag = np.ones(self.num["dof"]) arange = np.arange(self.num["dof"]) full_hess = scipy.sparse.csc_matrix((diag, (arange, arange))) return full_hess def _compute_jac(self, ix1, ix2, x): data, rows, cols = self._compute_jac_raw(ix1, ix2, x) n = 
x.shape[0] full_jac = scipy.sparse.csc_matrix( (data, (rows, cols)), shape=(n, self.num["coeff"]) ) if self.full_dof2coeff is not None: full_jac = full_jac * self.full_dof2coeff return full_jac def _compute_approx_terms(self): # This computes the approximation terms for the training points. # We loop over kx: 0 is for values and kx>0 represents. # the 1-based index of the derivative given by the training point data. num = self.num xlimits = self.options["xlimits"] full_jac_dict = {} for kx in self.training_points[None]: xt, yt = self.training_points[None][kx] xmin = np.min(xt, axis=0) xmax = np.max(xt, axis=0) assert np.all(xlimits[:, 0] <= xmin), ( "Training points below min for %s" % kx ) assert np.all(xlimits[:, 1] >= xmax), ( "Training points above max for %s" % kx ) if kx == 0: c = 1.0 else: self.options["grad_weight"] / xlimits.shape[0] full_jac = self._compute_jac(kx, 0, xt) full_jac_dict[kx] = (full_jac, full_jac.T.tocsc(), c) return full_jac_dict def _compute_energy_terms(self): # This computes the energy terms that are to be minimized. # The quadrature points are the centroids of the multi-dimensional elements. 
num = self.num xlimits = self.options["xlimits"] inputs = {} inputs["nx"] = xlimits.shape[0] inputs["elem_list"] = num["elem_list"] if self.__class__.__name__ == "RMTB": inputs["num_ctrl_list"] = num["ctrl_list"] inputs["order_list"] = num["order_list"] if self.options["save_energy_terms"]: cache_dir = self.options["data_dir"] else: cache_dir = None with cached_operation(inputs, cache_dir) as outputs: if outputs: sq_mtx = outputs["sq_mtx"] else: n = np.prod(2 * num["elem_list"]) x = np.empty(n * num["x"]) self.rmtsc.compute_quadrature_points( n, np.array(2 * num["elem_list"], dtype=np.int32), x ) x = x.reshape((n, num["x"])) sq_mtx = [None] * num["x"] for kx in range(num["x"]): mtx = self._compute_jac(kx + 1, kx + 1, x) sq_mtx[kx] = ( mtx.T.tocsc() * mtx * (xlimits[kx, 1] - xlimits[kx, 0]) ** 4 ) outputs["sq_mtx"] = sq_mtx elem_vol = np.prod((xlimits[:, 1] - xlimits[:, 0]) / (2 * num["elem_list"])) total_vol = np.prod(xlimits[:, 1] - xlimits[:, 0]) full_hess = scipy.sparse.csc_matrix((num["dof"], num["dof"])) for kx in range(num["x"]): full_hess += sq_mtx[kx] * ( elem_vol / total_vol * self.options["smoothness"][kx] / (xlimits[kx, 1] - xlimits[kx, 0]) ** 4 ) return full_hess def _opt_func(self, sol, p, yt_dict): full_hess = self.full_hess full_jac_dict = self.full_jac_dict func = 0.5 * np.dot(sol, full_hess * sol) for kx in self.training_points[None]: full_jac, full_jac_T, c = full_jac_dict[kx] yt = yt_dict[kx] func += 0.5 * c * np.sum((full_jac * sol - yt) ** p) return func def _opt_grad(self, sol, p, yt_dict): full_hess = self.full_hess full_jac_dict = self.full_jac_dict grad = full_hess * sol for kx in self.training_points[None]: full_jac, full_jac_T, c = full_jac_dict[kx] yt = yt_dict[kx] grad += 0.5 * c * full_jac_T * p * (full_jac * sol - yt) ** (p - 1) return grad def _opt_dgrad_dyt(self, sol, p, yt_dict, kx): full_hess = self.full_hess full_jac_dict = self.full_jac_dict full_jac, full_jac_T, c = full_jac_dict[kx] yt = yt_dict[kx] diag_vec = p * (p - 1) * 
(full_jac * sol - yt) ** (p - 2) diag_mtx = scipy.sparse.diags(diag_vec, format="csc") mtx = 0.5 * c * full_jac_T.dot(diag_mtx) return -mtx.todense() def _opt_hess(self, sol, p, yt_dict): full_hess = self.full_hess full_jac_dict = self.full_jac_dict hess = scipy.sparse.csc_matrix(full_hess) for kx in self.training_points[None]: full_jac, full_jac_T, c = full_jac_dict[kx] yt = yt_dict[kx] diag_vec = p * (p - 1) * (full_jac * sol - yt) ** (p - 2) diag_mtx = scipy.sparse.diags(diag_vec, format="csc") hess += 0.5 * c * full_jac_T * diag_mtx * full_jac return hess def _opt_norm(self, sol, p, yt_dict): full_hess = self.full_hess full_jac_dict = self.full_jac_dict grad = self._opt_grad(sol, p, yt_dict) return np.linalg.norm(grad) def _get_yt_dict(self, ind_y): yt_dict = {} for kx in self.training_points[None]: xt, yt = self.training_points[None][kx] yt_dict[kx] = yt[:, ind_y] return yt_dict def _run_newton_solver(self, sol): num = self.num options = self.options solver = get_solver(options["solver"]) ls_class = get_line_search_class(options["line_search"]) total_size = int(num["dof"]) rhs = np.zeros((total_size, num["y"])) d_sol = np.zeros((total_size, num["y"])) p = self.options["approx_order"] for ind_y in range(rhs.shape[1]): with self.printer._timed_context("Solving for output %i" % ind_y): yt_dict = self._get_yt_dict(ind_y) norm = self._opt_norm(sol[:, ind_y], p, yt_dict) fval = self._opt_func(sol[:, ind_y], p, yt_dict) self.printer( "Iteration (num., iy, grad. norm, func.) 
: %3i %3i %15.9e %15.9e" % (0, ind_y, norm, fval) ) iter_count = 0 while ( iter_count < options["nonlinear_maxiter"] and norm > options["solver_tolerance"] ): with self.printer._timed_context(): with self.printer._timed_context("Assembling linear system"): mtx = self._opt_hess(sol[:, ind_y], p, yt_dict) rhs[:, ind_y] = -self._opt_grad(sol[:, ind_y], p, yt_dict) with self.printer._timed_context("Initializing linear solver"): solver._setup(mtx, self.printer) with self.printer._timed_context("Solving linear system"): solver._solve(rhs[:, ind_y], d_sol[:, ind_y], ind_y=ind_y) func = lambda x: self._opt_func(x, p, yt_dict) grad = lambda x: self._opt_grad(x, p, yt_dict) # sol[:, ind_y] += d_sol[:, ind_y] ls = ls_class(sol[:, ind_y], d_sol[:, ind_y], func, grad) with self.printer._timed_context("Performing line search"): sol[:, ind_y] = ls(1.0) norm = self._opt_norm(sol[:, ind_y], p, yt_dict) fval = self._opt_func(sol[:, ind_y], p, yt_dict) self.printer( "Iteration (num., iy, grad. norm, func.) 
: %3i %3i %15.9e %15.9e" % (iter_count, ind_y, norm, fval) ) self.mtx = mtx iter_count += 1 def _solve(self): num = self.num options = self.options solver = get_solver(options["solver"]) ls_class = get_line_search_class(options["line_search"]) total_size = int(num["dof"]) rhs = np.zeros((total_size, num["y"])) sol = np.zeros((total_size, num["y"])) d_sol = np.zeros((total_size, num["y"])) with self.printer._timed_context( "Solving initial startup problem (n=%i)" % total_size ): approx_order = options["approx_order"] nonlinear_maxiter = options["nonlinear_maxiter"] options["approx_order"] = 2 options["nonlinear_maxiter"] = 1 self._run_newton_solver(sol) options["approx_order"] = approx_order options["nonlinear_maxiter"] = nonlinear_maxiter with self.printer._timed_context( "Solving nonlinear problem (n=%i)" % total_size ): self._run_newton_solver(sol) return sol def _new_train(self): """ Train the model """ with self.printer._timed_context("Pre-computing matrices", "assembly"): with self.printer._timed_context("Computing dof2coeff", "dof2coeff"): self.full_dof2coeff = self._compute_dof2coeff() with self.printer._timed_context("Initializing Hessian", "init_hess"): self.full_hess = ( self._setup_hessian() * self.options["regularization_weight"] ) if self.options["min_energy"]: with self.printer._timed_context("Computing energy terms", "energy"): self.full_hess += ( self._compute_energy_terms() * self.options["energy_weight"] ) with self.printer._timed_context("Computing approximation terms", "approx"): self.full_jac_dict = self._compute_approx_terms() with self.printer._timed_context( "Solving for degrees of freedom", "total_solution" ): self.sol = self._solve() if self.full_dof2coeff is not None: self.sol_coeff = self.full_dof2coeff * self.sol else: self.sol_coeff = self.sol def _train(self): """ Train the model """ self._setup() tmp = self.rmtsc self.rmtsc = None inputs = {"self": self} with cached_operation(inputs, self.options["data_dir"]) as outputs: self.rmtsc = 
tmp if outputs: self.sol_coeff = outputs["sol_coeff"] self.sol = outputs["sol"] self.mtx = outputs["mtx"] self.full_dof2coeff = outputs["full_dof2coeff"] self.full_hess = outputs["full_hess"] self.full_jac_dict = outputs["full_jac_dict"] else: self._new_train() outputs["sol_coeff"] = self.sol_coeff outputs["sol"] = self.sol outputs["mtx"] = self.mtx outputs["full_dof2coeff"] = self.full_dof2coeff outputs["full_hess"] = self.full_hess outputs["full_jac_dict"] = self.full_jac_dict def _predict_values(self, x): """ Evaluates the model at a set of points. Arguments --------- x : np.ndarray [n_evals, dim] Evaluation point input variable values Returns ------- y : np.ndarray Evaluation point output variable values """ mtx = self._compute_prediction_mtx(x, 0) y = mtx.dot(self.sol_coeff) return y def _predict_derivatives(self, x, kx): """ Evaluates the derivatives at a set of points. Arguments --------- x : np.ndarray [n_evals, dim] Evaluation point input variable values kx : int The 0-based index of the input variable with respect to which derivatives are desired. Returns ------- y : np.ndarray Derivative values. """ mtx = self._compute_prediction_mtx(x, kx + 1) y = mtx.dot(self.sol_coeff) return y def _compute_prediction_mtx(self, x, kx): n = x.shape[0] num = self.num options = self.options data, rows, cols = self._compute_jac_raw(kx, 0, x) # In the explanation below, n is the number of dimensions, and # a_k and b_k are the lower and upper bounds for x_k. # # A C1 extrapolation can get very tricky, so we implement a simple C0 # extrapolation. We basically linarly extrapolate from the nearest # domain point. For example, if n = 4 and x2 > b2 and x3 > b3: # f(x1,x2,x3,x4) = f(x1,b2,b3,x4) + dfdx2 (x2-b2) + dfdx3 (x3-b3) # where the derivatives are evaluated at x1,b2,b3,x4 (called b) and # dfdx1|x = dfdx1|b + d2fdx1dx2|b (x2-b2) + d2fdx1dx3|b (x3-b3) # dfdx2|x = dfdx2|b. 
# The dfdx2|x derivative is what it is because f and all derivatives # evaluated at x1,b2,b3,x4 are constant with respect to changes in x2. # On the other hand, the dfdx1|x derivative is what it is because # f and all derivatives evaluated at x1,b2,b3,x4 change with x1. # The extrapolation function is non-differentiable at boundaries: # i.e., where x_k = a_k or x_k = b_k for at least one k. if options["extrapolate"]: # First we evaluate the vector pointing to each evaluation points # from the nearest point on the domain, in a matrix called dx. # If the ith evaluation point is not external, dx[i, :] = 0. dx = np.empty(n * num["support"] * num["x"]) self.rmtsc.compute_ext_dist(n, num["support"], x.flatten(), dx) dx = dx.reshape((n * num["support"], num["x"])) isexternal = np.array(np.array(dx, bool), float) for ix in range(num["x"]): # Now we compute the first order term where we have a # derivative times (x_k - b_k) or (x_k - a_k). data_tmp, rows, cols = self._compute_jac_raw(kx, ix + 1, x) data_tmp *= dx[:, ix] # If we are evaluating a derivative (with index kx), # we zero the first order terms for which dx_k = 0. 
if kx != 0: data_tmp *= 1 - isexternal[:, kx - 1] data += data_tmp mtx = scipy.sparse.csc_matrix((data, (rows, cols)), shape=(n, num["coeff"])) return mtx def _predict_output_derivatives(self, x): # dy_dyt = dy_dw * (dR_dw)^{-1} * dR_dyt n = x.shape[0] nw = self.mtx.shape[0] nx = x.shape[1] ny = self.sol.shape[1] p = self.options["approx_order"] dy_dw = self._compute_prediction_mtx(x, 0) if self.full_dof2coeff is not None: dy_dw = dy_dw * self.full_dof2coeff dy_dw = dy_dw.todense() dR_dw = self.mtx dy_dyt = {} for kx in self.training_points[None]: nt = self.training_points[None][kx][0].shape[0] dR_dyt = np.zeros((nw, nt, ny)) for ind_y in range(ny): yt_dict = self._get_yt_dict(ind_y) dR_dyt[:, :, ind_y] = self._opt_dgrad_dyt( self.sol[:, ind_y], p, yt_dict, kx ) solver = get_solver(self.options["derivative_solver"]) solver._setup(dR_dw, self.printer) dw_dyt = np.zeros((nw, nt, ny)) for ind_t in range(nt): for ind_y in range(ny): solver._solve( dR_dyt[:, ind_t, ind_y], dw_dyt[:, ind_t, ind_y], ind_y=ind_y ) dw_dyt[:, ind_t, ind_y] *= -1.0 if kx == 0: dy_dyt[None] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt) else: dy_dyt[kx - 1] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt) return dy_dyt
# Script: binary-search for the maximum number of pending transfers whose
# unlock() fits in the transaction gas limit, using an in-memory EVM.
from os import urandom

import eth_tester.backends.pyevm.main as pyevm_main
from eth_tester import EthereumTester, PyEVMBackend
from eth_utils import encode_hex
from web3 import EthereumTesterProvider, Web3

from raiden.constants import TRANSACTION_GAS_LIMIT
from raiden.transfer.balance_proof import pack_balance_proof
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.utils import hash_balance_data
from raiden.utils.signer import LocalSigner
from raiden_contracts.contract_manager import CONTRACTS_SOURCE_DIRS, ContractManager
from raiden_contracts.utils.utils import get_pending_transfers_tree

# increase block gas limit
pyevm_main.GENESIS_GAS_LIMIT = 6 * 10 ** 6


class ContractTester:
    """Thin wrapper around an in-memory PyEVM chain for deploying and
    calling contracts in tests.

    NOTE(review): deploy_contract / contract_address / call_transaction all
    raise NotImplementedError ("needs refactoring"), so this script cannot
    currently run end-to-end.
    """

    def __init__(self, generate_keys=0):
        # generate_keys: number of fresh accounts to create and pre-fund
        # (10**21 wei each) from the backend's first default account.
        # With 0, the backend's default accounts are used as-is.
        self.tester = EthereumTester(PyEVMBackend())
        self.web3 = Web3(EthereumTesterProvider(self.tester))

        if generate_keys > 0:
            self.private_keys = [urandom(32) for _ in range(generate_keys)]
            self.accounts = [self.tester.add_account(encode_hex(key)) for key in self.private_keys]
            for account in self.accounts:
                self.tester.send_transaction(
                    {
                        "from": self.tester.get_accounts()[0],
                        "to": account,
                        "value": 10 ** 21,
                        "gas": 21000,
                    }
                )
        else:
            self.accounts = self.tester.get_accounts()

        self.contract_manager = ContractManager(CONTRACTS_SOURCE_DIRS)
        # contract name -> deployment tx hash / contract object, filled by
        # deploy_contract (currently unimplemented).
        self.name_to_creation_hash = dict()
        self.name_to_contract = dict()

    def deploy_contract(self, name, **kwargs):
        # Deploy contract `name` with constructor kwargs; disabled pending
        # refactoring to a current web3 API.
        raise NotImplementedError("needs refactoring")
        # data = self.contract_manager.get_contract(name)
        # contract = self.web3.eth.contract(abi=data["abi"], bytecode=data["bin"])
        # transaction = contract.constructor(**kwargs).buildTransaction(
        #     {"from": self.accounts[0], "gas": 5900000}
        # )
        # self.name_to_creation_hash[name] = self.web3.eth.sendTransaction(transaction)
        # self.name_to_contract[name] = self.web3.eth.contract(
        #     address=self.contract_address(name), abi=data["abi"]
        # )

    def contract_address(self, name):
        # Look up the deployed address of `name` from its creation receipt.
        raise NotImplementedError("needs refactoring")
        # tx_hash = self.name_to_creation_hash[name]
        # return self.web3.eth.getTransactionReceipt(tx_hash)["contractAddress"]

    def call_transaction(self, contract, function, **kwargs):
        # Call `function` on deployed `contract`; optional `sender` kwarg,
        # defaulting to the first account. Returns the tx receipt.
        raise NotImplementedError("needs refactoring")
        # sender = kwargs.pop("sender", self.accounts[0])
        # tx_hash = (
        #     self.name_to_contract[contract]
        #     .functions[function](**kwargs)
        #     .transact({"from": sender})
        # )
        # return self.web3.eth.getTransactionReceipt(tx_hash)


def find_max_pending_transfers(gas_limit):
    """Measure gas consumption of TokenNetwork.unlock() depending on number of
    pending transfers and find the maximum number of pending transfers so
    gas_limit is not exceeded."""
    tester = ContractTester(generate_keys=2)

    # Deploy the registry, a test token, and the TokenNetwork under test.
    tester.deploy_contract("SecretRegistry")

    tester.deploy_contract(
        "HumanStandardToken",
        _initialAmount=100000,
        _decimalUnits=3,
        _tokenName="SomeToken",
        _tokenSymbol="SMT",
    )

    tester.deploy_contract(
        "TokenNetwork",
        _token_address=tester.contract_address("HumanStandardToken"),
        _secret_registry=tester.contract_address("SecretRegistry"),
        _chain_id=1,
        _settlement_timeout_min=100,
        _settlement_timeout_max=200,
    )

    # Fund the second participant, open a channel and deposit on both sides.
    tester.call_transaction("HumanStandardToken", "transfer", _to=tester.accounts[1], _value=10000)

    receipt = tester.call_transaction(
        "TokenNetwork",
        "openChannel",
        participant1=tester.accounts[0],
        participant2=tester.accounts[1],
        settle_timeout=150,
    )
    # channel id is emitted as the first indexed topic of the open event
    channel_identifier = int(encode_hex(receipt["logs"][0]["topics"][1]), 16)

    tester.call_transaction(
        "HumanStandardToken",
        "approve",
        sender=tester.accounts[0],
        _spender=tester.contract_address("TokenNetwork"),
        _value=10000,
    )

    tester.call_transaction(
        "HumanStandardToken",
        "approve",
        sender=tester.accounts[1],
        _spender=tester.contract_address("TokenNetwork"),
        _value=5000,
    )

    tester.call_transaction(
        "TokenNetwork",
        "setTotalDeposit",
        channel_identifier=channel_identifier,
        participant=tester.accounts[0],
        total_deposit=5000,
        partner=tester.accounts[1],
    )

    tester.call_transaction(
        "TokenNetwork",
        "setTotalDeposit",
        channel_identifier=channel_identifier,
        participant=tester.accounts[1],
        total_deposit=2000,
        partner=tester.accounts[0],
    )

    print("Measuring unlock()'s gas cost for different Merkle tree widths, can take a while...")

    # Binary search over tree width: `enough` always fits in gas_limit,
    # `too_much` never does. Each probe reverts to the pre-close snapshot.
    before_closing = tester.tester.take_snapshot()
    enough = 0
    too_much = 1024

    nonce = 10
    additional_hash = urandom(32)
    token_network_identifier = tester.contract_address("TokenNetwork")

    while enough + 1 < too_much:
        tree_size = (enough + too_much) // 2

        tester.tester.revert_to_snapshot(before_closing)

        pending_transfers_tree = get_pending_transfers_tree(
            tester.web3, unlockable_amounts=[1] * tree_size
        )

        balance_hash = hash_balance_data(3000, 2000, pending_transfers_tree.merkle_root)
        # FIXME: outdated — pack_balance_proof signature may have changed; verify
        data_to_sign = pack_balance_proof(
            nonce=nonce,
            balance_hash=balance_hash,
            additional_hash=additional_hash,
            canonical_identifier=CanonicalIdentifier(
                chain_identifier=1,
                token_network_address=token_network_identifier,
                channel_identifier=channel_identifier,
            ),
        )
        signature = LocalSigner(tester.private_keys[1]).sign(data=data_to_sign)

        tester.call_transaction(
            "TokenNetwork",
            "closeChannel",
            channel_identifier=channel_identifier,
            partner=tester.accounts[1],
            balance_hash=balance_hash,
            nonce=nonce,
            additional_hash=additional_hash,
            signature=signature,
        )

        tester.tester.mine_blocks(160)  # close settlement window

        tester.call_transaction(
            "TokenNetwork",
            "settleChannel",
            channel_identifier=channel_identifier,
            participant1=tester.accounts[0],
            participant1_transferred_amount=0,
            participant1_locked_amount=0,
            participant1_locksroot=b"\x00" * 32,
            participant2=tester.accounts[1],
            participant2_transferred_amount=3000,
            participant2_locked_amount=2000,
            participant2_locksroot=pending_transfers_tree.merkle_root,
        )

        receipt = tester.call_transaction(
            "TokenNetwork",
            "unlock",
            channel_identifier=channel_identifier,
            participant=tester.accounts[0],
            partner=tester.accounts[1],
            merkle_tree_leaves=pending_transfers_tree.packed_transfers,
        )

        gas_used = receipt["gasUsed"]

        if gas_used <= gas_limit:
            enough = tree_size
            print(f"{tree_size} pending transfers work ({gas_used} gas needed to unlock)")
        else:
            too_much = tree_size
            print(f"{tree_size} pending transfers are too much ({gas_used} gas needed to unlock)")


if __name__ == "__main__":
    find_max_pending_transfers(TRANSACTION_GAS_LIMIT)
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
#     license terms and contributor agreement.
#
"""
Module for defining functionality that parses command lists.  Command
lists are a list of strings used by control devices to indicate what is
changed from shot-to-shot.  This usually represents a command string
that is sent to a device to tell it to change an output.
"""
__all__ = ["CLParse"]

import numpy as np
import re

from typing import Iterable, Union
from warnings import warn


class CLParse(object):
    """
    Class for parsing RE from a command list. (A command list is a list
    of strings where each string is a set of commands sent to a control
    device to define that control device's state.)
    """

    def __init__(self, command_list: Union[str, Iterable[str]]):
        """
        :param command_list: the command list for a control device
        :type command_list: str or list of strings
        :raises ValueError: if `command_list` is not a str or an
            Iterable of str
        """
        super().__init__()

        # condition `command list` -- a bare str is wrapped in a list so
        # the rest of the class always sees an iterable of strings
        try:
            if isinstance(command_list, str):
                command_list = [command_list]
            elif isinstance(command_list, Iterable):
                if not all(isinstance(val, str) for val in command_list):
                    raise ValueError
            else:
                raise ValueError
        except ValueError:
            raise ValueError("`command_list` must be a str or an Iterable of strings")

        # set command list
        self._cl = command_list

    def apply_patterns(self, patterns: Union[str, Iterable[str]]):
        r"""
        Applies the REs defined in `patterns` to parse the command list.

        :param patterns: list of raw strings defining REs for parsing
            the command list
        :type patterns: str or list of strings
        :return: (bool, dict) -- success flag and a dict keyed by the
            NAME symbolic group of each pattern
        :raises ValueError: if a pattern does not define exactly two
            symbolic groups (NAME and VAL), reuses a NAME, or names a
            group 'remainder'

        :Example:

            >>> # define a command list
            >>> cl = ['VOLT 20.0', 'VOLT 25.0', 'VOLT 30.0']
            >>>
            >>> # define clparse object
            >>> clparse = CLParse(cl)
            >>>
            >>> # apply patterns
            >>> patterns = (r'(?P<VOLT>(\bVOLT\s)'
            >>>             + r'(?P<VAL>(\d+\.\d*|\.\d+|\d+\b)))')
            >>> results = clparse.apply_patterns(patterns)
            >>> results[0]
            True
            >>> results[1]
            {'VOLT': {'cl str': ('VOLT 20.0', 'VOLT 25.0', 'VOLT 30.0'),
                      'command list': (20.0, 25.0, 30.0),
                      're pattern': re.compile(pattern, re.UNICODE),
                      'dtype': numpy.float64}}
        """
        # initialize new cl dict
        cls_dict = {}  # type: dict

        # condition patterns
        if isinstance(patterns, str):
            # convert string to list
            patterns = [patterns]
        elif isinstance(patterns, Iterable):
            # ensure all entries are strings
            if not all(isinstance(pat, str) for pat in patterns):
                raise ValueError("`patterns` must be a str or Iterable of strings")

            # ensure all entries are unique
            patterns = list(set(patterns))
        else:
            raise ValueError("`patterns` must be a string or list of strings")

        # compile all patterns
        for pattern in patterns:
            rpat = re.compile(pattern)

            # confirm each pattern has 2 symbolic group names
            # 1. 'NAME' -- name of the new probe state value
            # 2. 'VAL' -- the value associated with 'NAME'
            #
            if len(rpat.groupindex) == 2:
                # ensure the VAL symbolic group is defined
                if "VAL" not in rpat.groupindex:
                    raise ValueError(
                        "user needs to define symbolic group VAL for "
                        "the value of the probe state"
                    )

                # get name symbolic group name -- whichever of the two
                # groups is not VAL
                sym_groups = list(rpat.groupindex)
                name = sym_groups[0] if sym_groups.index("VAL") == 1 else sym_groups[1]

                # check symbolic group is not already defined
                if name in cls_dict:
                    raise ValueError(
                        f"Symbolic group ({name}) defined in multiple RE patterns"
                    )
                elif name.lower() == "remainder":
                    # 'remainder' is reserved for the internal entry below
                    raise ValueError(f"Can NOT use {name} as a symbolic group name")

                # initialize cls dict entry
                cls_dict[name] = {"re pattern": rpat, "command list": [], "cl str": []}
            else:
                raise ValueError(
                    "user needs to define two symbolic groups, VAL for"
                    " the value group and NAME for the name of the "
                    "probe state value"
                )

        # add a 'remainder' entry to the cls dict -- holds whatever text
        # of each command has not yet been consumed by a pattern
        cls_dict["remainder"] = {
            "re pattern": None,
            "command list": list(self._cl).copy(),
        }
        cls_dict["remainder"]["cl str"] = cls_dict["remainder"]["command list"]

        # scan through state values (ie re patterns)
        # - iterate 'remainder' first
        #
        names = list(cls_dict.keys())
        names.remove("remainder")
        names = ["remainder"] + names
        for name in names:
            # check 'remainder' entry for NULL strings
            # - then skip or break
            if name == "remainder":
                if "" in cls_dict["remainder"]["command list"]:
                    del cls_dict["remainder"]
                    break
                else:  # pragma: no cover
                    # this is not covered due to CPython's peephole
                    # optimizer (see coverage.py issue 198)
                    continue

            # search for pattern
            r_cl = []
            for command in cls_dict["remainder"]["command list"]:
                results = cls_dict[name]["re pattern"].search(command)
                if results is not None:
                    # try to convert the 'VAL' string into float
                    # - for now, assuming 'VAL' will always be a float
                    #   or string, NEVER an integer
                    try:
                        value = float(results.group("VAL"))
                    except ValueError:
                        value = results.group("VAL")  # type: str
                        value = value.strip()
                        if value == "":
                            value = None

                    # add to command list
                    cls_dict[name]["command list"].append(value)
                    cls_dict[name]["cl str"].append(results.group(name))

                    # make a new remainder command list -- strip out the
                    # matched text; None marks a fully-consumed command
                    stripped_cmd = command.replace(results.group(name), "").strip()
                    if stripped_cmd == "":
                        stripped_cmd = None
                    r_cl.append(stripped_cmd)
                else:
                    # no match for this command
                    cls_dict[name]["command list"].append(None)
                    cls_dict[name]["cl str"].append(None)

            # update remainder command list
            # - only if the above 'command list' build does NOT produce
            #   trivial (None) elements and all elements of 'command
            #   list' have the same type
            #
            if None not in cls_dict[name]["command list"] and all(
                isinstance(val, type(cls_dict[name]["command list"][0]))
                for val in cls_dict[name]["command list"]
            ):
                cls_dict["remainder"]["command list"] = r_cl
                cls_dict["remainder"]["cl str"] = cls_dict["remainder"]["command list"]

                # delete and break if 'remainder' as trivial elements
                # - i.e. RE can NOT be matched anymore
                #
                if None in cls_dict["remainder"]["command list"]:
                    del cls_dict["remainder"]
                    break

        # remove trivial command lists and convert lists to tuples
        names = list(cls_dict.keys())
        for name in names:
            if None in cls_dict[name]["command list"] or not bool(
                cls_dict[name]["command list"]
            ):
                # command list is trivial
                del cls_dict[name]

                # issue warning
                warn(
                    f"Symbolic group ({name}) removed since some or all of the "
                    f"'command list' has None values"
                )
            elif not all(
                isinstance(val, type(cls_dict[name]["command list"][0]))
                for val in cls_dict[name]["command list"]
            ):
                # ensure all command list elements have the same type
                del cls_dict[name]

                # issue warning
                warn(
                    f"Symbolic group ({name}) removed since all entries in "
                    f"'command list' do NOT have the same type"
                )
            else:
                # condition 'command list' value and determine 'dtype'
                if isinstance(cls_dict[name]["command list"][0], float):
                    # 'command list' is a float
                    cls_dict[name]["dtype"] = np.float64
                else:
                    # 'command list' is a string -- dtype sized to the
                    # longest entry
                    # NOTE(review): np.unicode_ was removed in NumPy 2.0;
                    # np.str_ is the modern spelling -- confirm before
                    # upgrading NumPy.
                    mlen = len(max(cls_dict[name]["command list"], key=lambda x: len(x)))
                    cls_dict[name]["dtype"] = np.dtype((np.unicode_, mlen))

            # convert lists to tuples
            # - first check dict `name` has not been deleted
            if name in cls_dict:
                cls_dict[name]["command list"] = tuple(cls_dict[name]["command list"])
                cls_dict[name]["cl str"] = tuple(cls_dict[name]["cl str"])

        # determine if parse was successful
        success = True
        if len(cls_dict) == 0:
            # dictionary is empty
            success = False
            cls_dict = {}
        elif len(cls_dict) == 1 and "remainder" in cls_dict:
            # only 'remainder' is in dictionary
            success = False
            cls_dict = {}

        # return
        return success, cls_dict

    def try_patterns(self, patterns: Union[str, Iterable[str]]):
        """
        Prints the results of applying the REs in `patterns` to the
        command list.  Pretty print of :meth:`apply_patterns`.

        :param patterns: list of raw strings defining REs for parsing
            the command list
        :type patterns: str or list of strings
        """
        # TODO: clean method and format print better
        #
        # build dictionary
        success, cls_dict = self.apply_patterns(patterns)

        # print results -- one column per parsed symbolic group
        hline1 = "command".ljust(9) + "command".ljust(40)
        hline2 = "index".ljust(9) + "str".ljust(40)
        for name in cls_dict:
            hline1 += str(name).ljust(10)
            hline2 += type(cls_dict[name]["command list"][0]).__name__.ljust(10)
        print(hline1)
        print(hline2)

        for ci, command in enumerate(self._cl):
            line = str(ci).ljust(9) + str(command).ljust(40)
            for name in cls_dict:
                line += str(cls_dict[name]["command list"][ci])
            print(line)
# Tests for the kazoo Lock / ReadLock / WriteLock and Semaphore recipes.
# Worker threads coordinate with the main thread via a shared Condition
# (self.condition) and the self.active_thread / self.cancelled_threads state.
import collections
import threading
import uuid

from nose.tools import eq_, ok_

from kazoo.exceptions import CancelledError
from kazoo.exceptions import LockTimeout
from kazoo.testing import KazooTestCase
from kazoo.tests import util as test_util


class SleepBarrier(object):
    """A crappy spinning barrier."""

    def __init__(self, wait_for, sleep_func):
        # wait_for: number of threads that must arrive before wait() returns
        self._wait_for = wait_for
        self._arrived = collections.deque()
        self._sleep_func = sleep_func

    def __enter__(self):
        self._arrived.append(threading.current_thread())
        return self

    def __exit__(self, type, value, traceback):
        try:
            self._arrived.remove(threading.current_thread())
        except ValueError:
            pass

    def wait(self):
        # spin (with small sleeps) until all expected threads have entered
        while len(self._arrived) < self._wait_for:
            self._sleep_func(0.001)


class KazooLockTests(KazooTestCase):
    """Tests for the kazoo Lock recipe."""

    # number of worker threads used by the many-thread tests
    thread_count = 20

    def __init__(self, *args, **kw):
        super(KazooLockTests, self).__init__(*args, **kw)
        self.threads_made = []

    def tearDown(self):
        super(KazooLockTests, self).tearDown()
        # join every thread created via make_thread so no test leaks threads
        while self.threads_made:
            t = self.threads_made.pop()
            t.join()

    @staticmethod
    def make_condition():
        return threading.Condition()

    @staticmethod
    def make_event():
        return threading.Event()

    def make_thread(self, *args, **kwargs):
        # daemon thread, tracked for joining in tearDown
        t = threading.Thread(*args, **kwargs)
        t.daemon = True
        self.threads_made.append(t)
        return t

    @staticmethod
    def make_wait():
        return test_util.Wait()

    def setUp(self):
        super(KazooLockTests, self).setUp()
        # unique lock path per test to avoid cross-test interference
        self.lockpath = "/" + uuid.uuid4().hex

        self.condition = self.make_condition()
        self.released = self.make_event()
        self.active_thread = None
        self.cancelled_threads = []

    def _thread_lock_acquire_til_event(self, name, lock, event):
        """Worker: hold `lock` (recording `name` as active) until `event`
        is set; record `name` in cancelled_threads on CancelledError."""
        try:
            with lock:
                with self.condition:
                    eq_(self.active_thread, None)
                    self.active_thread = name
                    self.condition.notify_all()

                event.wait()
                with self.condition:
                    eq_(self.active_thread, name)
                    self.active_thread = None
                    self.condition.notify_all()
            self.released.set()
        except CancelledError:
            with self.condition:
                self.cancelled_threads.append(name)
                self.condition.notify_all()

    def test_lock_one(self):
        lock_name = uuid.uuid4().hex
        lock = self.client.Lock(self.lockpath, lock_name)
        event = self.make_event()

        thread = self.make_thread(target=self._thread_lock_acquire_til_event,
                                  args=(lock_name, lock, event))
        thread.start()

        lock2_name = uuid.uuid4().hex
        anotherlock = self.client.Lock(self.lockpath, lock2_name)

        # wait for any contender to show up on the lock
        wait = self.make_wait()
        wait(anotherlock.contenders)
        eq_(anotherlock.contenders(), [lock_name])

        with self.condition:
            while self.active_thread != lock_name:
                self.condition.wait()

        # release the lock
        event.set()

        with self.condition:
            while self.active_thread:
                self.condition.wait()
        self.released.wait()
        thread.join()

    def test_lock(self):
        # five contenders queue behind the main thread's lock and must
        # acquire it in FIFO order once it is released
        threads = []
        names = ["contender" + str(i) for i in range(5)]

        contender_bits = {}

        for name in names:
            e = self.make_event()
            l = self.client.Lock(self.lockpath, name)
            t = self.make_thread(target=self._thread_lock_acquire_til_event,
                                 args=(name, l, e))
            contender_bits[name] = (t, e)
            threads.append(t)

        # acquire the lock ourselves first to make the others line up
        lock = self.client.Lock(self.lockpath, "test")
        lock.acquire()

        for t in threads:
            t.start()

        # wait for everyone to line up on the lock
        wait = self.make_wait()
        wait(lambda: len(lock.contenders()) == 6)

        contenders = lock.contenders()

        eq_(contenders[0], "test")
        contenders = contenders[1:]
        remaining = list(contenders)

        # release the lock and contenders should claim it in order
        lock.release()

        for contender in contenders:
            thread, event = contender_bits[contender]

            with self.condition:
                while not self.active_thread:
                    self.condition.wait()
                eq_(self.active_thread, contender)

            eq_(lock.contenders(), remaining)
            remaining = remaining[1:]

            event.set()
            with self.condition:
                while self.active_thread:
                    self.condition.wait()
        for thread in threads:
            thread.join()

    def test_lock_reconnect(self):
        # a contender must still acquire the lock after its session expires
        # and is re-established
        event = self.make_event()
        other_lock = self.client.Lock(self.lockpath, 'contender')
        thread = self.make_thread(target=self._thread_lock_acquire_til_event,
                                  args=('contender', other_lock, event))

        # acquire the lock ourselves first to make the contender line up
        lock = self.client.Lock(self.lockpath, "test")
        lock.acquire()

        thread.start()
        # wait for the contender to line up on the lock
        wait = self.make_wait()
        wait(lambda: len(lock.contenders()) == 2)
        eq_(lock.contenders(), ['test', 'contender'])

        self.expire_session(self.make_event)

        lock.release()

        with self.condition:
            while not self.active_thread:
                self.condition.wait()
            eq_(self.active_thread, 'contender')

        event.set()
        thread.join()

    def test_lock_non_blocking(self):
        lock_name = uuid.uuid4().hex
        lock = self.client.Lock(self.lockpath, lock_name)
        event = self.make_event()

        thread = self.make_thread(target=self._thread_lock_acquire_til_event,
                                  args=(lock_name, lock, event))
        thread.start()

        lock1 = self.client.Lock(self.lockpath, lock_name)

        # wait for the thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)

        # non-blocking acquire must fail and leave no extra contender
        ok_(not lock1.acquire(blocking=False))
        eq_(lock.contenders(), [lock_name])  # just one - itself

        event.set()
        thread.join()

    def test_lock_fail_first_call(self):
        event1 = self.make_event()
        lock1 = self.client.Lock(self.lockpath, "one")
        thread1 = self.make_thread(target=self._thread_lock_acquire_til_event,
                                   args=("one", lock1, event1))
        thread1.start()

        # wait for this thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)
            eq_(self.active_thread, "one")
        eq_(lock1.contenders(), ["one"])
        event1.set()
        thread1.join()

    def test_lock_cancel(self):
        # a blocked contender that is cancelled must raise CancelledError
        # and drop out of the contender list
        event1 = self.make_event()
        lock1 = self.client.Lock(self.lockpath, "one")
        thread1 = self.make_thread(target=self._thread_lock_acquire_til_event,
                                   args=("one", lock1, event1))
        thread1.start()

        # wait for this thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)
            eq_(self.active_thread, "one")

        client2 = self._get_client()
        client2.start()
        event2 = self.make_event()
        lock2 = client2.Lock(self.lockpath, "two")
        thread2 = self.make_thread(target=self._thread_lock_acquire_til_event,
                                   args=("two", lock2, event2))
        thread2.start()

        # this one should block in acquire. check that it is a contender
        wait = self.make_wait()
        wait(lambda: len(lock2.contenders()) > 1)
        eq_(lock2.contenders(), ["one", "two"])
        lock2.cancel()
        with self.condition:
            if "two" not in self.cancelled_threads:
                self.condition.wait()
            assert "two" in self.cancelled_threads

        eq_(lock2.contenders(), ["one"])
        thread2.join()
        event1.set()
        thread1.join()
        client2.stop()

    def test_lock_no_double_calls(self):
        lock1 = self.client.Lock(self.lockpath, "one")
        lock1.acquire()
        self.assertTrue(lock1.is_acquired)

        # re-acquiring an already-held lock must fail without releasing it
        self.assertFalse(lock1.acquire(timeout=0.5))
        self.assertTrue(lock1.is_acquired)

        lock1.release()
        self.assertFalse(lock1.is_acquired)

    def test_lock_same_thread_no_block(self):
        # the Lock recipe is not reentrant, even within one thread
        lock = self.client.Lock(self.lockpath, "one")
        gotten = lock.acquire(blocking=False)
        self.assertTrue(gotten)
        self.assertTrue(lock.is_acquired)
        gotten = lock.acquire(blocking=False)
        self.assertFalse(gotten)

    def test_lock_many_threads_no_block(self):
        # exactly one of many racing non-blocking acquires may succeed
        lock = self.client.Lock(self.lockpath, "one")
        attempts = collections.deque()

        def _acquire():
            attempts.append(int(lock.acquire(blocking=False)))

        threads = []
        for _i in range(0, self.thread_count):
            t = self.make_thread(target=_acquire)
            threads.append(t)
            t.start()

        while threads:
            t = threads.pop()
            t.join()

        self.assertEqual(1, sum(list(attempts)))

    def test_lock_many_threads(self):
        sleep_func = self.client.handler.sleep_func
        lock = self.client.Lock(self.lockpath, "one")
        acquires = collections.deque()
        differences = collections.deque()
        barrier = SleepBarrier(self.thread_count, sleep_func)

        def _acquire():
            # Wait until all threads are ready to go...
            with barrier as b:
                b.wait()
                with lock:
                    # Ensure that no two threads enter here and cause the
                    # count to differ by more than one, do this by recording
                    # the count that was captured and examining it post run.
                    starting_count = len(acquires)
                    acquires.append(1)
                    sleep_func(0.01)
                    end_count = len(acquires)
                    differences.append(end_count - starting_count)

        threads = []
        for _i in range(0, self.thread_count):
            t = self.make_thread(target=_acquire)
            threads.append(t)
            t.start()

        while threads:
            t = threads.pop()
            t.join()

        self.assertEqual(self.thread_count, len(acquires))
        self.assertEqual([1] * self.thread_count, list(differences))

    def test_lock_reacquire(self):
        lock = self.client.Lock(self.lockpath, "one")
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()

    def test_lock_timeout(self):
        timeout = 3
        e = self.make_event()
        started = self.make_event()

        # In the background thread, acquire the lock and wait thrice the time
        # that the main thread is going to wait to acquire the lock.
        lock1 = self.client.Lock(self.lockpath, "one")

        def _thread(lock, event, timeout):
            with lock:
                started.set()
                event.wait(timeout)
                if not event.isSet():
                    # Eventually fail to avoid hanging the tests
                    self.fail("lock2 never timed out")

        t = self.make_thread(target=_thread, args=(lock1, e, timeout * 3))
        t.start()

        # Start the main thread's kazoo client and try to acquire the lock
        # but give up after `timeout` seconds
        client2 = self._get_client()
        client2.start()
        started.wait(5)
        self.assertTrue(started.isSet())
        lock2 = client2.Lock(self.lockpath, "two")
        try:
            lock2.acquire(timeout=timeout)
        except LockTimeout:
            # A timeout is the behavior we're expecting, since the background
            # thread should still be holding onto the lock
            pass
        else:
            self.fail("Main thread unexpectedly acquired the lock")
        finally:
            # Cleanup
            e.set()
            t.join()
            client2.stop()

    def test_read_lock(self):
        # Test that we can obtain a read lock
        lock = self.client.ReadLock(self.lockpath, "reader one")
        gotten = lock.acquire(blocking=False)
        self.assertTrue(gotten)
        self.assertTrue(lock.is_acquired)

        # and that it's still not reentrant.
        gotten = lock.acquire(blocking=False)
        self.assertFalse(gotten)

        # Test that a second client can share the same read lock
        client2 = self._get_client()
        client2.start()
        lock2 = client2.ReadLock(self.lockpath, "reader two")
        gotten = lock2.acquire(blocking=False)
        self.assertTrue(gotten)
        self.assertTrue(lock2.is_acquired)
        gotten = lock2.acquire(blocking=False)
        self.assertFalse(gotten)

        # Test that a writer is unable to share it
        client3 = self._get_client()
        client3.start()
        lock3 = client3.WriteLock(self.lockpath, "writer")
        gotten = lock3.acquire(blocking=False)
        self.assertFalse(gotten)

    def test_write_lock(self):
        # Test that we can obtain a write lock
        lock = self.client.WriteLock(self.lockpath, "writer")
        gotten = lock.acquire(blocking=False)
        self.assertTrue(gotten)
        self.assertTrue(lock.is_acquired)
        gotten = lock.acquire(blocking=False)
        self.assertFalse(gotten)

        # Test that we are unable to obtain a read lock while the
        # write lock is held.
        client2 = self._get_client()
        client2.start()
        lock2 = client2.ReadLock(self.lockpath, "reader")
        gotten = lock2.acquire(blocking=False)
        self.assertFalse(gotten)


class TestSemaphore(KazooTestCase):
    """Tests for the kazoo Semaphore recipe."""

    def __init__(self, *args, **kw):
        super(TestSemaphore, self).__init__(*args, **kw)
        self.threads_made = []

    def tearDown(self):
        super(TestSemaphore, self).tearDown()
        # join every thread created via make_thread
        while self.threads_made:
            t = self.threads_made.pop()
            t.join()

    @staticmethod
    def make_condition():
        return threading.Condition()

    @staticmethod
    def make_event():
        return threading.Event()

    def make_thread(self, *args, **kwargs):
        # daemon thread, tracked for joining in tearDown
        t = threading.Thread(*args, **kwargs)
        t.daemon = True
        self.threads_made.append(t)
        return t

    def setUp(self):
        super(TestSemaphore, self).setUp()
        # unique semaphore path per test
        self.lockpath = "/" + uuid.uuid4().hex

        self.condition = self.make_condition()
        self.released = self.make_event()
        self.active_thread = None
        self.cancelled_threads = []

    def test_basic(self):
        sem1 = self.client.Semaphore(self.lockpath)
        sem1.acquire()
        sem1.release()

    def test_lock_one(self):
        # with max_leases=1 a second holder blocks until the first releases
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=1)
        started = self.make_event()
        event = self.make_event()

        sem1.acquire()

        def sema_one():
            started.set()
            with sem2:
                event.set()

        thread = self.make_thread(target=sema_one, args=())
        thread.start()
        started.wait(10)

        self.assertFalse(event.is_set())

        sem1.release()
        event.wait(10)
        self.assert_(event.is_set())
        thread.join()

    def test_non_blocking(self):
        sem1 = self.client.Semaphore(
            self.lockpath, identifier='sem1', max_leases=2)
        sem2 = self.client.Semaphore(
            self.lockpath, identifier='sem2', max_leases=2)
        sem3 = self.client.Semaphore(
            self.lockpath, identifier='sem3', max_leases=2)

        sem1.acquire()
        sem2.acquire()
        # both leases taken -> a non-blocking acquire must fail
        ok_(not sem3.acquire(blocking=False))
        eq_(set(sem1.lease_holders()), set(['sem1', 'sem2']))
        sem2.release()
        # the next line isn't required, but avoids timing issues in tests
        sem3.acquire()
        eq_(set(sem1.lease_holders()), set(['sem1', 'sem3']))
        sem1.release()
        sem3.release()

    def test_non_blocking_release(self):
        sem1 = self.client.Semaphore(
            self.lockpath, identifier='sem1', max_leases=1)
        sem2 = self.client.Semaphore(
            self.lockpath, identifier='sem2', max_leases=1)
        sem1.acquire()
        sem2.acquire(blocking=False)

        # make sure there's no shutdown / cleanup error
        sem1.release()
        sem2.release()

    def test_holders(self):
        started = self.make_event()
        event = self.make_event()

        def sema_one():
            with self.client.Semaphore(self.lockpath, 'fred', max_leases=1):
                started.set()
                event.wait()

        thread = self.make_thread(target=sema_one, args=())
        thread.start()
        started.wait()
        sem1 = self.client.Semaphore(self.lockpath)
        holders = sem1.lease_holders()
        eq_(holders, ['fred'])
        event.set()
        thread.join()

    def test_semaphore_cancel(self):
        # cancelling a blocked acquire must raise CancelledError in the
        # blocked thread
        sem1 = self.client.Semaphore(self.lockpath, 'fred', max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, 'george', max_leases=1)
        sem1.acquire()
        started = self.make_event()
        event = self.make_event()

        def sema_one():
            started.set()
            try:
                with sem2:
                    started.set()
            except CancelledError:
                event.set()

        thread = self.make_thread(target=sema_one, args=())
        thread.start()
        started.wait()
        eq_(sem1.lease_holders(), ['fred'])
        eq_(event.is_set(), False)
        sem2.cancel()
        event.wait()
        eq_(event.is_set(), True)
        thread.join()

    def test_multiple_acquire_and_release(self):
        sem1 = self.client.Semaphore(self.lockpath, 'fred', max_leases=1)
        sem1.acquire()
        sem1.acquire()

        # only the first release actually frees the lease
        eq_(True, sem1.release())
        eq_(False, sem1.release())

    def test_handle_session_loss(self):
        # a waiting semaphore must acquire once the lease holder's session
        # expires and its lease is released
        expire_semaphore = self.client.Semaphore(self.lockpath, 'fred',
                                                 max_leases=1)

        client = self._get_client()
        client.start()
        lh_semaphore = client.Semaphore(self.lockpath, 'george', max_leases=1)
        lh_semaphore.acquire()

        started = self.make_event()
        event = self.make_event()
        event2 = self.make_event()

        def sema_one():
            started.set()
            with expire_semaphore:
                event.set()
                event2.wait()

        thread1 = self.make_thread(target=sema_one, args=())
        thread1.start()

        started.wait()
        eq_(lh_semaphore.lease_holders(), ['george'])

        # Fired in a separate thread to make sure we can see the effect
        expired = self.make_event()

        def expire():
            self.expire_session(self.make_event)
            expired.set()

        thread2 = self.make_thread(target=expire, args=())
        thread2.start()
        expire_semaphore.wake_event.wait()
        expired.wait()

        lh_semaphore.release()
        client.stop()

        event.wait(15)
        eq_(expire_semaphore.lease_holders(), ['fred'])
        event2.set()

        for t in (thread1, thread2):
            t.join()

    def test_inconsistent_max_leases(self):
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=2)

        sem1.acquire()
        # mismatched max_leases against the stored value must be rejected
        self.assertRaises(ValueError, sem2.acquire)

    def test_inconsistent_max_leases_other_data(self):
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=2)

        self.client.ensure_path(self.lockpath)
        # non-numeric node data means the stored max_leases can't be read
        self.client.set(self.lockpath, b'a$')

        sem1.acquire()
        # sem2 thinks it's ok to have two lease holders
        ok_(sem2.acquire(blocking=False))

    def test_reacquire(self):
        lock = self.client.Semaphore(self.lockpath)
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()

    def test_acquire_after_cancelled(self):
        lock = self.client.Semaphore(self.lockpath)
        self.assertTrue(lock.acquire())
        self.assertTrue(lock.release())
        lock.cancel()
        self.assertTrue(lock.cancelled)
        # a cancelled semaphore must still be acquirable afterwards
        self.assertTrue(lock.acquire())

    def test_timeout(self):
        timeout = 3
        e = self.make_event()
        started = self.make_event()

        # In the background thread, acquire the lock and wait thrice the time
        # that the main thread is going to wait to acquire the lock.
        sem1 = self.client.Semaphore(self.lockpath, "one")

        def _thread(sem, event, timeout):
            with sem:
                started.set()
                event.wait(timeout)
                if not event.isSet():
                    # Eventually fail to avoid hanging the tests
                    self.fail("sem2 never timed out")

        t = self.make_thread(target=_thread, args=(sem1, e, timeout * 3))
        t.start()

        # Start the main thread's kazoo client and try to acquire the lock
        # but give up after `timeout` seconds
        client2 = self._get_client()
        client2.start()
        started.wait(5)
        self.assertTrue(started.isSet())
        sem2 = client2.Semaphore(self.lockpath, "two")
        try:
            sem2.acquire(timeout=timeout)
        except LockTimeout:
            # A timeout is the behavior we're expecting, since the background
            # thread will still be holding onto the lock
            e.set()
        finally:
            # Cleanup
            t.join()
            client2.stop()
#!/usr/bin/env python
"""Ninja toolchain abstraction for Clang compiler suite"""

import os
import subprocess

import toolchain


class ClangToolchain(toolchain.Toolchain):
    # Drives clang/llvm for the ninja build generator. Supports Windows,
    # Linux/BSD/Raspberry Pi, Android, macOS/iOS and PNaCl targets.
    # NOTE(review): the base class `toolchain.Toolchain` is not visible in
    # this file; helpers such as self.target.is_*(), self.rmcmd(),
    # self.path_escape(), self.implicit_deps() and attributes like
    # self.android / self.libpath are assumed to be provided by it.

    def initialize(self, project, archs, configs, includepaths, dependlibs, libpaths, variables):
        """Set up compiler/archiver/linker commands, base flags and the
        per-target-type builder table, then resolve the platform toolchain."""
        #Local variable defaults
        self.toolchain = ''
        self.sdkpath = ''
        self.includepaths = includepaths
        self.libpaths = libpaths
        self.ccompiler = 'clang'
        self.archiver = 'ar'
        self.linker = 'clang'
        if self.target.is_windows():
            # llvm-ar understands COFF archives on Windows
            self.archiver = 'llvm-ar'

        #Default variables
        self.sysroot = ''
        if self.target.is_ios():
            self.deploymenttarget = '9.0'
        if self.target.is_macos():
            self.deploymenttarget = '10.7'

        #Command definitions (ninja rule command templates; $vars are ninja
        #variables filled in by write_variables / per-build variable lists)
        self.cccmd = '$toolchain$cc -MMD -MT $out -MF $out.d -I. $includepaths $moreincludepaths $cflags $carchflags $cconfigflags -c $in -o $out'
        self.ccdeps = 'gcc'
        self.ccdepfile = '$out.d'
        self.arcmd = self.rmcmd('$out') + ' && $toolchain$ar crsD $ararchflags $arflags $out $in'
        self.linkcmd = '$toolchain$cc $libpaths $configlibpaths $linkflags $linkarchflags $linkconfigflags -o $out $in $libs $archlibs $oslibs $frameworks'

        #Base flags
        self.cflags = ['-std=c11', '-D' + project.upper() + '_COMPILE=1',
                       '-W', '-Werror', '-pedantic', '-Wall', '-Weverything',
                       '-Wno-padded', '-Wno-documentation-unknown-command',
                       '-funit-at-a-time', '-fstrict-aliasing',
                       '-fno-math-errno', '-ffinite-math-only',
                       '-funsafe-math-optimizations', '-fno-trapping-math',
                       '-ffast-math']
        self.mflags = []
        self.arflags = []
        self.linkflags = []
        self.oslibs = []
        self.frameworks = []

        if self.target.is_linux() or self.target.is_bsd() or self.target.is_raspberrypi():
            self.linkflags += ['-pthread']
            self.oslibs += ['m']
        if self.target.is_linux() or self.target.is_raspberrypi():
            self.oslibs += ['dl']
        if self.target.is_bsd():
            self.oslibs += ['execinfo']

        self.initialize_archs(archs)
        self.initialize_configs(configs)
        self.initialize_project(project)
        self.initialize_toolchain()
        self.initialize_depends(dependlibs)

        self.parse_default_variables(variables)
        self.read_build_prefs()

        if self.is_monolithic():
            self.cflags += ['-DBUILD_MONOLITHIC=1']
        if self.use_coverage():
            self.cflags += ['--coverage']
            self.linkflags += ['--coverage']

        #Overrides
        self.objext = '.o'

        #Builders: map source/target type to the method that emits the
        #ninja build statements for it
        self.builders['c'] = self.builder_cc
        self.builders['lib'] = self.builder_lib
        self.builders['sharedlib'] = self.builder_sharedlib
        self.builders['bin'] = self.builder_bin
        if self.target.is_macos() or self.target.is_ios():
            # Apple targets build per-arch then merge with lipo
            self.builders['m'] = self.builder_cm
            self.builders['multilib'] = self.builder_apple_multilib
            self.builders['multisharedlib'] = self.builder_apple_multisharedlib
            self.builders['multibin'] = self.builder_apple_multibin
        elif self.target.is_pnacl():
            self.builders['multilib'] = self.builder_multicopy
            self.builders['multisharedlib'] = self.builder_multicopy
            self.builders['multibin'] = self.builder_pnacl_multibin
        else:
            self.builders['multilib'] = self.builder_multicopy
            self.builders['multisharedlib'] = self.builder_multicopy
            self.builders['multibin'] = self.builder_multicopy

        #Setup target platform
        self.build_toolchain()

    def name(self):
        """Toolchain identifier used in build preferences."""
        return 'clang'

    def parse_prefs(self, prefs):
        """Apply user build preferences (toolchain path, archiver,
        deployment targets, PNaCl SDK path) on top of the defaults."""
        super(ClangToolchain, self).parse_prefs(prefs)
        if 'clang' in prefs:
            clangprefs = prefs['clang']
            if 'toolchain' in clangprefs:
                self.toolchain = clangprefs['toolchain']
                # Normalize to the bin directory of the toolchain
                if os.path.split(self.toolchain)[1] != 'bin':
                    self.toolchain = os.path.join(self.toolchain, 'bin')
            if 'archiver' in clangprefs:
                self.archiver = clangprefs['archiver']
        if self.target.is_ios() and 'ios' in prefs:
            iosprefs = prefs['ios']
            if 'deploymenttarget' in iosprefs:
                self.deploymenttarget = iosprefs['deploymenttarget']
        if self.target.is_macos() and 'macos' in prefs:
            macosprefs = prefs['macos']
            if 'deploymenttarget' in macosprefs:
                self.deploymenttarget = macosprefs['deploymenttarget']
        if self.target.is_pnacl() and 'pnacl' in prefs:
            pnaclprefs = prefs['pnacl']
            if 'sdkpath' in pnaclprefs:
                self.sdkpath = os.path.expanduser(pnaclprefs['sdkpath'])

    def write_variables(self, writer):
        """Emit the global ninja variables referenced by the rule commands."""
        super(ClangToolchain, self).write_variables(writer)
        writer.variable('toolchain', self.toolchain)
        writer.variable('sdkpath', self.sdkpath)
        writer.variable('sysroot', self.sysroot)
        writer.variable('cc', self.ccompiler)
        writer.variable('ar', self.archiver)
        writer.variable('link', self.linker)
        if self.target.is_macos() or self.target.is_ios():
            writer.variable('lipo', self.lipo)
        if self.target.is_pnacl():
            writer.variable('finalize', self.finalizer)
            writer.variable('nmf', self.nmfer)
        writer.variable('includepaths', self.make_includepaths(self.includepaths))
        writer.variable('moreincludepaths', '')
        writer.variable('cflags', self.cflags)
        if self.target.is_macos() or self.target.is_ios():
            writer.variable('mflags', self.mflags)
        # Per-arch/per-config variables default to empty and are overridden
        # per build statement by cc_variables/ar_variables/link_variables
        writer.variable('carchflags', '')
        writer.variable('cconfigflags', '')
        writer.variable('arflags', self.arflags)
        writer.variable('ararchflags', '')
        writer.variable('arconfigflags', '')
        writer.variable('linkflags', self.linkflags)
        writer.variable('linkarchflags', '')
        writer.variable('linkconfigflags', '')
        writer.variable('libs', '')
        writer.variable('libpaths', self.make_libpaths(self.libpaths))
        writer.variable('configlibpaths', '')
        writer.variable('archlibs', '')
        writer.variable('oslibs', self.make_libs(self.oslibs))
        writer.variable('frameworks', '')
        writer.newline()

    def write_rules(self, writer):
        """Emit the ninja rules (cc/cm/ar/link/so, plus lipo and PNaCl
        finalize/nmf where the target needs them)."""
        super(ClangToolchain, self).write_rules(writer)
        writer.rule('cc', command = self.cccmd, depfile = self.ccdepfile, deps = self.ccdeps, description = 'CC $in')
        if self.target.is_macos() or self.target.is_ios():
            writer.rule('cm', command = self.cmcmd, depfile = self.ccdepfile, deps = self.ccdeps, description = 'CM $in')
            writer.rule('lipo', command = self.lipocmd, description = 'LIPO $out')
        writer.rule('ar', command = self.arcmd, description = 'LIB $out')
        writer.rule('link', command = self.linkcmd, description = 'LINK $out')
        writer.rule('so', command = self.linkcmd, description = 'SO $out')
        if self.target.is_pnacl():
            writer.rule('finalize', command = self.finalizecmd, description = 'FINALIZE $out')
            writer.rule('nmf', command = self.nmfcmd, description = 'NMF $out')
        writer.newline()

    def build_toolchain(self):
        """Resolve the platform-specific toolchain and normalize the
        toolchain path to end with a separator so $toolchain$cc concatenates."""
        super(ClangToolchain, self).build_toolchain()
        if self.target.is_windows():
            self.build_windows_toolchain()
        elif self.target.is_android():
            self.build_android_toolchain()
        elif self.target.is_macos() or self.target.is_ios():
            self.build_xcode_toolchain()
        elif self.target.is_pnacl():
            self.build_pnacl_toolchain()
        if self.toolchain != '' and not self.toolchain.endswith('/') and not self.toolchain.endswith('\\'):
            self.toolchain += os.sep

    def build_windows_toolchain(self):
        """Windows-specific flags and default OS libraries."""
        self.cflags += ['-U__STRICT_ANSI__', '-Wno-reserved-id-macro']
        self.oslibs = ['kernel32', 'user32', 'shell32', 'advapi32']

    def build_android_toolchain(self):
        """Android NDK toolchain: sysroot-based compile/link commands,
        position-independent code and hardening flags."""
        self.archiver = 'ar'
        self.cccmd += ' --sysroot=$sysroot'
        self.linkcmd += ' -shared -Wl,-soname,$liblinkname --sysroot=$sysroot'
        self.cflags += ['-fpic', '-ffunction-sections', '-funwind-tables',
                        '-fstack-protector', '-fomit-frame-pointer',
                        '-no-canonical-prefixes', '-Wa,--noexecstack']
        self.linkflags += ['-no-canonical-prefixes', '-Wl,--no-undefined',
                           '-Wl,-z,noexecstack', '-Wl,-z,relro', '-Wl,-z,now']
        self.includepaths += [os.path.join('$ndk', 'sources', 'android', 'native_app_glue'),
                              os.path.join('$ndk', 'sources', 'android', 'cpufeatures')]
        self.oslibs += ['log']
        self.toolchain = os.path.join('$ndk', 'toolchains', 'llvm', 'prebuilt',
                                      self.android.hostarchname, 'bin', '')

    def build_xcode_toolchain(self):
        """macOS/iOS toolchain: locate sdk/compiler/archiver/lipo through
        xcrun and derive Objective-C (mflags) from the C flags."""
        if self.target.is_macos():
            sdk = 'macosx'
            deploytarget = 'MACOSX_DEPLOYMENT_TARGET=' + self.deploymenttarget
            self.cflags += ['-fasm-blocks', '-mmacosx-version-min=' + self.deploymenttarget, '-isysroot', '$sysroot']
            self.arflags += ['-static', '-no_warning_for_no_symbols']
            self.linkflags += ['-isysroot', '$sysroot']
        elif self.target.is_ios():
            sdk = 'iphoneos'
            deploytarget = 'IPHONEOS_DEPLOYMENT_TARGET=' + self.deploymenttarget
            self.cflags += ['-fasm-blocks', '-miphoneos-version-min=' + self.deploymenttarget, '-isysroot', '$sysroot']
            self.arflags += ['-static', '-no_warning_for_no_symbols']
            self.linkflags += ['-isysroot', '$sysroot']
            self.cflags += ['-fembed-bitcode-marker']
        platformpath = subprocess.check_output(['xcrun', '--sdk', sdk, '--show-sdk-platform-path']).strip()
        localpath = platformpath + "/Developer/usr/bin:/Applications/Xcode.app/Contents/Developer/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin"
        self.sysroot = subprocess.check_output(['xcrun', '--sdk', sdk, '--show-sdk-path']).strip()
        # Tool invocations are prefixed with a PATH override so the SDK's
        # own binaries are found first
        self.ccompiler = "PATH=" + localpath + " " + subprocess.check_output(['xcrun', '--sdk', sdk, '-f', 'clang']).strip()
        self.archiver = "PATH=" + localpath + " " + subprocess.check_output(['xcrun', '--sdk', sdk, '-f', 'libtool']).strip()
        self.linker = deploytarget + " " + self.ccompiler
        self.lipo = "PATH=" + localpath + " " + subprocess.check_output(['xcrun', '--sdk', sdk, '-f', 'lipo']).strip()
        self.mflags += self.cflags + ['-fobjc-arc', '-fno-objc-exceptions', '-x', 'objective-c']
        self.cflags += ['-x', 'c']
        self.cmcmd = self.cccmd.replace('$cflags', '$mflags')
        self.arcmd = self.rmcmd('$out') + ' && $ar $ararchflags $arflags $in -o $out'
        self.lipocmd = '$lipo $in -create -output $out'
        if self.target.is_macos():
            self.frameworks = ['Cocoa', 'CoreFoundation']
        if self.target.is_ios():
            self.frameworks = ['CoreGraphics', 'UIKit', 'Foundation']

    def build_pnacl_toolchain(self):
        """PNaCl toolchain: resolve tools from NACL_SDK_ROOT (or prefs)."""
        if self.sdkpath == '':
            # NOTE(review): os.getenv returns None if NACL_SDK_ROOT is not
            # set, which would make expanduser raise — the SDK path is
            # presumably required here; confirm with callers.
            self.sdkpath = os.path.expanduser(os.getenv('NACL_SDK_ROOT'))
        osname = subprocess.check_output([self.python, os.path.join(self.sdkpath, 'tools', 'getos.py')]).strip()
        self.toolchain = os.path.join(self.sdkpath, 'toolchain', osname + '_pnacl')
        shsuffix = ''
        if self.host.is_windows():
            shsuffix = '.bat'
        self.ccompiler = os.path.join('bin', 'pnacl-clang' + shsuffix)
        self.archiver = os.path.join('bin', 'pnacl-ar' + shsuffix)
        self.linker = self.ccompiler
        self.finalizer = os.path.join('bin', 'pnacl-finalize' + shsuffix)
        self.nmfer = os.path.join('tools', 'create_nmf.py')
        self.finalizecmd = '$toolchain$finalize -o $out $in'
        self.nmfcmd = self.python + ' ' + os.path.join('$sdkpath', '$nmf') + ' -o $out $in'
        self.includepaths += [os.path.join(self.sdkpath, 'include')]
        self.oslibs += ['ppapi', 'm']

    def make_includepaths(self, includepaths):
        """Turn a list of include paths into escaped -I flags ([] if None)."""
        if not includepaths is None:
            return ['-I' + self.path_escape(path) for path in list(includepaths)]
        return []

    def make_libpaths(self, libpaths):
        """Turn a list of library paths into linker search flags; Windows
        uses the MSVC-style /LIBPATH passed through -Xlinker."""
        if not libpaths is None:
            if self.target.is_windows():
                return ['-Xlinker /LIBPATH:' + self.path_escape(path) for path in libpaths]
            return ['-L' + self.path_escape(path) for path in libpaths]
        return []

    def make_targetarchflags(self, arch, targettype):
        """Per-architecture -target/-march style flags for the current
        platform."""
        flags = []
        if self.target.is_android():
            if arch == 'x86':
                flags += ['-target', 'i686-none-linux-android']
                flags += ['-march=i686', '-mtune=intel', '-mssse3', '-mfpmath=sse', '-m32']
            elif arch == 'x86-64':
                flags += ['-target', 'x86_64-none-linux-android']
                flags += ['-march=x86-64', '-msse4.2', '-mpopcnt', '-m64', '-mtune=intel']
            elif arch == 'arm6':
                flags += ['-target', 'armv5te-none-linux-androideabi']
                flags += ['-march=armv5te', '-mtune=xscale', '-msoft-float', '-marm']
            elif arch == 'arm7':
                flags += ['-target', 'armv7-none-linux-androideabi']
                # NOTE(review): -mfpu is passed twice (vfpv3-d16 then neon);
                # the last one presumably wins — confirm intent.
                flags += ['-march=armv7-a', '-mhard-float', '-mfpu=vfpv3-d16', '-mfpu=neon', '-D_NDK_MATH_NO_SOFTFP=1', '-marm']
            elif arch == 'arm64':
                flags += ['-target', 'aarch64-none-linux-android']
            elif arch == 'mips':
                flags += ['-target', 'mipsel-none-linux-android']
            elif arch == 'mips64':
                flags += ['-target', 'mips64el-none-linux-android']
            flags += ['-gcc-toolchain', self.android.make_gcc_toolchain_path(arch)]
        elif self.target.is_macos() or self.target.is_ios():
            # NOTE(review): these entries embed a leading space and are
            # single strings ('-arch x86'), unlike the split-token style
            # used elsewhere — kept as-is.
            if arch == 'x86':
                flags += [' -arch x86']
            elif arch == 'x86-64':
                flags += [' -arch x86_64']
            elif arch == 'arm7':
                flags += [' -arch armv7']
            elif arch == 'arm64':
                flags += [' -arch arm64']
        else:
            if arch == 'x86':
                flags += ['-m32']
            elif arch == 'x86-64':
                flags += ['-m64']
        return flags

    def make_carchflags(self, arch, targettype):
        """Per-arch compile flags; shared libraries also get the dynamic
        link define."""
        flags = []
        if targettype == 'sharedlib':
            flags += ['-DBUILD_DYNAMIC_LINK=1']
        flags += self.make_targetarchflags(arch, targettype)
        return flags

    def make_cconfigflags(self, config, targettype):
        """Per-config (debug/release/profile/deploy) compile flags."""
        flags = []
        if config == 'debug':
            flags += ['-DBUILD_DEBUG=1', '-g']
        elif config == 'release':
            flags += ['-DBUILD_RELEASE=1', '-O3', '-g', '-funroll-loops']
        elif config == 'profile':
            flags += ['-DBUILD_PROFILE=1', '-O3', '-g', '-funroll-loops']
        elif config == 'deploy':
            flags += ['-DBUILD_DEPLOY=1', '-O3', '-g', '-funroll-loops']
        return flags

    def make_ararchflags(self, arch, targettype):
        """Per-arch archiver flags (none for clang targets)."""
        flags = []
        return flags

    def make_arconfigflags(self, config, targettype):
        """Per-config archiver flags (none for clang targets)."""
        flags = []
        return flags

    def make_linkarchflags(self, arch, targettype, variables):
        """Per-arch link flags, including platform-specific fixups."""
        flags = []
        flags += self.make_targetarchflags(arch, targettype)
        if self.target.is_android():
            if arch == 'arm7':
                flags += ['-Wl,--no-warn-mismatch', '-Wl,--fix-cortex-a8']
        if self.target.is_windows():
            if arch == 'x86':
                flags += ['-Xlinker', '/MACHINE:X86']
            elif arch == 'x86-64':
                flags += ['-Xlinker', '/MACHINE:X64']
        if self.target.is_macos() and 'support_lua' in variables and variables['support_lua']:
            # LuaJIT on 64-bit macOS needs the low 4GB address space reserved
            flags += ['-pagezero_size', '10000', '-image_base', '100000000']
        return flags

    def make_linkconfigflags(self, config, targettype, variables):
        """Per-config link flags (Windows subsystem / DLL selection)."""
        flags = []
        if self.target.is_windows():
            if targettype == 'sharedlib':
                flags += ['-Xlinker', '/DLL']
            elif targettype == 'bin':
                flags += ['-Xlinker', '/SUBSYSTEM:CONSOLE']
        return flags

    def make_linkarchlibs(self, arch, targettype):
        """Extra per-arch system libraries (Android math/gcc/android libs)."""
        archlibs = []
        if self.target.is_android():
            if arch == 'arm7':
                # hard-float ABI uses the m_hard variant of libm
                archlibs += ['m_hard']
            else:
                archlibs += ['m']
            archlibs += ['gcc', 'android']
        return archlibs

    def make_libs(self, libs):
        """Turn library names into -l flags ([] if None)."""
        if libs != None:
            return ['-l' + lib for lib in libs]
        return []

    def make_frameworks(self, frameworks):
        """Turn framework names into -framework flags ([] if None)."""
        if frameworks != None:
            return ['-framework ' + framework for framework in frameworks]
        return []

    def make_configlibpaths(self, config, arch, extralibpaths):
        """Build the library search path list for a config/arch pair,
        including per-arch subdirectories on non-Apple targets."""
        libpaths = [self.libpath, os.path.join(self.libpath, config)]
        if not self.target.is_macos() and not self.target.is_ios():
            libpaths += [os.path.join(self.libpath, arch)]
            libpaths += [os.path.join(self.libpath, config, arch)]
        if extralibpaths != None:
            libpaths += [os.path.join(libpath, self.libpath) for libpath in extralibpaths]
            libpaths += [os.path.join(libpath, self.libpath, config) for libpath in extralibpaths]
            if not self.target.is_macos() and not self.target.is_ios():
                libpaths += [os.path.join(libpath, self.libpath, arch) for libpath in extralibpaths]
                libpaths += [os.path.join(libpath, self.libpath, config, arch) for libpath in extralibpaths]
        return self.make_libpaths(libpaths)

    def cc_variables(self, config, arch, targettype, variables):
        """Per-build ninja variable overrides for a compile statement."""
        localvariables = []
        if 'includepaths' in variables:
            moreincludepaths = self.make_includepaths(variables['includepaths'])
            if not moreincludepaths == []:
                localvariables += [('moreincludepaths', moreincludepaths)]
        carchflags = self.make_carchflags(arch, targettype)
        if carchflags != []:
            localvariables += [('carchflags', carchflags)]
        cconfigflags = self.make_cconfigflags(config, targettype)
        if cconfigflags != []:
            localvariables += [('cconfigflags', cconfigflags)]
        if self.target.is_android():
            localvariables += [('sysroot', self.android.make_sysroot_path(arch))]
        return localvariables

    def ar_variables(self, config, arch, targettype, variables):
        """Per-build ninja variable overrides for an archive statement."""
        localvariables = []
        ararchflags = self.make_ararchflags(arch, targettype)
        if ararchflags != []:
            localvariables += [('ararchflags', ararchflags)]
        arconfigflags = self.make_arconfigflags(config, targettype)
        if arconfigflags != []:
            localvariables += [('arconfigflags', arconfigflags)]
        if self.target.is_android():
            localvariables += [('toolchain', self.android.make_gcc_bin_path(arch))]
        return localvariables

    def link_variables(self, config, arch, targettype, variables):
        """Per-build ninja variable overrides for a link statement."""
        localvariables = []
        linkarchflags = self.make_linkarchflags(arch, targettype, variables)
        if linkarchflags != []:
            localvariables += [('linkarchflags', linkarchflags)]
        linkconfigflags = self.make_linkconfigflags(config, targettype, variables)
        if linkconfigflags != []:
            localvariables += [('linkconfigflags', linkconfigflags)]
        if 'libs' in variables:
            libvar = self.make_libs(variables['libs'])
            if libvar != []:
                localvariables += [('libs', libvar)]
        localframeworks = self.frameworks or []
        if 'frameworks' in variables and variables['frameworks'] != None:
            localframeworks += list(variables['frameworks'])
        if len(localframeworks) > 0:
            localvariables += [('frameworks', self.make_frameworks(list(localframeworks)))]
        libpaths = []
        if 'libpaths' in variables:
            libpaths = variables['libpaths']
        localvariables += [('configlibpaths', self.make_configlibpaths(config, arch, libpaths))]
        if self.target.is_android():
            localvariables += [('sysroot', self.android.make_sysroot_path(arch))]
        archlibs = self.make_linkarchlibs(arch, targettype)
        if archlibs != []:
            localvariables += [('archlibs', self.make_libs(archlibs))]
        return localvariables

    def builder_cc(self, writer, config, arch, targettype, infile, outfile, variables):
        """Emit a C compile build statement."""
        return writer.build(outfile, 'cc', infile, implicit = self.implicit_deps(config, variables), variables = self.cc_variables(config, arch, targettype, variables))

    def builder_cm(self, writer, config, arch, targettype, infile, outfile, variables):
        """Emit an Objective-C compile build statement."""
        return writer.build(outfile, 'cm', infile, implicit = self.implicit_deps(config, variables), variables = self.cc_variables(config, arch, targettype, variables))

    def builder_lib(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Emit a static-library archive build statement."""
        return writer.build(outfile, 'ar', infiles, implicit = self.implicit_deps(config, variables), variables = self.ar_variables(config, arch, targettype, variables))

    def builder_sharedlib(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Emit a shared-library link build statement."""
        return writer.build(outfile, 'so', infiles, implicit = self.implicit_deps(config, variables), variables = self.link_variables(config, arch, targettype, variables))

    def builder_bin(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Emit an executable link build statement."""
        return writer.build(outfile, 'link', infiles, implicit = self.implicit_deps(config, variables), variables = self.link_variables(config, arch, targettype, variables))

    #Apple universal targets
    def builder_apple_multilib(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Merge per-arch static libraries into a universal library via
        libtool (the 'ar' rule on Apple targets)."""
        localvariables = [('arflags', '-static -no_warning_for_no_symbols')]
        if variables != None:
            localvariables = variables + localvariables
        return writer.build(os.path.join(outfile, self.buildtarget), 'ar', infiles, variables = localvariables);

    def builder_apple_multisharedlib(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Link a universal shared library from per-arch objects."""
        return writer.build(outfile, 'so', infiles, implicit = self.implicit_deps(config, variables), variables = self.link_variables(config, arch, targettype, variables))

    def builder_apple_multibin(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Merge per-arch executables into a universal binary with lipo."""
        return writer.build(os.path.join(outfile, self.buildtarget), 'lipo', infiles, variables = variables)

    #PNaCl finalizer
    def builder_pnacl_multibin(self, writer, config, arch, targettype, infiles, outfile, variables):
        """Finalize the bitcode executable (.pexe) and generate the
        manifest (.nmf) for PNaCl deployment."""
        binfile = os.path.splitext(self.buildtarget)[0]
        pexe = writer.build(os.path.join(outfile, binfile + '.pexe'), 'finalize', infiles)
        nmf = writer.build(os.path.join(outfile, binfile + '.nmf'), 'nmf', pexe + infiles)
        return [pexe, nmf]


def create(host, target, toolchain):
    """Factory entry point used by the build system."""
    return ClangToolchain(host, target, toolchain)
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand

import imp
import logging
import os
import pip
import sys

logger = logging.getLogger(__name__)

# Kept manually in sync with airflow.__version__
version = imp.load_source(
    'airflow.version', os.path.join('airflow', 'version.py')).version


class Tox(TestCommand):
    """Run the test suite through tox (`python setup.py test --tox-args=...`)."""
    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import tox
        errno = tox.cmdline(args=self.tox_args.split())
        sys.exit(errno)


class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')


def git_version(version):
    """
    Return a version to identify the state of the underlying git repo. The version will
    indicate whether the head of the current git-backed working directory is tied to a
    release tag or not : it will indicate the former with a 'release:{version}' prefix
    and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
    branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
    are present.
    """
    repo = None
    try:
        import git
        repo = git.Repo('.git')
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    except Exception:
        # Anything else (no repo, corrupt repo, ...) — version info is best-effort.
        logger.warning('Git repo not found: Cannot compute the git version.')
        return ''
    if repo:
        sha = repo.head.commit.hexsha
        if repo.is_dirty():
            return '.dev0+{sha}.dirty'.format(sha=sha)
        # commit is clean
        # is it release of `version` ?
        try:
            tag = repo.git.describe(
                match='[0-9]*', exact_match=True,
                tags=True, dirty=True)
            assert tag == version, (tag, version)
            return '.release:{version}+{sha}'.format(version=version, sha=sha)
        except git.GitCommandError:
            return '.dev0+{sha}'.format(sha=sha)
    else:
        return 'no_git_version'


def write_version(filename=os.path.join(*['airflow', 'git_version'])):
    """Write the computed git version string to airflow/git_version."""
    text = "{}".format(git_version(version))
    with open(filename, 'w') as a:
        a.write(text)


# Extras dependency groups.
# NOTE: the variable was renamed from `async` to `async_packages` because
# `async` is a reserved keyword from Python 3.7 onwards (the extras key
# 'async' below is unchanged, so installs are unaffected).
async_packages = [
    'greenlet>=0.4.9',
    'eventlet>= 0.9.7',
    'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
sendgrid = ['sendgrid>=5.2.0']
celery = [
    'celery>=4.0.0',
    'flower>=0.7.3'
]
cgroups = [
    'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
    'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
emr = ['boto3>=1.0.0']
gcp_api = [
    'httplib2',
    'google-api-python-client>=1.5.0, <1.6.0',
    'oauth2client>=2.0.2, <2.1.0',
    'PyOpenSSL',
    'google-cloud-dataflow',
    'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
    'hive-thrift-py>=0.0.1',
    'pyhive>=0.1.3',
    'impyla>=0.13.3',
    'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=1.1.1']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
ssh = ['paramiko>=2.1.1']
salesforce = ['simple-salesforce>=0.72']
s3 = ['boto3>=1.0.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
            'requests_kerberos>=0.10.0',
            'thrift_sasl>=0.2.0',
            'snakebite[kerberos]>=2.7.8',
            'kerberos>=1.2.5']
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0']  # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']

# Convenience aggregate groups.
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
    'click',
    'freezegun',
    'jira',
    'lxml>=3.3.4',
    'mock',
    'moto==1.1.19',
    'nose',
    'nose-ignore-docstring==0.2',
    'nose-timer',
    'parameterized',
    'qds-sdk>=1.9.6',
    'rednose',
    'paramiko',
    'requests_mock'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker + ssh


def do_setup():
    """Write the git version file and invoke setuptools.setup()."""
    write_version()
    setup(
        name='apache-airflow',
        description='Programmatically author, schedule and monitor data pipelines',
        license='Apache License 2.0',
        version=version,
        packages=find_packages(exclude=['tests*']),
        package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
        include_package_data=True,
        zip_safe=False,
        scripts=['airflow/bin/airflow'],
        install_requires=[
            'alembic>=0.8.3, <0.9',
            'bleach==2.0.0',
            'configparser>=3.5.0, <3.6.0',
            'croniter>=0.3.17, <0.4',
            'dill>=0.2.2, <0.3',
            'flask>=0.11, <0.12',
            'flask-admin==1.4.1',
            'flask-cache>=0.13.1, <0.14',
            'flask-login==0.2.11',
            'flask-swagger==0.2.13',
            'flask-wtf==0.14',
            'funcsigs==1.0.0',
            'future>=0.16.0, <0.17',
            'gitpython>=2.0.2',
            'gunicorn>=19.4.0, <20.0',
            'iso8601>=0.1.12',
            'jinja2>=2.7.3, <2.9.0',
            'lxml>=3.6.0, <4.0',
            'markdown>=2.5.2, <3.0',
            'pandas>=0.17.1, <1.0.0',
            'pendulum==1.3.2',
            'psutil>=4.2.0, <5.0.0',
            'pygments>=2.0.1, <3.0',
            'python-daemon>=2.1.1, <2.2',
            'python-dateutil>=2.3, <3',
            'python-nvd3==0.14.2',
            'requests>=2.5.1, <3',
            'setproctitle>=1.1.8, <2',
            'sqlalchemy>=0.9.8',
            'sqlalchemy-utc>=0.9.0',
            'tabulate>=0.7.5, <0.8.0',
            'thrift>=0.9.2',
            'tzlocal>=1.4',
            'zope.deprecation>=4.0, <5.0',
        ],
        setup_requires=[
            'docutils>=0.14, <1.0',
        ],
        extras_require={
            'all': devel_all,
            'all_dbs': all_dbs,
            'async': async_packages,
            'azure': azure,
            'celery': celery,
            'cgroups': cgroups,
            'cloudant': cloudant,
            'crypto': crypto,
            'dask': dask,
            'databricks': databricks,
            'datadog': datadog,
            'devel': devel_minreq,
            'devel_hadoop': devel_hadoop,
            'doc': doc,
            'docker': docker,
            'emr': emr,
            'gcp_api': gcp_api,
            'github_enterprise': github_enterprise,
            'hdfs': hdfs,
            'hive': hive,
            'jdbc': jdbc,
            'kerberos': kerberos,
            'ldap': ldap,
            'mssql': mssql,
            'mysql': mysql,
            'oracle': oracle,
            'password': password,
            'postgres': postgres,
            'qds': qds,
            'rabbitmq': rabbitmq,
            's3': s3,
            'salesforce': salesforce,
            'samba': samba,
            'sendgrid': sendgrid,
            'slack': slack,
            'ssh': ssh,
            'statsd': statsd,
            'vertica': vertica,
            'webhdfs': webhdfs,
            'jira': jira,
            'redis': redis,
        },
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Topic :: System :: Monitoring',
        ],
        author='Apache Software Foundation',
        author_email='dev@airflow.incubator.apache.org',
        url='http://airflow.incubator.apache.org/',
        download_url=(
            'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
        cmdclass={
            'test': Tox,
            'extra_clean': CleanCommand,
        },
    )


if __name__ == "__main__":
    do_setup()
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for Linux servers running LVM. """ import math import os import socket from oslo.config import cfg from cinder.brick import exception as brick_exception from cinder.brick.local_dev import lvm as lvm from cinder import exception from cinder.image import image_utils from cinder.openstack.common import fileutils from cinder.openstack.common import log as logging from cinder.openstack.common import processutils from cinder.openstack.common import units from cinder import utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('volume_group', default='cinder-volumes', help='Name for the VG that will contain exported volumes'), cfg.IntOpt('lvm_mirrors', default=0, help='If >0, create LVs with multiple mirrors. 
Note that ' 'this requires lvm_mirrors + 2 PVs with available space'), cfg.StrOpt('lvm_type', default='default', help='Type of LVM volumes to deploy; (default or thin)'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class LVMVolumeDriver(driver.VolumeDriver): """Executes commands relating to Volumes.""" VERSION = '2.0.0' def __init__(self, vg_obj=None, *args, **kwargs): super(LVMVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.hostname = socket.gethostname() self.vg = vg_obj self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'LVM' self.protocol = 'local' def set_execute(self, execute): self._execute = execute def check_for_setup_error(self): """Verify that requirements are in place to use LVM driver.""" if self.vg is None: root_helper = utils.get_root_helper() try: self.vg = lvm.LVM(self.configuration.volume_group, root_helper, lvm_type=self.configuration.lvm_type, executor=self._execute) except brick_exception.VolumeGroupNotFound: message = ("Volume Group %s does not exist" % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) vg_list = volutils.get_all_volume_groups( self.configuration.volume_group) vg_dict = \ (vg for vg in vg_list if vg['name'] == self.vg.vg_name).next() if vg_dict is None: message = ("Volume Group %s does not exist" % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) if self.configuration.lvm_type == 'thin': # Specific checks for using Thin provisioned LV's if not volutils.supports_thin_provisioning(): message = ("Thin provisioning not supported " "on this version of LVM.") raise exception.VolumeBackendAPIException(data=message) pool_name = "%s-pool" % self.configuration.volume_group if self.vg.get_volume(pool_name) is None: try: self.vg.create_thin_pool(pool_name) except processutils.ProcessExecutionError as exc: exception_message = ("Failed to create thin pool, " "error message was: 
%s" % exc.stderr) raise exception.VolumeBackendAPIException( data=exception_message) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100m' return '%sg' % size_in_g def _volume_not_present(self, volume_name): return self.vg.get_volume(volume_name) is None def _delete_volume(self, volume, is_snapshot=False): """Deletes a logical volume.""" if self.configuration.volume_clear != 'none' and \ self.configuration.lvm_type != 'thin': self._clear_volume(volume, is_snapshot) name = volume['name'] if is_snapshot: name = self._escape_snapshot(volume['name']) self.vg.delete(name) def _clear_volume(self, volume, is_snapshot=False): # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority if is_snapshot: # if the volume to be cleared is a snapshot of another volume # we need to clear out the volume using the -cow instead of the # directly volume path. We need to skip this if we are using # thin provisioned LVs. # bug# lp1191812 dev_path = self.local_path(volume) + "-cow" else: dev_path = self.local_path(volume) # TODO(jdg): Maybe we could optimize this for snaps by looking at # the cow table and only overwriting what's necessary? 
# for now we're still skipping on snaps due to hang issue if not os.path.exists(dev_path): msg = (_('Volume device file path %s does not exist.') % dev_path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) size_in_g = volume.get('size', volume.get('volume_size', None)) if size_in_g is None: msg = (_("Size for volume: %s not found, " "cannot secure delete.") % volume['id']) LOG.error(msg) raise exception.InvalidParameterValue(msg) # clear_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in vol_sz_in_meg = size_in_g * units.Ki volutils.clear_volume( vol_sz_in_meg, dev_path, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) def _escape_snapshot(self, snapshot_name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. if not snapshot_name.startswith('snapshot'): return snapshot_name return '_' + snapshot_name def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): vg_ref = self.vg if vg is not None: vg_ref = vg vg_ref.create_volume(name, size, lvm_type, mirror_count) def create_volume(self, volume): """Creates a logical volume.""" mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, mirror_count) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, self.configuration.lvm_mirrors) # Some configurations of LVM do not automatically activate # ThinLVM snapshot LVs. 
self.vg.activate_lv(snapshot['name'], is_snapshot=True) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in volutils.copy_volume(self.local_path(snapshot), self.local_path(volume), snapshot['volume_size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute) def delete_volume(self, volume): """Deletes a logical volume.""" # NOTE(jdg): We don't need to explicitly call # remove export here because we already did it # in the manager before we got here. if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True if self.vg.lv_has_snapshot(volume['name']): LOG.error(_('Unabled to delete due to existing snapshot ' 'for volume: %s') % volume['name']) raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), snapshot['volume_name'], self.configuration.lvm_type) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if self._volume_not_present(self._escape_snapshot(snapshot['name'])): # If the snapshot isn't present, then don't attempt to delete LOG.warning(_("snapshot: %s not found, " "skipping delete operations") % snapshot['name']) return True # TODO(yamahata): zeroing out the whole snapshot triggers COW. # it's quite slow. 
self._delete_volume(snapshot, is_snapshot=True) def local_path(self, volume, vg=None): if vg is None: vg = self.configuration.volume_group # NOTE(vish): stops deprecation warning escaped_group = vg.replace('-', '--') escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size']) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors LOG.info(_('Creating clone of volume: %s') % src_vref['id']) volume_name = src_vref['name'] temp_id = 'tmp-snap-%s' % volume['id'] temp_snapshot = {'volume_name': volume_name, 'size': src_vref['size'], 'volume_size': src_vref['size'], 'name': 'clone-snap-%s' % volume['id'], 'id': temp_id} self.create_snapshot(temp_snapshot) self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, mirror_count) self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in try: volutils.copy_volume( self.local_path(temp_snapshot), self.local_path(volume), src_vref['size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute) finally: self.delete_snapshot(temp_snapshot) def clone_image(self, volume, image_location, image_id, image_meta): return None, False def backup_volume(self, context, backup, backup_service): 
"""Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) volume_path = self.local_path(volume) with utils.temporary_chown(volume_path): with fileutils.file_open(volume_path) as volume_file: backup_service.backup(backup, volume_file) def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" volume_path = self.local_path(volume) with utils.temporary_chown(volume_path): with fileutils.file_open(volume_path, 'wb') as volume_file: backup_service.restore(backup, volume['id'], volume_file) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") if self.vg is None: LOG.warning(_('Unable to update stats on non-initialized ' 'Volume Group: %s'), self.configuration.volume_group) return self.vg.update_volume_group_info() data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. 
data["volume_backend_name"] = self.backend_name data["vendor_name"] = 'Open Source' data["driver_version"] = self.VERSION data["storage_protocol"] = self.protocol if self.configuration.lvm_mirrors > 0: data['total_capacity_gb'] =\ self.vg.vg_mirror_size(self.configuration.lvm_mirrors) data['free_capacity_gb'] =\ self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) elif self.configuration.lvm_type == 'thin': data['total_capacity_gb'] = self.vg.vg_thin_pool_size data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space else: data['total_capacity_gb'] = self.vg.vg_size data['free_capacity_gb'] = self.vg.vg_free_space data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False data['location_info'] =\ ('LVMVolumeDriver:%(hostname)s:%(vg)s' ':%(lvm_type)s:%(lvm_mirrors)s' % {'hostname': self.hostname, 'vg': self.configuration.volume_group, 'lvm_type': self.configuration.lvm_type, 'lvm_mirrors': self.configuration.lvm_mirrors}) self._stats = data def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" self.vg.extend_volume(volume['name'], self._sizestr(new_size)) def manage_existing(self, volume, existing_ref): """Manages an existing LV. Renames the LV to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. """ lv_name = existing_ref['lv_name'] self.vg.get_volume(lv_name) # Attempt to rename the LV to match the OpenStack internal name. try: self.vg.rename_volume(lv_name, volume['name']) except processutils.ProcessExecutionError as exc: exception_message = (_("Failed to rename logical volume %(name)s, " "error message was: %(err_msg)s") % {'name': lv_name, 'err_msg': exc.stderr}) raise exception.VolumeBackendAPIException( data=exception_message) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing LV for manage_existing. 
existing_ref is a dictionary of the form: {'lv_name': <name of LV>} """ # Check that the reference is valid if 'lv_name' not in existing_ref: reason = _('Reference must contain lv_name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lv_name = existing_ref['lv_name'] lv = self.vg.get_volume(lv_name) # Raise an exception if we didn't find a suitable LV. if not lv: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical volume does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer. try: lv_size = int(math.ceil(float(lv['size']))) except ValueError: exception_message = (_("Failed to manage existing volume " "%(name)s, because reported size %(size)s " "was not a floating-point number.") % {'name': lv_name, 'size': lv['size']}) raise exception.VolumeBackendAPIException( data=exception_message) return lv_size class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSCSI target information in the same format as an ietadm discovery i.e. '<ip>:<port>,<portal> <target IQN>' ``provider_auth`` if present, contains a space-separated triple: '<auth method> <auth username> <auth password>'. `CHAP` is the only auth_method in use at the moment. 
""" def __init__(self, *args, **kwargs): self.db = kwargs.get('db') self.target_helper = self.get_target_helper(self.db) super(LVMISCSIDriver, self).__init__(*args, **kwargs) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI' self.protocol = 'iSCSI' def set_execute(self, execute): super(LVMISCSIDriver, self).set_execute(execute) if self.target_helper is not None: self.target_helper.set_execute(execute) def _create_target(self, iscsi_name, iscsi_target, volume_path, chap_auth, lun=0, check_exit_code=False, old_name=None): # NOTE(jdg): tgt driver has an issue where with a lot of activity # (or sometimes just randomly) it will get *confused* and attempt # to reuse a target ID, resulting in a target already exists error # Typically a simple retry will address this # For now we have this while loop, might be useful in the # future to throw a retry decorator in common or utils attempts = 2 while attempts > 0: attempts -= 1 try: # NOTE(jdg): For TgtAdm case iscsi_name is all we need # should clean this all up at some point in the future tid = self.target_helper.create_iscsi_target( iscsi_name, iscsi_target, 0, volume_path, chap_auth, check_exit_code=check_exit_code, old_name=old_name) break except brick_exception.ISCSITargetCreateFailed: if attempts == 0: raise else: LOG.warning(_('Error creating iSCSI target, retrying ' 'creation for target: %s') % iscsi_name) return tid def ensure_export(self, context, volume): volume_name = volume['name'] iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume_name) volume_path = "/dev/%s/%s" % (self.configuration.volume_group, volume_name) # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need # should clean this all up at some point in the future model_update = self.target_helper.ensure_export( context, volume, iscsi_name, volume_path, self.configuration.volume_group) if model_update: self.db.volume_update(context, volume['id'], model_update) def create_export(self, 
context, volume): return self._create_export(context, volume) def _create_export(self, context, volume, vg=None): """Creates an export for a logical volume.""" if vg is None: vg = self.configuration.volume_group volume_path = "/dev/%s/%s" % (vg, volume['name']) data = self.target_helper.create_export(context, volume, volume_path, self.configuration) return { 'provider_location': data['location'], 'provider_auth': data['auth'], } def remove_export(self, context, volume): self.target_helper.remove_export(context, volume) def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): """Optimize the migration if the destination is on the same server. If the specified host is another back-end on the same server, and the volume is not attached, we can do the migration locally without going through iSCSI. """ false_ret = (False, None) if volume['status'] != 'available': return false_ret if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ info.split(':') lvm_mirrors = int(lvm_mirrors) except ValueError: return false_ret if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname): return false_ret if dest_vg != self.vg.vg_name: vg_list = volutils.get_all_volume_groups() try: (vg for vg in vg_list if vg['name'] == dest_vg).next() except StopIteration: message = (_("Destination Volume Group %s does not exist") % dest_vg) LOG.error(message) return false_ret helper = utils.get_root_helper() dest_vg_ref = lvm.LVM(dest_vg, helper, lvm_type=lvm_type, executor=self._execute) self.remove_export(ctxt, volume) self._create_volume(volume['name'], self._sizestr(volume['size']), lvm_type, lvm_mirrors, dest_vg_ref) volutils.copy_volume(self.local_path(volume), self.local_path(volume, vg=dest_vg), volume['size'], self.configuration.volume_dd_blocksize, execute=self._execute) self._delete_volume(volume) model_update = self._create_export(ctxt, 
volume, vg=dest_vg) return (True, model_update) def _iscsi_location(self, ip, target, iqn, lun=None): return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, target, iqn, lun) def _iscsi_authentication(self, chap, name, password): return "%s %s %s" % (chap, name, password) class LVMISERDriver(LVMISCSIDriver, driver.ISERDriver): """Executes commands relating to ISER volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSER target information in the same format as an ietadm discovery i.e. '<ip>:<port>,<portal> <target IQN>' ``provider_auth`` if present, contains a space-separated triple: '<auth method> <auth username> <auth password>'. `CHAP` is the only auth_method in use at the moment. """ def __init__(self, *args, **kwargs): self.target_helper = self.get_target_helper(kwargs.get('db')) LVMVolumeDriver.__init__(self, *args, **kwargs) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'LVM_iSER' self.protocol = 'iSER'
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Tests for the os-simple-tenant-usage API extension: index/show views,
# policy enforcement, the XML serializer templates, and flavor lookup
# fallbacks in the controller.

import datetime

from lxml import etree
import webob

from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import api
from nova.compute import flavors
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils

# Fixture dimensions: TENANTS tenants x SERVERS instances each, every
# instance running for HOURS hours inside the [START, STOP] window.  The
# expected usage totals asserted below are all derived from these numbers.
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW

# Flavor every fake instance is "booted" with.
FAKE_INST_TYPE = {'id': 1,
                  'vcpus': VCPUS,
                  'root_gb': ROOT_GB,
                  'ephemeral_gb': EPHEMERAL_GB,
                  'memory_mb': MEMORY_MB,
                  'name': 'fakeflavor',
                  'flavorid': 'foo',
                  'rxtx_factor': 1.0,
                  'vcpu_weight': 1,
                  'swap': 0}


def get_fake_db_instance(start, end, instance_id, tenant_id):
    """Build one fake DB instance dict spanning [start, end].

    The flavor is embedded in system_metadata, mirroring how the compute
    layer records it on real instances.
    """
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    return {'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000000%02d' % instance_id,
            'image_ref': '1',
            'project_id': tenant_id,
            'user_id': 'fakeuser',
            'display_name': 'name',
            'state_description': 'state',
            'instance_type_id': 1,
            'launched_at': start,
            'terminated_at': end,
            'system_metadata': sys_meta}


def fake_instance_get_active_by_window_joined(self, context, begin, end,
                                              project_id):
    # Stubbed over api.API.get_active_by_window in setUp(); takes ``self``
    # because it is installed as a bound-style method on the API class.
    # NOTE: x / SERVERS relies on Python 2 integer floor division to spread
    # instances evenly over the fake tenants.
    return [get_fake_db_instance(START,
                                 STOP,
                                 x,
                                 "faketenant_%s" % (x / SERVERS))
            for x in xrange(TENANTS * SERVERS)]


class SimpleTenantUsageTest(test.TestCase):
    """End-to-end tests of the usage index/show views through the WSGI app."""

    def setUp(self):
        super(SimpleTenantUsageTest, self).setUp()
        self.stubs.Set(api.API, "get_active_by_window",
                       fake_instance_get_active_by_window_joined)
        self.admin_context = context.RequestContext('fakeadmin_0',
                                                    'faketenant_0',
                                                    is_admin=True)
        self.user_context = context.RequestContext('fakeadmin_0',
                                                   'faketenant_0',
                                                   is_admin=False)
        self.alt_user_context = context.RequestContext('fakeadmin_0',
                                                       'faketenant_1',
                                                       is_admin=False)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Simple_tenant_usage'])

    def _test_verify_index(self, start, stop):
        # Query the index view as admin and check per-tenant aggregate totals.
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage?start=%s&end=%s' %
            (start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.admin_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        usages = res_dict['tenant_usages']
        for i in xrange(TENANTS):
            self.assertEqual(int(usages[i]['total_hours']),
                             SERVERS * HOURS)
            self.assertEqual(int(usages[i]['total_local_gb_usage']),
                             SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
            self.assertEqual(int(usages[i]['total_memory_mb_usage']),
                             SERVERS * MEMORY_MB * HOURS)
            self.assertEqual(int(usages[i]['total_vcpus_usage']),
                             SERVERS * VCPUS * HOURS)
            # Non-detailed index must not include per-server breakdowns.
            self.assertFalse(usages[i].get('server_usages'))

    def test_verify_index(self):
        self._test_verify_index(START, STOP)

    def test_verify_index_future_end_time(self):
        # An end time in the future is valid; usage is clamped to "now".
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_index(START, future)

    def test_verify_show(self):
        self._test_verify_show(START, STOP)

    def test_verify_show_future_end_time(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_show(START, future)

    def _get_tenant_usages(self, detailed=''):
        # Helper: fetch the index view with an explicit ``detailed`` flag.
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage?'
            'detailed=%s&start=%s&end=%s' %
            (detailed, START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.admin_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        return res_dict['tenant_usages']

    def test_verify_detailed_index(self):
        # detailed=1 adds a per-server breakdown to each tenant's usage.
        usages = self._get_tenant_usages('1')
        for i in xrange(TENANTS):
            servers = usages[i]['server_usages']
            for j in xrange(SERVERS):
                self.assertEqual(int(servers[j]['hours']), HOURS)

    def test_verify_simple_index(self):
        usages = self._get_tenant_usages(detailed='0')
        for i in xrange(TENANTS):
            self.assertEqual(usages[i].get('server_usages'), None)

    def test_verify_simple_index_empty_param(self):
        # NOTE(lzyeval): 'detailed=&start=..&end=..'
        # An empty detailed value should behave the same as detailed=0.
        usages = self._get_tenant_usages()
        for i in xrange(TENANTS):
            self.assertEqual(usages[i].get('server_usages'), None)

    def _test_verify_show(self, start, stop):
        # Query the per-tenant show view as a plain user and verify each
        # server's uptime/hours and that its UUID belongs to this tenant.
        tenant_id = 0
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage/'
            'faketenant_%s?start=%s&end=%s' %
            (tenant_id, start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)

        usage = res_dict['tenant_usage']
        servers = usage['server_usages']
        self.assertEqual(len(usage['server_usages']), SERVERS)
        uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
                 (x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
        for j in xrange(SERVERS):
            delta = STOP - START
            uptime = delta.days * 24 * 3600 + delta.seconds
            self.assertEqual(int(servers[j]['uptime']), uptime)
            self.assertEqual(int(servers[j]['hours']), HOURS)
            self.assertIn(servers[j]['instance_id'], uuids)

    def test_verify_show_cant_view_other_tenant(self):
        # A non-admin user from another tenant must be denied by policy.
        req = webob.Request.blank(
            '/v2/faketenant_1/os-simple-tenant-usage/'
            'faketenant_0?start=%s&end=%s' %
            (START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        rules = {
            "compute_extension:simple_tenant_usage:show":
                common_policy.parse_rule([
                    ["role:admin"], ["project_id:%(project_id)s"]
                ])
        }
        common_policy.set_rules(common_policy.Rules(rules))

        try:
            res = req.get_response(fakes.wsgi_app(
                fake_auth_context=self.alt_user_context,
                init_only=('os-simple-tenant-usage',)))
            self.assertEqual(res.status_int, 403)
        finally:
            # Always restore the default policy rules for other tests.
            policy.reset()

    def test_get_tenants_usage_with_bad_start_date(self):
        # start date after end date must be rejected with HTTP 400.
        future = NOW + datetime.timedelta(hours=HOURS)
        tenant_id = 0
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage/'
            'faketenant_%s?start=%s&end=%s' %
            (tenant_id, future.isoformat(), NOW.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 400)


class SimpleTenantUsageSerializerTest(test.TestCase):
    """Tests for the XML serializer templates of the extension."""

    def _verify_server_usage(self, raw_usage, tree):
        # Every key of raw_usage must appear exactly once as a child element.
        self.assertEqual('server_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            self.assertEqual(str(raw_usage[child.tag]), child.text)

        self.assertEqual(len(not_seen), 0)

    def _verify_tenant_usage(self, raw_usage, tree):
        # Same field-by-field check as above, recursing into server_usages.
        self.assertEqual('tenant_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            if child.tag == 'server_usages':
                for idx, gr_child in enumerate(child):
                    self._verify_server_usage(
                        raw_usage['server_usages'][idx], gr_child)
            else:
                self.assertEqual(str(raw_usage[child.tag]),
                                 child.text)

        self.assertEqual(len(not_seen), 0)

    def test_serializer_show(self):
        serializer = simple_tenant_usage.SimpleTenantUsageTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usage = dict(
            tenant_id='tenant',
            total_local_gb_usage=789,
            total_vcpus_usage=456,
            total_memory_mb_usage=123,
            total_hours=24,
            start=yesterday,
            stop=today,
            server_usages=[dict(
                    instance_id='00000000-0000-0000-0000-0000000000000000',
                    name='test',
                    hours=24,
                    memory_mb=1024,
                    local_gb=50,
                    vcpus=1,
                    tenant_id='tenant',
                    flavor='m1.small',
                    started_at=yesterday,
                    ended_at=today,
                    state='terminated',
                    uptime=86400),
                dict(
                    instance_id='00000000-0000-0000-0000-0000000000000002',
                    name='test2',
                    hours=12,
                    memory_mb=512,
                    local_gb=25,
                    vcpus=2,
                    tenant_id='tenant',
                    flavor='m1.tiny',
                    started_at=yesterday,
                    ended_at=today,
                    state='terminated',
                    uptime=43200),
                ],
            )
        tenant_usage = dict(tenant_usage=raw_usage)
        text = serializer.serialize(tenant_usage)

        tree = etree.fromstring(text)

        self._verify_tenant_usage(raw_usage, tree)

    def test_serializer_index(self):
        serializer = simple_tenant_usage.SimpleTenantUsagesTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usages = [dict(
                tenant_id='tenant1',
                total_local_gb_usage=1024,
                total_vcpus_usage=23,
                total_memory_mb_usage=512,
                total_hours=24,
                start=yesterday,
                stop=today,
                server_usages=[dict(
                        instance_id='00000000-0000-0000-0000-0000000000000001',
                        name='test1',
                        hours=24,
                        memory_mb=1024,
                        local_gb=50,
                        vcpus=2,
                        tenant_id='tenant1',
                        flavor='m1.small',
                        started_at=yesterday,
                        ended_at=today,
                        state='terminated',
                        uptime=86400),
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000002',
                        name='test2',
                        hours=42,
                        memory_mb=4201,
                        local_gb=25,
                        vcpus=1,
                        tenant_id='tenant1',
                        flavor='m1.tiny',
                        started_at=today,
                        ended_at=yesterday,
                        state='terminated',
                        uptime=43200),
                    ],
                ),
            dict(
                tenant_id='tenant2',
                total_local_gb_usage=512,
                total_vcpus_usage=32,
                total_memory_mb_usage=1024,
                total_hours=42,
                start=today,
                stop=yesterday,
                server_usages=[dict(
                        instance_id='00000000-0000-0000-0000-0000000000000003',
                        name='test3',
                        hours=24,
                        memory_mb=1024,
                        local_gb=50,
                        vcpus=2,
                        tenant_id='tenant2',
                        flavor='m1.small',
                        started_at=yesterday,
                        ended_at=today,
                        state='terminated',
                        uptime=86400),
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000002',
                        name='test2',
                        hours=42,
                        memory_mb=4201,
                        local_gb=25,
                        vcpus=1,
                        tenant_id='tenant4',
                        flavor='m1.tiny',
                        started_at=today,
                        ended_at=yesterday,
                        state='terminated',
                        uptime=43200),
                    ],
                ),
            ]
        tenant_usages = dict(tenant_usages=raw_usages)
        text = serializer.serialize(tenant_usages)

        tree = etree.fromstring(text)

        self.assertEqual('tenant_usages', tree.tag)
        self.assertEqual(len(raw_usages), len(tree))
        for idx, child in enumerate(tree):
            self._verify_tenant_usage(raw_usages[idx], child)


class SimpleTenantUsageControllerTest(test.TestCase):
    """Unit tests for the controller's flavor-resolution logic."""

    def setUp(self):
        super(SimpleTenantUsageControllerTest, self).setUp()
        self.controller = simple_tenant_usage.SimpleTenantUsageController()

        class FakeComputeAPI:
            # Minimal stand-in: only flavor id 1 is known.
            def get_instance_type(self, context, flavor_type):
                if flavor_type == 1:
                    return flavors.get_default_flavor()
                else:
                    raise exception.InstanceTypeNotFound(flavor_type)

        self.compute_api = FakeComputeAPI()
        self.context = None

        now = timeutils.utcnow()

        self.baseinst = dict(display_name='foo',
                             launched_at=now - datetime.timedelta(1),
                             terminated_at=now,
                             instance_type_id=1,
                             vm_state='deleted',
                             deleted=0)
        basetype = flavors.get_default_flavor()
        sys_meta = utils.dict_to_metadata(
            flavors.save_flavor_info({}, basetype))
        self.baseinst['system_metadata'] = sys_meta
        self.basetype = flavors.extract_flavor(self.baseinst)

    def test_get_flavor_from_sys_meta(self):
        # Non-deleted instances get their type information from their
        # system_metadata
        flavor = self.controller._get_flavor(self.context,
                                             self.compute_api,
                                             self.baseinst, {})
        self.assertEqual(flavor, self.basetype)

    def test_get_flavor_from_non_deleted_with_id_fails(self):
        # If an instance is not deleted and missing type information from
        # system_metadata, then that's a bug
        inst_without_sys_meta = dict(self.baseinst, system_metadata=[])
        self.assertRaises(KeyError,
                          self.controller._get_flavor,
                          self.context,
                          self.compute_api,
                          inst_without_sys_meta, {})

    def test_get_flavor_from_deleted_with_id(self):
        # Deleted instances may not have type info in system_metadata,
        # so verify that they get their type from a lookup of their
        # instance_type_id
        inst_without_sys_meta = dict(self.baseinst, system_metadata=[],
                                     deleted=1)
        flavor = self.controller._get_flavor(self.context,
                                             self.compute_api,
                                             inst_without_sys_meta, {})
        self.assertEqual(flavor, flavors.get_default_flavor())

    def test_get_flavor_from_deleted_with_id_of_deleted(self):
        # Verify the legacy behavior of instance_type_id pointing to a
        # missing type being non-fatal
        inst_without_sys_meta = dict(self.baseinst, system_metadata=[],
                                     deleted=1,
                                     instance_type_id=2)
        flavor = self.controller._get_flavor(self.context,
                                             self.compute_api,
                                             inst_without_sys_meta, {})
        self.assertEqual(flavor, None)
"""Tests for the custom JSONField: round-tripping Python values through the
database, Django serializer integration, default handling, and queryset
lookups against the project's test models."""

import warnings
from collections import OrderedDict
from decimal import Decimal

from django.core.serializers import deserialize, serialize
from django.core.serializers.base import DeserializationError
from django.forms import ValidationError
from django.test import TestCase

from .models import (
    CallableDefaultModel, GenericForeignKeyObj, JSONCharModel, JSONModel,
    JSONModelCustomEncoders, JSONModelWithForeignKey, JSONNotRequiredModel,
    JSONRequiredModel, MTIChildModel, MTIParentModel, OrderedJSONModel,
    RemoteJSONModel,
)


class JSONModelWithForeignKeyTestCase(TestCase):
    def test_object_create(self):
        # Smoke test: creating a row whose FK target exists must not raise.
        foreign_obj = GenericForeignKeyObj.objects.create(name='Brain')
        JSONModelWithForeignKey.objects.create(foreign_obj=foreign_obj)


class RemoteJSONFieldTests(TestCase):
    """Test JSON fields across a ForeignKey"""

    @classmethod
    def setUpTestData(cls):
        RemoteJSONModel.objects.create()

    def test_related_accessor(self):
        # Attribute access alone is the assertion: it must not raise.
        RemoteJSONModel.objects.get().foreign

    def test_select_related(self):
        RemoteJSONModel.objects.select_related('foreign').get()


class JSONFieldTest(TestCase):
    """JSONField Wrapper Tests"""

    # Subclasses (JSONCharFieldTest) swap in a different model to re-run the
    # whole suite against another field implementation.
    json_model = JSONModel

    def test_json_field_create(self):
        """Test saving a JSON object in our JSONField"""
        json_obj = {
            "item_1": "this is a json blah",
            "blergh": "hey, hey, hey"}

        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_string_in_json_field(self):
        """Test saving an ordinary Python string in our JSONField"""
        json_obj = 'blah blah'
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_float_in_json_field(self):
        """Test saving a Python float in our JSONField"""
        json_obj = 1.23
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_int_in_json_field(self):
        """Test saving a Python integer in our JSONField"""
        json_obj = 1234567
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_decimal_in_json_field(self):
        """Test saving a Python Decimal in our JSONField"""
        json_obj = Decimal(12.34)
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)
        # here we must know to convert the returned string back to Decimal,
        # since json does not support that format
        self.assertEqual(Decimal(new_obj.json), json_obj)

    def test_json_field_modify(self):
        """Test modifying a JSON object in our JSONField"""
        json_obj_1 = {'a': 1, 'b': 2}
        json_obj_2 = {'a': 3, 'b': 4}

        obj = self.json_model.objects.create(json=json_obj_1)
        self.assertEqual(obj.json, json_obj_1)
        obj.json = json_obj_2

        self.assertEqual(obj.json, json_obj_2)
        obj.save()

        self.assertEqual(obj.json, json_obj_2)
        self.assertTrue(obj)

    def test_json_field_load(self):
        """Test loading a JSON object from the DB"""
        json_obj_1 = {'a': 1, 'b': 2}
        obj = self.json_model.objects.create(json=json_obj_1)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj_1)

    def test_json_list(self):
        """Test storing a JSON list"""
        json_obj = ["my", "list", "of", 1, "objs", {"hello": "there"}]

        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)
        self.assertEqual(new_obj.json, json_obj)

    def test_empty_objects(self):
        """Test storing empty objects"""
        # All of these are falsy but valid JSON and must round-trip intact.
        for json_obj in [{}, [], 0, '', False]:
            obj = self.json_model.objects.create(json=json_obj)
            new_obj = self.json_model.objects.get(id=obj.id)
            self.assertEqual(json_obj, obj.json)
            self.assertEqual(json_obj, new_obj.json)

    def test_custom_encoder(self):
        """Test encoder_cls and object_hook"""
        value = 1 + 3j  # A complex number
        obj = JSONModelCustomEncoders.objects.create(json=value)
        new_obj = JSONModelCustomEncoders.objects.get(pk=obj.pk)
        self.assertEqual(value, new_obj.json)

    def test_django_serializers(self):
        """Test serializing/deserializing jsonfield data"""
        for json_obj in [{}, [], 0, '', False, {'key': 'value',
                                                'num': 42,
                                                'ary': list(range(5)),
                                                'dict': {'k': 'v'}}]:
            obj = self.json_model.objects.create(json=json_obj)
            new_obj = self.json_model.objects.get(id=obj.id)
            self.assertTrue(new_obj)

        queryset = self.json_model.objects.all()
        ser = serialize('json', queryset)
        for dobj in deserialize('json', ser):
            obj = dobj.object
            pulled = self.json_model.objects.get(id=obj.pk)
            self.assertEqual(obj.json, pulled.json)

    def test_serialize_deserialize(self):
        self.json_model.objects.create(json={'foo': 'bar'})

        for f in ['python', 'json', 'xml']:
            with self.subTest(format=f):
                data = serialize(f, self.json_model.objects.all())
                deserialized, = deserialize(f, data)
                # The actual model instance is accessed as `object`.
                self.assertEqual(deserialized.object.json, {'foo': 'bar'})

    def test_serialize_deserialize_unsaved(self):
        unsaved = self.json_model(json={'foo': 'bar'})

        for f in ['python', 'json', 'xml']:
            with self.subTest(format=f):
                data = serialize(f, [unsaved])
                deserialized, = deserialize(f, data)
                # The actual model instance is accessed as `object`.
                self.assertEqual(deserialized.object.json, {'foo': 'bar'})

    def test_default_parameters(self):
        """Test providing a default value to the model"""
        model = JSONModel()
        model.json = {"check": 12}
        self.assertEqual(model.json, {"check": 12})
        self.assertEqual(type(model.json), dict)

        self.assertEqual(model.default_json, {"check": 12})
        self.assertEqual(type(model.default_json), dict)

    def test_invalid_json(self):
        # invalid json data {] in the json and default_json fields
        ser = '[{"pk": 1, "model": "tests.jsoncharmodel", ' \
            '"fields": {"json": "{]", "default_json": "{]"}}]'

        with self.assertRaises(DeserializationError) as cm:
            next(deserialize('json', ser))

        # The DeserializationError must chain the underlying ValidationError.
        inner = cm.exception.__context__
        self.assertIsInstance(inner, ValidationError)
        self.assertEqual('Enter valid JSON.', inner.messages[0])

    def test_integer_in_string_in_json_field(self):
        """Test saving the Python string '123' in our JSONField"""
        json_obj = '123'
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_boolean_in_string_in_json_field(self):
        """Test saving the Python string 'true' in our JSONField"""
        json_obj = 'true'
        obj = self.json_model.objects.create(json=json_obj)
        new_obj = self.json_model.objects.get(id=obj.id)

        self.assertEqual(new_obj.json, json_obj)

    def test_pass_by_reference_pollution(self):
        """Make sure the default parameter is copied rather than passed by reference"""
        model = JSONModel()
        model.default_json["check"] = 144
        model.complex_default_json[0]["checkcheck"] = 144

        self.assertEqual(model.default_json["check"], 144)
        self.assertEqual(model.complex_default_json[0]["checkcheck"], 144)

        # Make sure when we create a new model, it resets to the default value
        # and not to what we just set it to (it would be if it were passed by reference)
        model = JSONModel()

        self.assertEqual(model.default_json["check"], 12)
        self.assertEqual(model.complex_default_json[0]["checkcheck"], 1212)

    def test_save_blank_object(self):
        """Test that JSON model can save a blank object as none"""
        model = JSONModel()
        self.assertEqual(model.empty_default, {})

        model.save()
        self.assertEqual(model.empty_default, {})

        model1 = JSONModel(empty_default={"hey": "now"})
        self.assertEqual(model1.empty_default, {"hey": "now"})

        model1.save()
        self.assertEqual(model1.empty_default, {"hey": "now"})

    def test_model_full_clean(self):
        instances = [
            JSONNotRequiredModel(),
            JSONModel(json={'a': 'b'}),
        ]
        for instance in instances:
            with self.subTest(instance=instance):
                instance.full_clean()
                instance.save()


class JSONCharFieldTest(JSONFieldTest):
    # Re-runs the entire JSONFieldTest suite against the char-backed field.
    json_model = JSONCharModel


class MiscTests(TestCase):
    def test_load_kwargs_hook(self):
        data = OrderedDict([
            ('number', [1, 2, 3, 4]),
            ('notes', True),
            ('alpha', True),
            ('romeo', True),
            ('juliet', True),
            ('bravo', True),
        ])
        instance = OrderedJSONModel.objects.create(json=data)
        from_db = OrderedJSONModel.objects.get()

        expected_key_order = ['number', 'notes', 'alpha', 'romeo', 'juliet', 'bravo']
        # OrderedJSONModel explicitly sets `object_pairs_hook` to `OrderedDict`
        self.assertEqual(list(instance.json), expected_key_order)
        self.assertEqual(list(from_db.json), expected_key_order)
        self.assertIsInstance(from_db.json, OrderedDict)

    def test_callable_default_function(self):
        instance = CallableDefaultModel.objects.create()
        # FIX: this used assertTrue(a, b), where b is silently treated as the
        # failure *message* — the test passed for any truthy json value.
        # Compare against the expected default instead (the value the original
        # author supplied as the second argument).
        self.assertEqual(instance.json, {'example': 'data'})
        instance.refresh_from_db()
        self.assertEqual(instance.json, {'example': 'data'})

    def test_mti_deserialization(self):
        # Note that jsonfields are present on both the child and parent models.
        MTIChildModel.objects.create(
            parent_data={'parent': 'data'},
            child_data={'child': 'data'},
        )
        parent = MTIParentModel.objects.get()
        self.assertEqual(parent.parent_data, {'parent': 'data'})
        child = MTIChildModel.objects.get()
        self.assertEqual(child.parent_data, {'parent': 'data'})
        self.assertEqual(child.child_data, {'child': 'data'})

    def test_load_invalid_json(self):
        # Ensure invalid DB values don't crash deserialization.
        from django.db import connection
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO tests_jsonnotrequiredmodel (json) VALUES ("foo")')

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            instance = JSONNotRequiredModel.objects.get()

        # A single RuntimeWarning flags the fallback to the raw string.
        self.assertEqual(len(w), 1)
        self.assertIs(w[0].category, RuntimeWarning)
        self.assertEqual(str(w[0].message), (
            'tests.JSONNotRequiredModel.json failed to load invalid json (foo) '
            'from the database. The value has been returned as a string instead.'
        ))
        self.assertEqual(instance.json, 'foo')

    def test_resave_invalid_json(self):
        # Ensure invalid DB values are resaved as a JSON string.
        from django.db import connection
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO tests_jsonnotrequiredmodel (json) VALUES ("foo")')

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            instance = JSONNotRequiredModel.objects.get()

        self.assertEqual(len(w), 1)
        self.assertEqual(instance.json, 'foo')

        # Save instance and reload from the database.
        instance.save()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            instance = JSONNotRequiredModel.objects.get()

        # No deserialization issues, as 'foo' was saved as a serialized string.
        self.assertEqual(len(w), 0)
        self.assertEqual(instance.json, 'foo')


class QueryTests(TestCase):
    def test_values_deserializes_result(self):
        JSONModel.objects.create(json={'a': 'b'})

        instance = JSONModel.objects.values('json').get()
        self.assertEqual(instance['json'], {'a': 'b'})

        data = JSONModel.objects.values_list('json', flat=True).get()
        self.assertEqual(data, {'a': 'b'})

    def test_deferred_value(self):
        JSONModel.objects.create(json={'a': 'b'})
        instance = JSONModel.objects.defer('json').get()
        self.assertEqual(instance.json, {'a': 'b'})

    def test_exact_lookup(self):
        JSONModel.objects.create(json={'foo': 'bar'})
        JSONModel.objects.create(json={'bar': 'baz'})
        self.assertEqual(JSONModel.objects.count(), 2)
        self.assertEqual(JSONModel.objects.filter(json={'foo': 'bar'}).count(), 1)

    def test_exact_none_lookup(self):
        # Note that nullable JSON fields store a 'null' value, while non-nullable
        # fields serialize as '"null"'. That said, the query prep will ensure the
        # correct value is passed.
        JSONNotRequiredModel.objects.create(json=None)
        JSONNotRequiredModel.objects.create(json=100)
        self.assertEqual(JSONNotRequiredModel.objects.count(), 2)
        self.assertEqual(JSONNotRequiredModel.objects.filter(json=None).count(), 1)

        JSONRequiredModel.objects.create(json=None)
        JSONRequiredModel.objects.create(json=100)
        self.assertEqual(JSONRequiredModel.objects.count(), 2)
        self.assertEqual(JSONRequiredModel.objects.filter(json=None).count(), 1)

    def test_isnull_lookup(self):
        JSONNotRequiredModel.objects.create(json=None)
        JSONNotRequiredModel.objects.create(json=100)
        self.assertEqual(JSONNotRequiredModel.objects.count(), 2)
        self.assertEqual(JSONNotRequiredModel.objects.filter(json__isnull=True).count(), 1)

        # isnull is incompatible with non-nullable fields, as the value is
        # serialized as '"null"'.
JSONRequiredModel.objects.create(json=None) JSONRequiredModel.objects.create(json=100) self.assertEqual(JSONRequiredModel.objects.count(), 2) self.assertEqual(JSONRequiredModel.objects.filter(json__isnull=True).count(), 0) def test_regex_lookup(self): JSONModel.objects.create(json={'boom': 'town'}) JSONModel.objects.create(json={'move': 'town'}) JSONModel.objects.create(json={'save': 'town'}) self.assertEqual(JSONModel.objects.count(), 3) self.assertEqual(JSONModel.objects.filter(json__regex=r'boom').count(), 1) self.assertEqual(JSONModel.objects.filter(json__regex=r'town').count(), 3)
from __future__ import absolute_import

from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _

from sentry import features, roles
from sentry.auth import manager
from sentry.auth.helper import AuthHelper
from sentry.models import AuditLogEntryEvent, AuthProvider, OrganizationMember, User
from sentry.plugins import Response
from sentry.tasks.auth import email_missing_links, email_unlink_notifications
from sentry.utils import db
from sentry.utils.http import absolute_uri
from sentry.web.frontend.base import OrganizationView

# User-facing flash messages for the SSO settings flows.
ERR_NO_SSO = _('The SSO feature is not enabled for this organization.')

OK_PROVIDER_DISABLED = _('SSO authentication has been disabled.')

OK_REMINDERS_SENT = _(
    'A reminder email has been sent to members who have not yet linked their accounts.'
)


class AuthProviderSettingsForm(forms.Form):
    """Settings form shown on the page for an already-configured auth provider."""

    require_link = forms.BooleanField(
        label=_('Require SSO'),
        help_text=_('Require members use a valid linked SSO account to access this organization'),
        required=False,
    )

    default_role = forms.ChoiceField(
        label=_('Default Role'),
        choices=roles.get_choices(),
        help_text=_(
            'The default role new members will receive when logging in for the first time.'
        ),
    )


class OrganizationAuthSettingsView(OrganizationView):
    """Manage an organization's SSO provider: configure, disable, or create one."""

    # We restrict auth settings to org:admin as it allows a non-owner to
    # escalate members to own by disabling the default role.
    required_scope = 'org:admin'

    def _disable_provider(self, request, organization, auth_provider):
        """Tear down SSO for the organization.

        Writes an audit log entry, clears every member's sso:linked and
        sso:invalid flags, marks affected users as no longer managed, queues
        unlink-notification emails, and finally deletes the provider row.
        """
        self.create_audit_entry(
            request,
            organization=organization,
            target_object=auth_provider.id,
            event=AuditLogEntryEvent.SSO_DISABLE,
            data=auth_provider.get_audit_log_data(),
        )

        if db.is_sqlite():
            # SQLite path: clear the flags row-by-row instead of the bitwise
            # bulk UPDATE used below.
            for om in OrganizationMember.objects.filter(organization=organization):
                setattr(om.flags, 'sso:linked', False)
                setattr(om.flags, 'sso:invalid', False)
                om.save()
        else:
            # Single UPDATE that masks both flag bits off for all members.
            OrganizationMember.objects.filter(
                organization=organization,
            ).update(
                flags=F('flags').bitand(
                    ~getattr(OrganizationMember.flags, 'sso:linked'),
                ).bitand(
                    ~getattr(OrganizationMember.flags, 'sso:invalid'),
                ),
            )

        user_ids = OrganizationMember.objects.filter(organization=organization).values('user')
        User.objects.filter(id__in=user_ids).update(is_managed=False)

        email_unlink_notifications.delay(organization.id, request.user.id, auth_provider.provider)

        auth_provider.delete()

    def handle_existing_provider(self, request, organization, auth_provider):
        """Render/handle the settings page for an already-configured provider.

        POST ``op`` values: ``disable`` removes the provider, ``reinvite``
        re-sends link reminder emails, ``settings`` saves the settings form.
        Any other request renders the page with the provider's configure view.
        """
        provider = auth_provider.get_provider()

        if request.method == 'POST':
            op = request.POST.get('op')
            if op == 'disable':
                self._disable_provider(request, organization, auth_provider)

                messages.add_message(
                    request,
                    messages.SUCCESS,
                    OK_PROVIDER_DISABLED,
                )

                next_uri = reverse('sentry-organization-auth-settings', args=[organization.slug])
                return self.redirect(next_uri)
            elif op == 'reinvite':
                email_missing_links.delay(organization.id, request.user.id, provider.key)

                messages.add_message(
                    request,
                    messages.SUCCESS,
                    OK_REMINDERS_SENT,
                )

                next_uri = reverse(
                    'sentry-organization-auth-provider-settings', args=[
                        organization.slug])
                return self.redirect(next_uri)

        # Only bound (and therefore validatable) when op == 'settings';
        # otherwise this just pre-fills the current values for display.
        form = AuthProviderSettingsForm(
            data=request.POST if request.POST.get('op') == 'settings' else None,
            initial={
                'require_link': not auth_provider.flags.allow_unlinked,
                'default_role': organization.default_role,
            },
        )

        if form.is_valid():
            auth_provider.flags.allow_unlinked = not form.cleaned_data['require_link']
            auth_provider.save()

            organization.default_role = form.cleaned_data['default_role']
            organization.save()

        # Delegate provider-specific configuration UI; it may short-circuit
        # with a full HttpResponse or hand back a renderable Response.
        view = provider.get_configure_view()
        response = view(request, organization, auth_provider)
        if isinstance(response, HttpResponse):
            return response
        elif isinstance(response, Response):
            response = response.render(
                request, {
                    'auth_provider': auth_provider,
                    'organization': organization,
                    'provider': provider,
                }
            )

        pending_links_count = OrganizationMember.objects.filter(
            organization=organization,
            flags=~getattr(OrganizationMember.flags, 'sso:linked'),
        ).count()

        context = {
            'form': form,
            'pending_links_count': pending_links_count,
            'login_url': absolute_uri(reverse('sentry-organization-home', args=[organization.slug])),
            'auth_provider': auth_provider,
            'provider_name': provider.name,
            'content': response,
        }

        return self.respond('sentry/organization-auth-provider-settings.html', context)

    @transaction.atomic
    def handle(self, request, organization):
        """Entry point: route to the existing-provider page or the setup flow.

        Requires the organizations:sso feature; otherwise redirects home with
        an error message.
        """
        if not features.has('organizations:sso', organization, actor=request.user):
            messages.add_message(
                request,
                messages.ERROR,
                ERR_NO_SSO,
            )
            return HttpResponseRedirect(
                reverse('sentry-organization-home', args=[organization.slug])
            )

        try:
            auth_provider = AuthProvider.objects.get(
                organization=organization,
            )
        except AuthProvider.DoesNotExist:
            pass
        else:
            return self.handle_existing_provider(
                request=request,
                organization=organization,
                auth_provider=auth_provider,
            )

        if request.method == 'POST':
            provider_key = request.POST.get('provider')
            if not manager.exists(provider_key):
                raise ValueError('Provider not found: {}'.format(provider_key))

            helper = AuthHelper(
                request=request,
                organization=organization,
                provider_key=provider_key,
                flow=AuthHelper.FLOW_SETUP_PROVIDER,
            )

            feature = helper.provider.required_feature
            if feature and not features.has(feature, organization, actor=request.user):
                return HttpResponse('Provider is not enabled', status=401)

            if request.POST.get('init'):
                helper.init_pipeline()

            if not helper.pipeline_is_valid():
                return helper.error('Something unexpected happened during authentication.')

            # render first time setup view
            return helper.current_step()

        # Otherwise user is in bad state since frontend/react should handle this case
        return HttpResponseRedirect(
            reverse('sentry-organization-home', args=[organization.slug])
        )
import sys import traceback from PyQt4 import QtGui from PyQt4 import QtCore from PyQt4.QtCore import Qt import aaf from qt_aafmodel import AAFModel import clip_menu class GraphicsTimeSlider(QtGui.QGraphicsRectItem): def __init__(self,parent=None): super(GraphicsTimeSlider,self).__init__(parent) #self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable,True) self.height = 100 self.edge_spacing = 10 pen = QtGui.QPen() pen.setBrush(Qt.blue) self.setPen(pen) self.setBrush(Qt.blue) self.frame = 0 self.setZValue(100) self.setPos(0,0) def setHeight(self,value): self.height = value self.adjust() def setFrame(self,value): self.frame = int(value) self.adjust() #print "frame =", self.frame def getFrame(self): return self.frame def adjust(self): rect = QtCore.QRectF(0,0,1,self.height) rect.adjust(0,-self.edge_spacing,0,self.edge_spacing) self.setRect(rect) self.setPos(self.frame, 0) class GraphicsClip(QtGui.QGraphicsRectItem): def __init__(self,length, parent=None): super(GraphicsClip,self).__init__(parent) self.length = length self.track = None self.left = None self.right = None self.name = None self._reference = None self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable,True) def getReference(self): return self._reference def adjust(self): height = self.track.height self.setRect(QtCore.QRectF(0,0, self.length, height)) y = self.track.y() x = 0 if self.left: x = self.left.x() + self.left.length self.setPos(x,y) def paint(self,p,opt,w): super(GraphicsClip,self).paint(p,opt,w) if self.name: p.save() nameRect = QtCore.QRectF(self.rect()) #setCosmetic(True) p.pen().setCosmetic(True) p.drawText(nameRect,Qt.AlignLeft,self.name) p.restore() def contextMenuEvent(self, contextEvent): reload(clip_menu) clip_menu.clip_menu(contextEvent, self) class GraphicsClipTransition(GraphicsClip): def __init__(self,length, parent=None): super(GraphicsClipTransition,self).__init__(length, parent) self.cutpoint = 0 def paint(self,p,opt,w): super(GraphicsClipTransition,self).paint(p,opt,w) p.save() rect = 
self.rect() p.drawLine(rect.bottomLeft(), rect.topRight()) p.drawLine(rect.bottomLeft() + QtCore.QPointF(self.cutpoint, 0), rect.topLeft() + QtCore.QPointF(self.cutpoint, 0),) p.restore() class GraphicsTrack(QtGui.QGraphicsRectItem): def __init__(self,parent=None): super(GraphicsTrack,self).__init__(parent) self.height = 20 self.length = 0 self.name = "Track" self._reference = None self.timeline = None self.parent = None self.clips = [] def addClip(self,length,reference=None, transtion=False): if transtion: clip = GraphicsClipTransition(length) clip.cutpoint = reference.cutpoint else: clip = GraphicsClip(length) clip.track = self clip._reference = reference if self.clips: prev_clip = self.clips[-1] prev_clip.right = clip clip.left = prev_clip scene = self.scene() scene.addItem(clip) self.clips.append(clip) clip.adjust() self.length += length self.adjust() return clip def adjust(self): self.setRect(QtCore.QRectF(0,0,self.length,self.height)) spacing = self.timeline.track_spacing if self.parent: y = self.parent.y() self.setY(y + self.parent.height + spacing) for clip in self.clips: clip.adjust() class AAFTimeline(QtGui.QGraphicsScene): def __init__(self,parent=None): super(AAFTimeline,self).__init__(parent) self.tracks = [] self.track_spacing = 10 self.edge_spacing = 50 self.timeSlider = None self.timeSliderDrag = False def addTrack(self): track = GraphicsTrack() track.timeline = self if self.tracks: track.parent = self.tracks[-1] track.adjust() self.tracks.append(track) self.addItem(track) self.updateSceneRect() return track def updateSceneRect(self): rect = QtCore.QRectF() for track in self.tracks: rect = rect.united(track.sceneBoundingRect()) height = rect.height() rect.adjust(0,-self.edge_spacing,0,self.edge_spacing) self.setSceneRect(rect) self.timeSlider.edge_spacing = self.edge_spacing self.timeSlider.setHeight(height) def setFrame(self,value): self.timeSlider.setFrame(value) def getFrame(self): return self.timeSlider.getFrame() def clear(self): 
super(AAFTimeline,self).clear() self.tracks = [] self.timeSlider = GraphicsTimeSlider() self.timeSlider.edge_space = self.edge_spacing self.timeSlider.setPos(0,0) self.addItem(self.timeSlider) def adjustHeight(self, value): for track in self.tracks: track.height += value track.adjust() self.updateSceneRect() class AAFTimelineGraphicsView(QtGui.QGraphicsView): def __init__(self,parent=None): super(AAFTimelineGraphicsView,self).__init__(parent) self.timeSliderDrag = False #self.setViewportUpdateMode(QtGui.QGraphicsView.FullViewportUpdate) self.marginWidth = 90 self.topMaginHeight = 35 self.setViewportMargins(self.marginWidth, self.topMaginHeight, 0, 0) self.timelineWidget = TimeLineWidget(self) self.timelineWidget.frameChanged.connect(self.setCurrentFrame) self.timelineWidget.snap.connect(self.snapToNearest) self.frameSpinbox = QtGui.QSpinBox(self) self.frameSpinbox.setFixedSize(self.marginWidth-3,self.topMaginHeight-3) self.frameSpinbox.setRange(-1000000,10000000) self.frameSpinbox.valueChanged.connect(self.setCurrentFrame) self.trackWidgets = [] def updateTrackLabels(self,offset=0): scene = self.scene() edge = self.mapToScene(0,0) for track in self.trackWidgets: track.hide() for i, track in enumerate(scene.tracks): rect = track.rect() pos = track.pos() widget_pos = self.mapFromScene(pos) widget_height = self.mapFromScene(rect.bottomLeft()).y() - self.mapFromScene(rect.topLeft()).y() if i+1 > len(self.trackWidgets): l = QtGui.QLabel(self) l.setFrameStyle(QtGui.QFrame.Panel) self.trackWidgets.append(l) label = self.trackWidgets[i] label.show() label.move(0,widget_pos.y() + self.topMaginHeight) label.setText(track.name) label.setFixedWidth(self.marginWidth) label.setFixedHeight(widget_height + 2) self.frameSpinbox.raise_() def markIn(self,value): print "markIn", value self.timelineWidget.markIn(value) def markOut(self,value): print "markOut", value self.timelineWidget.markOut(value) def clearMarks(self): print "clear marks" self.timelineWidget.clearMarks() def 
setCurrentFrame(self,value): scene = self.scene() if scene: scene.setFrame(value) self.updateTimeLine() sliderRect = QtCore.QRectF(scene.timeSlider.sceneBoundingRect()) y = self.verticalScrollBar().value() self.ensureVisible(sliderRect) self.verticalScrollBar().setValue(y) #Don't change the Y Scroll self.frameSpinbox.setValue(int(value)) self.repaint() def currentFrame(self): scene = self.scene() if scene: return scene.getFrame() def updateTimeLine(self): t = self.timelineWidget t.move(QtCore.QPoint(self.marginWidth,0)) t.setFixedWidth(self.viewport().width()) t.setFixedHeight(self.topMaginHeight) scene = self.scene() if scene: t.setScale(self.transform().m11()) #print t.scale t.start = self.mapToScene(0,0).x() t.setCurrentFrame(self.currentFrame()) #t.end = self.mapToScene(self.width() - self.m, self.topMaginHeight).x() t.repaint() def snapToNearest(self,radius = 50): pos = self.mapFromScene(self.currentFrame(), 0) item = self.nearestItemAt(pos,radius) if item: self.setCurrentFrame(item.pos().x()) def nearestItemAt(self,pos,radius = 50): scene = self.scene() sceneRect = scene.sceneRect() top = self.mapFromScene(sceneRect.topLeft()).y() bottom = self.mapFromScene(sceneRect.bottomRight()).y() rect = QtCore.QRect(pos.x() - radius, top,pos.x() + radius, bottom) rectF = QtCore.QRectF(rect) nearest = None scenePos = self.mapToScene(pos) min_x = self.mapToScene(pos.x() - radius, 0).x() max_x = self.mapToScene(pos.x() + radius, 0).x() rectF = QtCore.QRectF(min_x, sceneRect.top(), max_x, sceneRect.bottom()) last_item = None last_distance = None for item in scene.items(rectF,mode=Qt.IntersectsItemBoundingRect): if isinstance(item, GraphicsClip): itemX = item.pos().x() if itemX > min_x and itemX < max_x: distance = abs(scenePos.x() - itemX) if last_item is None: last_item = item last_distance = distance else: if distance < last_distance: last_item = item last_distance = distance return last_item def paintEvent(self, event): #self.updateTrackLabels() result = 
super(AAFTimelineGraphicsView,self).paintEvent(event) self.updateTrackLabels() self.updateTimeLine() def mousePressEvent(self,event): pos = event.pos() scenePos = self.mapToScene(pos) print "scene",scenePos scene = self.scene() if scene: if not scene.itemAt(scenePos): self.setCurrentFrame(scenePos.x()) self.timeSliderDrag = True if event.modifiers() == Qt.ControlModifier: self.snapToNearest() event.accept() super(AAFTimelineGraphicsView,self).mousePressEvent(event) def mouseMoveEvent(self, event): pos = event.pos() scenePos = self.mapToScene(pos) scene = self.scene() if self.timeSliderDrag: self.setCurrentFrame(scenePos.x()) if event.modifiers() == Qt.ControlModifier: self.snapToNearest() super(AAFTimelineGraphicsView,self).mouseMoveEvent(event) def mouseReleaseEvent(self,event): if self.timeSliderDrag: self.timeSliderDrag = False super(AAFTimelineGraphicsView,self).mouseReleaseEvent(event) def wheelEvent(self, event): if event.modifiers() == Qt.AltModifier: self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse) scaleFactorX = 1.15 scaleFactorY = 1 if event.delta() > 0: self.scale(scaleFactorX, 1) else: self.scale(1.0 / scaleFactorX, 1) else: super(AAFTimelineGraphicsView,self).wheelEvent(event) def zoom(self, value): self.setTransformationAnchor(QtGui.QGraphicsView.AnchorViewCenter) scaleFactorX = 1.15 #transform = self.transform() if value > 0: self.scale(scaleFactorX, 1) else: self.scale(1.0 / scaleFactorX, 1) def keyPressEvent(self, event): scene = self.scene() if scene: if event.key() == Qt.Key_F: mode=Qt.KeepAspectRatioByExpanding if event.modifiers() == Qt.ShiftModifier: mode = Qt.IgnoreAspectRatio self.fitInView(scene.sceneRect(),mode=mode) elif event.modifiers() == Qt.ControlModifier: if event.key() == Qt.Key_L: scene.adjustHeight(2) elif event.key() == Qt.Key_K: scene.adjustHeight(-2) elif event.key() == Qt.Key_BracketLeft: self.zoom(-1) elif event.key() == Qt.Key_BracketRight: self.zoom(1) elif event.key() == Qt.Key_Right: 
self.setCurrentFrame(self.currentFrame() + 1) elif event.key() == Qt.Key_Left: self.setCurrentFrame(self.currentFrame() - 1) elif event.key() == Qt.Key_I: self.markIn(self.currentFrame()) elif event.key() == Qt.Key_O: self.markOut(self.currentFrame()) elif event.key() == Qt.Key_G: self.clearMarks() else: super(AAFTimelineGraphicsView,self).keyPressEvent(event) else: super(AAFTimelineGraphicsView,self).keyPressEvent(event) class TimeLineWidget(QtGui.QWidget): frameChanged = QtCore.pyqtSignal(int) snap = QtCore.pyqtSignal(int) def __init__(self,parent): super(TimeLineWidget,self).__init__(parent) fps = 24 self.start = 0 self.end = 1 self.scale = 1 self.snapRadius = 50 self.currentFrame = 10 self.silderDrag = True self.fps = fps self.steps = (1,2,3,int(fps/4), int(fps/2), fps,fps*2, fps*30, fps*30*5,fps*30*15,fps*30*30,fps*30*60) self.mark_in = None self.mark_out = None def markIn(self,value): self.mark_in= value self.repaint() def markOut(self,value): self.mark_out = value self.repaint() def clearMarks(self): self.mark_in = None self.mark_out = None self.repaint() def setCurrentFrame(self,value): self.currentFrame = int(value) self.repaint() def setScale(self,value): self.scale = value self.end = (self.width() / self.scale) + self.start def setEnd(self, value): self.end = value self.scale = self.length() / self.width() def length(self): return self.end - self.start def mapFromFrame(self,value): return (float(value) - self.start) * self.scale def mapToFrame(self, value): frame = (value/ float(self.width()) * self.length()) + self.start return int(frame) def mousePressEvent(self, event): frame = self.mapToFrame(event.pos().x()) self.setCurrentFrame(frame) self.silderDrag = True self.frameChanged.emit(frame) if event.modifiers() == Qt.ControlModifier: self.snap.emit(self.snapRadius) super(TimeLineWidget,self).mousePressEvent(event) def mouseMoveEvent(self, event): if self.silderDrag: frame = self.mapToFrame(event.pos().x()) self.setCurrentFrame(frame) 
self.frameChanged.emit(frame) if event.modifiers() == Qt.ControlModifier: self.snap.emit(self.snapRadius) super(TimeLineWidget,self).mouseMoveEvent(event) def mouseReleaseEvent(self,event): if self.silderDrag: self.silderDrag = False super(TimeLineWidget,self).mouseMoveEvent(event) def paintEvent(self, event): super(TimeLineWidget,self).paintEvent(event) painter =QtGui.QPainter() painter.begin(self) #painter.setBrush(Qt.black) rect = self.rect() rect.adjust(0,0,0,-2) painter.drawRect(rect) #paint timeslider rect = QtCore.QRectF(0,0,1.0 * self.scale,self.height()) rect.translate(self.mapFromFrame(self.currentFrame), 0) pen =QtGui.QPen(Qt.blue) painter.setPen(pen) painter.setBrush(Qt.blue) painter.drawRect(rect) #paint marks painter.setPen(QtGui.QPen(Qt.black)) painter.setBrush(Qt.NoBrush) fm = QtGui.QFontMetricsF(painter.font()) #draw markin if not self.mark_in is None: painter.save() height = self.height() * .4 x = self.mapFromFrame(self.mark_in) char = ']' font_width = fm.width(char) painter.drawText(QtCore.QPointF(x-font_width,height),char) painter.restore() #draw markout if not self.mark_out is None: painter.save() height = self.height() * .4 x = self.mapFromFrame(self.mark_out) char = '[' #font_width = fm.width(char) painter.drawText(QtCore.QPointF(x,height),char) painter.restore() #draw selection if not self.mark_in is None and not self.mark_out is None: painter.save() selection_rect = QtCore.QRectF(0, 0, (self.mark_out - self.mark_in)*self.scale, self.height()) selection_rect.translate(self.mapFromFrame(self.mark_in), 0) color = QtGui.QColor(Qt.blue) color.setAlphaF(.4) brush = QtGui.QBrush(color) painter.setBrush(brush) painter.setPen(QtGui.QPen(color)) painter.drawRect(selection_rect) painter.restore() length = self.length() last_tick = 0 last_text = -90 step = 1 fps = self.fps #find a optimized step #this should be adjusted of different frame rates for step in self.steps: if self.width() / length * step > 5: break start = int(round(self.start/step) * step) 
#start at a multple of step for i in xrange(start, int(self.end), step): x = self.mapFromFrame(i) if x - last_tick > 5: last_tick = x height_ratio = .7 if i % (step * 2) == 0: height_ratio = .5 painter.drawLine(x, self.height() * height_ratio, x, self.height()-4) if i & 1 == 0: #text for only even numbers if int(round(i/step) * step) == i: #only multiples of step if x - last_text > 100: last_text = x height = self.height() * .4 painter.drawText(QtCore.QPointF(x,height), str(i)) painter.end() def AddMobFromIndex(index,grahicsview): treeItem = index.internalPointer() mob = treeItem.item if isinstance(mob, aaf.mob.Mob): SetMob(mob,grahicsview) def get_tracks(mob,trackType= 'Picture'): tracks = [] for slot in mob.slots(): segment = slot.segment if segment.media_kind == trackType: if isinstance(segment, aaf.component.NestedScope): for nested_segment in segment.segments(): if isinstance(nested_segment, aaf.component.Sequence): tracks.append(nested_segment) elif isinstance(segment, aaf.component.Sequence): tracks.append(segment) elif isinstance(segment, aaf.component.SourceClip): tracks.append([segment]) elif isinstance(segment, aaf.component.Selector): tracks.append([segment.selected]) elif isinstance(segment, aaf.component.EssenceGroup): #choices = [] #for c in xrange(segment.CountChoices()): #choices.append(segment.GetChoiceAt(c)) tracks.append([segment]) return tracks def get_transition_offset(index,component_list): offset = 0 nextItem = None prevousItem = None if len(component_list) > index + 1: nextItem = component_list[index + 1] if index != 0: prevousItem = component_list[index -1] if isinstance(nextItem, aaf.component.Transition): offset -= nextItem.length - nextItem.cutpoint if isinstance(prevousItem, aaf.component.Transition): offset -= prevousItem.cutpoint return offset def get_source_clip_name(item): ref = item.resolve_ref() if ref: if ref.name: return ref.name for clip in item.walk(): ref = clip.resolve_ref() if ref: if ref.name: return ref.name return 
"SourceClip" def get_operation_group_name(item): operation_name = item.operation for segment in item.input_segments(): if isinstance(segment, aaf.component.SourceClip): name = get_source_clip_name(segment) if name: return "%s(%s)" % (name,operation_name) else: for component in segment.components(): #print component if isinstance(component, aaf.component.SourceClip): name = get_source_clip_name(component) if name: return "%s(%s)" % (name,operation_name) def get_selector_name(item): segment = item.selected if isinstance(segment, aaf.component.SourceClip): return get_source_clip_name(segment) elif isinstance(segment, aaf.component.Sequence): for component in segment.components(): if isinstance(component, aaf.component.SourceClip): return get_source_clip_name(component) return "Selector" def SetMob(mob,grahicsview): scene = grahicsview.scene() scene.clear() video_tracks = get_tracks(mob) last_clip = None for track_num, segment in reversed(list(enumerate(video_tracks))): track = scene.addTrack() track.name = "Track V%i" % (track_num+1) track._reference = video_tracks[track_num] length = 0 if isinstance(segment, list): components = segment else: components = segment.components() for i,component in enumerate(components): color =Qt.red transtion = False if isinstance(component,aaf.component.Transition): last_clip.length -= component.length track.length -= component.length last_clip.adjust() color = Qt.yellow transtion = True #continue transition_offset = 0 if last_clip: if isinstance(last_clip._reference, aaf.component.Transition): transition_offset = last_clip._reference.length - last_clip._reference.cutpoint transition_offset = last_clip._reference.length #print component, component.cutpoint, component.length #continue #if not isinstance(component,aaf.component.Transition): #transition_offset = get_transition_offset(i,components) component_length = component.length - transition_offset clip = track.addClip(component_length,component, transtion) last_clip = clip #make 
filler and scope grey name = None if isinstance(component,(aaf.component.Filler, aaf.component.ScopeReference)): color = Qt.gray elif isinstance(component, aaf.component.SourceClip): name = get_source_clip_name(component) elif isinstance(component, aaf.component.OperationGroup): color = Qt.magenta #segment = component.GetInputSegmentAt(0) name = get_operation_group_name(component) if not name: name = component.operation if isinstance(component, aaf.component.Selector): name = get_selector_name(component) color - Qt.darkYellow clip.setBrush(color) if name: clip.name = name clip.adjust() length += component_length scene.updateSceneRect() if __name__ == "__main__": from optparse import OptionParser parser = OptionParser() (options, args) = parser.parse_args() if not args: parser.error("not enough arguments") file_path = args[0] f = aaf.open(file_path) app = QtGui.QApplication(sys.argv) window = QtGui.QSplitter() #layout = QtGui.QHBoxLayout() header = f.header storage = f.storage topLevelMobs = list(storage.toplevel_mobs()) model = AAFModel(storage) timeline = AAFTimeline() tree = QtGui.QTreeView() tree.setModel(model) tree.resize(650,600) tree.expandToDepth(0) tree.resizeColumnToContents(0) graphicsview = AAFTimelineGraphicsView() graphicsview.resize(400,600) graphicsview.setScene(timeline) tree.doubleClicked.connect(lambda x,y=graphicsview: AddMobFromIndex(x,y)) if topLevelMobs: SetMob(topLevelMobs[0],graphicsview) window.addWidget(tree) window.addWidget(graphicsview) window.resize(900,600) #window.setLayout(layout) window.show() #graphicsview.show() sys.exit(app.exec_())
import requests
import zipfile
from simpledbf import Dbf5
import re
import os
import sqlalchemy as sa

from geocoder.data_loader import ETLThing


class CookCountyETL(ETLThing):
    """ETL for Cook County address points: download a zip of a DBF file,
    derive a staging table from the DBF schema, and merge into a final
    ``cook_county_addresses`` table."""

    def download(self, download_url=None):
        """Stream the source zip archive to ``self.zip_file_path``."""
        addresses = requests.get(download_url, stream=True)
        with open(self.zip_file_path, 'wb') as f:
            for chunk in addresses.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()

    def createTable(self):
        """Create the staging table from the DBF field definitions and dump
        the DBF rows to ``self.csv_file_path`` for bulk loading."""
        dbf_file_path = ''
        with zipfile.ZipFile(self.zip_file_path, 'r') as zf:
            for name in zf.namelist():
                if name.endswith('.dbf'):
                    dbf_file_path = zf.extract(name, path='downloads')

        type_lookup = {
            'N': 'INTEGER',
            'D': 'DATE',
        }
        dbf = Dbf5(dbf_file_path)
        all_fields = []
        self.fieldnames = []
        for field in dbf.fields:
            # NOTE(review): renamed loop variable from "type" to avoid
            # shadowing the builtin.
            name, field_type, length = field
            name = self.slugify(name)
            if name != 'deletionflag':
                if field_type == 'C':
                    sql = '%s VARCHAR(%s)' % (name, length)
                elif field_type == 'N' and length > 10:
                    # Numbers wider than 10 digits won't fit in an INTEGER.
                    sql = '%s DOUBLE PRECISION' % (name)
                else:
                    sql = '%s %s' % (name, type_lookup[field_type])
                all_fields.append(sql)
                self.fieldnames.append(name)

        fields_sql = ','.join(all_fields)
        create_table_sql = '''
            CREATE TABLE {0} ({1})
        '''.format(self.table_name, fields_sql)
        self.executeTransaction(create_table_sql)

        if os.path.exists(self.csv_file_path):
            os.remove(self.csv_file_path)
        dbf.to_csv(self.csv_file_path, header=False, chunksize=1024)

    def mergeTables(self):
        """Build the final ``cook_county_addresses`` table with renamed,
        human-friendly columns, plus primary key and lookup indexes."""
        final_fields = '''
            objectid AS id,
            addressid AS address_id,
            addrnopref AS address_number_prefix,
            addrno AS address_number,
            addrnosuff AS address_number_suffix,
            addrnosep AS address_number_separator,
            addrnocom AS address_number_common,
            stnameprd,
            stnameprm,
            stnameprt,
            stname AS street_name,
            stnamepot,
            stnamepod,
            stnamepom,
            stnamecom,
            subaddtype AS subaddress_type,
            subaddid AS subaddress_id,
            subaddelem,
            subaddcom,
            lndmrkname AS landmark_name,
            placename AS place_name,
            uspspn AS usps_place_name,
            uspspngnis AS gnis_place_id,
            uspsst AS usps_state,
            zip5 AS zipcode,
            gnismuni AS gnis_municipality_id,
            gnistwp AS gnis_township_id,
            gniscnty AS gnis_county_id,
            gnisstate AS gnis_state_id,
            uspsboxtyp AS usps_box_type,
            uspsboxid AS usps_box_id,
            uspsbox AS usps_box,
            addrdeliv AS delivery_address,
            cmpaddabrv AS complete_street_address,
            addrlastli AS city_state_zipcode,
            TRIM(COALESCE(LOWER(cmpaddabrv::VARCHAR), '')) || ' ' ||
              TRIM(COALESCE(LOWER(addrlastli::VARCHAR), '')) AS complete_address,
            xposition AS x_coordinate,
            yposition AS y_coordinate,
            longitude::double precision,
            latitude::double precision,
            usgridcord AS usng_address,
            pinsource AS pin_source,
            pin,
            anomaly,
            coordaccu AS coordinate_accuracy,
            univrsldt,
            editor,
            edittime AS edit_time,
            edittype AS edit_type,
            pwaeditor,
            pwaedtdate,
            pwa_commen AS edit_comment,
            pwa_status,
            geocode_mu AS geocode_municipality,
            document_s,
            comment
        '''
        # This is a little stupid since we are just loading chicago
        # addresses but I am lazy and did not want to rewrite stuff.
        create_all_addresses = '''
            CREATE TABLE cook_county_addresses AS (
              SELECT {0} FROM chicago_addresses
            )
        '''.format(final_fields)
        self.executeTransaction(create_all_addresses)

        add_pk = '''
            ALTER TABLE cook_county_addresses ADD PRIMARY KEY (id)
        '''
        self.executeTransaction(add_pk)

        pin_index = '''
            CREATE INDEX pin_idx ON cook_county_addresses (pin)
        '''
        self.executeTransaction(pin_index)

        pin_index = '''
            CREATE INDEX address_id_idx ON cook_county_addresses (address_id)
        '''
        self.executeTransaction(pin_index)


class ChicagoETL(CookCountyETL):
    # City of Chicago address points (Socrata dataset id in four_by_four).
    region_name = 'chicago'
    table_name = 'chicago_addresses'
    zip_file_path = 'downloads/chicago_addresses.zip'
    four_by_four = 'jev2-4wjs'


class SuburbsETL(CookCountyETL):
    # Suburban Cook County address points.
    region_name = 'suburbs'
    table_name = 'suburban_addresses'
    zip_file_path = 'downloads/suburbs_addresses.zip'
    four_by_four = '6mf5-x8ic'


if __name__ == "__main__":
    import argparse
    from sqlalchemy import create_engine

    parser = argparse.ArgumentParser(
        description='Bulk load Cook County addresses into a PostgreSQL database.'
    )
    parser.add_argument('--download', action='store_true',
                        help='Download fresh address data from Cook County')
    parser.add_argument('--load_data', action='store_true',
                        help='Load address data into database')
    parser.add_argument('--train', action='store_true',
                        help="Train an already initialized database")
    parser.add_argument('--block', action='store_true',
                        help="Pre-block addresses")

    args = parser.parse_args()

    cook_county_data_portal = 'https://datacatalog.cookcountyil.gov/api/geospatial/%s?method=export&format=Original'

    if args.load_data:
        engine = create_engine('postgresql://localhost:5432/geocoder')
        connection = engine.connect()

        chicago = ChicagoETL(connection, 'chicago_addresses')

        download_url = None
        if args.download:
            download_url = cook_county_data_portal % chicago.four_by_four

        chicago.run(download_url=download_url)
        chicago.mergeTables()

        # Skipping the suburbs for now
        # suburbs = SuburbsETL(connection, 'suburban_addresses')
        #
        # if args.download:
        #     download_url = cook_county_data_portal % suburbs.four_by_four
        #
        # suburbs.run(download_url=download_url)
        #
        # suburbs.mergeTables()

        connection.close()

    if args.train:
        from geocoder.deduper import DatabaseGazetteer
        import simplejson as json
        import dedupe

        engine = create_engine('postgresql://localhost:5432/geocoder')
        deduper = DatabaseGazetteer([{'field': 'complete_address',
                                      'type': 'Address'}],
                                    engine=engine)

        messy_data = json.load(open('geocoder/data/messy_addresses.json'))
        deduper.drawSample(messy_data, sample_size=30000)

        if os.path.exists('geocoder/data/training.json'):
            print('reading labeled examples from geocoder/data/training.json')
            with open('geocoder/data/training.json') as tf:
                deduper.readTraining(tf)

        dedupe.consoleLabel(deduper)
        deduper.train(ppc=0.1, index_predicates=False)

        # When finished, save our training away to disk
        with open('geocoder/data/training.json', 'w') as tf:
            deduper.writeTraining(tf)

        # Save our weights and predicates to disk. If the settings file
        # exists, we will skip all the training and learning next time we run
        # this file.
        with open('geocoder/data/dedupe.settings', 'wb') as sf:
            deduper.writeSettings(sf)

        deduper.cleanupTraining()

    if args.block:
        from geocoder.deduper import StaticDatabaseGazetteer

        engine = create_engine('postgresql://localhost:5432/geocoder')

        with open('geocoder/data/dedupe.settings', 'rb') as sf:
            deduper = StaticDatabaseGazetteer(sf, engine=engine)

        deduper.createMatchBlocksTable()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for linear.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import tempfile import numpy as np import six from tensorflow.python.client import session as tf_session from tensorflow.python.estimator import estimator from tensorflow.python.estimator import run_config from tensorflow.python.estimator.canned import linear from tensorflow.python.estimator.canned import metric_keys from tensorflow.python.estimator.export import export from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.feature_column import feature_column as feature_column_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import optimizer from tensorflow.python.training import saver from tensorflow.python.training 
import session_run_hook # Names of variables created by model. _AGE_WEIGHT_NAME = 'linear/linear_model/age/weights' _HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights' _BIAS_NAME = 'linear/linear_model/bias_weights' _LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights' def _save_variables_to_ckpt(model_dir): init_all_op = [variables.global_variables_initializer()] with tf_session.Session() as sess: sess.run(init_all_op) saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt')) class _CheckPartitionerVarHook(session_run_hook.SessionRunHook): """A `SessionRunHook` to check a paritioned variable.""" def __init__(self, test_case, var_name, var_dim, partitions): self._test_case = test_case self._var_name = var_name self._var_dim = var_dim self._partitions = partitions def begin(self): with variable_scope.variable_scope( variable_scope.get_variable_scope()) as scope: scope.reuse_variables() partitioned_weight = variable_scope.get_variable( self._var_name, shape=(self._var_dim, 1)) self._test_case.assertTrue( isinstance(partitioned_weight, variables.PartitionedVariable)) for part in partitioned_weight: self._test_case.assertEqual(self._var_dim // self._partitions, part.get_shape()[0]) class LinearRegressorPartitionerTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def testPartitioner(self): x_dim = 64 partitions = 4 def _partitioner(shape, dtype): del dtype # unused; required by Fn signature. # Only partition the embedding tensor. 
return [partitions, 1] if shape[0] == x_dim else [1] regressor = linear.LinearRegressor( feature_columns=( feature_column_lib.categorical_column_with_hash_bucket( 'language', hash_bucket_size=x_dim),), partitioner=_partitioner, model_dir=self._model_dir) def _input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english', 'spanish'], indices=[[0, 0], [0, 1]], dense_shape=[1, 2]) }, [[10.]] hook = _CheckPartitionerVarHook( self, _LANGUAGE_WEIGHT_NAME, x_dim, partitions) regressor.train( input_fn=_input_fn, steps=1, hooks=[hook]) def testDefaultPartitionerWithMultiplePsReplicas(self): partitions = 2 x_dim = 4 * 64 << 20 class FakeRunConfig(run_config.RunConfig): @property def num_ps_replicas(self): return partitions # Mock the device setter as ps is not available on test machines. with test.mock.patch.object(estimator, '_get_replica_device_setter', return_value=lambda _: '/cpu:0'): linear_regressor = linear.LinearRegressor( feature_columns=( feature_column_lib.categorical_column_with_hash_bucket( 'language', hash_bucket_size=x_dim),), config=FakeRunConfig(), model_dir=self._model_dir) def _input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english', 'spanish'], indices=[[0, 0], [0, 1]], dense_shape=[1, 2]) }, [[10.]] hook = _CheckPartitionerVarHook( self, _LANGUAGE_WEIGHT_NAME, x_dim, partitions) linear_regressor.train( input_fn=_input_fn, steps=1, hooks=[hook]) # TODO(b/36813849): Add tests with dynamic shape inputs using placeholders. 
class LinearRegressorEvaluationTest(test.TestCase):

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def test_evaluation_for_simple_data(self):
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=_AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=_BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 9.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=_AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=_BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch = 9 + 9 = 18
    # Average loss is the average over batch = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 18.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    with ops.Graph().as_default():
      variables.Variable([[11.0]], name=_AGE_WEIGHT_NAME)
      variables.Variable([2.0], name=_BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      features = {
          'age': ((1,), (1,)),
          'weights': ((1.,), (2.,))
      }
      labels = ((10.,), (10.,))
      return features, labels

    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        weight_feature_key='weights',
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)

    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 27.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    x_dim = 3
    label_dim = 2
    with ops.Graph().as_default():
      variables.Variable(
          [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=_AGE_WEIGHT_NAME)
      variables.Variable([7.0, 8.0], name=_BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=(
            feature_column_lib.numeric_column('age', shape=(x_dim,)),),
        label_dimension=label_dim,
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual((
        metric_keys.MetricKeys.LOSS,
        metric_keys.MetricKeys.LOSS_MEAN,
        ops.GraphKeys.GLOBAL_STEP
    ), eval_metrics.keys())

    # Logit is
    #   [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
    #                  [3.0, 4.0]
    #                  [5.0, 6.0]
    # which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])

  def test_evaluation_for_multiple_feature_columns(self):
    with ops.Graph().as_default():
      variables.Variable([[10.0]], name=_AGE_WEIGHT_NAME)
      variables.Variable([[2.0]], name=_HEIGHT_WEIGHT_NAME)
      variables.Variable([5.0], name=_BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    batch_size = 2
    feature_columns = [
        feature_column_lib.numeric_column('age'),
        feature_column_lib.numeric_column('height')
    ]
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([20, 40]),
            'height': np.array([4, 8])
        },
        y=np.array([[213.], [421.]]),
        batch_size=batch_size,
        num_epochs=None,
        shuffle=False)

    est = linear.LinearRegressor(
        feature_columns=feature_columns, model_dir=self._model_dir)

    eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
    self.assertItemsEqual((
        metric_keys.MetricKeys.LOSS,
        metric_keys.MetricKeys.LOSS_MEAN,
        ops.GraphKeys.GLOBAL_STEP
    ), eval_metrics.keys())

    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])


class LinearRegressorPredictTest(test.TestCase):

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with ops.Graph().as_default():
      variables.Variable([[10.]], name='linear/linear_model/x/weights')
      variables.Variable([.2], name=_BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('x'),),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x * weight + bias = 2. * 10. + .2 = 20.2
    self.assertAllClose([[20.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimenstional."""
    batch_size = 2
    label_dimension = 3
    x_dim = 4
    feature_columns = (
        feature_column_lib.numeric_column('x', shape=(x_dim,)),)
    with ops.Graph().as_default():
      variables.Variable(  # shape=[x_dim, label_dimension]
          [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
          name='linear/linear_model/x/weights')
      variables.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=_BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = x * weight + bias, shape=[batch_size, label_dimension]
    self.assertAllClose(
        [[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]], predicted_scores)

  def testTwoFeatureColumns(self):
    """Tests predict with two feature columns."""
    with ops.Graph().as_default():
      variables.Variable([[10.]], name='linear/linear_model/x0/weights')
      variables.Variable([[20.]], name='linear/linear_model/x1/weights')
      variables.Variable([.2], name=_BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    linear_regressor = linear.LinearRegressor(
        feature_columns=(
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1')),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x0': np.array([[2.]]),
           'x1': np.array([[3.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)


class LinearRegressorIntegrationTest(test.TestCase):

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def test_complete_flow(self):
    label_dimension = 2
    batch_size = 10
    feature_columns = [
        feature_column_lib.numeric_column('x', shape=(2,))
    ]
    est = linear.LinearRegressor(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    # TRAIN
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)

    # EVALUTE
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = list(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllClose(data, predictions, atol=0.01)

    # EXPORT
    feature_spec = feature_column_lib.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))


def _assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = math_ops.abs(expected - actual, 'diff') / expected
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=(
            'Condition expected =~ actual did not hold element-wise:'
            'expected = ', expected,
            'actual = ', actual,
            'rdiff = ', rdiff,
            'rtol = ', rtol,
        ),
        name=scope)


class LinearRegressorTrainingTest(test.TestCase):

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _mockOptimizer(self, expected_loss=None):
    expected_var_names = [
        '%s/part_0:0' % _AGE_WEIGHT_NAME,
        '%s/part_0:0' % _BIAS_NAME
    ]

    def _minimize(loss, global_step):
      trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(
          expected_var_names,
          [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      self.assertEquals(0, loss.shape.ndims)
      if expected_loss is None:
        return state_ops.assign_add(global_step, 1).op
      assert_loss = _assert_close(
          math_ops.to_float(expected_loss, name='expected'),
          loss,
          name='assert_loss')
      with ops.control_dependencies((assert_loss,)):
        return state_ops.assign_add(global_step, 1).op

    mock_optimizer = test.mock.NonCallableMock(
        spec=optimizer.Optimizer,
        wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

    # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
    # So, return mock_optimizer itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer

  def _assertCheckpoint(
      self, expected_global_step, expected_age_weight=None, expected_bias=None):
    shapes = {
        name: shape for (name, shape) in
        checkpoint_utils.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        checkpoint_utils.load_variable(
            self._model_dir, ops.GraphKeys.GLOBAL_STEP))

    self.assertEqual([1, 1], shapes[_AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertEqual(
          expected_age_weight,
          checkpoint_utils.load_variable(self._model_dir, _AGE_WEIGHT_NAME))

    self.assertEqual([1], shapes[_BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(
          expected_bias,
          checkpoint_utils.load_variable(self._model_dir, _BIAS_NAME))

  def testFromScratchWithDefaultOptimizer(self):
    # Create LinearRegressor.
    label = 5.
    age = 17
    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self._assertCheckpoint(num_steps)

  def testFromScratch(self):
    # Create LinearRegressor.
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = self._mockOptimizer(expected_loss=25.)
    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assertCheckpoint(
        expected_global_step=num_steps,
        expected_age_weight=0.,
        expected_bias=0.)

  def testFromCheckpoint(self):
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable([[age_weight]], name=_AGE_WEIGHT_NAME)
      variables.Variable([bias], name=_BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    # logits = age * age_weight + bias = 17 * 10. + 5. = 175
    # loss = (logits - label)^2 = (175 - 5)^2 = 28900
    mock_optimizer = self._mockOptimizer(expected_loss=28900.)
    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assertCheckpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)

  def testFromCheckpointMultiBatch(self):
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable([[age_weight]], name=_AGE_WEIGHT_NAME)
      variables.Variable([bias], name=_BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      _save_variables_to_ckpt(self._model_dir)

    # logits = age * age_weight + bias
    # logits[0] = 17 * 10. + 5. = 175
    # logits[1] = 15 * 10. + 5. = 155
    # loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
    mock_optimizer = self._mockOptimizer(expected_loss=52004.)
    linear_regressor = linear.LinearRegressor(
        feature_columns=(feature_column_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assertCheckpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)


if __name__ == '__main__':
  test.main()
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import json import os import re import textwrap import time import uuid from oslo_serialization import jsonutils from oslo_utils import encodeutils import pkg_resources import prettytable import six from six.moves.urllib import parse from novaclient import exceptions from novaclient.i18n import _ VALID_KEY_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE) def env(*args, **kwargs): """Returns the first environment variable set. If all are empty, defaults to '' or keyword arg `default`. """ for arg in args: value = os.environ.get(arg) if value: return value return kwargs.get('default', '') def get_service_type(f): """Retrieves service type from function.""" return getattr(f, 'service_type', None) def unauthenticated(func): """Adds 'unauthenticated' attribute to decorated function. Usage: >>> @unauthenticated ... def mymethod(f): ... pass """ func.unauthenticated = True return func def isunauthenticated(func): """Checks if the function does not require authentication. Mark such functions with the `@unauthenticated` decorator. :returns: bool """ return getattr(func, 'unauthenticated', False) def arg(*args, **kwargs): """Decorator for CLI args. Example: >>> @arg("name", help="Name of the new entity") ... def entity_create(args): ... 
pass """ def _decorator(func): add_arg(func, *args, **kwargs) return func return _decorator def add_arg(func, *args, **kwargs): """Bind CLI arguments to a shell.py `do_foo` function.""" if not hasattr(func, 'arguments'): func.arguments = [] # NOTE(sirp): avoid dups that can occur when the module is shared across # tests. if (args, kwargs) not in func.arguments: # Because of the semantics of decorator composition if we just append # to the options list positional options will appear to be backwards. func.arguments.insert(0, (args, kwargs)) def service_type(stype): """Adds 'service_type' attribute to decorated function. Usage: .. code-block:: python @service_type('volume') def mymethod(f): ... """ def inner(f): f.service_type = stype return f return inner def add_resource_manager_extra_kwargs_hook(f, hook): """Add hook to bind CLI arguments to ResourceManager calls. The `do_foo` calls in shell.py will receive CLI args and then in turn pass them through to the ResourceManager. Before passing through the args, the hooks registered here will be called, giving us a chance to add extra kwargs (taken from the command-line) to what's passed to the ResourceManager. 
""" if not hasattr(f, 'resource_manager_kwargs_hooks'): f.resource_manager_kwargs_hooks = [] names = [h.__name__ for h in f.resource_manager_kwargs_hooks] if hook.__name__ not in names: f.resource_manager_kwargs_hooks.append(hook) def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False): """Return extra_kwargs by calling resource manager kwargs hooks.""" hooks = getattr(f, "resource_manager_kwargs_hooks", []) extra_kwargs = {} for hook in hooks: hook_kwargs = hook(args) hook_name = hook.__name__ conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys()) if conflicting_keys and not allow_conflicts: msg = (_("Hook '%(hook_name)s' is attempting to redefine " "attributes '%(conflicting_keys)s'") % {'hook_name': hook_name, 'conflicting_keys': conflicting_keys}) raise exceptions.NoUniqueMatch(msg) extra_kwargs.update(hook_kwargs) return extra_kwargs def pretty_choice_list(l): return ', '.join("'%s'" % i for i in l) def pretty_choice_dict(d): """Returns a formatted dict as 'key=value'.""" return pretty_choice_list(['%s=%s' % (k, d[k]) for k in sorted(d.keys())]) def print_list(objs, fields, formatters={}, sortby_index=None): if sortby_index is None: sortby = None else: sortby = fields[sortby_index] mixed_case_fields = ['serverId'] pt = prettytable.PrettyTable([f for f in fields], caching=False) pt.align = 'l' for o in objs: row = [] for field in fields: if field in formatters: row.append(formatters[field](o)) else: if field in mixed_case_fields: field_name = field.replace(' ', '_') else: field_name = field.lower().replace(' ', '_') data = getattr(o, field_name, '') if data is None: data = '-' # '\r' would break the table, so remove it. 
data = six.text_type(data).replace("\r", "") row.append(data) pt.add_row(row) if sortby is not None: result = encodeutils.safe_encode(pt.get_string(sortby=sortby)) else: result = encodeutils.safe_encode(pt.get_string()) if six.PY3: result = result.decode() print(result) def _flatten(data, prefix=None): """Flatten a dict, using name as a prefix for the keys of dict. >>> _flatten('cpu_info', {'arch':'x86_64'}) [('cpu_info_arch': 'x86_64')] """ if isinstance(data, dict): for key, value in six.iteritems(data): new_key = '%s_%s' % (prefix, key) if prefix else key if isinstance(value, (dict, list)) and value: for item in _flatten(value, new_key): yield item else: yield new_key, value else: yield prefix, data def flatten_dict(data): """Return a new dict whose sub-dicts have been merged into the original. Each of the parents keys are prepended to the child's to prevent collisions. Any string elements will be JSON parsed before flattening. >>> flatten_dict({'service': {'host':'cloud9@compute-068', 'id': 143}}) {'service_host': colud9@compute-068', 'service_id': 143} """ data = data.copy() # Try and decode any nested JSON structures. for key, value in six.iteritems(data): if isinstance(value, six.string_types): try: data[key] = json.loads(value) except ValueError: pass return dict(_flatten(data)) def print_dict(d, dict_property="Property", dict_value="Value", wrap=0): pt = prettytable.PrettyTable([dict_property, dict_value], caching=False) pt.align = 'l' for k, v in sorted(d.items()): # convert dict to str to check length if isinstance(v, (dict, list)): v = jsonutils.dumps(v) if wrap > 0: v = textwrap.fill(six.text_type(v), wrap) # if value has a newline, add in multiple rows # e.g. fault with stacktrace if v and isinstance(v, six.string_types) and (r'\n' in v or '\r' in v): # '\r' would break the table, so remove it. 
if '\r' in v: v = v.replace('\r', '') lines = v.strip().split(r'\n') col1 = k for line in lines: pt.add_row([col1, line]) col1 = '' else: if v is None: v = '-' pt.add_row([k, v]) result = encodeutils.safe_encode(pt.get_string()) if six.PY3: result = result.decode() print(result) def find_resource(manager, name_or_id, wrap_exception=True, **find_args): """Helper for the _find_* methods.""" # for str id which is not uuid (for Flavor, Keypair and hypervsior in cells # environments search currently) if getattr(manager, 'is_alphanum_id_allowed', False): try: return manager.get(name_or_id) except exceptions.NotFound: pass # first try to get entity as uuid try: tmp_id = encodeutils.safe_encode(name_or_id) if six.PY3: tmp_id = tmp_id.decode() uuid.UUID(tmp_id) return manager.get(tmp_id) except (TypeError, ValueError, exceptions.NotFound): pass # then try to get entity as name try: try: resource = getattr(manager, 'resource_class', None) name_attr = resource.NAME_ATTR if resource else 'name' kwargs = {name_attr: name_or_id} kwargs.update(find_args) return manager.find(**kwargs) except exceptions.NotFound: pass # then try to find entity by human_id try: return manager.find(human_id=name_or_id, **find_args) except exceptions.NotFound: pass except exceptions.NoUniqueMatch: msg = (_("Multiple %(class)s matches found for '%(name)s', use an ID " "to be more specific.") % {'class': manager.resource_class.__name__.lower(), 'name': name_or_id}) if wrap_exception: raise exceptions.CommandError(msg) raise exceptions.NoUniqueMatch(msg) # finally try to get entity as integer id try: return manager.get(int(name_or_id)) except (TypeError, ValueError, exceptions.NotFound): msg = (_("No %(class)s with a name or ID of '%(name)s' exists.") % {'class': manager.resource_class.__name__.lower(), 'name': name_or_id}) if wrap_exception: raise exceptions.CommandError(msg) raise exceptions.NotFound(404, msg) def format_servers_list_networks(server): output = [] for (network, addresses) in 
server.networks.items(): if len(addresses) == 0: continue addresses_csv = ', '.join(addresses) group = "%s=%s" % (network, addresses_csv) output.append(group) return '; '.join(output) def format_security_groups(groups): return ', '.join(group['name'] for group in groups) def _format_field_name(attr): """Format an object attribute in a human-friendly way.""" # Split at ':' and leave the extension name as-is. parts = attr.rsplit(':', 1) name = parts[-1].replace('_', ' ') # Don't title() on mixed case if name.isupper() or name.islower(): name = name.title() parts[-1] = name return ': '.join(parts) def make_field_formatter(attr, filters=None): """ Given an object attribute, return a formatted field name and a formatter suitable for passing to print_list. Optionally pass a dict mapping attribute names to a function. The function will be passed the value of the attribute and should return the string to display. """ filter_ = None if filters: filter_ = filters.get(attr) def get_field(obj): field = getattr(obj, attr, '') if field and filter_: field = filter_(field) return field name = _format_field_name(attr) formatter = get_field return name, formatter def safe_issubclass(*args): """Like issubclass, but will just return False if not a class.""" try: if issubclass(*args): return True except TypeError: pass return False def do_action_on_many(action, resources, success_msg, error_msg): """Helper to run an action on many resources.""" failure_flag = False for resource in resources: try: action(resource) print(success_msg % resource) except Exception as e: failure_flag = True print(e) if failure_flag: raise exceptions.CommandError(error_msg) def load_entry_point(ep_name, name=None): """Try to load the entry point ep_name that matches name.""" for ep in pkg_resources.iter_entry_points(ep_name, name=name): try: # FIXME(dhellmann): It would be better to use stevedore # here, since it abstracts this difference in behavior # between versions of setuptools, but this seemed like a # 
more expedient fix. if hasattr(ep, 'resolve') and hasattr(ep, 'require'): return ep.resolve() else: return ep.load(require=False) except (ImportError, pkg_resources.UnknownExtra, AttributeError): continue def is_integer_like(val): """Returns validation of a value as an integer.""" try: int(val) return True except (TypeError, ValueError, AttributeError): return False def validate_flavor_metadata_keys(keys): for key in keys: valid_name = VALID_KEY_REGEX.match(key) if not valid_name: msg = _('Invalid key: "%s". Keys may only contain letters, ' 'numbers, spaces, underscores, periods, colons and ' 'hyphens.') raise exceptions.CommandError(msg % key) @contextlib.contextmanager def record_time(times, enabled, *args): """Record the time of a specific action. :param times: A list of tuples holds time data. :type times: list :param enabled: Whether timing is enabled. :type enabled: bool :param *args: Other data to be stored besides time data, these args will be joined to a string. """ if not enabled: yield else: start = time.time() yield end = time.time() times.append((' '.join(args), start, end)) def prepare_query_string(params): """Convert dict params to query string""" params = sorted(params.items(), key=lambda x: x[0]) return '?%s' % parse.urlencode(params) if params else ''
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for the unified I/O registry (register/unregister readers,
writers and identifiers, and the read/write dispatch logic)."""

import os
from copy import copy
from io import StringIO

import pytest
import numpy as np

from ..registry import _readers, _writers, _identifiers
from .. import registry as io_registry
from ...table import Table

# Snapshots of the registry state at import time; setup_function()
# empties the registries for each test and teardown_function() restores
# these originals afterwards.
_READERS_ORIGINAL = copy(_readers)
_WRITERS_ORIGINAL = copy(_writers)
_IDENTIFIERS_ORIGINAL = copy(_identifiers)


class TestData:
    """Minimal class wired into the I/O registry for testing."""
    read = classmethod(io_registry.read)
    write = io_registry.write


def setup_function(function):
    # Each test starts from empty registries.
    _readers.clear()
    _writers.clear()
    _identifiers.clear()


def empty_reader(*args, **kwargs):
    return TestData()


def empty_writer(table, *args, **kwargs):
    pass


def empty_identifier(*args, **kwargs):
    return True


def test_get_reader_invalid():
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.get_reader('test', TestData)
    assert str(exc.value).startswith(
        "No reader defined for format 'test' and class 'TestData'")


def test_get_writer_invalid():
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.get_writer('test', TestData)
    assert str(exc.value).startswith(
        "No writer defined for format 'test' and class 'TestData'")


def test_register_reader():
    io_registry.register_reader('test1', TestData, empty_reader)
    io_registry.register_reader('test2', TestData, empty_reader)

    assert io_registry.get_reader('test1', TestData) == empty_reader
    assert io_registry.get_reader('test2', TestData) == empty_reader

    io_registry.unregister_reader('test1', TestData)

    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_reader('test1', TestData)
    assert io_registry.get_reader('test2', TestData) == empty_reader

    io_registry.unregister_reader('test2', TestData)

    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_reader('test2', TestData)


def test_register_writer():
    io_registry.register_writer('test1', TestData, empty_writer)
    io_registry.register_writer('test2', TestData, empty_writer)

    assert io_registry.get_writer('test1', TestData) == empty_writer
    assert io_registry.get_writer('test2', TestData) == empty_writer

    io_registry.unregister_writer('test1', TestData)

    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_writer('test1', TestData)
    assert io_registry.get_writer('test2', TestData) == empty_writer

    io_registry.unregister_writer('test2', TestData)

    with pytest.raises(io_registry.IORegistryError):
        io_registry.get_writer('test2', TestData)


def test_register_identifier():
    io_registry.register_identifier('test1', TestData, empty_identifier)
    io_registry.register_identifier('test2', TestData, empty_identifier)

    io_registry.unregister_identifier('test1', TestData)
    io_registry.unregister_identifier('test2', TestData)


def test_register_reader_invalid():
    io_registry.register_reader('test', TestData, empty_reader)
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.register_reader('test', TestData, empty_reader)
    assert (str(exc.value) == "Reader for format 'test' and class 'TestData' "
                              "is already defined")


def test_register_writer_invalid():
    io_registry.register_writer('test', TestData, empty_writer)
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.register_writer('test', TestData, empty_writer)
    assert (str(exc.value) == "Writer for format 'test' and class 'TestData' "
                              "is already defined")


def test_register_identifier_invalid():
    io_registry.register_identifier('test', TestData, empty_identifier)
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.register_identifier('test', TestData, empty_identifier)
    assert (str(exc.value) == "Identifier for format 'test' and class "
                              "'TestData' is already defined")


def test_unregister_reader_invalid():
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.unregister_reader('test', TestData)
    assert str(exc.value) == "No reader defined for format 'test' and class 'TestData'"


def test_unregister_writer_invalid():
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.unregister_writer('test', TestData)
    assert str(exc.value) == "No writer defined for format 'test' and class 'TestData'"


def test_unregister_identifier_invalid():
    with pytest.raises(io_registry.IORegistryError) as exc:
        io_registry.unregister_identifier('test', TestData)
    assert str(exc.value) == "No identifier defined for format 'test' and class 'TestData'"


def test_register_reader_force():
    io_registry.register_reader('test', TestData, empty_reader)
    io_registry.register_reader('test', TestData, empty_reader, force=True)


def test_register_writer_force():
    io_registry.register_writer('test', TestData, empty_writer)
    io_registry.register_writer('test', TestData, empty_writer, force=True)


def test_register_identifier_force():
    io_registry.register_identifier('test', TestData, empty_identifier)
    io_registry.register_identifier('test', TestData, empty_identifier,
                                    force=True)


def test_read_noformat():
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read()
    assert str(exc.value).startswith("Format could not be identified.")


def test_write_noformat():
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write()
    assert str(exc.value).startswith("Format could not be identified.")


def test_read_noformat_arbitrary():
    """Test that all identifier functions can accept arbitrary input"""
    _identifiers.update(_IDENTIFIERS_ORIGINAL)
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read(object())
    assert str(exc.value).startswith("Format could not be identified.")


def test_read_noformat_arbitrary_file(tmpdir):
    """Tests that all identifier functions can accept arbitrary files"""
    # NOTE(review): this restores _readers (not _identifiers) before
    # exercising identification -- matches the docstring only loosely;
    # confirm intent against upstream history before changing.
    _readers.update(_READERS_ORIGINAL)
    testfile = str(tmpdir.join('foo.example'))
    with open(testfile, 'w') as f:
        f.write("Hello world")

    with pytest.raises(io_registry.IORegistryError) as exc:
        Table.read(testfile)
    assert str(exc.value).startswith("Format could not be identified.")


def test_write_noformat_arbitrary():
    """Test that all identifier functions can accept arbitrary input"""
    _identifiers.update(_IDENTIFIERS_ORIGINAL)
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write(object())
    assert str(exc.value).startswith("Format could not be identified.")


def test_write_noformat_arbitrary_file(tmpdir):
    """Tests that all identifier functions can accept arbitrary files"""
    _writers.update(_WRITERS_ORIGINAL)
    testfile = str(tmpdir.join('foo.example'))

    with pytest.raises(io_registry.IORegistryError) as exc:
        Table().write(testfile)
    assert str(exc.value).startswith("Format could not be identified.")


def test_read_toomanyformats():
    io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
    io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read()
    assert str(exc.value) == "Format is ambiguous - options are: test1, test2"


def test_write_toomanyformats():
    io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
    io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write()
    assert str(exc.value) == "Format is ambiguous - options are: test1, test2"


def test_read_format_noreader():
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read(format='test')
    assert str(exc.value).startswith(
        "No reader defined for format 'test' and class 'TestData'")


def test_write_format_nowriter():
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write(format='test')
    assert str(exc.value).startswith(
        "No writer defined for format 'test' and class 'TestData'")


def test_read_identifier(tmpdir):

    io_registry.register_identifier(
        'test1', TestData,
        lambda o, path, fileobj, *x, **y: path.endswith('a'))
    io_registry.register_identifier(
        'test2', TestData,
        lambda o, path, fileobj, *x, **y: path.endswith('b'))

    # Now check that we got past the identifier and are trying to get
    # the reader. The io_registry.get_reader will fail but the error message
    # will tell us if the identifier worked.

    filename = tmpdir.join("testfile.a").strpath
    open(filename, 'w').close()
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read(filename)
    assert str(exc.value).startswith(
        "No reader defined for format 'test1' and class 'TestData'")

    filename = tmpdir.join("testfile.b").strpath
    open(filename, 'w').close()
    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read(filename)
    assert str(exc.value).startswith(
        "No reader defined for format 'test2' and class 'TestData'")


def test_write_identifier():

    io_registry.register_identifier('test1', TestData,
                                    lambda o, *x, **y: x[0].startswith('a'))
    io_registry.register_identifier('test2', TestData,
                                    lambda o, *x, **y: x[0].startswith('b'))

    # Now check that we got past the identifier and are trying to get
    # the reader. The io_registry.get_writer will fail but the error message
    # will tell us if the identifier worked.

    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write('abc')
    assert str(exc.value).startswith(
        "No writer defined for format 'test1' and class 'TestData'")

    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write('bac')
    assert str(exc.value).startswith(
        "No writer defined for format 'test2' and class 'TestData'")


def test_identifier_origin():

    io_registry.register_identifier('test1', TestData,
                                    lambda o, *x, **y: o == 'read')
    io_registry.register_identifier('test2', TestData,
                                    lambda o, *x, **y: o == 'write')
    io_registry.register_reader('test1', TestData, empty_reader)
    io_registry.register_writer('test2', TestData, empty_writer)

    # There should not be too many formats defined

    TestData.read()
    TestData().write()

    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData.read(format='test2')
    assert str(exc.value).startswith(
        "No reader defined for format 'test2' and class 'TestData'")

    with pytest.raises(io_registry.IORegistryError) as exc:
        TestData().write(format='test1')
    assert str(exc.value).startswith(
        "No writer defined for format 'test1' and class 'TestData'")


def test_read_valid_return():
    io_registry.register_reader('test', TestData, lambda: TestData())
    t = TestData.read(format='test')
    assert isinstance(t, TestData)


def test_read_invalid_return():
    io_registry.register_reader('test', TestData, lambda: 'spam')
    with pytest.raises(TypeError) as exc:
        TestData.read(format='test')
    assert str(exc.value) == "reader should return a TestData instance"


def test_non_existing_unknown_ext():
    """Raise the correct error when attempting to read a non-existing
    file with an unknown extension."""
    # The return value is irrelevant here; the read itself must raise.
    with pytest.raises(OSError):
        Table.read('non-existing-file-with-unknown.ext')


def test_read_basic_table():
    data = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])),
                    dtype=[(str('A'), int), (str('B'), '|U1')])
    io_registry.register_reader('test', Table, lambda x: Table(x))
    t = Table.read(data, format='test')
    assert t.keys() == ['A', 'B']
    for i in range(3):
        assert t['A'][i] == data['A'][i]
        assert t['B'][i] == data['B'][i]


def test_register_readers_with_same_name_on_different_classes():
    # No errors should be generated if the same name is registered for
    # different objects...but this failed under python3
    io_registry.register_reader('test', TestData, lambda: TestData())
    io_registry.register_reader('test', Table, lambda: Table())
    t = TestData.read(format='test')
    assert isinstance(t, TestData)
    tbl = Table.read(format='test')
    assert isinstance(tbl, Table)


def test_inherited_registration():
    # check that multi-generation inheritance works properly,
    # meaning that a child inherits from parents before
    # grandparents, see astropy/astropy#7156

    class Child1(Table):
        pass

    class Child2(Child1):
        pass

    def _read():
        return Table()

    def _read1():
        return Child1()

    # check that reader gets inherited
    io_registry.register_reader('test', Table, _read)
    assert io_registry.get_reader('test', Child2) is _read

    # check that nearest ancestor is identified
    # (i.e. that the reader for Child2 is the registered method
    # for Child1, and not Table)
    io_registry.register_reader('test', Child1, _read1)
    assert io_registry.get_reader('test', Child2) is _read1


def teardown_function(function):
    # Restore the registries captured at import time.
    _readers.update(_READERS_ORIGINAL)
    _writers.update(_WRITERS_ORIGINAL)
    _identifiers.update(_IDENTIFIERS_ORIGINAL)


class TestSubclass:
    """
    Test using registry with a Table sub-class
    """

    def test_read_table_subclass(self):
        class MyTable(Table):
            pass
        data = ['a b', '1 2']
        mt = MyTable.read(data, format='ascii')
        t = Table.read(data, format='ascii')
        assert np.all(mt == t)
        assert mt.colnames == t.colnames
        assert type(mt) is MyTable

    def test_write_table_subclass(self):
        buffer = StringIO()

        class MyTable(Table):
            pass
        mt = MyTable([[1], [2]], names=['a', 'b'])
        mt.write(buffer, format='ascii')
        assert buffer.getvalue() == os.linesep.join(['a b', '1 2', ''])
"""Tests for raft.log.RaftLog: ordering, add/remove, acking, commits."""
import pytest

from raft import log


def mle(index, term, committed=False, msgid='', msg=None):
    """Build a minimal log-entry dict for tests.

    ``msg`` defaults to a fresh empty dict per call; the previous
    mutable default argument ({}) would have been shared across calls.
    """
    if msg is None:
        msg = {}
    return dict(index=index, term=term, committed=committed,
                msgid=msgid, msg=msg)


def test_le():
    # a's term is greater than b's
    a = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 4)}
    b = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 3)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert ra > rb
    # terms are equal
    a = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 4)}
    b = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 4)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert ra <= rb
    assert rb <= ra
    # terms equal but more commits in b
    a = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 4)}
    b = {1: mle(1, 2), 2: mle(2, 2), 3: mle(3, 4), 4: mle(4, 4)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert rb > ra


def test_dump():
    # A fresh log contains only the sentinel entry at index 0.
    rl = log.RaftLog(None)
    dump = {0: {'term': 0, 'msgid': '', 'committed': True,
                'acked': [], 'msg': {}, 'index': 0}}
    assert rl.dump() == dump


def test_get_max_index_term():
    rl = log.RaftLog(None)
    le = log.logentry(2, 'abcd', {})
    rl.add(le)
    assert rl.get_max_index_term() == (1, 2)
    le = log.logentry(6, 'abcdefg', {})
    rl.add(le)
    assert rl.get_max_index_term() == (2, 6)


def test_has_uuid():
    rl = log.RaftLog(None)
    le = log.logentry(2, 'abcd', {})
    rl.add(le)
    assert rl.has_uuid('abcd') == True
    assert rl.has_uuid('dcba') == False


def test_maxindex():
    rl = log.RaftLog(None)
    assert rl.maxindex() == 0
    le = log.logentry(2, 'abcd', {})
    rl.add(le)
    assert rl.maxindex() == 1
    le = log.logentry(2, 'abcde', {})
    le['index'] = 12
    rl.add(le)
    assert rl.maxindex() == 12
    # NOTE(review): adding a lower explicit index truncates entries
    # above it, so maxindex drops back to 5 -- confirm against RaftLog.
    le = log.logentry(2, 'abcdef', {})
    le['index'] = 5
    rl.add(le)
    assert rl.maxindex() == 5


def test_get():
    rl = log.RaftLog(None)
    assert rl.get(2) == None
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    rl.add(le1)
    rl.add(le2)
    assert rl.get(2) == le2
    assert rl.get_by_uuid('abcd') == le1
    assert rl.get_by_index(1) == le1


def test_get_term():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(4, 'abcde', {})
    rl.add(le1)
    rl.add(le2)
    assert rl.get_term_of(0) == 0
    assert rl.get_term_of(1) == 2
    assert rl.get_term_of(2) == 4


def test_remove():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(4, 'abcde', {})
    rl.add(le1)
    rl.add(le2)
    rl.remove(1)
    assert rl.get_by_uuid('abcd') == None
    assert rl.get_by_index(1) == None


def test_add():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(4, 'abcde', {})
    rl.add(le1)
    rl.add(le2)
    assert le1['index'] == 1
    assert le2['index'] == 2
    # double adds do nothing
    le1_2 = log.logentry(2, 'abcd', {})
    rl.add(le1_2)
    assert 'index' not in le1_2
    assert rl.get_by_uuid('abcd') == le1
    # re-adding at an existing index replaces that entry and everything
    # after it
    le = log.logentry(6, 'xyz', {})
    le['index'] = 1
    rl.add(le)
    assert rl.get_by_uuid('abcd') == None
    assert rl.get_by_uuid('abcde') == None


def test_add_ack():
    rl = log.RaftLog(None)
    le = log.logentry(6, 'xyz', {})
    rl.add(le)
    rl.add_ack(1, 6, 'f')
    assert 'f' in rl.get_by_uuid('xyz')['acked']


def test_num_acked():
    rl = log.RaftLog(None)
    le = log.logentry(6, 'xyz', {})
    rl.add(le)
    assert rl.num_acked(1) == 0
    rl.add_ack(1, 6, 'f')
    assert rl.num_acked(1) == 1
    # double acks don't increase our count
    rl.add_ack(1, 6, 'f')
    assert rl.num_acked(1) == 1
    rl.add_ack(1, 6, 'g')
    assert rl.num_acked(1) == 2


def test_commit():
    rl = log.RaftLog(None)
    le = log.logentry(6, 'xyz', {})
    rl.add(le)
    assert le['committed'] == False
    rl.commit(1, 6)
    assert le['committed'] == True
    # committing with a mismatched term must fail
    with pytest.raises(AssertionError):
        rl.commit(1, 8)


def test_force_commit():
    rl = log.RaftLog(None)
    le = log.logentry(6, 'xyz', {})
    rl.add(le)
    assert le['committed'] == False
    rl.force_commit(1)
    assert le['committed'] == True
    rl.force_commit(5)  # bad indices do nothing


def test_is_committed():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    rl.commit(2, 2)
    assert rl.is_committed(1, 2) == True
    assert rl.is_committed(2, 2) == True
    assert rl.is_committed(2, 1) == False
    assert rl.is_committed(3, 4) == False


def test_committed_by_uuid():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    rl.commit(2, 2)
    assert rl.is_committed_by_uuid('abcd') == True
    assert rl.is_committed_by_uuid('abcdef') == False
    assert rl.is_committed_by_uuid('abcde') == True
    assert rl.is_committed_by_uuid('xyz') == False


def test_logs_after_index():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    assert rl.logs_after_index(1) == {2: le2, 3: le3}


def test_committed_logs_after_index():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    rl.commit(2, 2)
    assert rl.committed_logs_after_index(1) == {2: le2}


def test_get_commit_index():
    rl = log.RaftLog(None)
    assert rl.get_commit_index() == 0
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    assert rl.get_commit_index() == 0
    rl.commit(2, 2)
    assert rl.get_commit_index() == 2
    # even with the sentinel un-committed, the commit index stays 0
    rl = log.RaftLog(None)
    rl.get_by_index(0)['committed'] = False
    assert rl.get_commit_index() == 0


def test_exists():
    rl = log.RaftLog(None)
    le1 = log.logentry(2, 'abcd', {})
    le2 = log.logentry(2, 'abcde', {})
    le3 = log.logentry(4, 'abcdef', {})
    rl.add(le1)
    rl.add(le2)
    rl.add(le3)
    assert rl.exists(0, 0) == True
    assert rl.exists(1, 2) == True
    assert rl.exists(1, 1) == False
    assert rl.exists(3, 4) == True
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """High level operations on graphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import sys import threading import time import numpy as np from six import reraise from tensorflow.contrib.framework import load_variable from tensorflow.contrib.framework.python.ops import ops as contrib_ops from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import monitors as monitors_lib from tensorflow.core.framework import summary_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import resources from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import coordinator from tensorflow.python.training import monitored_session from tensorflow.python.training import queue_runner from tensorflow.python.training import saver as tf_saver from 
tensorflow.python.training import session_manager as session_manager_lib from tensorflow.python.training import summary_io from tensorflow.python.training import supervisor as tf_supervisor from tensorflow.python.util.deprecation import deprecated # Singleton for SummaryWriter per logdir folder. _SUMMARY_WRITERS = {} # Lock protecting _SUMMARY_WRITERS _summary_writer_lock = threading.Lock() _graph_action_deprecation = deprecated( '2017-02-15', 'graph_actions.py will be deleted. Use tf.train.* utilities instead. ' 'You can use learn/estimators/estimator.py as an example.') @_graph_action_deprecation def clear_summary_writers(): """Clear cached summary writers. Currently only used for unit tests.""" return summary_io.SummaryWriterCache.clear() def get_summary_writer(logdir): """Returns single SummaryWriter per logdir in current run. Args: logdir: str, folder to write summaries. Returns: Existing `SummaryWriter` object or new one if never wrote to given directory. """ return summary_io.SummaryWriterCache.get(logdir) def _make_saver(graph, keep_checkpoint_max=5): vars_to_save = (graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) + graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)) if vars_to_save: return tf_saver.Saver(vars_to_save, sharded=True, max_to_keep=keep_checkpoint_max) else: return None def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None): logging.info('Loading model from checkpoint: %s.', checkpoint_path) saver = saver or _make_saver(graph) if saver: saver.restore(session, checkpoint_path) else: logging.info('No variables found in graph, not creating Saver() object.') def _run_with_monitors(session, step, tensors, feed_dict, monitors): """Runs session for given tensors with monitor callbacks.""" for monitor in monitors: tensors += monitor.step_begin(step) tensors = list(set(tensors)) outputs = session.run(tensors, feed_dict=feed_dict) outputs = dict(zip( [t.name if isinstance(t, ops.Tensor) else t for t in tensors], outputs)) 
should_stop = False for monitor in monitors: induce_stop = monitor.step_end(step, outputs) should_stop = should_stop or induce_stop return outputs, should_stop def _monitored_train(graph, output_dir, train_op, loss_op, global_step_tensor=None, init_op=None, init_feed_dict=None, init_fn=None, log_every_steps=10, supervisor_is_chief=True, supervisor_master='', supervisor_save_model_secs=600, supervisor_save_model_steps=None, keep_checkpoint_max=5, keep_checkpoint_every_n_hours=10000.0, supervisor_save_summaries_secs=None, supervisor_save_summaries_steps=100, feed_fn=None, steps=None, fail_on_nan_loss=True, hooks=None, max_steps=None): """Train a model via monitored_session. Given `graph`, a directory to write outputs to (`output_dir`), and some ops, run a training loop. The given `train_op` performs one step of training on the model. The `loss_op` represents the objective function of the training. It is expected to increment the `global_step_tensor`, a scalar integer tensor counting training steps. This function uses `Supervisor` to initialize the graph (from a checkpoint if one is available in `output_dir`), write summaries defined in the graph, and write regular checkpoints as defined by `supervisor_save_model_secs`. Training continues until `global_step_tensor` evaluates to `max_steps`, or, if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the program is terminated with exit code 1. Args: graph: A graph to train. It is expected that this graph is not in use elsewhere. output_dir: A directory to write outputs to. train_op: An op that performs one training step when run. loss_op: A scalar loss tensor. global_step_tensor: A tensor representing the global step. If none is given, one is extracted from the graph using the same logic as in `Supervisor`. init_op: An op that initializes the graph. If `None`, use `Supervisor`'s default. init_feed_dict: A dictionary that maps `Tensor` objects to feed values. 
This feed dictionary will be used when `init_op` is evaluated. init_fn: Optional callable passed to Supervisor to initialize the model. log_every_steps: Output logs regularly. The logs contain timing data and the current loss. A `0` or negative value disables logging. supervisor_is_chief: Whether the current process is the chief supervisor in charge of restoring the model and running standard services. supervisor_master: The master string to use when preparing the session. supervisor_save_model_secs: Save checkpoints every this many seconds. Can not be specified with `supervisor_save_model_steps`. supervisor_save_model_steps: Save checkpoints every this many steps. Can not be specified with `supervisor_save_model_secs`. keep_checkpoint_max: The maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, all checkpoint files are kept. This is simply passed as the max_to_keep arg to `tf.train.Saver` constructor. keep_checkpoint_every_n_hours: In addition to keeping the most recent `keep_checkpoint_max` checkpoint files, you might want to keep one checkpoint file for every N hours of training. This can be useful if you want to later analyze how a model progressed during a long training session. For example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep one checkpoint file for every 2 hours of training. The default value of 10,000 hours effectively disables the feature. supervisor_save_summaries_secs: Save summaries every `supervisor_save_summaries_secs` seconds when training. supervisor_save_summaries_steps: Save summaries every `supervisor_save_summaries_steps` steps when training. Exactly one of `supervisor_save_model_steps` and `supervisor_save_model_secs` should be specified, and the other should be None. feed_fn: A function that is called every iteration to produce a `feed_dict` passed to `session.run` calls. Optional. steps: Trains for this many steps (e.g. current global step + `steps`). 
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op` evaluates to `NaN`. If false, continue training as if nothing happened. hooks: List of `SessionRunHook` subclass instances. Used for callbacks inside the training loop. max_steps: Number of total steps for which to train model. If `None`, train forever. Two calls fit(steps=100) means 200 training iterations. On the other hand two calls of fit(max_steps=100) means, second call will not do any iteration since first call did all 100 steps. Returns: The final loss value. Raises: ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor` is not provided. See `tf.contrib.framework.get_global_step` for how we look up the latter if not provided explicitly. NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever evaluates to `NaN`. ValueError: If both `steps` and `max_steps` are not `None`. """ if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') if not output_dir: raise ValueError('Output directory should be non-empty %s.' % output_dir) if train_op is None: raise ValueError('Missing train_op.') if loss_op is None: raise ValueError('Missing loss_op.') if hooks is None: hooks = [] if not isinstance(hooks, list): raise ValueError('Hooks should be a list.') with graph.as_default(): global_step_tensor = contrib_variables.assert_or_get_global_step( graph, global_step_tensor) if global_step_tensor is None: raise ValueError('No "global_step" was provided or found in the graph.') if max_steps is not None: try: start_step = load_variable(output_dir, global_step_tensor.name) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return None except: # pylint: disable=bare-except pass # Adapted SessionRunHooks such as ExportMonitor depend on the # CheckpointSaverHook to be executed before they should be executed. 
# The `hooks` param comprises of deprecated monitor hooks # (such as ExportMonitor). Appending them after the basic_session_run_hooks. all_hooks = [] with graph.as_default(): all_hooks.append(basic_session_run_hooks.NanTensorHook( loss_op, fail_on_nan_loss=fail_on_nan_loss)) if log_every_steps > 0: all_hooks.append(basic_session_run_hooks.LoggingTensorHook({ 'loss': loss_op.name, 'step': global_step_tensor.name }, every_n_iter=log_every_steps)) def make_saver(): return tf_saver.Saver( sharded=True, max_to_keep=keep_checkpoint_max, keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, defer_build=True) scaffold = monitored_session.Scaffold( init_op=init_op, init_feed_dict=init_feed_dict, init_fn=init_fn, saver=monitored_session.Scaffold.get_or_default('saver', ops.GraphKeys.SAVERS, make_saver)) if not supervisor_is_chief: session_creator = monitored_session.WorkerSessionCreator( scaffold=scaffold, master=supervisor_master) else: session_creator = monitored_session.ChiefSessionCreator( scaffold=scaffold, checkpoint_dir=output_dir, master=supervisor_master) summary_writer = summary_io.SummaryWriterCache.get(output_dir) all_hooks.append( basic_session_run_hooks.StepCounterHook( summary_writer=summary_writer)) all_hooks.append( basic_session_run_hooks.SummarySaverHook( save_secs=supervisor_save_summaries_secs, save_steps=supervisor_save_summaries_steps, summary_writer=summary_writer, scaffold=scaffold)) if (supervisor_save_model_secs is not None or supervisor_save_model_steps is not None): all_hooks.append( basic_session_run_hooks.CheckpointSaverHook( output_dir, save_secs=supervisor_save_model_secs, save_steps=supervisor_save_model_steps, scaffold=scaffold)) if steps is not None or max_steps is not None: all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps)) all_hooks.extend(hooks) with monitored_session.MonitoredSession( session_creator=session_creator, hooks=all_hooks) as super_sess: loss = None while not super_sess.should_stop(): _, loss = 
super_sess.run([train_op, loss_op], feed_fn() if feed_fn else None)
  # Drop any cached summary writers so a subsequent graph action starts fresh.
  summary_io.SummaryWriterCache.clear()
  return loss


@_graph_action_deprecation
def train(graph,
          output_dir,
          train_op,
          loss_op,
          global_step_tensor=None,
          init_op=None,
          init_feed_dict=None,
          init_fn=None,
          log_every_steps=10,
          supervisor_is_chief=True,
          supervisor_master='',
          supervisor_save_model_secs=600,
          keep_checkpoint_max=5,
          supervisor_save_summaries_steps=100,
          feed_fn=None,
          steps=None,
          fail_on_nan_loss=True,
          monitors=None,
          max_steps=None):
  """Train a model.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on
  the model. The `loss_op` represents the objective function of the training.
  It is expected to increment the `global_step_tensor`, a scalar integer
  tensor counting training steps. This function uses `Supervisor` to
  initialize the graph (from a checkpoint if one is available in
  `output_dir`), write summaries defined in the graph, and write regular
  checkpoints as defined by `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or,
  if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is
      given, one is extracted from the graph using the same logic as in
      `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and
      the current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor
      in charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save a checkpoint every
      `supervisor_save_model_secs` seconds when training.
    keep_checkpoint_max: The maximum number of recent checkpoint files to
      keep. As new files are created, older files are deleted. If None or 0,
      all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to tf.train.Saver constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
    feed_fn: A function that is called every iteration to produce a
      `feed_dict` passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls fit(steps=100) means 200 training iterations.
      On the other hand two calls of fit(max_steps=100) means, second call
      will not do any iteration since first call did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or
      `global_step_tensor` is not provided. See
      `tf.contrib.framework.get_global_step` for how we look up the latter if
      not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  # Retry loop: a parameter-server restart surfaces as AbortedError; training
  # is simply restarted from the last checkpoint in that case.
  while True:
    try:
      return _train_internal(graph, output_dir, train_op, loss_op,
                             global_step_tensor, init_op, init_feed_dict,
                             init_fn, log_every_steps, supervisor_is_chief,
                             supervisor_master, supervisor_save_model_secs,
                             keep_checkpoint_max,
                             supervisor_save_summaries_steps, feed_fn, steps,
                             fail_on_nan_loss, monitors, max_steps)
    except errors.AbortedError:
      # Happens when PS restarts, keep training.
      logging.warning('Training got Aborted error. Keep training.')


def _train_internal(graph, output_dir, train_op, loss_op, global_step_tensor,
                    init_op, init_feed_dict, init_fn, log_every_steps,
                    supervisor_is_chief, supervisor_master,
                    supervisor_save_model_secs, keep_checkpoint_max,
                    supervisor_save_summaries_steps, feed_fn, steps,
                    fail_on_nan_loss, monitors, max_steps):
  """See train."""
  # Argument validation mirrors the public `train` contract.
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  with graph.as_default():
    # Resolve (or validate) the global step tensor inside the target graph.
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step from the latest checkpoint, if any; otherwise start
    # from step 0.
    try:
      start_step = load_variable(output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    # Only the chief writes summaries.
    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune list of monitor to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors
                  if monitor.run_on_all_workers]

    # Translate relative `steps` into an absolute `max_steps` target.
    if max_steps is None:
      max_steps = (start_step + steps) if steps else None
    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

    supervisor = tf_supervisor.Supervisor(
        graph,
        init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
        init_feed_dict=init_feed_dict,
        is_chief=supervisor_is_chief,
        logdir=output_dir,
        saver=_make_saver(graph, keep_checkpoint_max),
        global_step=global_step_tensor,
        # summary_op=None disables the Supervisor's own summary service; the
        # monitors handle summaries instead.
        summary_op=None,
        summary_writer=summary_writer,
        save_model_secs=supervisor_save_model_secs,
        init_fn=init_fn)
    session = supervisor.PrepareSession(master=supervisor_master,
                                        start_standard_services=True)
    supervisor.StartQueueRunners(session)

    with session:
      get_current_step = lambda: session.run(global_step_tensor)

      start_step = get_current_step()
      last_step = start_step
      last_log_step = start_step
      loss_value = None
      logging.info('Training steps [%d,%s)', last_step,
                   'inf' if max_steps is None else str(max_steps))

      # Holds exception info from the training loop so a final checkpoint can
      # be attempted before re-raising.
      excinfo = None
      try:
        while not supervisor.ShouldStop() and ((max_steps is None) or
                                               (last_step < max_steps)):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None

          # Runs the train/loss ops plus all monitor callbacks for this step.
          outputs, should_stop = _run_with_monitors(
              session, last_step + 1, [train_op, loss_op], feed_dict, monitors)

          loss_value = outputs[loss_op.name]
          if np.isnan(loss_value):
            failure_message = 'Model diverged with loss = NaN.'
            if fail_on_nan_loss:
              logging.error(failure_message)
              raise monitors_lib.NanLossDuringTrainingError()
            else:
              logging.warning(failure_message)

          if should_stop:
            break

          this_step = get_current_step()

          if this_step <= last_step:
            # train_op is expected to increment the global step; warn loudly
            # if it did not.
            logging.error(
                'Global step was not incremented by train op at step %s'
                ': new step %d', last_step, this_step)

          last_step = this_step
          is_last_step = (max_steps is not None) and (last_step >= max_steps)
          if is_last_step or (last_step - last_log_step >= log_every_steps):
            logging.info(
                'training step %d, loss = %.5f (%.3f sec/batch).',
                last_step, loss_value, float(time.time() - start_time))
            last_log_step = last_step
      except errors.OutOfRangeError as e:
        logging.warn('Got exception during tf.learn training loop possibly '
                     'due to exhausted input queue %s.', e)
      except StopIteration:
        # NOTE(review): 'iterarator' typo is in the original log message;
        # left unchanged because it is a runtime string.
        logging.info('Exhausted input iterarator.')
      except BaseException as e:  # pylint: disable=broad-except
        # Hold on to any other exceptions while we try recording a final
        # checkpoint and summary.
        excinfo = sys.exc_info()
      finally:
        try:
          # Call supervisor.Stop() from within a try block because it
          # re-raises exceptions thrown by the supervised threads.
          supervisor.Stop(close_summary_writer=False)

          # Save one last checkpoint and summaries
          # TODO(wicke): This should be handled by Supervisor

          # In case we encountered an exception in the try block before we
          # updated last_step, update it here (again).
          last_step = get_current_step()
          if supervisor_is_chief:
            ckpt_path = supervisor.save_path
            logging.info('Saving checkpoint for step %d to checkpoint: %s.',
                         last_step, ckpt_path)
            supervisor.saver.save(session, ckpt_path, global_step=last_step)

          # Finish monitors.
          for monitor in monitors:
            monitor.end()

        # catch OutOfRangeError which is thrown when queue is out of data
        # (and for other reasons as well).
        except errors.OutOfRangeError as e:
          logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
                       'due to exhausted input queue. Note: summary_op is not '
                       'expected to trigger dequeues. %s.', e)
        except BaseException as e:  # pylint: disable=broad-except
          # If we don't already have an exception to re-raise, raise this one.
          if not excinfo:
            raise
          # Otherwise, log this one and raise the other in the finally block.
          logging.error('Got exception during tf.learn final checkpoint %s.',
                        e)
        finally:
          if excinfo:
            reraise(*excinfo)
  return loss_value


def _get_first_op_from_collection(collection_name):
  """Returns the first op in the named graph collection, or None if empty."""
  elements = ops.get_collection(collection_name)
  if elements:
    return elements[0]
  return None


def _get_saver():
  """Lazy init and return saver."""
  saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if saver is None and variables.global_variables():
    # Create a default Saver and memoize it in the SAVERS collection.
    saver = tf_saver.Saver()
    ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver


def _get_ready_op():
  """Returns (creating and caching if needed) the graph's readiness op."""
  ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
  if ready_op is None:
    ready_op = variables.report_uninitialized_variables()
    ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
  return ready_op


def _get_local_init_op():
  """Returns (creating and caching if needed) the local-init op."""
  local_init_op = _get_first_op_from_collection(
      ops.GraphKeys.LOCAL_INIT_OP)
  if local_init_op is None:
    # Group local-variable and table initializers into a single op.
    op_list = [variables.local_variables_initializer(),
               data_flow_ops.tables_initializer()]
    if op_list:
      local_init_op = control_flow_ops.group(*op_list)
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  return local_init_op


def _eval_results_to_str(eval_results):
  """Formats an eval-results dict as a sorted 'k = v, ...' string."""
  return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))


def _write_summary_results(output_dir, eval_results, current_global_step):
  """Writes eval results into summary file in given dir."""
  logging.info('Saving evaluation summary for step %d: %s',
               current_global_step, _eval_results_to_str(eval_results))
  summary_writer = get_summary_writer(output_dir)
  summary = summary_pb2.Summary()
  for key in eval_results:
    if eval_results[key] is None:
      continue
    value = summary.value.add()
    value.tag = key
    # Only scalar floats are representable as simple summary values.
    if (isinstance(eval_results[key], np.float32) or
        isinstance(eval_results[key], float)):
      value.simple_value = float(eval_results[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary, current_global_step)
  summary_writer.flush()


@_graph_action_deprecation
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             update_op=None,
             global_step_tensor=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a
  checkpoint to restore variables from, and a `dict` of `Tensor`s to evaluate,
  run an eval loop for `max_steps` steps, or until an exception (generally,
  an end-of-input signal from a reader operation) is raised from running
  `eval_dict`.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated,
  and every `log_every_steps` steps, they are logged. At the very end of
  evaluation, a summary is evaluated (finding the summary ops using
  `Supervisor`'s logic) and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate. It is
      evaluated in every logging step. The result of the final evaluation is
      returned. If `update_op` is None, then it's evaluated in every step. If
      `max_steps` is `None`, this should depend on a reader that will raise an
      end-of-input exception when the inputs are exhausted.
    update_op: A `Tensor` which is run in every step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a
      `feed_dict` passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the result of running eval_dict in the last step. `None` if no
      eval steps were run.
    global_step: The global step this evaluation corresponds to.

  Raises:
    ValueError: if `output_dir` is empty.
  """
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)

    # Create or get summary op, global_step and saver.
    saver = _get_saver()
    local_init_op = _get_local_init_op()
    ready_for_local_init_op = _get_first_op_from_collection(
        ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
    ready_op = _get_ready_op()

    session_manager = session_manager_lib.SessionManager(
        local_init_op=local_init_op,
        ready_op=ready_op,
        ready_for_local_init_op=ready_for_local_init_op)
    # recover_session returns (session, initialized); `initialized` is False
    # when no usable checkpoint was found.
    session, initialized = session_manager.recover_session(
        master=supervisor_master,
        saver=saver,
        checkpoint_dir=checkpoint_path)

    # Start queue runners.
    coord = coordinator.Coordinator()
    threads = queue_runner.start_queue_runners(session, coord)

  with session:
    if not initialized:
      logging.warning('Failed to initialize from %s.', checkpoint_path)
      # TODO(ipolosukhin): This should be failing, but old code relies on
      # that.
      session.run(variables.global_variables_initializer())
      if checkpoint_path:
        _restore_from_checkpoint(session, graph, checkpoint_path, saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
  # Save summaries for this evaluation.
  _write_summary_results(output_dir, eval_results, current_global_step)

  return eval_results, current_global_step


@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
  """Run `output_dict` tensors `n` times, with the same `feed_dict` each run.

  Args:
    output_dict: A `dict` mapping string names to tensors to run. Must all be
      from the same graph.
    feed_dict: `dict` of input values to feed each run.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.
    n: Number of times to repeat.

  Returns:
    A list of `n` `dict` objects, each containing values read from
    `output_dict` tensors.
  """
  return run_feeds(
      output_dict=output_dict,
      feed_dicts=itertools.repeat(feed_dict, n),
      restore_checkpoint_path=restore_checkpoint_path)


@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
  """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint.
  Otherwise, init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Tensor` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  if not output_dict:
    raise ValueError('output_dict is invalid: %s.' % output_dict)
  if not feed_dicts:
    raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)

  # Infer the graph from the requested output tensors.
  graph = contrib_ops.get_graph_from_inputs(output_dict.values())
  with graph.as_default() as g:
    with tf_session.Session('') as session:
      session.run(
          resources.initialize_resources(resources.shared_resources() +
                                         resources.local_resources()))
      if restore_checkpoint_path:
        _restore_from_checkpoint(session, g, restore_checkpoint_path)
      else:
        session.run(variables.global_variables_initializer())
        session.run(variables.local_variables_initializer())
      # Table initializers run regardless of checkpoint restoration.
      session.run(data_flow_ops.tables_initializer())
      coord = coordinator.Coordinator()
      threads = None
      try:
        threads = queue_runner.start_queue_runners(session, coord=coord)
        for f in feed_dicts:
          yield session.run(output_dict, f)
      finally:
        coord.request_stop()
        if threads:
          coord.join(threads, stop_grace_period_secs=120)


@_graph_action_deprecation
def run_feeds(*args, **kwargs):
  """See run_feeds_iter(). Returns a `list` instead of an iterator."""
  return list(run_feeds_iter(*args, **kwargs))


@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
  """Restore graph from `restore_checkpoint_path` and run `output_dict`
  tensors.

  If `restore_checkpoint_path` is supplied, restore from checkpoint.
  Otherwise, init all variables.

  Args:
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dict: `dict` object mapping `Tensor` objects to input values to feed.

  Returns:
    Dict of values read from `output_dict` tensors. Keys are the same as
    `output_dict`, values are the results read from the corresponding `Tensor`
    in `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  # Single-element run: delegate to run_feeds and take the first result.
  return run_feeds(output_dict=output_dict,
                   feed_dicts=[feed_dict] if feed_dict is not None else [None],
                   restore_checkpoint_path=restore_checkpoint_path)[0]
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import shutil
import tempfile
import threading

from absl.testing import parameterized
import numpy as np

from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest


def _create_simple_recurrent_keras_model(input_shape):
  """Create a simple tf.keras model containing a recurrent layer for testing."""
  model = models.Sequential()
  # Zero initializers keep the model's outputs deterministic for assertions.
  model.add(recurrent_v2.LSTM(
      10,
      input_shape=input_shape,
      kernel_initializer="zeros",
      recurrent_initializer="zeros"))
  model.add(core.Dense(1, kernel_initializer="zeros"))
  model.compile(loss="mse", optimizer="sgd")
  return model


class TracingCallbackTest(
    dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
  """Tests for the tfdbg v2 dumping callback (enable_dump_debug_info)."""

  def setUp(self):
    super(TracingCallbackTest, self).setUp()
    # Each test gets a fresh temporary dump directory.
    self.dump_root = tempfile.mkdtemp()

  def tearDown(self):
    if os.path.isdir(self.dump_root):
      shutil.rmtree(self.dump_root, ignore_errors=True)
    # Always disable the callback so state does not leak across tests.
    dumping_callback.disable_dump_debug_info()
    super(TracingCallbackTest, self).tearDown()

  def testInvalidTensorDebugModeCausesError(self):
    with self.assertRaisesRegexp(
        ValueError,
        r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
        r"Valid options.*NO_TENSOR.*"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode="NONSENSICAL")

  def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
    # Disabling without a prior enable must be a silent no-op.
    dumping_callback.disable_dump_debug_info()

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testPureEagerOpExecution(self, tensor_debug_mode):
    """Test catching Infinity in eager op execution: float32."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    x = constant_op.constant(10.0)
    zero = constant_op.constant(0.0)
    one = constant_op.constant(1.0)
    two = constant_op.constant(2.0)
    three = constant_op.constant(3.0)
    # Use Collatz conjecture as a test case.
    while x > one:
      if math_ops.equal(x % two, zero):
        x = x / two
      else:
        x = x * three + one

    writer.FlushNonExecutionFiles()
    self._readAndCheckMetadataFile()
    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()

    # Before FlushExecutionFiles() is called, the .execution file should be
    # empty.
    reader = debug_events_reader.DebugEventsReader(self.dump_root)
    execution_iter = reader.execution_iterator()
    with self.assertRaises(StopIteration):
      next(execution_iter)

    # After the flushing, the .execution file should hold the appropriate
    # contents.
    writer.FlushExecutionFiles()
    execution_iter = reader.execution_iterator()
    prev_wall_time = 1
    executed_op_types = []
    tensor_values = collections.defaultdict(lambda: [])
    for debug_event in execution_iter:
      # Wall times must be monotonically non-decreasing across events.
      self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
      prev_wall_time = debug_event.wall_time
      execution = debug_event.execution
      executed_op_types.append(execution.op_type)
      self.assertTrue(execution.input_tensor_ids)
      self.assertTrue(execution.output_tensor_ids)
      if tensor_debug_mode == "NO_TENSOR":
        # Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
        # be empty.
        self.assertFalse(execution.tensor_protos)
      elif tensor_debug_mode == "FULL_TENSOR":
        # Under the FULL_TENSOR mode, the value of the tensor should be
        # available through `tensor_protos`.
        tensor_value = float(
            tensor_util.MakeNdarray(execution.tensor_protos[0]))
        tensor_values[execution.op_type].append(tensor_value)

      # Verify the code_location field.
      self.assertTrue(execution.code_location.stack_frame_ids)
      for stack_frame_id in execution.code_location.stack_frame_ids:
        self.assertIn(stack_frame_id, stack_frame_by_id)

    if tensor_debug_mode == "FULL_TENSOR":
      # Greater yields booleans (as 1/0); the other ops yield the Collatz
      # intermediate values.
      self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
      self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
      self.assertAllClose(tensor_values["Mul"], [15])
      self.assertAllClose(tensor_values["AddV2"], [16])

    self.assertEqual(
        executed_op_types,
        [
            "Greater", "FloorMod", "Equal", "RealDiv",  # 10 --> 5
            "Greater", "FloorMod", "Equal", "Mul", "AddV2",  # 5 --> 16
            "Greater", "FloorMod", "Equal", "RealDiv",  # 16 --> 8
            "Greater", "FloorMod", "Equal", "RealDiv",  # 8 --> 4
            "Greater", "FloorMod", "Equal", "RealDiv",  # 4 --> 2
            "Greater", "FloorMod", "Equal", "RealDiv",  # 2 --> 1
            "Greater"
        ])

    # Due to the pure eager op execution, the .graph file and the
    # .graph_execution_traces file ought to be empty.
    graphs_iterator = reader.graphs_iterator()
    with self.assertRaises(StopIteration):
      next(graphs_iterator)
    graph_trace_iter = reader.graph_execution_traces_iterator()
    with self.assertRaises(StopIteration):
      next(graph_trace_iter)

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
    """Dumping of a tf.function that calls another tf.function."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)

    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))

    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    if context.executing_eagerly():
      # NOTE(b/142486213): Execution of the TF function happens with
      # Session.run() in v1 graph mode, so doesn't get logged to the
      # .execution file.
      executed_op_types, _, _, _, _ = self._readAndCheckExecutionFile()
      executed_op_types = [op_type for op_type in executed_op_types
                           if "sin1p_log_sum" in op_type]
      self.assertLen(executed_op_types, 1)

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (context_ids, op_types,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    self.assertIn("AddV2", op_types)
    self.assertIn("Log", op_types)
    self.assertIn("Sin", op_types)

    (op_names, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2", "Sin"])

    if tensor_debug_mode == "NO_TENSOR":
      # Under the default NO_TENSOR tensor-debug mode, the tensor_proto
      # ought to be an empty float32 tensor.
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))
    elif tensor_debug_mode == "FULL_TENSOR":
      self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
      self.assertAllClose(tensor_values[1], np.log(5.0))  # Log op.
      self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
      self.assertAllClose(tensor_values[3],
                          np.sin(np.log(5.0) + 1.0))  # Sin op.
@parameterized.named_parameters( ("AddV2", "AddV2"), ("Log", "Log"), ("AddV2AndLog", "(AddV2|Log)"), ) @test_util.run_in_graph_and_eager_modes def testOpRegex(self, op_regex): writer = dumping_callback.enable_dump_debug_info( self.dump_root, tensor_debug_mode="FULL_TENSOR", op_regex=op_regex) @def_function.function def log_sum(x, y): return math_ops.log(x + y) @def_function.function def sin1p_log_sum(x, y): return math_ops.sin(1.0 + log_sum(x, y)) x = constant_op.constant(2.0) y = constant_op.constant(3.0) self.assertAllClose( self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0))) writer.FlushNonExecutionFiles() writer.FlushExecutionFiles() stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames() (context_ids, op_types, op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id) self.assertIn("AddV2", op_types) self.assertIn("Log", op_types) self.assertIn("Sin", op_types) (op_names, _, _, tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids) executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names] if op_regex == "AddV2": self.assertEqual(executed_op_types, ["AddV2", "AddV2"]) self.assertLen(tensor_values, 2) self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op. self.assertAllClose(tensor_values[1], np.log(5.0) + 1.0) # 2nd AddV2 op. elif op_regex == "Log": self.assertEqual(executed_op_types, ["Log"]) self.assertLen(tensor_values, 1) self.assertAllClose(tensor_values[0], np.log(5.0)) # Log op. else: # "(AddV2|Log)" self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"]) self.assertLen(tensor_values, 3) self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op. self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op. self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op. 
  def testIncorrectTensorDTypeArgFormatLeadsToError(self):
    """Invalid `tensor_dtypes` argument formats raise ValueError/TypeError."""
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
      dumping_callback.enable_dump_debug_info(self.dump_root,
                                              tensor_dtypes=dict())
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*"):
      dumping_callback.enable_dump_debug_info(self.dump_root,
                                              tensor_dtypes="float32")
    with self.assertRaisesRegexp(
        ValueError,
        r".*expected.*list.*tuple.*callable.*but received.*"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_dtypes=dtypes.float32)
    # A list may hold dtypes or a single callable, but not multiple callables.
    with self.assertRaises(TypeError):
      dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
          lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])

  @parameterized.named_parameters(
      ("float", [dtypes.float32], None),
      ("float_only_sum", ["float32"], "Sum"),
      ("float_no_sum", (dtypes.float32,), "(?!Sum)"),
      ("int", [dtypes.int32], None),
      ("int_via_lambda", lambda dtype: dtype.is_integer, None),
      ("exclude_Sum", None, "(?!Sum)"),
      ("All", None, None),
  )
  @test_util.run_in_graph_and_eager_modes
  def testTensorDTypesAndOpRegexFilters(self, tensor_dtypes, op_regex):
    """`tensor_dtypes` and `op_regex` filters combine to select traced ops."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root,
        tensor_debug_mode="FULL_TENSOR",
        tensor_dtypes=tensor_dtypes,
        op_regex=op_regex)

    @def_function.function
    def unique_sum(xs):
      """Sum over the unique values, for testing."""
      unique_xs, indices = array_ops.unique(xs)
      return math_ops.reduce_sum(unique_xs), indices

    xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
    y, indices = self.evaluate(unique_sum(xs))
    self.assertAllClose(y, 17.)
    self.assertAllEqual(indices, [0, 1, 2, 3, 0])

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (context_ids, _,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    (op_names, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]

    # The Unique op has a float32 output (values) and an int32 output
    # (indices); Sum has only a float32 output. Which traces are dumped
    # depends on the dtype/regex filter combination for each parameterized
    # case below.
    if tensor_dtypes == [dtypes.float32] and not op_regex:
      self.assertEqual(executed_op_types, ["Unique", "Sum"])
      self.assertLen(tensor_values, 2)
      self.assertAllClose(tensor_values[0], [2., 6., 8., 1.])  # Unique values.
      self.assertAllClose(tensor_values[1], 17.)  # Sum.
    elif tensor_dtypes == ["float32"] and op_regex == "Sum":
      self.assertEqual(executed_op_types, ["Sum"])
      self.assertLen(tensor_values, 1)
      self.assertAllClose(tensor_values[0], 17.)  # Sum.
    elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
      self.assertEqual(executed_op_types, ["Unique"])
      self.assertLen(tensor_values, 1)
      self.assertAllClose(tensor_values[0], [2., 6., 8., 1.])  # Unique values.
    elif tensor_dtypes == [dtypes.int32] and not op_regex:
      self.assertEqual(executed_op_types, ["Unique"])
      self.assertLen(tensor_values, 1)
      self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
    elif callable(tensor_dtypes) and not op_regex:
      self.assertEqual(executed_op_types, ["Unique"])
      self.assertLen(tensor_values, 1)
      self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
    elif not tensor_dtypes and op_regex == "(?!Sum)":
      self.assertEqual(executed_op_types, ["Unique", "Unique"])
      self.assertLen(tensor_values, 2)
      self.assertAllClose(tensor_values[0], [2., 6., 8., 1.])  # Unique values.
      self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
    else:  # "All".
      self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
      self.assertLen(tensor_values, 3)
      self.assertAllClose(tensor_values[0], [2., 6., 8., 1.])  # Unique values.
      self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
      self.assertAllClose(tensor_values[2], 17.)  # Sum.

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
    """Dumping instruments a tf.function containing a while loop."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def iterative_doubling(x, times):
      i = constant_op.constant(0, dtype=dtypes.int32)
      while i < times:
        x = x * 2.0
        i += 1
      return x

    x = constant_op.constant(0.5, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)

    writer.FlushNonExecutionFiles()
    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()

    # Verify the content of the .graphs file.
    context_ids, op_types, op_name_to_op_type, _ = (
        self._readAndCheckGraphsFile(stack_frame_by_id))
    self.assertIn("Less", op_types)
    self.assertIn("Mul", op_types)
    self.assertIn("AddV2", op_types)

    # Before FlushExecutionFiles() is called, the .execution and
    # .graph_execution_traces files should be both empty.
    reader = debug_events_reader.DebugEventsReader(self.dump_root)
    execution_iter = reader.execution_iterator()
    graph_execution_traces_iter = reader.graph_execution_traces_iterator()
    with self.assertRaises(StopIteration):
      next(execution_iter)
    with self.assertRaises(StopIteration):
      next(graph_execution_traces_iter)

    # TODO(cais): Backport execution instrumentation to tf.Session.
    writer.FlushExecutionFiles()
    # After the flushing, the .execution file should hold the appropriate
    # contents.
    if context.executing_eagerly():
      (executed_op_types, input_tensor_ids, output_tensor_ids,
       tensor_debug_modes, tensor_values) = self._readAndCheckExecutionFile()
      # NOTE(b/142486213): Execution of the TF function happens with
      # Session.run() in v1 graph mode, hence it doesn't get logged to the
      # .execution file.
      self.assertLen(executed_op_types, 1)
      self.assertIn("iterative_doubling", executed_op_types[0])
      self.assertLen(input_tensor_ids[0], 2)
      self.assertLen(output_tensor_ids[0], 1)
      self.assertEqual(tensor_debug_modes[0],
                       debug_event_pb2.TensorDebugMode.Value(tensor_debug_mode))
      if tensor_debug_mode == "FULL_TENSOR":
        self.assertAllClose(tensor_values, [[8.0]])

    (op_names, _, output_slots,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    # The Less op should have been executed 5 times.
    self.assertEqual(executed_op_types.count("Less"), 5)
    # The last executed op should be Less.
    self.assertEqual(executed_op_types[-1], "Less")
    # The Mul op should have been executed 4 times.
    self.assertEqual(executed_op_types.count("Mul"), 4)
    # The AddV2 op should have been run, but we refrain from asserting on how
    # many times it's executed.
    self.assertIn("AddV2", executed_op_types)
    for output_slot in output_slots:
      self.assertEqual(output_slot, 0)
    if tensor_debug_mode == "NO_TENSOR":
      # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
      # be an empty float32 tensor.
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))
    elif tensor_debug_mode == "FULL_TENSOR":
      # Loop-carried values: 0.5 doubled 4 times, with the Less op flipping to
      # False on the final iteration.
      less_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Less"
      ]
      self.assertAllClose(less_values, [True, True, True, True, False])
      mul_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Mul"
      ]
      self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])

  def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
    """Repeated enable calls with one dump root don't duplicate dumps."""
    dumping_callback.enable_dump_debug_info(self.dump_root)
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
      array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    reader = debug_events_reader.DebugEventsReader(self.dump_root)
    execution_iter = reader.execution_iterator()
    # Exactly two Unique executions should have been recorded, not four.
    for _ in range(2):
      debug_event = next(execution_iter)
      self.assertGreater(debug_event.wall_time, 0)
      execution = debug_event.execution
      self.assertEqual(execution.op_type, "Unique")
      self.assertEqual(execution.num_outputs, 2)
      self.assertTrue(execution.code_location)
    with self.assertRaises(StopIteration):
      next(execution_iter)

  def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
    """A second enable call with a new dump root redirects all dumping."""
    dumping_callback.enable_dump_debug_info(self.dump_root)
    new_dump_root = self.dump_root + "_new_dump_root"
    writer = dumping_callback.enable_dump_debug_info(new_dump_root)

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
      array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    reader = debug_events_reader.DebugEventsReader(new_dump_root)
    execution_iter = reader.execution_iterator()
    for _ in range(2):
      debug_event = next(execution_iter)
      self.assertGreater(debug_event.wall_time, 0)
      execution = debug_event.execution
      self.assertEqual(execution.op_type, "Unique")
      self.assertEqual(execution.num_outputs, 2)
      self.assertTrue(execution.code_location)
    with self.assertRaises(StopIteration):
      next(execution_iter)

    old_dump_root_reader = debug_events_reader.DebugEventsReader(self.dump_root)
    execution_iter = old_dump_root_reader.execution_iterator()
    # The old dump root shouldn't have been written to.
    with self.assertRaises(StopIteration):
      next(execution_iter)

  def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
    """Re-enabling with a different tensor-debug mode is not honored.

    The previously-configured mode stays in effect and the second call
    raises a ValueError.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def add_1_divide_by_2(x):
      return (x + 1.0) / 2.0

    self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    context_ids, _, _, _ = self._readAndCheckGraphsFile(stack_frame_by_id)
    _, _, _, _, tensor_values = self._readAndCheckExecutionFile()
    self.assertEqual(tensor_values, [[]])
    (_, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    self.assertLen(tensor_values, 2)
    # NO_TENSOR mode: traces carry empty float32 tensors.
    for tensor_value in tensor_values:
      self.assertEqual(tensor_value.dtype, np.float32)
      self.assertEqual(tensor_value.shape, (0,))

    with self.assertRaisesRegexp(
        ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode="FULL_TENSOR")

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testDisableTracingWorks(self, tensor_debug_mode):
    """disable_dump_debug_info() stops all further dumping."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    dumping_callback.disable_dump_debug_info()

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
      array_ops.unique(x)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    reader = debug_events_reader.DebugEventsReader(self.dump_root)
    source_files_iter = reader.source_files_iterator()
    stack_frames_iter = reader.stack_frames_iterator()
    execution_iter = reader.execution_iterator()
    # No source-file, stack-frame or execution data should have been dumped.
    with self.assertRaises(StopIteration):
      next(source_files_iter)
    with self.assertRaises(StopIteration):
      next(stack_frames_iter)
    with self.assertRaises(StopIteration):
      next(execution_iter)

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
    """Dumping from multiple threads using the same setting."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    x = variables.Variable(10.0, dtype=dtypes.float32)
    y = variables.Variable(3.0, dtype=dtypes.float32)

    @def_function.function
    def increase_x():
      return x.assign_add(y * 2.0)

    increase_x()

    num_threads = 3
    threads = []
    for _ in range(num_threads):
      threads.append(threading.Thread(target=increase_x))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    # 10 --> 16 --> 22 --> 28 --> 34.
    self.assertAllClose(x.read_value(), 34.0)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    reader = debug_events_reader.DebugEventsReader(self.dump_root)
    execution_iter = reader.execution_iterator()
    # Wall times must be monotonically non-decreasing across events.
    prev_wall_time = 1
    for debug_event in execution_iter:
      self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
      prev_wall_time = debug_event.wall_time

    (context_ids, _,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    (op_names, _, output_slots,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    # One initial call plus num_threads concurrent calls.
    self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
    self.assertEqual(
        executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
    for output_slot in output_slots:
      self.assertEqual(output_slot, 0)
    if tensor_debug_mode == "NO_TENSOR":
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))
    elif tensor_debug_mode == "FULL_TENSOR":
      mul_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Mul"
      ]
      self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])

  def testMultiThreadedDumpingWithDifferentSettings(self):
    """Two threads dumping to different roots don't interfere."""
    dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
    dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
    v1 = variables.Variable(10.0, dtype=dtypes.float32)
    v2 = variables.Variable(3.0, dtype=dtypes.float32)

    def add_negative_v1_squared_to_itself():
      writer = dumping_callback.enable_dump_debug_info(
          dump_root_1, tensor_debug_mode="FULL_TENSOR")
      # Run in a loop to facilitate interleaving between threads.
      for _ in range(3):
        v1.assign_add(-(v1 ** 2.0))
      writer.FlushNonExecutionFiles()
      writer.FlushExecutionFiles()

    def add_negative_v2_squared_to_itself():
      writer = dumping_callback.enable_dump_debug_info(
          dump_root_2, tensor_debug_mode="FULL_TENSOR")
      v2_squared = v2 ** 2.0
      # Since dumping is disabled before the Neg op is called, no tensor data
      # should be dumped from the op, but this shouldn't affect the dumping of
      # the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
      # Both behaviors are checked below.
      dumping_callback.disable_dump_debug_info()
      negative_v2_squared = -v2_squared
      v2.assign_add(negative_v2_squared)
      writer.FlushNonExecutionFiles()
      writer.FlushExecutionFiles()

    # v2 is mutated on a sub-thread.
    sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
    sub_thread.start()
    add_negative_v1_squared_to_itself()  # v1 is mutated on the main thread.
    sub_thread.join()
    # 10 - 10 * 10 = -90.
    # -90 - (-90 * -90) = -8190.
    # -8190 - (-8190 * -8190) = -67084290.
    self.assertAllClose(v1.read_value(), -67084290.0)
    self.assertAllClose(v2.read_value(), -6.0)

    (executed_op_types, _, _, _,
     tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_1)
    v1_squared_values = [
        tensor_values[i] for i, op_type in enumerate(executed_op_types)
        if op_type == "Pow"]
    negative_v1_squared_values = [
        tensor_values[i] for i, op_type in enumerate(executed_op_types)
        if op_type == "Neg"]
    self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
    self.assertAllClose(
        negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])

    (executed_op_types, _, _, _,
     tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_2)
    # The sub-thread disabled dumping before its Neg op ran.
    self.assertNotIn("Neg", executed_op_types)
    v2_squared_values = tensor_values[executed_op_types.index("Pow")]
    self.assertAllClose(v2_squared_values, [9.0])

  @test_util.run_in_graph_and_eager_modes
  def testNestedContextIsCapturedByGraphOpCreationHistory(self):
    """Ops record the innermost control-flow context they were created in."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def iterative_doubling(x, times):
      i = constant_op.constant(0, dtype=dtypes.int32)
      while i < times:
        x = x * 2.0 - 1.0
        i += 1
      return x

    x = constant_op.constant(2.0, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    # 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (_, _, op_name_to_op_type,
     op_name_to_context_id) = self._readAndCheckGraphsFile(stack_frame_by_id)

    less_op_names = [op_name for op_name in op_name_to_op_type
                     if op_name_to_op_type[op_name] == "Less"]
    less_context_ids = [op_name_to_context_id[op_name]
                        for op_name in less_op_names]
    mul_op_names = [op_name for op_name in op_name_to_op_type
                    if op_name_to_op_type[op_name] == "Mul"]
    mul_context_ids = [op_name_to_context_id[op_name]
                       for op_name in mul_op_names]
    sub_op_names = [op_name for op_name in op_name_to_op_type
                    if op_name_to_op_type[op_name] == "Sub"]
    sub_context_ids = [op_name_to_context_id[op_name]
                       for op_name in sub_op_names]
    self.assertLen(less_context_ids, 1)
    self.assertLen(mul_context_ids, 1)
    self.assertLen(sub_context_ids, 1)
    self.assertTrue(less_context_ids[0])
    self.assertTrue(mul_context_ids[0])
    self.assertTrue(sub_context_ids[0])
    # The Less op is from the while-loop cond context and hence should have
    # a different innermost context ID from the mul and sub ops, which are both
    # from the while-loop body context.
    self.assertNotEqual(less_context_ids[0], mul_context_ids[0])
    self.assertNotEqual(less_context_ids[0], sub_context_ids[0])
    # The Mul and Sub ops are from the same innermost context.
    self.assertEqual(mul_context_ids[0], sub_context_ids[0])

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
    """Dumping instruments a Keras recurrent model's predict() call."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    batch_size = 5
    xs = np.ones([batch_size, 3, 4])
    self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (context_ids, op_types,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    # Simply assert that graph are recorded and refrain from asserting on the
    # internal details of the Keras model.
    self.assertTrue(context_ids)
    self.assertTrue(op_types)
    self.assertTrue(op_name_to_op_type)

    if context.executing_eagerly():
      # NOTE(b/142486213): Execution of the TF function happens with
      # Session.run() in v1 graph mode, hence it doesn't get logged to the
      # .execution file.
      (executed_op_types, _, _, _,
       tensor_values) = self._readAndCheckExecutionFile()
      self.assertTrue(executed_op_types)
      for value_list in tensor_values:
        if tensor_debug_mode == "NO_TENSOR":
          self.assertFalse(value_list)

    (op_names, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    # These are the ops that we can safely assume to have been executed during
    # the model prediction.
    self.assertIn("MatMul", executed_op_types)
    self.assertIn("BiasAdd", executed_op_types)
    # On the GPU, CudnnRNN is used in lieu of the default op-by-op
    # implementation.
    self.assertTrue(
        ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
         "CudnnRNN" in executed_op_types))
    # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
    # be an empty float32 tensor.
    if tensor_debug_mode == "NO_TENSOR":
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))
    else:
      # Refrain from asserting the internal implementation details of the LSTM
      # layer.
      concrete_tensor_values = [
          value for value in tensor_values
          if value is not None and value.size > 0
      ]
      self.assertTrue(concrete_tensor_values)

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
    """Dumping instruments a Keras recurrent model's fit() call."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    xs = np.ones([5, 3, 4])
    ys = np.ones([5, 1])

    history = model.fit(xs, ys, epochs=3, verbose=0)
    self.assertAllClose(
        history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (context_ids, op_types,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    # Simply assert that graph are recorded and refrain from asserting on the
    # internal details of the Keras model.
    self.assertTrue(context_ids)
    self.assertTrue(op_types)
    self.assertTrue(op_name_to_op_type)

    if context.executing_eagerly():
      # NOTE(b/142486213): Execution of the TF function happens with
      # Session.run() in v1 graph mode, hence it doesn't get logged to the
      # .execution file.
      (executed_op_types, _, _, _,
       tensor_values) = self._readAndCheckExecutionFile()
      self.assertTrue(executed_op_types)
      if tensor_debug_mode == "NO_TENSOR":
        for value_list in tensor_values:
          self.assertFalse(value_list)

    (op_names, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    # These are the ops that we can safely assume to have been executed during
    # the recurrent model's fit() call.
    self.assertIn("MatMul", executed_op_types)
    self.assertIn("BiasAdd", executed_op_types)
    # On the GPU, CudnnRNN is used in lieu of the default op-by-op
    # implementation.
    self.assertTrue(
        ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
         "CudnnRNN" in executed_op_types))
    self.assertTrue(
        ("SigmoidGrad" in executed_op_types and
         "TanhGrad" in executed_op_types or
         "CudnnRNNBackprop" in executed_op_types))
    if tensor_debug_mode == "NO_TENSOR":
      # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
      # to be an empty float32 tensor.
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))

  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testMobiletNetV2Fit(self, tensor_debug_mode):
    """Test training Keras MobileNetV2 works with dumping."""
    # Use a large circular-buffer to make sure we capture all the executed ops.
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root,
        tensor_debug_mode=tensor_debug_mode,
        circular_buffer_size=100000)
    model = mobilenet_v2.MobileNetV2(
        input_shape=(32, 32, 3), alpha=0.1, weights=None)
    # Truncate the model after layer 22 and attach a small head, to keep the
    # test cheap while still exercising conv + activation layers.
    y = model.layers[22].output
    y = core.Flatten()(y)
    y = core.Dense(1)(y)
    model = models.Model(inputs=model.inputs, outputs=y)

    batch_size = 2
    xs = np.zeros([batch_size] + list(model.input_shape[1:]))
    ys = np.zeros([batch_size] + list(model.output_shape[1:]))
    model.compile(optimizer="sgd", loss="mse")

    epochs = 1
    history = model.fit(xs, ys, epochs=epochs, verbose=0)
    self.assertLen(history.history["loss"], epochs)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
    (context_ids, op_types,
     op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
    # Simply assert that graph are recorded and refrain from asserting on the
    # internal details of the Keras model.
    self.assertTrue(context_ids)
    self.assertTrue(op_types)
    self.assertTrue(op_name_to_op_type)

    if context.executing_eagerly():
      # NOTE(b/142486213): Execution of the TF function happens with
      # Session.run() in v1 graph mode, hence it doesn't get logged to the
      # .execution file.
      executed_op_types, _, _, _, _ = self._readAndCheckExecutionFile()
      self.assertTrue(executed_op_types)

    (op_names, _, _,
     tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
    executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
    # These are the ops that we can safely assume to have been executed during
    # the model's fit() call.
    self.assertIn("Conv2D", executed_op_types)
    self.assertIn("Relu6", executed_op_types)
    self.assertIn("Conv2DBackpropFilter", executed_op_types)
    self.assertIn("Relu6Grad", executed_op_types)
    if tensor_debug_mode == "NO_TENSOR":
      # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
      # be an empty float32 tensor.
      for tensor_value in tensor_values:
        self.assertEqual(tensor_value.dtype, np.float32)
        self.assertEqual(tensor_value.shape, (0,))
    elif tensor_debug_mode == "FULL_TENSOR":
      # Forward-pass activations keep the batch dimension.
      conv2d_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Conv2D"
      ]
      self.assertTrue(conv2d_values)
      for conv2d_value in conv2d_values:
        self.assertGreater(len(conv2d_value.shape), 1)
        self.assertEqual(conv2d_value.shape[0], batch_size)
      relu6_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Relu6"
      ]
      self.assertTrue(relu6_values)
      for relu6_value in relu6_values:
        self.assertGreater(len(relu6_value.shape), 1)
        self.assertEqual(relu6_value.shape[0], batch_size)
      conv2d_bp_filter_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Conv2DBackpropFilter"
      ]
      self.assertTrue(conv2d_bp_filter_values)
      for conv2d_bp_filter_value in conv2d_bp_filter_values:
        self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
      relu6_grad_values = [
          tensor_values[i]
          for i, op_type in enumerate(executed_op_types)
          if op_type == "Relu6Grad"
      ]
      self.assertTrue(relu6_grad_values)
      for relu6_grad_value in relu6_grad_values:
        self.assertGreater(len(relu6_grad_value.shape), 1)


if __name__ == "__main__":
  ops.enable_eager_execution()
  googletest.main()
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import warnings from pyspark import since, keyword_only from pyspark.ml.util import * from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper from pyspark.ml.param.shared import * from pyspark.ml.common import inherit_doc from pyspark.sql import DataFrame __all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary', 'KMeans', 'KMeansModel', 'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary', 'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering'] class ClusteringSummary(JavaWrapper): """ .. note:: Experimental Clustering results for a given model. .. versionadded:: 2.1.0 """ @property @since("2.1.0") def predictionCol(self): """ Name for column of predicted clusters in `predictions`. """ return self._call_java("predictionCol") @property @since("2.1.0") def predictions(self): """ DataFrame produced by the model's `transform` method. """ return self._call_java("predictions") @property @since("2.1.0") def featuresCol(self): """ Name for column of features in `predictions`. 
""" return self._call_java("featuresCol") @property @since("2.1.0") def k(self): """ The number of clusters the model was trained with. """ return self._call_java("k") @property @since("2.1.0") def cluster(self): """ DataFrame of predicted cluster centers for each training data point. """ return self._call_java("cluster") @property @since("2.1.0") def clusterSizes(self): """ Size of (number of data points in) each cluster. """ return self._call_java("clusterSizes") @property @since("2.4.0") def numIter(self): """ Number of iterations. """ return self._call_java("numIter") class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable, HasTrainingSummary): """ Model fitted by GaussianMixture. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def weights(self): """ Weight for each Gaussian distribution in the mixture. This is a multinomial probability distribution over the k Gaussians, where weights[i] is the weight for Gaussian i, and weights sum to 1. """ return self._call_java("weights") @property @since("2.0.0") def gaussiansDF(self): """ Retrieve Gaussian distributions as a DataFrame. Each row represents a Gaussian Distribution. The DataFrame has two columns: mean (Vector) and cov (Matrix). """ return self._call_java("gaussiansDF") @property @since("2.1.0") def summary(self): """ Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the training set. An exception is thrown if no summary exists. """ if self.hasSummary: return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @inherit_doc class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed, HasProbabilityCol, JavaMLWritable, JavaMLReadable): """ GaussianMixture clustering. This class performs expectation maximization for multivariate Gaussian Mixture Models (GMMs). 
A GMM represents a composite distribution of independent Gaussian distributions with associated "mixing" weights specifying each's contribution to the composite. Given a set of sample points, this class will maximize the log-likelihood for a mixture of k Gaussians, iterating until the log-likelihood changes by less than convergenceTol, or until it has reached the max number of iterations. While this process is generally guaranteed to converge, it is not guaranteed to find a global optimum. .. note:: For high-dimensional data (with many features), this algorithm may perform poorly. This is due to high-dimensional data (a) making it difficult to cluster at all (based on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions. >>> from pyspark.ml.linalg import Vectors >>> data = [(Vectors.dense([-0.1, -0.05 ]),), ... (Vectors.dense([-0.01, -0.1]),), ... (Vectors.dense([0.9, 0.8]),), ... (Vectors.dense([0.75, 0.935]),), ... (Vectors.dense([-0.83, -0.68]),), ... (Vectors.dense([-0.91, -0.76]),)] >>> df = spark.createDataFrame(data, ["features"]) >>> gm = GaussianMixture(k=3, tol=0.0001, ... maxIter=10, seed=10) >>> model = gm.fit(df) >>> model.hasSummary True >>> summary = model.summary >>> summary.k 3 >>> summary.clusterSizes [2, 2, 2] >>> summary.logLikelihood 8.14636... 
>>> weights = model.weights >>> len(weights) 3 >>> model.gaussiansDF.select("mean").head() Row(mean=DenseVector([0.825, 0.8675])) >>> model.gaussiansDF.select("cov").head() Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False)) >>> transformed = model.transform(df).select("features", "prediction") >>> rows = transformed.collect() >>> rows[4].prediction == rows[5].prediction True >>> rows[2].prediction == rows[3].prediction True >>> gmm_path = temp_path + "/gmm" >>> gm.save(gmm_path) >>> gm2 = GaussianMixture.load(gmm_path) >>> gm2.getK() 3 >>> model_path = temp_path + "/gmm_model" >>> model.save(model_path) >>> model2 = GaussianMixtureModel.load(model_path) >>> model2.hasSummary False >>> model2.weights == model.weights True >>> model2.gaussiansDF.select("mean").head() Row(mean=DenseVector([0.825, 0.8675])) >>> model2.gaussiansDF.select("cov").head() Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False)) .. versionadded:: 2.0.0 """ k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. 
" + "Must be > 1.", typeConverter=TypeConverters.toInt) @keyword_only def __init__(self, featuresCol="features", predictionCol="prediction", k=2, probabilityCol="probability", tol=0.01, maxIter=100, seed=None): """ __init__(self, featuresCol="features", predictionCol="prediction", k=2, \ probabilityCol="probability", tol=0.01, maxIter=100, seed=None) """ super(GaussianMixture, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture", self.uid) self._setDefault(k=2, tol=0.01, maxIter=100) kwargs = self._input_kwargs self.setParams(**kwargs) def _create_model(self, java_model): return GaussianMixtureModel(java_model) @keyword_only @since("2.0.0") def setParams(self, featuresCol="features", predictionCol="prediction", k=2, probabilityCol="probability", tol=0.01, maxIter=100, seed=None): """ setParams(self, featuresCol="features", predictionCol="prediction", k=2, \ probabilityCol="probability", tol=0.01, maxIter=100, seed=None) Sets params for GaussianMixture. """ kwargs = self._input_kwargs return self._set(**kwargs) @since("2.0.0") def setK(self, value): """ Sets the value of :py:attr:`k`. """ return self._set(k=value) @since("2.0.0") def getK(self): """ Gets the value of `k` """ return self.getOrDefault(self.k) class GaussianMixtureSummary(ClusteringSummary): """ .. note:: Experimental Gaussian mixture clustering results for a given model. .. versionadded:: 2.1.0 """ @property @since("2.1.0") def probabilityCol(self): """ Name for column of predicted probability of each cluster in `predictions`. """ return self._call_java("probabilityCol") @property @since("2.1.0") def probability(self): """ DataFrame of probabilities of each cluster for each training data point. """ return self._call_java("probability") @property @since("2.2.0") def logLikelihood(self): """ Total log-likelihood for this model on the given data. """ return self._call_java("logLikelihood") class KMeansSummary(ClusteringSummary): """ .. 
note:: Experimental

    Summary of KMeans.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.4.0")
    def trainingCost(self):
        """
        K-means cost (sum of squared distances to the nearest centroid for all points in the
        training dataset). This is equivalent to sklearn's inertia.
        """
        return self._call_java("trainingCost")


class KMeansModel(JavaModel, GeneralJavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by KMeans.

    .. versionadded:: 1.5.0
    """

    @since("1.5.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            # Wrap the generic Java summary in the KMeans-specific wrapper.
            return KMeansSummary(super(KMeansModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)


@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
             HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
    """
    K-means clustering with a k-means++ like initialization mode
    (the k-means|| algorithm by Bahmani et al).

    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> kmeans = KMeans(k=2, seed=1)
    >>> model = kmeans.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    2.0
    >>> kmeans_path = temp_path + "/kmeans"
    >>> kmeans.save(kmeans_path)
    >>> kmeans2 = KMeans.load(kmeans_path)
    >>> kmeans2.getK()
    2
    >>> model_path = temp_path + "/kmeans_model"
    >>> model.save(model_path)
    >>> model2 = KMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 1.5.0
    """

    k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either \"random\" to " +
                     "choose random points as initial cluster centers, or \"k-means||\" " +
                     "to use a parallel variant of k-means++",
                     typeConverter=TypeConverters.toString)
    initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
                      "initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                 distanceMeasure="euclidean"):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", k=2, \
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                 distanceMeasure="euclidean")
        """
        super(KMeans, self).__init__()
        # Wrap the JVM-side estimator; params are synced to it via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
        self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
                         distanceMeasure="euclidean")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return KMeansModel(java_model)

    @keyword_only
    @since("1.5.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                  distanceMeasure="euclidean"):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                  distanceMeasure="euclidean")

        Sets params for KMeans.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of `k`
        """
        return self.getOrDefault(self.k)

    @since("1.5.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("1.5.0")
    def getInitMode(self):
        """
        Gets the value of `initMode`
        """
        return self.getOrDefault(self.initMode)

    @since("1.5.0")
    def setInitSteps(self, value):
        """
        Sets the value of :py:attr:`initSteps`.
        """
        return self._set(initSteps=value)

    @since("1.5.0")
    def getInitSteps(self):
        """
        Gets the value of `initSteps`
        """
        return self.getOrDefault(self.initSteps)

    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)

    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure`
        """
        return self.getOrDefault(self.distanceMeasure)


class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by BisectingKMeans.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Computes the sum of squared distances between the input points
        and their corresponding cluster centers.

        .. note:: Deprecated in 3.0.0. It will be removed in future versions. Use
            ClusteringEvaluator instead. You can also get the cost on the training
            dataset in the summary.
        """
        warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
                      "ClusteringEvaluator instead. You can also get the cost on the training "
                      "dataset in the summary.", DeprecationWarning)
        return self._call_java("computeCost", dataset)

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            # Wrap the generic Java summary in the bisecting-k-means wrapper.
            return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)


@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
                      HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
    """
    A bisecting k-means algorithm based on the paper "A comparison of document clustering
    techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and bisects each of them using
    k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped together to increase
    parallelism. If bisecting all divisible clusters on the bottom level would result more than
    `k` leaf clusters, larger clusters get higher priority.

    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
    >>> model = bkm.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.0
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    2.000...
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> bkm_path = temp_path + "/bkm"
    >>> bkm.save(bkm_path)
    >>> bkm2 = BisectingKMeans.load(bkm_path)
    >>> bkm2.getK()
    2
    >>> bkm2.getDistanceMeasure()
    'euclidean'
    >>> model_path = temp_path + "/bkm_model"
    >>> model.save(model_path)
    >>> model2 = BisectingKMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 2.0.0
    """

    k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
                                    "The minimum number of points (if >= 1.0) or the minimum " +
                                    "proportion of points (if < 1.0) of a divisible cluster.",
                                    typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                 seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
        """
        super(BisectingKMeans, self).__init__()
        # Wrap the JVM-side estimator; params are synced to it via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
                                            self.uid)
        self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                  seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")

        Sets params for BisectingKMeans.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def setMinDivisibleClusterSize(self, value):
        """
        Sets the value of :py:attr:`minDivisibleClusterSize`.
        """
        return self._set(minDivisibleClusterSize=value)

    @since("2.0.0")
    def getMinDivisibleClusterSize(self):
        """
        Gets the value of `minDivisibleClusterSize` or its default value.
        """
        return self.getOrDefault(self.minDivisibleClusterSize)

    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)

    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure` or its default value.
        """
        return self.getOrDefault(self.distanceMeasure)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return BisectingKMeansModel(java_model)


class BisectingKMeansSummary(ClusteringSummary):
    """
    .. note:: Experimental

    Bisecting KMeans clustering results for a given model.

    .. versionadded:: 2.1.0
    """

    @property
    @since("3.0.0")
    def trainingCost(self):
        """
        Sum of squared distances to the nearest centroid for all points in the training
        dataset. This is equivalent to sklearn's inertia.
        """
        return self._call_java("trainingCost")


@inherit_doc
class LDAModel(JavaModel):
    """
    Latent Dirichlet Allocation (LDA) model.
    This abstraction permits for different underlying representations,
    including local and distributed data structures.

    ..
versionadded:: 2.0.0
    """

    @since("2.0.0")
    def isDistributed(self):
        """
        Indicates whether this instance is of type DistributedLDAModel
        """
        return self._call_java("isDistributed")

    @since("2.0.0")
    def vocabSize(self):
        """Vocabulary size (number of terms or words in the vocabulary)"""
        return self._call_java("vocabSize")

    @since("2.0.0")
    def topicsMatrix(self):
        """
        Inferred topics, where each topic is represented by a distribution over terms.
        This is a matrix of size vocabSize x k, where each column is a topic.
        No guarantees are given about the ordering of the topics.

        WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
        the Expectation-Maximization ("em") `optimizer`, then this method could involve
        collecting a large amount of data to the driver (on the order of vocabSize x k).
        """
        return self._call_java("topicsMatrix")

    @since("2.0.0")
    def logLikelihood(self, dataset):
        """
        Calculates a lower bound on the log likelihood of the entire corpus.
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logLikelihood", dataset)

    @since("2.0.0")
    def logPerplexity(self, dataset):
        """
        Calculate an upper bound on perplexity. (Lower is better.)
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logPerplexity", dataset)

    @since("2.0.0")
    def describeTopics(self, maxTermsPerTopic=10):
        """
        Return the topics described by their top-weighted terms.
        """
        return self._call_java("describeTopics", maxTermsPerTopic)

    @since("2.0.0")
    def estimatedDocConcentration(self):
        """
        Value for :py:attr:`LDA.docConcentration` estimated from data.
        If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
        then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration`
        parameter.
        """
        return self._call_java("estimatedDocConcentration")


@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Distributed model fitted by :py:class:`LDA`.
    This type of model is currently only produced by Expectation-Maximization (EM).

    This model stores the inferred topics, the full training dataset, and the topic
    distribution for each training document.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def toLocal(self):
        """
        Convert this distributed model to a local representation. This discards info about the
        training dataset.

        WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
        """
        model = LocalLDAModel(self._call_java("toLocal"))

        # SPARK-10931: Temporary fix to be removed once LDAModel defines Params
        model._create_params_from_java()
        model._transfer_params_from_java()
        return model

    @since("2.0.0")
    def trainingLogLikelihood(self):
        """
        Log likelihood of the observed tokens in the training set,
        given the current parameter estimates:
        log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)

        Notes:
          - This excludes the prior; for that, use :py:func:`logPrior`.
          - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
            the hyperparameters.
          - This is computed from the topic distributions computed during training. If you call
            :py:func:`logLikelihood` on the same training dataset, the topic distributions
            will be computed again, possibly giving different results.
        """
        return self._call_java("trainingLogLikelihood")

    @since("2.0.0")
    def logPrior(self):
        """
        Log probability of the current parameter estimate:
        log P(topics, topic distributions for docs | alpha, eta)
        """
        return self._call_java("logPrior")

    @since("2.0.0")
    def getCheckpointFiles(self):
        """
        If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there
        may be saved checkpoint files. This method is provided so that users can manage those
        files.

        .. note:: Removing the checkpoints can cause failures if a partition is lost and is
            needed by certain :py:class:`DistributedLDAModel` methods. Reference counting will
            clean up the checkpoints when this model and derivative data go out of scope.

        :return  List of checkpoint files from training
        """
        return self._call_java("getCheckpointFiles")


@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Local (non-distributed) model fitted by :py:class:`LDA`.

    This model stores the inferred topics only; it does not store info about the training
    dataset.

    .. versionadded:: 2.0.0
    """
    pass


@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
          JavaMLReadable, JavaMLWritable):
    """
    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.

    Terminology:

     - "term" = "word": an element of the vocabulary
     - "token": instance of a term appearing in a document
     - "topic": multinomial distribution over terms representing some concept
     - "document": one piece of text, corresponding to one row in the input data

    Original LDA paper (journal version):
      Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.

    Input data (featuresCol):
    LDA is given a collection of documents as input data, via the featuresCol parameter.
    Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is
    the count for the corresponding term (word) in the document.
    Feature transformers such as :py:class:`pyspark.ml.feature.Tokenizer` and
    :py:class:`pyspark.ml.feature.CountVectorizer` can be useful for converting text to
    word count vectors.

    >>> from pyspark.ml.linalg import Vectors, SparseVector
    >>> from pyspark.ml.clustering import LDA
    >>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
    ...      [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
    >>> lda = LDA(k=2, seed=1, optimizer="em")
    >>> model = lda.fit(df)
    >>> model.isDistributed()
    True
    >>> localModel = model.toLocal()
    >>> localModel.isDistributed()
    False
    >>> model.vocabSize()
    2
    >>> model.describeTopics().show()
    +-----+-----------+--------------------+
    |topic|termIndices|         termWeights|
    +-----+-----------+--------------------+
    |    0|     [1, 0]|[0.50401530077160...|
    |    1|     [0, 1]|[0.50401530077160...|
    +-----+-----------+--------------------+
    ...
    >>> model.topicsMatrix()
    DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
    >>> lda_path = temp_path + "/lda"
    >>> lda.save(lda_path)
    >>> sameLDA = LDA.load(lda_path)
    >>> distributed_model_path = temp_path + "/lda_distributed_model"
    >>> model.save(distributed_model_path)
    >>> sameModel = DistributedLDAModel.load(distributed_model_path)
    >>> local_model_path = temp_path + "/lda_local_model"
    >>> localModel.save(local_model_path)
    >>> sameLocalModel = LocalLDAModel.load(local_model_path)

    .. versionadded:: 2.0.0
    """

    k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    optimizer = Param(Params._dummy(), "optimizer",
                      "Optimizer or inference algorithm used to estimate the LDA model. "
                      "Supported: online, em", typeConverter=TypeConverters.toString)
    learningOffset = Param(Params._dummy(), "learningOffset",
                           "A (positive) learning parameter that downweights early iterations."
                           " Larger values make early iterations count less",
                           typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
                          "exponential decay rate. This should be between (0.5, 1.0] to "
                          "guarantee asymptotic convergence.",
                          typeConverter=TypeConverters.toFloat)
    subsamplingRate = Param(Params._dummy(), "subsamplingRate",
                            "Fraction of the corpus to be sampled and used in each iteration "
                            "of mini-batch gradient descent, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)
    optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
                                     "Indicates whether the docConcentration (Dirichlet "
                                     "parameter for document-topic distribution) will be "
                                     "optimized during training.",
                                     typeConverter=TypeConverters.toBoolean)
    docConcentration = Param(Params._dummy(), "docConcentration",
                             "Concentration parameter (commonly named \"alpha\") for the "
                             "prior placed on documents' distributions over topics (\"theta\").",
                             typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topic' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
    topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
                                 "Output column with estimates of the topic mixture distribution "
                                 "for each document (often called \"theta\" in the literature). "
                                 "Returns a vector of zeros for an empty document.",
                                 typeConverter=TypeConverters.toString)
    keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
                               "(For EM optimizer) If using checkpointing, this indicates whether"
                               " to keep the last checkpoint. If false, then the checkpoint will be"
                               " deleted. Deleting the checkpoint can cause failures if a data"
                               " partition is lost, so set this bit with care.",
                               TypeConverters.toBoolean)

    @keyword_only
    def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                 subsamplingRate=0.05, optimizeDocConcentration=True,
                 docConcentration=None, topicConcentration=None,
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                 subsamplingRate=0.05, optimizeDocConcentration=True,\
                 docConcentration=None, topicConcentration=None,\
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        """
        super(LDA, self).__init__()
        # Wrap the JVM-side estimator; params are synced to it via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
        self._setDefault(maxIter=20, checkpointInterval=10,
                         k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                         subsamplingRate=0.05, optimizeDocConcentration=True,
                         topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # The EM optimizer keeps per-document state in the JVM and therefore
        # yields a distributed model; "online" produces a compact local model.
        if self.getOptimizer() == "em":
            return DistributedLDAModel(java_model)
        else:
            return LocalLDAModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                  subsamplingRate=0.05, optimizeDocConcentration=True,
                  docConcentration=None, topicConcentration=None,
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True)

        Sets params for LDA.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.

        >>> algo = LDA().setK(10)
        >>> algo.getK()
        10
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def setOptimizer(self, value):
        """
        Sets the value of :py:attr:`optimizer`.
        Currently only support 'em' and 'online'.

        >>> algo = LDA().setOptimizer("em")
        >>> algo.getOptimizer()
        'em'
        """
        return self._set(optimizer=value)

    @since("2.0.0")
    def getOptimizer(self):
        """
        Gets the value of :py:attr:`optimizer` or its default value.
        """
        return self.getOrDefault(self.optimizer)

    @since("2.0.0")
    def setLearningOffset(self, value):
        """
        Sets the value of :py:attr:`learningOffset`.

        >>> algo = LDA().setLearningOffset(100)
        >>> algo.getLearningOffset()
        100.0
        """
        return self._set(learningOffset=value)

    @since("2.0.0")
    def getLearningOffset(self):
        """
        Gets the value of :py:attr:`learningOffset` or its default value.
        """
        return self.getOrDefault(self.learningOffset)

    @since("2.0.0")
    def setLearningDecay(self, value):
        """
        Sets the value of :py:attr:`learningDecay`.

        >>> algo = LDA().setLearningDecay(0.1)
        >>> algo.getLearningDecay()
        0.1...
        """
        return self._set(learningDecay=value)

    @since("2.0.0")
    def getLearningDecay(self):
        """
        Gets the value of :py:attr:`learningDecay` or its default value.
        """
        return self.getOrDefault(self.learningDecay)

    @since("2.0.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.

        >>> algo = LDA().setSubsamplingRate(0.1)
        >>> algo.getSubsamplingRate()
        0.1...
        """
        return self._set(subsamplingRate=value)

    @since("2.0.0")
    def getSubsamplingRate(self):
        """
        Gets the value of :py:attr:`subsamplingRate` or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)

    @since("2.0.0")
    def setOptimizeDocConcentration(self, value):
        """
        Sets the value of :py:attr:`optimizeDocConcentration`.

        >>> algo = LDA().setOptimizeDocConcentration(True)
        >>> algo.getOptimizeDocConcentration()
        True
        """
        return self._set(optimizeDocConcentration=value)

    @since("2.0.0")
    def getOptimizeDocConcentration(self):
        """
        Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
        """
        return self.getOrDefault(self.optimizeDocConcentration)

    @since("2.0.0")
    def setDocConcentration(self, value):
        """
        Sets the value of :py:attr:`docConcentration`.

        >>> algo = LDA().setDocConcentration([0.1, 0.2])
        >>> algo.getDocConcentration()
        [0.1..., 0.2...]
        """
        return self._set(docConcentration=value)

    @since("2.0.0")
    def getDocConcentration(self):
        """
        Gets the value of :py:attr:`docConcentration` or its default value.
        """
        return self.getOrDefault(self.docConcentration)

    @since("2.0.0")
    def setTopicConcentration(self, value):
        """
        Sets the value of :py:attr:`topicConcentration`.

        >>> algo = LDA().setTopicConcentration(0.5)
        >>> algo.getTopicConcentration()
        0.5...
        """
        return self._set(topicConcentration=value)

    @since("2.0.0")
    def getTopicConcentration(self):
        """
        Gets the value of :py:attr:`topicConcentration` or its default value.
        """
        return self.getOrDefault(self.topicConcentration)

    @since("2.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.

        >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
        >>> algo.getTopicDistributionCol()
        'topicDistributionCol'
        """
        return self._set(topicDistributionCol=value)

    @since("2.0.0")
    def getTopicDistributionCol(self):
        """
        Gets the value of :py:attr:`topicDistributionCol` or its default value.
        """
        return self.getOrDefault(self.topicDistributionCol)

    @since("2.0.0")
    def setKeepLastCheckpoint(self, value):
        """
        Sets the value of :py:attr:`keepLastCheckpoint`.
        >>> algo = LDA().setKeepLastCheckpoint(False)
        >>> algo.getKeepLastCheckpoint()
        False
        """
        return self._set(keepLastCheckpoint=value)

    @since("2.0.0")
    def getKeepLastCheckpoint(self):
        """
        Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
        """
        return self.getOrDefault(self.keepLastCheckpoint)


@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
                               JavaMLWritable):
    """
    .. note:: Experimental

    Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    `Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_.
    From the abstract: PIC finds a very low-dimensional embedding of a dataset using truncated
    power iteration on a normalized pair-wise similarity matrix of the data.

    This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.

    .. seealso:: `Wikipedia on Spectral clustering
        <http://en.wikipedia.org/wiki/Spectral_clustering>`_

    >>> data = [(1, 0, 0.5),
    ...         (2, 0, 0.5), (2, 1, 0.7),
    ...         (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
    ...         (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
    ...         (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
    >>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
    >>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
    >>> assignments = pic.assignClusters(df)
    >>> assignments.sort(assignments.id).show(truncate=False)
    +---+-------+
    |id |cluster|
    +---+-------+
    |0  |0      |
    |1  |0      |
    |2  |0      |
    |3  |0      |
    |4  |0      |
    |5  |1      |
    +---+-------+
    ...
    >>> pic_path = temp_path + "/pic"
    >>> pic.save(pic_path)
    >>> pic2 = PowerIterationClustering.load(pic_path)
    >>> pic2.getK()
    2
    >>> pic2.getMaxIter()
    40

    .. versionadded:: 2.4.0
    """

    k = Param(Params._dummy(), "k",
              "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either " +
                     "'random' to use a random vector as vertex properties, or 'degree' to use " +
                     "a normalized sum of similarities with other vertices. Supported options: " +
                     "'random' and 'degree'.",
                     typeConverter=TypeConverters.toString)
    srcCol = Param(Params._dummy(), "srcCol",
                   "Name of the input column for source vertex IDs.",
                   typeConverter=TypeConverters.toString)
    dstCol = Param(Params._dummy(), "dstCol",
                   "Name of the input column for destination vertex IDs.",
                   typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                 weightCol=None):
        """
        __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                 weightCol=None)
        """
        super(PowerIterationClustering, self).__init__()
        # Wrap the JVM-side implementation; params are synced to it via Py4J.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
        self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.4.0")
    def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                  weightCol=None):
        """
        setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                  weightCol=None)

        Sets params for PowerIterationClustering.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.4.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.4.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.4.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("2.4.0")
    def getInitMode(self):
        """
        Gets the value of :py:attr:`initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)

    @since("2.4.0")
    def setSrcCol(self, value):
        """
        Sets the value of :py:attr:`srcCol`.
        """
        return self._set(srcCol=value)

    @since("2.4.0")
    def getSrcCol(self):
        """
        Gets the value of :py:attr:`srcCol` or its default value.
        """
        return self.getOrDefault(self.srcCol)

    @since("2.4.0")
    def setDstCol(self, value):
        """
        Sets the value of :py:attr:`dstCol`.
        """
        return self._set(dstCol=value)

    @since("2.4.0")
    def getDstCol(self):
        """
        Gets the value of :py:attr:`dstCol` or its default value.
        """
        return self.getOrDefault(self.dstCol)

    @since("2.4.0")
    def assignClusters(self, dataset):
        """
        Run the PIC algorithm and returns a cluster assignment for each input vertex.

        :param dataset:
          A dataset with columns src, dst, weight representing the affinity matrix,
          which is the matrix A in the PIC paper. Suppose the src column value is i,
          the dst column value is j, the weight column value is similarity s,,ij,,
          which must be nonnegative. This is a symmetric matrix and hence
          s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
          either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
          ignored, because we assume s,,ij,, = 0.0.
        :return:
          A dataset that contains columns of vertex id and the corresponding cluster for
          the id. The schema of it will be:
          - id: Long
          - cluster: Int

        .. versionadded:: 2.4.0
        """
        # Push the current Python-side params to the JVM before invoking.
        self._transfer_params_to_java()
        jdf = self._java_obj.assignClusters(dataset._jdf)
        return DataFrame(jdf, dataset.sql_ctx)


if __name__ == "__main__":
    import doctest
    import numpy
    import pyspark.ml.clustering
    from pyspark.sql import SparkSession
    try:
        # Numpy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = pyspark.ml.clustering.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.clustering tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
"""
An OcuMOL Leap plugin for PyMOL.
To install go to the Plugin menu in PyMOL and under the Install New Plugin tab click Choose file... and select this file.
"""
# NOTE(review): Python 2 module (Tkinter/Pmw imports); do not run under Python 3.
import locale
import os, sys
import Pmw
import re
import Tkinter
from Tkinter import *
import pymol
from pymol import cmd
from ocumol.src.helper.helper import PrintException
from ocumol.src.hands.leap_only import PymolListener
from ocumol.src.pymol.pymolHmd import PymolHmd


def __init__(self):
    """ Add OcuMOL Leap to the PyMOL Plugins menu. """
    # PyMOL calls this module-level __init__ with the app object; it registers
    # a menu entry whose command instantiates the plugin lazily.
    self.menuBar.addmenuitem('Plugin', 'command',
                             'Launch OcuMOL Leap',
                             command=lambda s=self: OcuMOLLeapPlugin(s),
                             label='OcuMOL Leap',)


def AnimateElemInnerLoop(pluginObj, poseCoordName):
    # Collect (min, max, period) for one pose coordinate row from the debug
    # entry fields and start the HMD animation for that coordinate.
    # Returns False (without starting) if any field in the row is still empty.
    args = [poseCoordName]
    for argN in pluginObj.animateElemColumnStrings:
        key = '%s_%s' % (poseCoordName, argN)
        if pluginObj.animateElemFields[key].getvalue() == '':
            return False
        # 'period' is a tick count (int); 'min'/'max' are coordinate values (float)
        convFunc = int if argN == 'period' else float
        val = convFunc(pluginObj.animateElemFields[key].getvalue())
        args.append(val)
    pluginObj.hmd.initAnimateElement(*args)
    return True


def AnimateElemClosure(argName, pluginObj):
    # Factory producing Pmw EntryField validators for the debug-animation grid.
    # The validator both validates the text and, on success, (re)starts the
    # animation for the row via AnimateElemInnerLoop.
    def AnimateElemSubclosure(poseCoordName):
        def AnimateElemValidateFloat(txt):
            if txt == '':
                return Pmw.OK
            try:
                locale.atof(txt)
                AnimateElemInnerLoop(pluginObj, poseCoordName)
                return Pmw.OK
            except ValueError:
                return Pmw.PARTIAL
        def AnimateElemValidateInt(txt):
            if txt == '':
                return Pmw.OK
            try:
                locale.atoi(txt)
                AnimateElemInnerLoop(pluginObj, poseCoordName)
                return Pmw.OK
            except ValueError:
                return Pmw.PARTIAL
        # 'period' fields take integers; all other columns take floats
        if argName == 'period':
            return AnimateElemValidateInt
        else:
            return AnimateElemValidateFloat
    return AnimateElemSubclosure


def DebugClosure(hmdObj, pluginObj):
    # Callback for the "Debug mode" checkbutton: toggles hmd.debugMode and
    # shows/hides the "Rift Debug" notebook page accordingly.
    def Callback(buttonName, val):
        try:
            hmdObj.__setattr__('debugMode', val)
            if val == True:
                if 'Rift Debug' not in pluginObj.notebook.pagenames():
                    pluginObj.initRiftDebugPage()
            else:
                if 'Rift Debug' in pluginObj.notebook.pagenames():
                    pluginObj.delRiftDebugPage()
        except:
            PrintException()
    return Callback


def RadioMultipleClosure(obj, buttonAttrNameDict):
    # Callback factory for checkbutton-style RadioSelects: maps the pressed
    # button name to an attribute on obj and stores the on/off value.
    def Callback(buttonName, val):
        obj.__setattr__(buttonAttrNameDict[buttonName], val)
        # if obj.running:
        #     obj.InitPymol()
    return Callback


def RadioSingleClosure(obj, attrName, targetButtonName):
    # Callback factory for single-select radio buttons: sets obj.<attrName>
    # True when the target button is chosen.
    # NOTE(review): collapsed source makes the nesting ambiguous — it is
    # possible initPymol() was only meant to run inside the match branch;
    # confirm against upstream history.
    def Callback(buttonName):
        if buttonName == targetButtonName:
            obj.__setattr__(attrName, True)
        if obj.running:
            obj.initPymol()
    return Callback


def RotationClosure(obj):
    # Callback for the rotation-method radio: chooses natural vs molecule
    # rotation and re-centers the view if the HMD is already running.
    def Callback(buttonName):
        if buttonName == 'Natural rotation':
            obj.__setattr__('naturalRotation', True)
        else:
            obj.__setattr__('naturalRotation', False)
        if obj.running:
            obj.setOriginAtMolecule()
    return Callback


def ValidatePDBClosure(obj):
    # Validator for the PDB-ID entry: accepts exactly 4 alphanumeric chars.
    # On a valid ID, either reloads PyMOL (if running) or stores it for later.
    def ValidatePDB(txt):
        if len(txt) == 4 and txt == re.match('[a-zA-Z0-9]*', txt).group(0):
            if obj.running:
                obj.reinitPymol(pdb=txt)
            else:
                obj.pdb = txt
            return Pmw.OK
        else:
            return Pmw.PARTIAL
    return ValidatePDB


### OcuMOLLeap code ###
class OcuMOLLeapPlugin:
    """Main plugin object: builds the Pmw notebook UI and drives the HMD/Leap."""

    def __init__(self, app):
        # the t_e block here ensures that we actually get an error message when stuff goes wrong
        try:
            self.app = app
            # create placeholders for listeners
            self.hand = 0
            # set up hmd and callbacks
            self.hmd = PymolHmd()
            # initialize the main ocumol megawidget
            self.initNotebook()
            # initialize the subwidgets
            self.initRiftPage()
            self.initLeapPage()
            self.initAboutPopup()
            # finish up the initialization of the main megawidget
            self.initNotebookFinalize()
        except:
            PrintException()

    def initAboutPopup(self):
        # Create About Pop-up
        Pmw.aboutversion('1.0')
        Pmw.aboutcopyright('Apache License\n' +
                           'Version 2.0, January 2004')
        Pmw.aboutcontact('PyMOL Oculus Rift Viewer and Leap Motion Mover\n' +
                         'Max Klein, Jeliazko Jeliazkov, Henry Lessen, and Mariusz Matyszewski, 2015.\n' +
                         'https://github.com/lqtza/OcuMOL_Leap')
        # note github link cannot be copied for some reason...
        self.about = Pmw.AboutDialog(self.parent, applicationname="OcuMOL Leap")
        self.about.withdraw()

    def initLeapPage(self):
        # Create Leap Motion Page
        page = self.notebook.add('Leap Mover')
        group = Pmw.Group(page, tag_text='Leap Motion Mover')
        group.pack(fill='both', expand=1, padx=5, pady=5)
        self.leapOpt = Pmw.OptionMenu(group.interior(),
                                      initialitem='Move',
                                      items=('Move', 'Edit'),
                                      labelpos='w',
                                      label_text='Mover Mode')
        self.leapOpt.pack(padx=1, pady=1)

    def initNotebook(self):
        # set up the main ocumol megawidget (which is a notebook)
        self.parent = self.app.root
        self.dialog = Pmw.Dialog(self.parent,
                                 buttons=("Run Rift Only",
                                          "Run Leap Only",
                                          "Run Both",
                                          "About"),
                                 command=self.execute,
                                 title='OcuMOL Leap')
        self.dialog.withdraw()
        Pmw.setbusycursorattributes(self.dialog.component('hull'))
        self.notebook = Pmw.NoteBook(self.dialog.interior())
        self.notebook.pack(fill='both', expand=1, padx=5, pady=5)

    def initNotebookFinalize(self):
        # finish setting up the notebook (which is the main ocumol megawidget)
        # temporarily init the Rift Debug page so the widget starts up at the right size
        self.initRiftDebugPage()
        self.notebook.setnaturalsize()
        self.dialog.show()
        # remove the Rift Debug page until the user selects debug mode
        self.delRiftDebugPage()

    def initRiftPage(self):
        # set up some callbacks
        DebugModeCallback = DebugClosure(self.hmd, self)
        MoleculeCallback = RadioSingleClosure(self.hmd, 'editMolecule', 'Yes')
        RotationCallback = RotationClosure(self.hmd)
        ValidatePDB = ValidatePDBClosure(self.hmd)
        # Create Oculus Rift Page
        riftPage = self.notebook.add('Rift Visualizer')
        riftGroup = Pmw.Group(riftPage, tag_text='Oculus Rift Visualizer')
        riftGroup.pack(fill='both', expand=1, padx=5, pady=5)
        # Radio buttons to select for rotation method
        self.rotationRadio = Pmw.RadioSelect(riftGroup.interior(),
                                             command=RotationCallback,
                                             frame_borderwidth=2,
                                             frame_relief='ridge',
                                             labelpos='w',
                                             label_text='Rotation method:',
                                             orient='horizontal')
        self.rotationRadio.add('Natural rotation')
        self.rotationRadio.add('Molecule rotation')
        self.rotationRadio.setvalue('Natural rotation')
        self.rotationRadio.pack(padx=1, pady=1)
        # Radio buttons to select for view editing at initialization
        self.moleculeRadio = Pmw.RadioSelect(riftGroup.interior(),
                                             command=MoleculeCallback,
                                             frame_borderwidth=2,
                                             frame_relief='ridge',
                                             labelpos='w',
                                             label_text='Edit view:',
                                             orient='horizontal')
        self.moleculeRadio.add('Yes')
        self.moleculeRadio.add('No')
        self.moleculeRadio.setvalue('No')
        self.moleculeRadio.pack(padx=1, pady=1)
        # Text to input pdb id, for testing
        self.pdbText = Pmw.EntryField(riftGroup.interior(),
                                      labelpos='w',
                                      label_text='Load PDB ID:',
                                      validate=ValidatePDB)
        self.pdbText.pack(padx=1, pady=1)
        # Text to input stereo shift parameter, see wiki for more info
        # TODO make this a slider
        self.stereoShiftText = Pmw.EntryField(riftGroup.interior(),
                                              labelpos='w',
                                              label_text='Stereo shift:',
                                              modifiedcommand=self.changed,
                                              validate={'validator': 'real'},
                                              value='1.0')
        self.stereoShiftText.pack(padx=1, pady=1)
        # Text to input stereo angle parameter, see wiki for more info
        # TODO make this a slider
        self.stereoAngleText = Pmw.EntryField(riftGroup.interior(),
                                              labelpos='w',
                                              label_text='Stereo angle:',
                                              modifiedcommand=self.changed,
                                              value='1.0',
                                              validate={'validator': 'real'})
        self.stereoAngleText.pack(padx=1, pady=1)
        # Check button to run the HMD in debug mode, which will keep going even if the Rift is not attached/detected
        self.debugModeCheck = Pmw.RadioSelect(riftGroup.interior(),
                                              buttontype='checkbutton',
                                              command=DebugModeCallback,
                                              frame_borderwidth=2,
                                              frame_relief='ridge',
                                              labelpos='w',
                                              label_text='',
                                              orient='horizontal')
        self.debugModeCheck.add('Debug mode')
        self.debugModeCheck.pack(padx=1, pady=1)

    def initRiftDebugPage(self):
        # Create Rift Debug Page
        riftDebugPage = self.notebook.insert('Rift Debug', before='Leap Mover')
        riftDebugGroup = Pmw.Group(riftDebugPage, tag_text='Rift Debug')
        riftDebugGroup.pack(fill='both', expand=1, padx=5, pady=5)
        labelFrame = Tkinter.Frame(riftDebugGroup.interior())
        labelFrame.pack(fill='both', expand=1)
        animateLabel = Tkinter.Label(labelFrame, text='Debug animation controls:')
        animateLabel.grid(column=0, row=0, sticky='nw')
        labelFrame.grid_rowconfigure(0, weight=1)
        labelFrame.grid_columnconfigure(0, weight=1)
        frame = Tkinter.Frame(riftDebugGroup.interior())
        frame.pack(fill='both', expand=1)
        # grid of EntryFields: one row per pose coordinate, one column per
        # animation parameter (min/max/period)
        self.animateElemColumnStrings = ['min', 'max', 'period']
        columnLabels = {}
        for i in range(3):
            columnLabels[i] = Tkinter.Label(frame, text=self.animateElemColumnStrings[i])
            columnLabels[i].grid(column=i+1, row=1, sticky='nw')
        self.animateElemRowStrings = ['xRot', 'yRot', 'zRot', 'x', 'y', 'z']
        rowLabels = {}
        for i in range(6):
            rowLabels[i] = Tkinter.Label(frame, text=self.animateElemRowStrings[i])
            rowLabels[i].grid(column=0, row=i+2, sticky='nw')
        animateElemClosures = {'min': AnimateElemClosure(argName='min', pluginObj=self),
                               'max': AnimateElemClosure(argName='max', pluginObj=self),
                               'period': AnimateElemClosure(argName='period', pluginObj=self)}
        self.animateElemFields = {}
        for i, rs in enumerate(self.animateElemRowStrings):
            for j, cs in enumerate(self.animateElemColumnStrings):
                key = '%s_%s' % (rs, cs)
                AnimateElemValidate = animateElemClosures[cs](poseCoordName=rs)
                self.animateElemFields[key] = Pmw.EntryField(frame,
                                                             validate=AnimateElemValidate)
                # NOTE(review): 'stick' looks like a typo for 'sticky'; Tkinter
                # silently ignores unknown grid options here — confirm upstream.
                self.animateElemFields[key].grid(column=j+1, row=i+2, stick='nw')
            # all of the fields in a given row must exist before validation can work, so we defer setting values until here
            for cs in self.animateElemColumnStrings:
                key = '%s_%s' % (rs, cs)
                val = '1' if cs == 'period' else '0.0'
                self.animateElemFields[key].setentry(val)
        # for some reason, pmw checkbuttons don't work with grid :(
        # buttons = {}
        # for i in range(6):
        #     buttons = Pmw.RadioSelect(frame,
        #                               buttontype='checkbutton',
        #                               orient='horizontal')
        #     buttons[i].grid(column=3, row=i, sticky='w')
        animateExplanation = ['You can use the above panel to set independent ',
                              'animations on each of the 6 degrees of freedom ',
                              'OcuMol reads from the Rift.\n\n',
                              'min: sets the low end of the range of the ',
                              'animation.\n',
                              'max: sets the high end of the range of the ',
                              'animation.\n',
                              'period: sets the time it takes the animation to ',
                              'cycle.\n\n',
                              '(note: min & max have units of radians for the ',
                              'rotations, and standard PyMol unit for the ',
                              'translations)\n',
                              '(note: period is measured in terms of count of ',
                              'tracking refreshes, which currently defaults to ',
                              'a rate of %d per second)\n' % self.hmd.trackingRefresh]
        animateExplanation = ''.join(animateExplanation)
        animateExplanationLabel = Tkinter.Label(frame,
                                                justify=Tkinter.LEFT,
                                                text=animateExplanation,
                                                wraplength=400)
        animateExplanationLabel.grid(column=1, row=8, columnspan=3, sticky='nw')
        frame.grid_rowconfigure(8, weight=1)
        frame.grid_columnconfigure(3, weight=1)

    def changed(self):
        # change stereo settings
        cmd.set('stereo_shift', self.stereoShiftText.getvalue())
        cmd.set('stereo_angle', self.stereoAngleText.getvalue())

    def delRiftDebugPage(self):
        self.notebook.delete('Rift Debug')

    def execute(self, result):
        # Dialog button dispatcher; result is the button label, or a falsy
        # value when the dialog is closed.
        # NOTE(review): reconstructed nesting from collapsed source — the
        # placement of the 'Run Both' branch relative to the rift start is
        # ambiguous; as written, 'Run Both' does not start the rift. Confirm
        # against upstream history.
        if result:
            if result == 'Run Rift Only':
                # set initial stereo properties
                cmd.set('stereo_shift', self.stereoShiftText.getvalue())
                cmd.set('stereo_angle', self.stereoAngleText.getvalue())
                self.hmd.start()
            # we've already run the rift, so let's run the hand support
            # if that's what has been called for
            if result == 'Run Both':
                self.hand = PymolListener()
            elif result == 'Run Leap Only':
                # Leap Motion needed for this... name convention is poor.
                # Currently doesn't work... exits on init.
                self.hand = PymolListener()
            elif result == 'About':
                self.about.show()
        else:
            self.quit()

    def quit(self):
        self.dialog.destroy()
# -*- coding: UTF-8 -*-
# Cache decorator utilities built on Django's low-level cache API.
# NOTE(review): Python 2 module (uses `unicode`, `basestring`, `im_self`).
from django.core.cache import cache
from django.db.models.signals import pre_delete, post_save
from django.dispatch import Signal
import functools
import hashlib

try:
    from inspect import getcallargs
except ImportError:
    # Backport of inspect.getcallargs (added in Python 2.7) for older runtimes.
    import sys
    from inspect import getargspec, ismethod

    def getcallargs(func, *positional, **named):
        """Get the mapping of arguments to values.

        A dict is returned, with keys the function argument names (including
        the names of the * and ** arguments, if any), and values the respective
        bound values from 'positional' and 'named'."""
        args, varargs, varkw, defaults = getargspec(func)
        f_name = func.__name__
        arg2value = {}

        # The following closures are basically because of tuple parameter unpacking.
        assigned_tuple_params = []

        def assign(arg, value):
            # Bind one parameter; recurses for Python-2 tuple parameters.
            if isinstance(arg, str):
                arg2value[arg] = value
            else:
                assigned_tuple_params.append(arg)
                value = iter(value)
                for i, subarg in enumerate(arg):
                    try:
                        subvalue = next(value)
                    except StopIteration:
                        raise ValueError('need more than %d %s to unpack' %
                                         (i, 'values' if i > 1 else 'value'))
                    assign(subarg, subvalue)
                try:
                    next(value)
                except StopIteration:
                    pass
                else:
                    raise ValueError('too many values to unpack')

        def is_assigned(arg):
            if isinstance(arg, str):
                return arg in arg2value
            return arg in assigned_tuple_params

        if ismethod(func) and func.im_self is not None:
            # implicit 'self' (or 'cls' for classmethods) argument
            positional = (func.im_self,) + positional
        num_pos = len(positional)
        num_total = num_pos + len(named)
        num_args = len(args)
        num_defaults = len(defaults) if defaults else 0
        for arg, value in zip(args, positional):
            assign(arg, value)
        if varargs:
            if num_pos > num_args:
                assign(varargs, positional[-(num_pos - num_args):])
            else:
                assign(varargs, ())
        elif 0 < num_args < num_pos:
            raise TypeError('%s() takes %s %d %s (%d given)' % (
                f_name, 'at most' if defaults else 'exactly', num_args,
                'arguments' if num_args > 1 else 'argument', num_total))
        elif num_args == 0 and num_total:
            if varkw:
                if num_pos:
                    # XXX: We should use num_pos, but Python also uses num_total:
                    raise TypeError('%s() takes exactly 0 arguments '
                                    '(%d given)' % (f_name, num_total))
            else:
                raise TypeError('%s() takes no arguments (%d given)' %
                                (f_name, num_total))
        for arg in args:
            if isinstance(arg, str) and arg in named:
                if is_assigned(arg):
                    raise TypeError("%s() got multiple values for keyword "
                                    "argument '%s'" % (f_name, arg))
                else:
                    assign(arg, named.pop(arg))
        if defaults:    # fill in any missing values with the defaults
            for arg, value in zip(args[-num_defaults:], defaults):
                if not is_assigned(arg):
                    assign(arg, value)
        if varkw:
            assign(varkw, named)
        elif named:
            unexpected = next(iter(named))
            if isinstance(unexpected, unicode):
                unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
            raise TypeError("%s() got an unexpected keyword argument '%s'" %
                            (f_name, unexpected))
        unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
        if unassigned:
            num_required = num_args - num_defaults
            raise TypeError('%s() takes %s %d %s (%d given)' % (
                f_name, 'at least' if defaults else 'exactly', num_required,
                'arguments' if num_required > 1 else 'argument', num_total))
        return arg2value


# Default cache timeout: one week, in seconds.
WEEK = 7 * 24 * 60 * 60

cache_invalidated = Signal(providing_args=['keys'])


class CacheFunction(object):
    """Decorator factory that memoizes a function in the Django cache and
    wires signal-driven invalidation of the cached entries."""

    # Sentinel distinguishing "not cached" from a cached None/falsy value.
    CACHE_MISS = object()

    def __init__(self, prefix='', timeout=WEEK, fhash=None, fkey=None):
        # prefix: prepended to every generated cache key.
        # timeout: default cache TTL in seconds.
        # fhash/fkey: overridable key-hashing / key-generation strategies.
        self.prefix = prefix
        self.timeout = timeout
        if fhash is None:
            fhash = self.hash_key
        self.fhash = fhash
        if fkey is None:
            fkey = self.generate_key
        self.fkey = fkey

    def __call__(self, *args, **kwargs):
        # Supports both @cachefn and @cachefn(key=..., ...) usage.
        if args:
            if kwargs or not callable(args[0]):
                raise TypeError("invalid usage")
            return self._decorator(args[0])
        else:
            return functools.partial(self._decorator, **kwargs)

    def _decorator(self, func, invalidate=None, key=None, signals=(),
                   models=(), timeout=None):
        # key: cache-key template (defaults to the function name).
        # invalidate: keys (or a callable producing keys) to delete when one
        #             of `signals` fires or one of `models` is saved/deleted.
        if key is None:
            key = func.__name__
        if invalidate is None:
            invalidate = (func.__name__,)
        if timeout is None:
            timeout = self.timeout

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            k = self.fhash(self.fkey(key, func, args, kwargs))
            data = cache.get(k, self.CACHE_MISS)
            if data is self.CACHE_MISS:
                data = func(*args, **kwargs)
                cache.set(k, data, timeout)
            return data

        if invalidate:
            def iwrapper(sender, **kwargs):
                # Signal receiver: compute the keys to drop, delete them, and
                # re-broadcast on wrapper.invalidated for chained listeners.
                try:
                    keys = kwargs['cache_keys']
                except KeyError:
                    if callable(invalidate):
                        keys = invalidate(sender, **kwargs)
                    else:
                        keys = invalidate
                if keys:
                    if isinstance(keys, basestring):
                        keys = (keys,)
                    prefixed = [self.prefix + k for k in keys]
                    cache.delete_many(map(self.fhash, prefixed))
                    wrapper.invalidated.send(wrapper, cache_keys=keys)
            # weak=False: iwrapper is a local closure and would otherwise be
            # garbage-collected, silently disconnecting the receiver.
            for s in signals:
                s.connect(iwrapper, weak=False)
            for m in models:
                post_save.connect(iwrapper, sender=m, weak=False)
                pre_delete.connect(iwrapper, sender=m, weak=False)

        def get_from_cache(fargs):
            # Bulk lookup: each element of fargs is ((args, kwargs)), a kwargs
            # dict, or a bare args sequence. Returns cached values in order,
            # with CACHE_MISS placeholders for absent entries.
            cache_keys = {}
            for ix, farg in enumerate(fargs):
                if isinstance(farg, (list, tuple))\
                        and len(farg) == 2\
                        and isinstance(farg[0], (list, tuple))\
                        and isinstance(farg[1], dict):
                    args, kwargs = farg
                elif isinstance(farg, dict):
                    args = ()
                    kwargs = farg
                else:
                    args = farg
                    kwargs = {}
                k = self.fhash(self.fkey(key, func, args, kwargs))
                cache_keys[k] = (ix, farg)
            results = cache.get_many(cache_keys.keys())
            output = [self.CACHE_MISS] * len(fargs)
            for k, v in cache_keys.items():
                ix = v[0]
                try:
                    output[ix] = results[k]
                except KeyError:
                    pass
            return output

        wrapper.get_from_cache = get_from_cache
        wrapper.invalidated = Signal(providing_args=['cache_keys'])
        return wrapper

    def hash_key(self, key):
        # Hash keys so arbitrary/long strings become memcached-safe.
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        return hashlib.md5(key).hexdigest()

    def generate_key(self, key, func, args, kwargs):
        # key may be a callable, a %-template over positional args, or a
        # %-template over the bound call arguments (by name).
        if callable(key):
            return key(func, *args, **kwargs)
        cargs = getcallargs(func, *args, **kwargs)
        try:
            k = key % args
        except TypeError:
            k = key % cargs
        return self.prefix + k
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
import six
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from sqlalchemy import sql

from neutron._i18n import _, _LE, _LI, _LW
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db_api
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.extensions import router_availability_zone as router_az
from neutron import manager
from neutron.plugins.common import constants as service_constants


LOG = logging.getLogger(__name__)

L3_AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('router_scheduler_driver',
               default='neutron.scheduler.l3_agent_scheduler.'
                       'LeastRoutersScheduler',
               help=_('Driver to use for scheduling '
                      'router to a default L3 agent')),
    cfg.BoolOpt('router_auto_schedule', default=True,
                help=_('Allow auto scheduling of routers to L3 agent.')),
    cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
                help=_('Automatically reschedule routers from offline L3 '
                       'agents to online L3 agents.')),
]

cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)

# default messaging timeout is 60 sec, so 2 here is chosen to not block API
# call for more than 2 minutes
AGENT_NOTIFY_MAX_ATTEMPTS = 2


class RouterL3AgentBinding(model_base.BASEV2):
    """Represents binding between neutron routers and L3 agents."""

    # Composite primary key (router_id, l3_agent_id); cascading deletes keep
    # bindings in sync with router/agent removal.
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    l3_agent = orm.relation(agents_db.Agent)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
                            primary_key=True)


class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                              agentschedulers_db.AgentSchedulerDbMixin):
    """Mixin class to add l3 agent scheduler extension to plugins
    using the l3 agent for routing.
    """

    router_scheduler = None

    def start_periodic_l3_agent_status_check(self):
        """Enable periodic rescheduling of routers away from dead agents,
        if allow_automatic_l3agent_failover is configured."""
        if not cfg.CONF.allow_automatic_l3agent_failover:
            LOG.info(_LI("Skipping period L3 agent status check because "
                         "automatic router rescheduling is disabled."))
            return

        self.add_agent_status_check(
            self.reschedule_routers_from_down_agents)

    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        # Bindings whose agent has not reported within the dead limit;
        # HA routers are excluded — they fail over via VRRP, not rescheduling.
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                # An agent can come back mid-iteration; re-check liveness once
                # per agent and skip its remaining bindings if it recovered.
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                                 'Not rescheduling from agent in \'dvr\' '
                                 'mode.'), {'host': binding.l3_agent.host})
                    continue

                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))

    def _get_agent_mode(self, agent_db):
        # Agent mode (legacy / dvr / dvr_snat) is stored in the agent's
        # reported configuration blob.
        agent_conf = self.get_configuration_dict(agent_db)
        return agent_conf.get(constants.L3_AGENT_MODE,
                              constants.L3_AGENT_MODE_LEGACY)

    def validate_agent_router_combination(self, context, agent, router):
        """Validate if the router can be correctly assigned to the agent.

        :raises: RouterL3AgentMismatch if attempting to assign DVR router
         to legacy agent, or centralized router to compute's L3 agents.
        :raises: InvalidL3Agent if attempting to assign router to an
         unsuitable agent (disabled, type != L3, incompatible configuration)
        :raises: DVRL3CannotAssignToDvrAgent if attempting to assign DVR
         router from one DVR Agent to another.
        """
        if agent['agent_type'] != constants.AGENT_TYPE_L3:
            raise l3agentscheduler.InvalidL3Agent(id=agent['id'])

        is_distributed = router.get('distributed')
        agent_mode = self._get_agent_mode(agent)
        router_type = (
            'distributed' if is_distributed else
            'centralized')
        is_agent_router_types_incompatible = (
            agent_mode == constants.L3_AGENT_MODE_DVR and not is_distributed
            or agent_mode == constants.L3_AGENT_MODE_LEGACY and is_distributed
        )
        if is_agent_router_types_incompatible:
            raise l3agentscheduler.RouterL3AgentMismatch(
                router_type=router_type, router_id=router['id'],
                agent_mode=agent_mode, agent_id=agent['id'])
        if agent_mode == constants.L3_AGENT_MODE_DVR and is_distributed:
            raise l3agentscheduler.DVRL3CannotAssignToDvrAgent(
                router_type=router_type, router_id=router['id'],
                agent_id=agent['id'])

        is_suitable_agent = (
            agentschedulers_db.services_available(agent['admin_state_up']) and
            (self.get_l3_agent_candidates(context, router,
                                          [agent],
                                          ignore_admin_state=True) or
             self.get_snat_candidates(router, [agent]))
        )
        if not is_suitable_agent:
            raise l3agentscheduler.InvalidL3Agent(id=agent['id'])

    def check_l3_agent_router_binding(self, context, router_id, agent_id):
        # True if a binding row already exists for this (router, agent) pair.
        query = context.session.query(RouterL3AgentBinding)
        bindings = query.filter_by(router_id=router_id,
                                   l3_agent_id=agent_id).all()
        return bool(bindings)

    def check_agent_router_scheduling_needed(self, context, agent, router):
        """Check if the router scheduling is needed.

        :raises: RouterHostedByL3Agent if router is already assigned
         to a different agent.
        :returns: True if scheduling is needed, otherwise False
        """
        router_id = router['id']
        agent_id = agent['id']
        query = context.session.query(RouterL3AgentBinding)
        bindings = query.filter_by(router_id=router_id).all()
        if not bindings:
            return True
        for binding in bindings:
            if binding.l3_agent_id == agent_id:
                # router already bound to the agent we need
                return False
        if router.get('ha'):
            # HA routers may be bound to several agents at once
            return True
        # legacy router case: router is already bound to some agent
        raise l3agentscheduler.RouterHostedByL3Agent(
            router_id=router_id,
            agent_id=bindings[0].l3_agent_id)

    def create_router_to_agent_binding(self, context, agent, router):
        """Create router to agent binding."""
        router_id = router['id']
        agent_id = agent['id']
        if self.router_scheduler:
            try:
                if router.get('ha'):
                    plugin = manager.NeutronManager.get_service_plugins().get(
                        service_constants.L3_ROUTER_NAT)
                    self.router_scheduler.create_ha_port_and_bind(
                        plugin, context, router['id'],
                        router['tenant_id'], agent)
                else:
                    self.router_scheduler.bind_router(
                        context, router_id, agent)
            except db_exc.DBError:
                raise l3agentscheduler.RouterSchedulingFailed(
                    router_id=router_id, agent_id=agent_id)

    def add_router_to_l3_agent(self, context, agent_id, router_id):
        """Add a l3 agent to host a router."""
        with context.session.begin(subtransactions=True):
            router = self.get_router(context, router_id)
            agent = self._get_agent(context, agent_id)
            self.validate_agent_router_combination(context, agent, router)
            if self.check_agent_router_scheduling_needed(
                    context, agent, router):
                self.create_router_to_agent_binding(context, agent, router)
            else:
                return

        # Notify outside the transaction, after the binding is committed.
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_added_to_agent(
                context, [router_id], agent.host)

    def remove_router_from_l3_agent(self, context, agent_id, router_id):
        """Remove the router from l3 agent.

        After removal, the router will be non-hosted until there is update
        which leads to re-schedule or be added to another agent manually.
        """
        agent = self._get_agent(context, agent_id)
        self._unbind_router(context, router_id, agent_id)
        router = self.get_router(context, router_id)
        if router.get('ha'):
            plugin = manager.NeutronManager.get_service_plugins().get(
                service_constants.L3_ROUTER_NAT)
            plugin.delete_ha_interfaces_on_host(context, router_id, agent.host)
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_removed_from_agent(
                context, router_id, agent.host)

    def _unbind_router(self, context, router_id, agent_id):
        # Delete the single binding row for (router_id, agent_id).
        with context.session.begin(subtransactions=True):
            query = context.session.query(RouterL3AgentBinding)
            query = query.filter(
                RouterL3AgentBinding.router_id == router_id,
                RouterL3AgentBinding.l3_agent_id == agent_id)
            query.delete()

    def _unschedule_router(self, context, router_id, agents_ids):
        # Remove the router from every listed agent in one transaction.
        with context.session.begin(subtransactions=True):
            for agent_id in agents_ids:
                self._unbind_router(context, router_id, agent_id)

    def reschedule_router(self, context, router_id, candidates=None):
        """Reschedule router to (a) new l3 agent(s)

        Remove the router from the agent(s) currently hosting it and
        schedule it again
        """
        cur_agents = self.list_l3_agents_hosting_router(
            context, router_id)['agents']
        with context.session.begin(subtransactions=True):
            cur_agents_ids = [agent['id'] for agent in cur_agents]
            self._unschedule_router(context, router_id, cur_agents_ids)

            self.schedule_router(context, router_id, candidates=candidates)
            new_agents = self.list_l3_agents_hosting_router(
                context, router_id)['agents']
            if not new_agents:
                raise l3agentscheduler.RouterReschedulingFailed(
                    router_id=router_id)

        self._notify_agents_router_rescheduled(context, router_id,
                                               cur_agents, new_agents)

    def _notify_agents_router_rescheduled(self, context, router_id,
                                          old_agents, new_agents):
        # Tell removed hosts to drop the router and new hosts to pick it up.
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if not l3_notifier:
            return

        old_hosts = [agent['host'] for agent in old_agents]
        new_hosts = [agent['host'] for agent in new_agents]
        for host in set(old_hosts) - set(new_hosts):
            l3_notifier.router_removed_from_agent(
                context, router_id, host)

        for agent in new_agents:
            # Need to make sure agents are notified or unschedule otherwise
            for attempt in range(AGENT_NOTIFY_MAX_ATTEMPTS):
                try:
                    l3_notifier.router_added_to_agent(
                        context, [router_id], agent['host'])
                    break
                except oslo_messaging.MessagingException:
                    LOG.warning(_LW('Failed to notify L3 agent on host '
                                    '%(host)s about added router. Attempt '
                                    '%(attempt)d out of %(max_attempts)d'),
                                {'host': agent['host'], 'attempt': attempt + 1,
                                 'max_attempts': AGENT_NOTIFY_MAX_ATTEMPTS})
            else:
                # for/else: all notification attempts failed — undo the binding
                self._unbind_router(context, router_id, agent['id'])
                raise l3agentscheduler.RouterReschedulingFailed(
                    router_id=router_id)

    def list_routers_on_l3_agent(self, context, agent_id):
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)

        router_ids = [item[0] for item in query]
        if router_ids:
            return {'routers':
                    self.get_routers(context, filters={'id': router_ids})}
        else:
            # Exception will be thrown if the requested agent does not exist.
            self._get_agent(context, agent_id)
            return {'routers': []}

    def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
                                               router_ids):
        # HA-aware sync data when the L3 HA extension is loaded.
        if n_utils.is_extension_supported(self,
                                          constants.L3_HA_MODE_EXT_ALIAS):
            return self.get_ha_sync_data_for_host(context, host,
                                                  router_ids=router_ids,
                                                  active=True)

        return self.get_sync_data(context, router_ids=router_ids, active=True)

    def list_active_sync_routers_on_active_l3_agent(
            self, context, host, router_ids):
        """Return sync data for active routers bound to the agent on `host`,
        optionally restricted to `router_ids`."""
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        if not agentschedulers_db.services_available(agent.admin_state_up):
            return []
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(
            RouterL3AgentBinding.l3_agent_id == agent.id)

        if router_ids:
            query = query.filter(
                RouterL3AgentBinding.router_id.in_(router_ids))

        router_ids = [item[0] for item in query]
        if router_ids:
            return self._get_active_l3_agent_routers_sync_data(context, host,
                                                               agent,
                                                               router_ids)
        return []

    def get_l3_agents_hosting_routers(self, context, router_ids,
                                      admin_state_up=None,
                                      active=None):
        # Agents hosting any of router_ids, optionally filtered by admin
        # state and (in Python, post-query) by heartbeat liveness.
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        query = query.options(orm.contains_eager(
            RouterL3AgentBinding.l3_agent))
        query = query.join(RouterL3AgentBinding.l3_agent)
        query = query.filter(RouterL3AgentBinding.router_id.in_(router_ids))
        if admin_state_up is not None:
            query = (query.filter(agents_db.Agent.admin_state_up ==
                                  admin_state_up))
        l3_agents = [binding.l3_agent for binding in query]
        if active is not None:
            l3_agents = [l3_agent for l3_agent in
                         l3_agents if not
                         agents_db.AgentDbMixin.is_agent_down(
                             l3_agent['heartbeat_timestamp'])]
        return l3_agents

    def _get_l3_bindings_hosting_routers(self, context, router_ids):
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        query = query.options(joinedload('l3_agent')).filter(
            RouterL3AgentBinding.router_id.in_(router_ids))
        return query.all()

    def list_l3_agents_hosting_router(self, context, router_id):
        with context.session.begin(subtransactions=True):
            bindings = self._get_l3_bindings_hosting_routers(
                context, [router_id])

        return {'agents': [self._make_agent_dict(binding.l3_agent) for
                           binding in bindings]}

    def get_l3_agents(self, context, active=None, filters=None):
        # List L3 agents, supporting column filters plus a JSON-substring
        # match on the 'agent_modes' pseudo-filter; final eligibility is
        # decided in Python by is_eligible_agent.
        query = context.session.query(agents_db.Agent)
        query = query.filter(
            agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
        if active is not None:
            query = (query.filter(agents_db.Agent.admin_state_up == active))
        if filters:
            for key, value in six.iteritems(filters):
                column = getattr(agents_db.Agent, key, None)
                if column:
                    if not value:
                        return []
                    query = query.filter(column.in_(value))

            agent_modes = filters.get('agent_modes', [])
            if agent_modes:
                agent_mode_key = '\"agent_mode\": \"'
                configuration_filter = (
                    [agents_db.Agent.configurations.contains('%s%s\"' %
                     (agent_mode_key, agent_mode))
                     for agent_mode in agent_modes])
                query = query.filter(or_(*configuration_filter))

        return [l3_agent
                for l3_agent in query
                if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
                    active, l3_agent)]

    def check_ports_exist_on_l3agent(
            self, context, l3_agent, subnet_ids):
        """
        This function checks for existence of dvr serviceable
        ports on the host, running the input l3agent.
        """
        core_plugin = manager.NeutronManager.get_plugin()
        # NOTE(swami):Before checking for existence of dvr
        # serviceable ports on the host managed by the l3
        # agent, let's verify if at least one subnet has
        # dhcp enabled. If so, then the host will have a
        # dvr serviceable port, which is in fact the DHCP
        # port.
        # This optimization is valid assuming that the L3
        # DVR_SNAT node will be the one hosting the DHCP
        # Agent.
        agent_mode = self._get_agent_mode(l3_agent)

        for subnet_id in subnet_ids:
            subnet_dict = core_plugin.get_subnet(context, subnet_id)
            if (subnet_dict['enable_dhcp'] and (
                agent_mode == constants.L3_AGENT_MODE_DVR_SNAT)):
                return True

        filter = {'fixed_ips': {'subnet_id': subnet_ids}}
        ports = core_plugin.get_ports(context, filters=filter)
        for port in ports:
            if (n_utils.is_dvr_serviced(port['device_owner']) and
                l3_agent['host'] == port['binding:host_id']):
                return True

        return False

    def get_l3_agent_candidates(self, context, sync_router, l3_agents,
                                ignore_admin_state=False):
        """Get the valid l3 agents for the router from a list of l3_agents."""
        candidates = []
        is_router_distributed = sync_router.get('distributed', False)
        if is_router_distributed:
            subnet_ids = self.get_subnet_ids_on_router(
                context, sync_router['id'])
        for l3_agent in l3_agents:
            if not ignore_admin_state and not l3_agent.admin_state_up:
                # ignore_admin_state True comes from manual scheduling
                # where admin_state_up judgement is already done.
                continue

            agent_conf = self.get_configuration_dict(l3_agent)
            router_id = agent_conf.get('router_id', None)
            handle_internal_only_routers = agent_conf.get(
                'handle_internal_only_routers', True)
            gateway_external_network_id = agent_conf.get(
                'gateway_external_network_id', None)
            agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
                                        constants.L3_AGENT_MODE_LEGACY)

            if router_id and router_id != sync_router['id']:
                # agent is pinned to a single specific router
                continue

            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
                'network_id')
            if ((not ex_net_id and not handle_internal_only_routers) or
                (ex_net_id and gateway_external_network_id and
                 ex_net_id != gateway_external_network_id)):
                continue

            if agent_mode in (
                constants.L3_AGENT_MODE_LEGACY,
                constants.L3_AGENT_MODE_DVR_SNAT) and (
                    not is_router_distributed):
                candidates.append(l3_agent)
            elif (is_router_distributed and subnet_ids and
                    agent_mode.startswith(constants.L3_AGENT_MODE_DVR) and (
                        self.check_ports_exist_on_l3agent(
                            context, l3_agent, subnet_ids))):
                candidates.append(l3_agent)
        return candidates

    def auto_schedule_routers(self, context, host, router_ids):
        # Delegate to the configured scheduler driver, if any.
        if self.router_scheduler:
            return self.router_scheduler.auto_schedule_routers(
                self, context, host, router_ids)

    def schedule_router(self, context, router, candidates=None):
        if self.router_scheduler:
            return self.router_scheduler.schedule(
                self, context, router, candidates=candidates)

    def schedule_routers(self, context, routers):
        """Schedule the routers to l3 agents."""
        for router in routers:
            self.schedule_router(context, router, candidates=None)

    def get_l3_agent_with_min_routers(self, context, agent_ids):
        """Return l3 agent with the least number of routers."""
        if not agent_ids:
            return None
        query = context.session.query(
            agents_db.Agent,
            func.count(
                RouterL3AgentBinding.router_id
            ).label('count')).outerjoin(RouterL3AgentBinding).group_by(
                agents_db.Agent.id,
                RouterL3AgentBinding.l3_agent_id).order_by('count')
        res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
        return res[0]


class AZL3AgentSchedulerDbMixin(L3AgentSchedulerDbMixin,
                                router_az.RouterAvailabilityZonePluginBase):
    """Mixin class to add availability_zone supported l3 agent scheduler."""

    def get_router_availability_zones(self, router_id):
        # Distinct availability zones of all agents hosting this router.
        session = db_api.get_session()
        with session.begin():
            query = session.query(agents_db.Agent.availability_zone)
            query = query.join(RouterL3AgentBinding)
            query = query.filter(
                RouterL3AgentBinding.router_id == router_id)
            query = query.group_by(agents_db.Agent.availability_zone)
            return [item[0] for item in query]
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import brbn import email.utils as _email import json as _json import logging as _logging import os as _os import quopri as _quopri import re as _re import sqlite3 as _sqlite import time as _time import textwrap as _textwrap from datetime import datetime as _datetime from pencil import * _log = _logging.getLogger("haystack") _strings = StringCatalog(__file__) _topics = _json.loads(_strings["topics"]) class Haystack(brbn.Application): def __init__(self, home_dir): super().__init__(home_dir) path = _os.path.join(self.home, "data", "data.sqlite") self.database = Database(path) self.root_resource = _IndexPage(self) self.search_page = _SearchPage(self) self.thread_page = _ThreadPage(self) self.message_page = _MessagePage(self) def receive_request(self, request): request.database_connection = self.database.connect() try: return super().receive_request(request) finally: request.database_connection.close() class _IndexPage(brbn.Page): def __init__(self, app): super().__init__(app, "/", _strings["index_page_body"]) def get_title(self, request): return "Haystack" @brbn.xml def render_topics(self, request): items = list() for topic in _topics: href = self.app.search_page.get_href(request, query=topic) text = 
xml_escape(topic) items.append(html_a(text, href)) return html_ul(items, class_="four-column") class _SearchPage(brbn.Page): def __init__(self, app): super().__init__(app, "/search", _strings["search_page_body"]) def get_title(self, request): query = request.get("query") return "Search '{}'".format(query) def render_query(self, request): return request.get("query") @brbn.xml def render_threads(self, request): query = request.get("query") sql = ("select * from messages where id in " "(select distinct thread_id from messages_fts " " where messages_fts match ? limit 1000) " "order by date desc") escaped_query = query.replace("\"", "\"\"") records = self.app.database.query(request, sql, escaped_query) thread = Thread() rows = list() for record in records: thread.load_from_record(record) thread_link = thread.get_link(request) row = [ thread_link, xml_escape(thread.from_address), thread.authored_words, xml_escape(str(_email.formatdate(thread.date)[:-6])), ] rows.append(row) return html_table(rows, False, class_="messages four") class _ThreadPage(brbn.Page): def __init__(self, app): super().__init__(app, "/thread", _strings["thread_page_body"]) def get_title(self, request): return "Thread '{}'".format(request.thread.subject) def process(self, request): id = request.get("id") request.thread = self.app.database.get(request, Message, id) sql = ("select * from messages " "where thread_id = ? 
" "order by thread_position, date asc " "limit 1000") records = self.app.database.query(request, sql, request.thread.id) request.messages = list() request.messages_by_id = dict() for record in records: message = Message() message.load_from_record(record) request.messages.append(message) request.messages_by_id[message.id] = message def render_title(self, request): return request.thread.subject @brbn.xml def render_index(self, request): rows = list() for i, message in enumerate(request.messages): date = _time.strftime("%d %b %Y", _time.gmtime(message.date)) number = i + 1 title = self.get_message_title(request, message, number) row = [ html_a(xml_escape(title), "#{}".format(number)), xml_escape(date), message.authored_words, ] rows.append(row) return html_table(rows, False, class_="messages") @brbn.xml def render_messages(self, request): out = list() for i, message in enumerate(request.messages): number = i + 1 title = self.get_message_title(request, message, number) out.append(html_elem("h2", title, id=str(number))) out.append(html_elem("pre", xml_escape(message.content))) return "\n".join(out) def get_message_title(self, request, message, number): title = "{}. 
{}".format(number, message.from_name) if message.in_reply_to_id is not None: rmessage = request.messages_by_id.get(message.in_reply_to_id) if rmessage is not None: rperson = rmessage.from_name title = "{} replying to {}".format(title, rperson) return title class _MessagePage(brbn.Page): def __init__(self, app): super().__init__(app, "/message", _strings["message_page_body"]) def get_title(self, request): return "Message '{}'".format(request.message.subject) def process(self, request): id = request.get("id") request.message = self.app.database.get(request, Message, id) def render_title(self, request): return request.message.subject @brbn.xml def render_thread_link(self, request): thread = None thread_id = request.message.thread_id thread_link = xml_escape(thread_id) if thread_id is not None: try: thread = self.app.database.get(request, Message, thread_id) except ObjectNotFound: pass if thread is not None: thread_link = thread.get_link(request) return thread_link @brbn.xml def render_in_reply_to_link(self, request): rmessage = None rmessage_id = request.message.in_reply_to_id rmessage_link = nvl(xml_escape(rmessage_id), "[None]") if rmessage_id is not None: try: rmessage = self.database.get(request, Message, rmessage_id) except ObjectNotFound: pass if rmessage is not None: rmessage_link = rmessage.get_link(request) return rmessage_link @brbn.xml def render_headers(self, request): message = request.message from_field = "{} <{}>".format(message.from_name, message.from_address) items = ( ("ID", xml_escape(message.id)), ("List", xml_escape(message.list_id)), ("From", xml_escape(from_field)), ("Date", xml_escape(_email.formatdate(message.date))), ("Subject", xml_escape(message.subject)), ) return html_table(items, False, True, class_="headers") @brbn.xml def render_content(self, request): message = request.message content = "" if message.content is not None: lines = list() for line in message.content.splitlines(): line = line.strip() if line.startswith(">"): m = 
_re.match("^[> ]+", line) prefix = "\n{}".format(m.group(0)) line = prefix.join(_textwrap.wrap(line, 80)) line = html_span(xml_escape(line), class_="quoted") else: line = "\n".join(_textwrap.wrap(line, 80)) line = xml_escape(line) lines.append(line) content = "\n".join(lines) return content class Database: def __init__(self, path): self.path = path _log.info("Using database at {}".format(self.path)) def connect(self): # XXX thread local connections return _sqlite.connect(self.path) def create_schema(self): columns = list() for name in Message.fields: field_type = Message.field_types.get(name, str) column_type = "text" if field_type == int: column_type = "integer" column = "{} {}".format(name, column_type) columns.append(column) statements = list() columns = ", ".join(columns) ddl = "create table messages ({});".format(columns) statements.append(ddl) ddl = "create index messages_id_idx on messages (id);" statements.append(ddl) columns = ", ".join(Message.fts_fields) ddl = ("create virtual table messages_fts using fts4 " "({}, notindexed=id, notindexed=thread_id, tokenize=porter)" "".format(columns)) statements.append(ddl) conn = self.connect() cursor = conn.cursor() try: for statement in statements: cursor.execute(statement) finally: conn.close() def optimize(self): conn = self.connect() cursor = conn.cursor() ddl = "insert into messages_fts (messages_fts) values ('optimize')" try: cursor.execute(ddl) finally: conn.close() def cursor(self, request): return request.database_connection.cursor() def query(self, request, sql, *args): cursor = self.cursor(request) try: cursor.execute(sql, args) return cursor.fetchall() finally: cursor.close() def get(self, request, cls, id): _log.debug("Getting {} with ID {}".format(cls.__name__, id)) assert issubclass(cls, _DatabaseObject), cls assert id is not None sql = "select * from {} where id = ?".format(cls.table) cursor = self.cursor(request) try: cursor.execute(sql, [id]) record = cursor.fetchone() finally: cursor.close() if 
record is None: raise ObjectNotFound() obj = cls() obj.load_from_record(record) return obj class ObjectNotFound(Exception): pass class _DatabaseObject: table = None def __init__(self, id, name, parent=None): self.id = id self._name = name self.parent = parent def __repr__(self): return format_repr(self, self.id) @property def name(self): return self._name def get_link_href(self, request): raise NotImplementedError() def get_link_text(self, request): return self.name def get_link(self, request, text=None): href = self.get_link_href(request) if text is None: text = self.get_link_text(request) return "<a href=\"{}\">{}</a>".format(href, xml_escape(text)) class Message(_DatabaseObject): table = "messages" fields = [ "id", "in_reply_to_id", "from_name", "from_address", "list_id", "date", "subject", "content_type", "content", "authored_content", "authored_words", "thread_id", "thread_position", ] field_types = { "date": int, "authored_words": int, "thread_position": int, } field_mbox_keys = { "id": "Message-ID", "in_reply_to_id": "In-Reply-To", "list_id": "List-Id", "subject": "Subject", "content_type": "Content-Type", } fts_fields = [ "id", "thread_id", "subject", "authored_content", ] def __init__(self): super().__init__(None, None) for name in self.fields: setattr(self, name, None) @property def name(self): return self.subject def load_from_mbox_message(self, mbox_message): for name in self.field_mbox_keys: mbox_key = self.field_mbox_keys[name] value = mbox_message.get(mbox_key) field_type = self.field_types.get(name, str) if value is not None: value = field_type(value) setattr(self, name, value) name, address = _email.parseaddr(mbox_message["From"]) self.from_name = name self.from_address = address tup = _email.parsedate(mbox_message["Date"]) self.date = _time.mktime(tup) content = _get_mbox_content(mbox_message) assert content is not None self.content = content self.authored_content = _get_authored_content(self.content) self.authored_words = 
len(self.authored_content.split()) def load_from_record(self, record): for i, name in enumerate(self.fields): value = record[i] field_type = self.field_types.get(name, str) if value is not None: value = field_type(value) setattr(self, name, value) def save(self, cursor): columns = ", ".join(self.fields) values = ", ".join("?" * len(self.fields)) args = [getattr(self, x) for x in self.fields] dml = "insert into messages ({}) values ({})".format(columns, values) cursor.execute(dml, args) columns = ", ".join(self.fts_fields) values = ", ".join("?" * len(self.fts_fields)) args = [getattr(self, x) for x in self.fts_fields] dml = "insert into messages_fts ({}) values ({})".format(columns, values) cursor.execute(dml, args) def get_link_href(self, request): return request.app.message_page.get_href(request, id=self.id) def get_link_title(self, request): return self.subject class Thread(Message): def get_link_href(self, request): return request.app.thread_page.get_href(request, id=self.id) def _get_mbox_content(mbox_message): content_type = None content_encoding = None content = None if mbox_message.is_multipart(): for part in mbox_message.walk(): if part.get_content_type() == "text/plain": content_type = "text/plain" content_encoding = part["Content-Transfer-Encoding"] content = part.get_payload() if content_type is None: content_type = mbox_message.get_content_type() content_encoding = mbox_message["Content-Transfer-Encoding"] content = mbox_message.get_payload() assert content_type is not None assert content is not None if content_encoding == "quoted-printable": content = _quopri.decodestring(content) content = content.decode("utf-8", errors="replace") if content_type == "text/html": content = strip_tags(content) return content def _get_authored_content(content): lines = list() for line in content.splitlines(): line = line.strip() if line.startswith(">"): continue lines.append(line) return "\n".join(lines)
# # Copyright (c) 2008-10, Mahadevan R All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of this software, nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Pass managers and passes. This module provides the LLVM pass managers and the passes themselves. All transformation passes listed at http://www.llvm.org/docs/Passes.html are available. """ import llvm # top-level, for common stuff import llvm.ee as ee # target data import llvm.core as core # module, function etc. 
import llvm._core as _core    # C wrappers

# Pass identifiers. Each constant maps (via _pass_creator below) to the
# LLVM-C function that adds the corresponding pass to a pass manager.
# Commented-out entries are passes that were removed from LLVM.
PASS_AAEVAL = 1
PASS_AGGRESSIVE_DCE = 3
PASS_ALIAS_ANALYSIS_COUNTER = 4
PASS_ALWAYS_INLINER = 5
PASS_ARGUMENT_PROMOTION = 6
PASS_BASIC_ALIAS_ANALYSIS = 7
PASS_BLOCK_PLACEMENT = 8
PASS_BREAK_CRITICAL_EDGES = 9
PASS_CFG_SIMPLIFICATION = 10
PASS_CODE_GEN_PREPARE = 11
PASS_CONSTANT_MERGE = 12
PASS_CONSTANT_PROPAGATION = 13
PASS_DBG_INFO_PRINTER = 14
PASS_DEAD_ARG_ELIMINATION = 15
PASS_DEAD_CODE_ELIMINATION = 16
PASS_DEAD_INST_ELIMINATION = 17
PASS_DEAD_STORE_ELIMINATION = 18
PASS_DEAD_TYPE_ELIMINATION = 19
PASS_DEMOTE_REGISTER_TO_MEMORY = 20
PASS_DOM_ONLY_PRINTER = 21
PASS_DOM_ONLY_VIEWER = 22
PASS_DOM_PRINTER = 23
PASS_DOM_VIEWER = 24
PASS_EDGE_PROFILER = 25
PASS_FUNCTION_ATTRS = 26
PASS_FUNCTION_INLINING = 27
#PASS_GEP_SPLITTER = 28
PASS_GLOBAL_DCE = 29
PASS_GLOBAL_OPTIMIZER = 30
PASS_GLOBALS_MOD_REF = 31
PASS_GVN = 32
PASS_IND_VAR_SIMPLIFY = 33
PASS_INST_COUNT = 34
PASS_INSTRUCTION_COMBINING = 35
PASS_INSTRUCTION_NAMER = 36
PASS_IP_CONSTANT_PROPAGATION = 37
PASS_IPSCCP = 38
PASS_JUMP_THREADING = 39
PASS_LAZY_VALUE_INFO = 40
PASS_LCSSA = 41
PASS_LICM = 42
#PASS_LIVE_VALUES = 43
PASS_LOOP_DELETION = 44
PASS_LOOP_DEPENDENCE_ANALYSIS = 45
PASS_LOOP_EXTRACTOR = 46
#PASS_LOOP_INDEX_SPLIT = 47
PASS_LOOP_ROTATE = 48
PASS_LOOP_SIMPLIFY = 49
PASS_LOOP_STRENGTH_REDUCE = 50
PASS_LOOP_UNROLL = 51
PASS_LOOP_UNSWITCH = 52
PASS_LOWER_INVOKE = 53
PASS_LOWER_SET_JMP = 54
PASS_LOWER_SWITCH = 55
PASS_MEM_CPY_OPT = 56
PASS_MERGE_FUNCTIONS = 57
PASS_NO_AA = 58
PASS_NO_PROFILE_INFO = 59
PASS_OPTIMAL_EDGE_PROFILER = 60
PASS_PARTIAL_INLINING = 61
#PASS_PARTIAL_SPECIALIZATION = 62
#PASS_POST_DOM_ONLY_PRINTER = 63
#PASS_POST_DOM_ONLY_VIEWER = 64
#PASS_POST_DOM_PRINTER = 65
#PASS_POST_DOM_VIEWER = 66
PASS_PROFILE_ESTIMATOR = 67
PASS_PROFILE_LOADER = 68
PASS_PROFILE_VERIFIER = 69
PASS_PROMOTE_MEMORY_TO_REGISTER = 70
PASS_PRUNE_EH = 71
PASS_REASSOCIATE = 72
PASS_SCALAR_EVOLUTION_ALIAS_ANALYSIS = 73
PASS_SCALAR_REPL_AGGREGATES = 74
PASS_SCCP = 76
#PASS_SIMPLIFY_HALF_POWR_LIB_CALLS = 77
PASS_SIMPLIFY_LIB_CALLS = 78
PASS_SINGLE_LOOP_EXTRACTOR = 79
PASS_STRIP_DEAD_PROTOTYPES = 82
PASS_STRIP_NON_DEBUG_SYMBOLS = 83
PASS_STRIP_SYMBOLS = 84
PASS_STRUCT_RET_PROMOTION = 85
PASS_TAIL_CALL_ELIMINATION = 86
PASS_TAIL_DUPLICATION = 87
PASS_UNIFY_FUNCTION_EXIT_NODES = 88
PASS_INTERNALIZE = 89


#===----------------------------------------------------------------------===
# Helper functions
#===----------------------------------------------------------------------===

# Dispatch table: pass id -> LLVM-C "add pass" function.
_pass_creator = {
    PASS_AAEVAL : _core.LLVMAddAAEvalPass,
    PASS_AGGRESSIVE_DCE : _core.LLVMAddAggressiveDCEPass,
    PASS_ALIAS_ANALYSIS_COUNTER : _core.LLVMAddAliasAnalysisCounterPass,
    PASS_ALWAYS_INLINER : _core.LLVMAddAlwaysInlinerPass,
    PASS_ARGUMENT_PROMOTION : _core.LLVMAddArgumentPromotionPass,
    PASS_BASIC_ALIAS_ANALYSIS : _core.LLVMAddBasicAliasAnalysisPass,
    PASS_BLOCK_PLACEMENT : _core.LLVMAddBlockPlacementPass,
    PASS_BREAK_CRITICAL_EDGES : _core.LLVMAddBreakCriticalEdgesPass,
    PASS_CFG_SIMPLIFICATION : _core.LLVMAddCFGSimplificationPass,
    PASS_CODE_GEN_PREPARE : _core.LLVMAddCodeGenPreparePass,
    PASS_CONSTANT_MERGE : _core.LLVMAddConstantMergePass,
    PASS_CONSTANT_PROPAGATION : _core.LLVMAddConstantPropagationPass,
    PASS_DBG_INFO_PRINTER : _core.LLVMAddDbgInfoPrinterPass,
    PASS_DEAD_ARG_ELIMINATION : _core.LLVMAddDeadArgEliminationPass,
    PASS_DEAD_CODE_ELIMINATION : _core.LLVMAddDeadCodeEliminationPass,
    PASS_DEAD_INST_ELIMINATION : _core.LLVMAddDeadInstEliminationPass,
    PASS_DEAD_STORE_ELIMINATION : _core.LLVMAddDeadStoreEliminationPass,
    PASS_DEAD_TYPE_ELIMINATION : _core.LLVMAddDeadTypeEliminationPass,
    PASS_DEMOTE_REGISTER_TO_MEMORY : _core.LLVMAddDemoteRegisterToMemoryPass,
    PASS_DOM_ONLY_PRINTER : _core.LLVMAddDomOnlyPrinterPass,
    PASS_DOM_ONLY_VIEWER : _core.LLVMAddDomOnlyViewerPass,
    PASS_DOM_PRINTER : _core.LLVMAddDomPrinterPass,
    PASS_DOM_VIEWER : _core.LLVMAddDomViewerPass,
    PASS_EDGE_PROFILER : _core.LLVMAddEdgeProfilerPass,
    PASS_FUNCTION_ATTRS : _core.LLVMAddFunctionAttrsPass,
    PASS_FUNCTION_INLINING : _core.LLVMAddFunctionInliningPass,
    #PASS_GEP_SPLITTER : _core.LLVMAddGEPSplitterPass,
    PASS_GLOBAL_DCE : _core.LLVMAddGlobalDCEPass,
    PASS_GLOBAL_OPTIMIZER : _core.LLVMAddGlobalOptimizerPass,
    PASS_GLOBALS_MOD_REF : _core.LLVMAddGlobalsModRefPass,
    PASS_GVN : _core.LLVMAddGVNPass,
    PASS_IND_VAR_SIMPLIFY : _core.LLVMAddIndVarSimplifyPass,
    PASS_INST_COUNT : _core.LLVMAddInstCountPass,
    PASS_INSTRUCTION_COMBINING : _core.LLVMAddInstructionCombiningPass,
    PASS_INSTRUCTION_NAMER : _core.LLVMAddInstructionNamerPass,
    PASS_IP_CONSTANT_PROPAGATION : _core.LLVMAddIPConstantPropagationPass,
    PASS_IPSCCP : _core.LLVMAddIPSCCPPass,
    PASS_JUMP_THREADING : _core.LLVMAddJumpThreadingPass,
    PASS_LAZY_VALUE_INFO : _core.LLVMAddLazyValueInfoPass,
    PASS_LCSSA : _core.LLVMAddLCSSAPass,
    PASS_LICM : _core.LLVMAddLICMPass,
    #PASS_LIVE_VALUES : _core.LLVMAddLiveValuesPass,
    PASS_LOOP_DELETION : _core.LLVMAddLoopDeletionPass,
    PASS_LOOP_DEPENDENCE_ANALYSIS : _core.LLVMAddLoopDependenceAnalysisPass,
    PASS_LOOP_EXTRACTOR : _core.LLVMAddLoopExtractorPass,
    #PASS_LOOP_INDEX_SPLIT : _core.LLVMAddLoopIndexSplitPass,
    PASS_LOOP_ROTATE : _core.LLVMAddLoopRotatePass,
    PASS_LOOP_SIMPLIFY : _core.LLVMAddLoopSimplifyPass,
    PASS_LOOP_STRENGTH_REDUCE : _core.LLVMAddLoopStrengthReducePass,
    PASS_LOOP_UNROLL : _core.LLVMAddLoopUnrollPass,
    PASS_LOOP_UNSWITCH : _core.LLVMAddLoopUnswitchPass,
    PASS_LOWER_INVOKE : _core.LLVMAddLowerInvokePass,
    PASS_LOWER_SET_JMP : _core.LLVMAddLowerSetJmpPass,
    PASS_LOWER_SWITCH : _core.LLVMAddLowerSwitchPass,
    PASS_MEM_CPY_OPT : _core.LLVMAddMemCpyOptPass,
    PASS_MERGE_FUNCTIONS : _core.LLVMAddMergeFunctionsPass,
    PASS_NO_AA : _core.LLVMAddNoAAPass,
    PASS_NO_PROFILE_INFO : _core.LLVMAddNoProfileInfoPass,
    PASS_OPTIMAL_EDGE_PROFILER : _core.LLVMAddOptimalEdgeProfilerPass,
    PASS_PARTIAL_INLINING : _core.LLVMAddPartialInliningPass,
    #PASS_PARTIAL_SPECIALIZATION : _core.LLVMAddPartialSpecializationPass,
    #PASS_POST_DOM_ONLY_PRINTER : _core.LLVMAddPostDomOnlyPrinterPass,
    #PASS_POST_DOM_ONLY_VIEWER : _core.LLVMAddPostDomOnlyViewerPass,
    #PASS_POST_DOM_PRINTER : _core.LLVMAddPostDomPrinterPass,
    #PASS_POST_DOM_VIEWER : _core.LLVMAddPostDomViewerPass,
    PASS_PROFILE_ESTIMATOR : _core.LLVMAddProfileEstimatorPass,
    PASS_PROFILE_LOADER : _core.LLVMAddProfileLoaderPass,
    PASS_PROFILE_VERIFIER : _core.LLVMAddProfileVerifierPass,
    PASS_PROMOTE_MEMORY_TO_REGISTER : _core.LLVMAddPromoteMemoryToRegisterPass,
    PASS_PRUNE_EH : _core.LLVMAddPruneEHPass,
    PASS_REASSOCIATE : _core.LLVMAddReassociatePass,
    PASS_SCALAR_EVOLUTION_ALIAS_ANALYSIS : _core.LLVMAddScalarEvolutionAliasAnalysisPass,
    PASS_SCALAR_REPL_AGGREGATES : _core.LLVMAddScalarReplAggregatesPass,
    PASS_SCCP : _core.LLVMAddSCCPPass,
    #PASS_SIMPLIFY_HALF_POWR_LIB_CALLS : _core.LLVMAddSimplifyHalfPowrLibCallsPass,
    PASS_SIMPLIFY_LIB_CALLS : _core.LLVMAddSimplifyLibCallsPass,
    PASS_SINGLE_LOOP_EXTRACTOR : _core.LLVMAddSingleLoopExtractorPass,
    PASS_STRIP_DEAD_PROTOTYPES : _core.LLVMAddStripDeadPrototypesPass,
    PASS_STRIP_NON_DEBUG_SYMBOLS : _core.LLVMAddStripNonDebugSymbolsPass,
    PASS_STRIP_SYMBOLS : _core.LLVMAddStripSymbolsPass,
    PASS_STRUCT_RET_PROMOTION : _core.LLVMAddStructRetPromotionPass,
    PASS_TAIL_CALL_ELIMINATION : _core.LLVMAddTailCallEliminationPass,
    PASS_TAIL_DUPLICATION : _core.LLVMAddTailDuplicationPass,
    PASS_UNIFY_FUNCTION_EXIT_NODES : _core.LLVMAddUnifyFunctionExitNodesPass,
    PASS_INTERNALIZE : _core.LLVMAddInternalize2Pass,
}


#===----------------------------------------------------------------------===
# Pass manager
#===----------------------------------------------------------------------===

class PassManager(object):
    """A module-level pass manager.

    Wraps an LLVMPassManagerRef; add passes (by PASS_* id) or target data,
    then run() it over a module.
    """

    @staticmethod
    def new():
        """Create a fresh, empty pass manager."""
        return PassManager(_core.LLVMCreatePassManager())

    def __init__(self, ptr):
        # ptr is the underlying LLVMPassManagerRef (owned by this object).
        self.ptr = ptr

    def __del__(self):
        # Release the underlying C object when the wrapper is collected.
        _core.LLVMDisposePassManager(self.ptr)

    def add(self, tgt_data_or_pass_id):
        """Add either a TargetData object or a PASS_* id to this manager.

        Raises llvm.LLVMException for an unrecognized pass id.
        """
        if isinstance(tgt_data_or_pass_id, ee.TargetData):
            self._add_target_data(tgt_data_or_pass_id)
        elif tgt_data_or_pass_id in _pass_creator:
            self._add_pass(tgt_data_or_pass_id)
        else:
            # NOTE: call-form raise (works on both Python 2 and 3); the
            # old "raise E, msg" statement form was Python-2-only syntax.
            raise llvm.LLVMException(
                "invalid pass_id (%s)" % str(tgt_data_or_pass_id))

    def _add_target_data(self, tgt):
        _core.LLVMAddTargetData(tgt.ptr, self.ptr)

    def _add_pass(self, pass_id):
        # Look up and invoke the LLVM-C "add pass" function for this id.
        cfn = _pass_creator[pass_id]
        cfn(self.ptr)

    def run(self, module):
        """Run all added passes over `module`; returns LLVM's modified flag."""
        core.check_is_module(module)
        return _core.LLVMRunPassManager(self.ptr, module.ptr)


class FunctionPassManager(PassManager):
    """A pass manager that runs over one function at a time."""

    @staticmethod
    def new(module):
        """Create a function pass manager attached to `module`."""
        core.check_is_module(module)
        ptr = _core.LLVMCreateFunctionPassManagerForModule(module.ptr)
        return FunctionPassManager(ptr)

    def __init__(self, ptr):
        PassManager.__init__(self, ptr)

    def initialize(self):
        """Run pass initializers; call before the first run()."""
        _core.LLVMInitializeFunctionPassManager(self.ptr)

    def run(self, fn):
        """Run all added passes over function `fn`."""
        core.check_is_function(fn)
        return _core.LLVMRunFunctionPassManager(self.ptr, fn.ptr)

    def finalize(self):
        """Run pass finalizers; call after the last run()."""
        _core.LLVMFinalizeFunctionPassManager(self.ptr)
#!/usr/bin/env python # -*- coding: UTF-8 -*- import os import re #import webapp2 #import jinja2 # used for templates import logging import parsers #from google.appengine.ext import ndb #from google.appengine.ext import blobstore #from google.appengine.api import users #from google.appengine.ext.webapp import blobstore_handlers logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug . log = logging.getLogger(__name__) schemasInitialized = False extensionsLoaded = False extensionLoadErrors = "" SCHEMA_VERSION=1.999999 sitename = "schema.org" sitemode = "mainsite" # whitespaced list for CSS tags, # e.g. "mainsite testsite", "extensionsite" when off expected domains DYNALOAD = True # permits read_schemas to be re-invoked live. #JINJA_ENVIRONMENT = jinja2.Environment( # loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')), # extensions=['jinja2.ext.autoescape'], autoescape=True) debugging = False # Core API: we have a single schema graph built from triples and units. NodeIDMap = {} ext_re = re.compile(r'([^\w,])+') all_layers = {} all_terms = {} # Utility declaration of W3C Initial Context # From http://www.w3.org/2011/rdfa-context/rdfa-1.1 # and http://www.w3.org/2013/json-ld-context/rdfa11 # Enables all these prefixes without explicit declaration when # using schema.org's JSON-LD context file. 
# namespaces = """ "cat": "http://www.w3.org/ns/dcat#", "qb": "http://purl.org/linked-data/cube#", "org": "http://www.w3.org/ns/org#", "grddl": "http://www.w3.org/2003/g/data-view#", "ma": "http://www.w3.org/ns/ma-ont#", "owl": "http://www.w3.org/2002/07/owl#", "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "rdfa": "http://www.w3.org/ns/rdfa#", "rdfs": "http://www.w3.org/2000/01/rdf-schema#", "rif": "http://www.w3.org/2007/rif#", "rr": "http://www.w3.org/ns/r2rml#", "skos": "http://www.w3.org/2004/02/skos/core#", "skosxl": "http://www.w3.org/2008/05/skos-xl#", "wdr": "http://www.w3.org/2007/05/powder#", "void": "http://rdfs.org/ns/void#", "wdrs": "http://www.w3.org/2007/05/powder-s#", "xhv": "http://www.w3.org/1999/xhtml/vocab#", "xml": "http://www.w3.org/XML/1998/namespace", "xsd": "http://www.w3.org/2001/XMLSchema#", "prov": "http://www.w3.org/ns/prov#", "sd": "http://www.w3.org/ns/sparql-service-description#", "org": "http://www.w3.org/ns/org#", "gldp": "http://www.w3.org/ns/people#", "cnt": "http://www.w3.org/2008/content#", "dcat": "http://www.w3.org/ns/dcat#", "earl": "http://www.w3.org/ns/earl#", "ht": "http://www.w3.org/2006/http#", "ptr": "http://www.w3.org/2009/pointers#", "cc": "http://creativecommons.org/ns#", "ctag": "http://commontag.org/ns#", "dc": "http://purl.org/dc/terms/", "dcterms": "http://purl.org/dc/terms/", "foaf": "http://xmlns.com/foaf/0.1/", "gr": "http://purl.org/goodrelations/v1#", "ical": "http://www.w3.org/2002/12/cal/icaltzd#", "og": "http://ogp.me/ns#", "rev": "http://purl.org/stuff/rev#", "sioc": "http://rdfs.org/sioc/ns#", "v": "http://rdf.data-vocabulary.org/#", "vcard": "http://www.w3.org/2006/vcard/ns#", "schema": "http://schema.org/", "describedby": "http://www.w3.org/2007/05/powder-s#describedby", "license": "http://www.w3.org/1999/xhtml/vocab#license", "role": "http://www.w3.org/1999/xhtml/vocab#role", """ class DataCacheTool(): def __init__ (self): self._DataCache = {} self.setCurrent("core") def 
getCache(self,cache=None): if cache == None: cache = self._CurrentDataCache if cache in self._DataCache.keys(): return self._DataCache[cache] else: log.debug("DataCache Invalid cache name '%s'" % cache) return None def get(self,key,cache=None): return self.getCache(cache).get(key) def put(self,key,val,cache=None): self.getCache(cache)[key] = val def setCurrent(self,current): self._CurrentDataCache = current if(self._DataCache.get(self._CurrentDataCache) == None): self._DataCache[self._CurrentDataCache] = {} log.debug("Setting _CurrentDataCache: %s",self._CurrentDataCache) def getCurrent(self): return self._CurrentDataCache def keys(self): return self._DataCache.keys() DataCache = DataCacheTool() class Unit (): """ Unit represents a node in our schema graph. IDs are local, e.g. "Person" or use simple prefixes, e.g. rdfs:Class. """ def __init__ (self, id): self.id = id NodeIDMap[id] = self self.arcsIn = [] self.arcsOut = [] self.examples = [] self.usage = 0 self.home = None self.subtypes = None def __str__(self): return self.id def GetImmediateSubtypes(self, layers='core'): return GetImmediateSubtypes(self, layers=layers) def setUsage(self, count): self.usage = count @staticmethod def GetUnit (id, createp=False): """Return a Unit representing a node in the schema graph. Argument: createp -- should we create node if we don't find it? 
(default: False) """ if (id in NodeIDMap): return NodeIDMap[id] if (createp != False): return Unit(id) def typeOf(self, type, layers='core'): """Boolean, true if the unit has an rdf:type matching this type.""" types = GetTargets( Unit.GetUnit("typeOf"), self, layers ) return (type in types) # Function needs rewriting to use GetTargets(arc,src,layers) and recurse def subClassOf(self, type, layers='core'): """Boolean, true if the unit has an rdfs:subClassOf matching this type, direct or implied (in specified layer(s)).""" if (self.id == type.id): return True parents = GetTargets( Unit.GetUnit("rdfs:subClassOf"), self, layers ) if type in parents: return True else: for p in parents: if p.subClassOf(type, layers): return True return False def directInstanceOf(self, type, layers='core'): """Boolean, true if the unit has a direct typeOf (aka rdf:type) property matching this type, direct or implied (in specified layer(s)).""" mytypes = GetTargets( Unit.GetUnit("typeOf"), self, layers ) if type in mytypes: return True return False # TODO: consider an API for implied types too? def isClass(self, layers='core'): """Does this unit represent a class/type?""" return self.typeOf(Unit.GetUnit("rdfs:Class"), layers=layers) def isAttribute(self, layers='core'): """Does this unit represent an attribute/property?""" return self.typeOf(Unit.GetUnit("rdf:Property"), layers=layers) def isEnumeration(self, layers='core'): """Does this unit represent an enumerated type?""" return self.subClassOf(Unit.GetUnit("Enumeration"), layers=layers) def isEnumerationValue(self, layers='core'): """Does this unit represent a member of an enumerated type?""" types = GetTargets(Unit.GetUnit("typeOf"), self , layers=layers) log.debug("isEnumerationValue() called on %s, found %s types. 
layers: %s" % (self.id, str( len( types ) ), layers ) ) found_enum = False for t in types: if t.subClassOf(Unit.GetUnit("Enumeration"), layers=layers): found_enum = True return found_enum def isDataType(self, layers='core'): """ Does this unit represent a DataType type or sub-type? DataType and its children do not descend from Thing, so we need to treat it specially. """ if (self.directInstanceOf(Unit.GetUnit("DataType"), layers=layers)): return True subs = GetTargets(Unit.GetUnit("typeOf"), self, layers=layers) subs += GetTargets(Unit.GetUnit("rdfs:subClassOf"), self, layers=layers) for p in subs: if p.isDataType(layers=layers): return True return False @staticmethod def storePrefix(prefix): """Stores the prefix declaration for a given class or property""" # Currently defined just to let the tests pass pass # e.g. <http://schema.org/actors> <http://schema.org/supersededBy> <http://schema.org/actor> . def superseded(self, layers='core'): """Has this property been superseded? (i.e. deprecated/archaic), in any of these layers.""" supersededBy_values = GetTargets( Unit.GetUnit("supersededBy"), self, layers ) return ( len(supersededBy_values) > 0) def supersedes(self, layers='core'): """Returns a property (assume max 1) that is supersededBy this one, or nothing.""" olderterms = GetSources( Unit.GetUnit("supersededBy"), self, layers ) if len(olderterms) > 0: return olderterms[0] else: return None def supersedes_all(self, layers='core'): """Returns terms that is supersededBy by this later one, or nothing. (in this layer)""" return(GetSources( Unit.GetUnit("supersededBy"), self, layers )) # so we want sources of arcs pointing here with 'supersededBy' # e.g. vendor supersededBy seller ; returns newer 'seller' for earlier 'vendor'. 
def supersededBy(self, layers='core'): """Returns a property (assume max 1) that supersededs this one, or nothing.""" newerterms = GetTargets( Unit.GetUnit("supersededBy"), self, layers ) if len(newerterms)>0: return newerterms.pop() else: return None return ret def getHomeLayer(self,defaultToCore=False): ret = self.home if ret == None: if defaultToCore: ret = 'core' else: log.info("WARNING %s has no home extension defined!!" % self.id) ret = "" return ret def superproperties(self, layers='core'): """Returns super-properties of this one.""" if not self.isAttribute(layers=layers): logging.debug("Non-property %s won't have subproperties." % self.id) return None superprops = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers ) return superprops def subproperties(self, layers='core'): """Returns direct subproperties of this property.""" if not self.isAttribute(layers=layers): logging.debug("Non-property %s won't have subproperties." % self.id) return None subprops = GetSources(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers ) return subprops def inverseproperty(self, layers="core"): """A property that is an inverseOf this one, e.g. 
alumni vs alumniOf.""" a = GetTargets(Unit.GetUnit("inverseOf"), self, layers=layers) b = GetSources(Unit.GetUnit("inverseOf"), self, layers=layers) if len(a)>0: return a.pop() else: if len(b) > 0: return b.pop() else: return None for triple in self.arcsOut: if (triple.target != None and triple.arc.id == "inverseOf"): return triple.target for triple in self.arcsIn: if (triple.source != None and triple.arc.id == "inverseOf"): return triple.source return None def UsageStr (self) : str = self.usage if (str == '1') : return "Between 10 and 100 domains" elif (str == '2'): return "Between 100 and 1000 domains" elif (str == '3'): return "Between 1000 and 10,000 domains" elif (str == '4'): return "Between 10,000 and 50,000 domains" elif (str == '5'): return "Between 50,000 and 100,000 domains" elif (str == '7'): return "Between 100,000 and 250,000 domains" elif (str == '8'): return "Between 250,000 and 500,000 domains" elif (str == '9'): return "Between 500,000 and 1,000,000 domains" elif (str == '10'): return "Over 1,000,000 domains" else: return "Fewer than 10 domains" # NOTE: each Triple is in exactly one layer, by default 'core'. When we # read_schemas() from data/ext/{x}/*.rdfa each schema triple is given a # layer named "x". Access to triples can default to layer="core" or take # a custom layer or layers, e.g. layers="bib", or layers=["bib", "foo"]. # This is verbose but at least explicit. If we move towards making better # use of external templates for site generation we could reorganize. # For now e.g. 'grep GetSources api.py| grep -v layer' and # 'grep GetTargets api.py| grep -v layer' etc. can check for non-layered usage. # # Units, on the other hand, are layer-independent. For now we have only a # crude inLayer(layerlist, unit) API to check which layers mention a term. 
class Triple():
    """Triple represents an edge in the graph: source, arc and target/text."""

    def __init__(self, source, arc, target, text, layer='core'):
        """Triple constructor keeps state via source node's arcsOut.

        Exactly one of target (a node) or text (a string) should be given;
        the other attribute is set to None.
        """
        self.source = source
        source.arcsOut.append(self)
        self.arc = arc
        self.layer = layer
        if (target != None):
            self.target = target
            self.text = None
            target.arcsIn.append(self)
        elif (text != None):
            self.text = text
            self.target = None

    def __str__(self):
        ret = ""
        if self.source != None:
            ret += "%s " % self.source
        if self.target != None:
            ret += "%s " % self.target
        if self.arc != None:
            ret += "%s " % self.arc
        return ret

    @staticmethod
    def AddTriple(source, arc, target, layer='core'):
        """AddTriple stores a thing-valued new Triple within source Unit."""
        if (source == None or arc == None or target == None):
            return
        # For any term mentioned as subject or object, we register the layer.
        # (Same bookkeeping for both ends; the original duplicated the code.)
        for unit in (source, target):
            seen_layers = all_terms.get(unit.id)
            if seen_layers is None:
                seen_layers = []
            if layer not in seen_layers:
                seen_layers.append(layer)
            all_terms[unit.id] = seen_layers
        return Triple(source, arc, target, None, layer)

    @staticmethod
    def AddTripleText(source, arc, text, layer='core'):
        """AddTripleText stores a string-valued new Triple within source Unit."""
        if (source == None or arc == None or text == None):
            return
        return Triple(source, arc, None, text, layer)


def GetTargets(arc, source, layers='core'):
    """All values for a specified arc on specified graph node (within any of the specified layers)."""
    # log.debug("GetTargets checking in layer: %s for unit: %s arc: %s" % (layers, source.id, arc.id))
    targets = {}
    for triple in source.arcsOut:
        if (triple.arc == arc):
            if (triple.target != None and triple.layer in layers):
                targets[triple.target] = 1
            elif (triple.text != None and triple.layer in layers):
                targets[triple.text] = 1
    # Return a real list: callers index, sort and concatenate the result,
    # which Python 3 dict views do not support.
    return list(targets.keys())


def GetSources(arc, target, layers='core'):
    """All source nodes for a specified arc pointing to a specified node (within any of the specified layers)."""
    # log.debug("GetSources checking in layer: %s for unit: %s arc: %s" % (layers, target.id, arc.id))
    sources = {}
    for triple in target.arcsIn:
        if (triple.arc == arc and triple.layer in layers):
            sources[triple.source] = 1
    return list(sources.keys())


def GetArcsIn(target, layers='core'):
    """All incoming arc types for this specified node (within any of the specified layers)."""
    arcs = {}
    for triple in target.arcsIn:
        if triple.layer in layers:
            arcs[triple.arc] = 1
    return list(arcs.keys())


def GetArcsOut(source, layers='core'):
    """All outgoing arc types for this specified node."""
    arcs = {}
    for triple in source.arcsOut:
        if triple.layer in layers:
            arcs[triple.arc] = 1
    return list(arcs.keys())


# Utility API

def GetComment(node, layers='core'):
    """Get the first rdfs:comment we find on this node (or "No comment"), within any of the specified layers."""
    tx = GetTargets(Unit.GetUnit("rdfs:comment"), node, layers=layers)
    if len(tx) > 0:
        return tx[0]
    return "No comment"


def GetImmediateSubtypes(n, layers='core'):
    """Get this type's immediate subtypes, i.e. that are subClassOf this."""
    if n is None:
        return None
    subs = GetSources(Unit.GetUnit("rdfs:subClassOf"), n, layers=layers)
    if (n.isDataType() or n.id == "DataType"):
        # DataType members are typed rather than subclassed; include them too.
        subs += GetSources(Unit.GetUnit("typeOf"), n, layers=layers)
    subs.sort(key=lambda x: x.id)
    return subs


def GetImmediateSupertypes(n, layers='core'):
    """Get this type's immediate supertypes, i.e. that we are subClassOf."""
    if n is None:
        return None
    sups = GetTargets(Unit.GetUnit("rdfs:subClassOf"), n, layers=layers)
    if (n.isDataType() or n.id == "DataType"):
        sups += GetTargets(Unit.GetUnit("typeOf"), n, layers=layers)
    sups.sort(key=lambda x: x.id)
    return sups


def GetAllTypes(layers='core'):
    """Return all types in the graph (cached after first computation)."""
    if DataCache.get('AllTypes'):
        logging.debug("DataCache HIT: Alltypes")
        return DataCache.get('AllTypes')
    logging.debug("DataCache MISS: Alltypes")
    mynode = Unit.GetUnit("Thing")
    subbed = {}
    todo = [mynode]
    while todo:
        current = todo.pop()
        subs = GetImmediateSubtypes(current, layers=layers)
        subbed[current] = 1
        for sc in subs:
            # BUG FIX: the original tested subbed.get(sc.id), but the dict is
            # keyed by Unit objects, so the guard never matched and visited
            # nodes were re-queued. Test membership of the unit itself.
            if sc not in subbed:
                todo.append(sc)
    alltypes = list(subbed.keys())
    DataCache.put('AllTypes', alltypes)
    return alltypes


def GetAllProperties(layers='core'):
    """Return all properties in the graph (cached after first computation)."""
    if DataCache.get('AllProperties'):
        logging.debug("DataCache HIT: AllProperties")
        return DataCache.get('AllProperties')
    logging.debug("DataCache MISS: AllProperties")
    sorted_all_properties = sorted(GetSources(Unit.GetUnit("typeOf"), Unit.GetUnit("rdf:Property"), layers=layers), key=lambda u: u.id)
    DataCache.put('AllProperties', sorted_all_properties)
    return sorted_all_properties


def GetParentList(start_unit, end_unit=None, path=None, layers='core'):
    """
    Returns one or more lists, each giving a path from a start unit to a
    supertype parent unit.

    example:

    for path in GetParentList( Unit.GetUnit("Restaurant") ):
        pprint.pprint(', '.join([str(x.id) for x in path ]))

    'Restaurant, FoodEstablishment, LocalBusiness, Organization, Thing'
    'Restaurant, FoodEstablishment, LocalBusiness, Place, Thing'
    """
    # BUG FIX: default was a mutable list literal; use None and create a
    # fresh list per call (the body never mutated it, but the shared default
    # was an accident waiting to happen).
    if path is None:
        path = []
    if not end_unit:
        end_unit = Unit.GetUnit("Thing")

    arc = Unit.GetUnit("rdfs:subClassOf")
    logging.debug("from %s to %s - path length %d" % (start_unit.id, end_unit.id, len(path)))
    path = path + [start_unit]
    if start_unit == end_unit:
        return [path]
    if not Unit.GetUnit(start_unit.id):
        return []
    paths = []
    for node in GetTargets(arc, start_unit, layers=layers):
        if node not in path:
            newpaths = GetParentList(node, end_unit, path, layers=layers)
            for newpath in newpaths:
                paths.append(newpath)
    return paths


def HasMultipleBaseTypes(typenode, layers='core'):
    """True if this unit represents a type with more than one immediate supertype."""
    return len(GetTargets(Unit.GetUnit("rdfs:subClassOf"), typenode, layers)) > 1


class Example():

    @staticmethod
    def AddExample(terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
        """
        Add an Example (via constructor registering it with the terms that it
        mentions, i.e. stored in term.examples).
        """
        # todo: fix partial examples: if (len(terms) > 0 and len(original_html) > 0 and (len(microdata) > 0 or len(rdfa) > 0 or len(jsonld) > 0)):
        typeinfo = "".join([" %s " % t.id for t in terms])
        if "FakeEntryNeeded" in typeinfo or terms == []:
            return
        if (len(terms) > 0 and len(original_html) > 0 and len(microdata) > 0 and len(rdfa) > 0 and len(jsonld) > 0):
            # BUG FIX: propagate the caller's layer; the original ignored the
            # layer argument and hard-coded 'core'.
            return Example(terms, original_html, microdata, rdfa, jsonld, egmeta, layer=layer)
        else:
            log.info("API AddExample skipped a case due to missing value(s) in example. Target terms: %s ORIG: %s MICRODATA: %s RDFA: %s JSON: %s EGMETA: %s " % (typeinfo, original_html, microdata, rdfa, jsonld, egmeta))

    def get(self, name, layers='core'):
        """Exposes original_content, microdata, rdfa and jsonld versions (in the layer(s) specified)."""
        if name == 'original_html':
            return self.original_html
        if name == 'microdata':
            return self.microdata
        if name == 'rdfa':
            return self.rdfa
        if name == 'jsonld':
            return self.jsonld

    def __init__(self, terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
        """Example constructor, registers itself with the relevant Unit(s)."""
        self.terms = terms
        self.original_html = original_html
        self.microdata = microdata
        self.rdfa = rdfa
        self.jsonld = jsonld
        self.egmeta = egmeta
        self.layer = layer
        for term in terms:
            if "id" in egmeta:
                logging.debug("Created Example with ID %s and type %s" % (egmeta["id"], term.id))
            term.examples.append(self)


def GetExamples(node, layers='core'):
    """Returns the examples (if any) for some Unit node."""
    return node.examples


def GetExtMappingsRDFa(node, layers='core'):
    """Self-contained chunk of RDFa HTML markup with mappings for this term."""
    if (node.isClass()):
        equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
        if len(equivs) > 0:
            markup = ''
            for c in equivs:
                # Absolute URIs go in href=, local terms in resource=.
                if (c.id.startswith('http')):
                    markup = markup + "<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id
                else:
                    markup = markup + "<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id
            return markup
    if (node.isAttribute()):
        equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
        if len(equivs) > 0:
            markup = ''
            for c in equivs:
                markup = markup + "<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
            return markup
    return "<!-- no external mappings noted for this term. -->"


def GetJsonLdContext(layers='core'):
    """Generates a basic JSON-LD context file for schema.org."""
    # Caching assumes the context is neutral w.r.t. our hostname.
    if DataCache.get('JSONLDCONTEXT'):
        log.debug("DataCache: recycled JSONLDCONTEXT")
        return DataCache.get('JSONLDCONTEXT')

    global namespaces
    jsonldcontext = "{\"@context\": {\n"
    jsonldcontext += namespaces
    jsonldcontext += " \"@vocab\": \"http://schema.org/\",\n"

    url = Unit.GetUnit("URL")
    date = Unit.GetUnit("Date")
    datetime_unit = Unit.GetUnit("DateTime")

    properties = sorted(GetSources(Unit.GetUnit("typeOf"), Unit.GetUnit("rdf:Property"), layers=layers), key=lambda u: u.id)
    for p in properties:
        # Renamed locals: the originals shadowed the builtins range/type and
        # the datetime module.
        prop_ranges = GetTargets(Unit.GetUnit("rangeIncludes"), p, layers=layers)
        range_type = None
        if url in prop_ranges:
            range_type = "@id"
        elif date in prop_ranges:
            range_type = "Date"
        elif datetime_unit in prop_ranges:
            range_type = "DateTime"
        if range_type:
            jsonldcontext += " \"" + p.id + "\": { \"@type\": \"" + range_type + "\" },"

    jsonldcontext += "}}\n"
    jsonldcontext = jsonldcontext.replace("},}}", "}\n }\n}")
    jsonldcontext = jsonldcontext.replace("},", "},\n")
    DataCache.put('JSONLDCONTEXT', jsonldcontext)
    log.debug("DataCache: added JSONLDCONTEXT")
    return jsonldcontext


#### UTILITIES

def inLayer(layerlist, node):
    """Does a unit get its type mentioned in a layer?"""
    if (node is None):
        return False
    log.debug("Looking in %s for %s" % (layerlist, node.id))
    if len(GetTargets(Unit.GetUnit("typeOf"), node, layers=layerlist)) > 0:
        log.debug("Found typeOf for node %s in layers: %s" % (node.id, layerlist))
        return True
    if len(GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layerlist)) > 0:
        log.info("Found rdfs:subClassOf")
        # TODO: should we really test for any mention of a term, not just typing?
        return True
    log.debug("inLayer: Failed to find in %s for %s" % (layerlist, node.id))
    return False


def read_file(filename):
    """Read a file from disk, return it as a single string."""
    import codecs
    file_path = full_path(filename)
    log.debug("READING FILE: filename=%s file_path=%s " % (filename, file_path))
    # BUG FIX: close the handle; the original left the file open.
    with codecs.open(file_path, 'r', encoding="utf8") as f:
        return f.read()


def full_path(filename):
    """convert local file name to full path."""
    import os.path
    folder = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(folder, filename)


def setHomeValues(items, layer='core', defaultToCore=False):
    """Assign each node's home extension from its isPartOf triple (or 'core')."""
    import re
    global extensionLoadErrors

    for node in items:
        if node is None:
            continue
        home = GetTargets(Unit.GetUnit("isPartOf"), node, layer)
        if len(home) > 0:
            if node.home is not None:
                msg = "ERROR: %s trying to overwrite home from %s to %s" % (node.id, node.home, home[0].id)
                log.info(msg)
                extensionLoadErrors += msg + '\n'
            else:
                h = home[0].id.strip()
                if h.startswith("http://"):
                    h = h[7:]
                # First hostname-ish component, e.g. "bib.schema.org" -> "bib".
                node.home = re.match(r'([\w\-_]+)[\.:]?', h).group(1)
                if node.home == 'schema':
                    node.home = 'core'
        elif node.home is None:
            if defaultToCore:
                node.home = "core"
            else:
                msg = "ERROR: %s has no home defined" % (node.id)
                log.info(msg)
                extensionLoadErrors += msg + '\n'


def read_schemas(loadExtensions=False):
    """Read/parse/ingest schemas from data/*.rdfa. Also data/*examples.txt"""
    import glob

    global schemasInitialized
    if (not schemasInitialized or DYNALOAD):
        log.info("(re)loading core and annotations.")
        files = glob.glob("data/*.rdfa")
        file_paths = []
        for f in files:
            file_paths.append(full_path(f))
        parser = parsers.MakeParserOfType('rdfa', None)
        items = parser.parse(file_paths, "core")
        # set default home for those in core that do not have one
        setHomeValues(items, "core", True)

        files = glob.glob("data/*examples.txt")
        read_examples(files)

        files = glob.glob("data/2015-04-vocab_counts.txt")
        for fname in files:
            usage_data = read_file(fname)
            parser = parsers.UsageFileParser(None)
            parser.parse(usage_data)
        schemasInitialized = True


def read_extensions(extensions):
    """Read/parse/ingest per-extension schemas and examples from data/ext/*."""
    import glob
    import re

    global extensionsLoaded
    extfiles = []
    expfiles = []
    if not extensionsLoaded:  # 2nd load will throw up errors and duplicate terms
        log.info("(re)scanning for extensions.")
        for i in extensions:
            extfiles += glob.glob("data/ext/%s/*.rdfa" % i)
            expfiles += glob.glob("data/ext/%s/*examples.txt" % i)
        log.info("Extensions found: %s ." % " , ".join(extfiles))
        # Strip everything from the first slash to recover the extension id.
        fnstrip_re = re.compile(r"\/.*")
        for ext in extfiles:
            ext_file_path = full_path(ext)
            extid = ext.replace('data/ext/', '')
            extid = re.sub(fnstrip_re, '', extid)
            log.info("Preparing to parse extension data: %s as '%s'" % (ext_file_path, "%s" % extid))
            parser = parsers.MakeParserOfType('rdfa', None)
            all_layers[extid] = "1"
            extitems = parser.parse([ext_file_path], layer="%s" % extid)  # put schema triples in a layer
            setHomeValues(extitems, extid, False)

        read_examples(expfiles)
    extensionsLoaded = True


def read_examples(files):
    """Read example files and hand their contents to the example parser."""
    example_contents = []
    for f in files:
        example_content = read_file(f)
        example_contents.append(example_content)
        log.debug("examples loaded from: %s" % f)
    parser = parsers.ParseExampleFile(None)
    parser.parse(example_contents)